1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992-2017 Free Software Foundation, Inc.
3 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "memmodel.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "df.h"
30 #include "tm_p.h"
31 #include "stringpool.h"
32 #include "attribs.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "diagnostic-core.h"
38 #include "insn-attr.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "varasm.h"
43 #include "calls.h"
44 #include "output.h"
45 #include "except.h"
46 #include "explow.h"
47 #include "expr.h"
48 #include "reload.h"
49 #include "common/common-target.h"
50 #include "langhooks.h"
51 #include "cfgrtl.h"
52 #include "opts.h"
53 #include "builtins.h"
54
55 /* This file should be included last. */
56 #include "target-def.h"
57
58 /* Return nonzero if there is a bypass for the output of
59 OUT_INSN and the fp store IN_INSN. */
60 int
61 pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
62 {
63 machine_mode store_mode;
64 machine_mode other_mode;
65 rtx set;
66
67 if (recog_memoized (in_insn) < 0
68 || (get_attr_type (in_insn) != TYPE_FPSTORE
69 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
70 || recog_memoized (out_insn) < 0)
71 return 0;
72
73 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
74
75 set = single_set (out_insn);
76 if (!set)
77 return 0;
78
79 other_mode = GET_MODE (SET_SRC (set));
80
81 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
82 }
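/* Illustration of the size test above: an fmpy whose DFmode (8-byte)
   result feeds a DFmode fpstore has matching sizes, so the bypass
   applies; an SFmode (4-byte) result feeding a DFmode store would be
   rejected.  */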
83
84
85 #ifndef DO_FRAME_NOTES
86 #ifdef INCOMING_RETURN_ADDR_RTX
87 #define DO_FRAME_NOTES 1
88 #else
89 #define DO_FRAME_NOTES 0
90 #endif
91 #endif
92
93 static void pa_option_override (void);
94 static void copy_reg_pointer (rtx, rtx);
95 static void fix_range (const char *);
96 static int hppa_register_move_cost (machine_mode mode, reg_class_t,
97 reg_class_t);
98 static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
99 static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
100 static inline rtx force_mode (machine_mode, rtx);
101 static void pa_reorg (void);
102 static void pa_combine_instructions (void);
103 static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
104 rtx, rtx);
105 static bool forward_branch_p (rtx_insn *);
106 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
107 static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
108 static int compute_movmem_length (rtx_insn *);
109 static int compute_clrmem_length (rtx_insn *);
110 static bool pa_assemble_integer (rtx, unsigned int, int);
111 static void remove_useless_addtr_insns (int);
112 static void store_reg (int, HOST_WIDE_INT, int);
113 static void store_reg_modify (int, int, HOST_WIDE_INT);
114 static void load_reg (int, HOST_WIDE_INT, int);
115 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
116 static rtx pa_function_value (const_tree, const_tree, bool);
117 static rtx pa_libcall_value (machine_mode, const_rtx);
118 static bool pa_function_value_regno_p (const unsigned int);
119 static void pa_output_function_prologue (FILE *);
120 static void update_total_code_bytes (unsigned int);
121 static void pa_output_function_epilogue (FILE *);
122 static int pa_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
123 static int pa_adjust_priority (rtx_insn *, int);
124 static int pa_issue_rate (void);
125 static int pa_reloc_rw_mask (void);
126 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
127 static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
128 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
129 ATTRIBUTE_UNUSED;
130 static void pa_encode_section_info (tree, rtx, int);
131 static const char *pa_strip_name_encoding (const char *);
132 static bool pa_function_ok_for_sibcall (tree, tree);
133 static void pa_globalize_label (FILE *, const char *)
134 ATTRIBUTE_UNUSED;
135 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
136 HOST_WIDE_INT, tree);
137 #if !defined(USE_COLLECT2)
138 static void pa_asm_out_constructor (rtx, int);
139 static void pa_asm_out_destructor (rtx, int);
140 #endif
141 static void pa_init_builtins (void);
142 static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
143 static rtx hppa_builtin_saveregs (void);
144 static void hppa_va_start (tree, rtx);
145 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
146 static bool pa_scalar_mode_supported_p (scalar_mode);
147 static bool pa_commutative_p (const_rtx x, int outer_code);
148 static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
149 static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
150 static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
151 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
152 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
153 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
154 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
155 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
156 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
157 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
158 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
159 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
160 static void output_deferred_plabels (void);
161 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
162 #ifdef ASM_OUTPUT_EXTERNAL_REAL
163 static void pa_hpux_file_end (void);
164 #endif
165 static void pa_init_libfuncs (void);
166 static rtx pa_struct_value_rtx (tree, int);
167 static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
168 const_tree, bool);
169 static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
170 tree, bool);
171 static void pa_function_arg_advance (cumulative_args_t, machine_mode,
172 const_tree, bool);
173 static rtx pa_function_arg (cumulative_args_t, machine_mode,
174 const_tree, bool);
175 static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
176 static struct machine_function * pa_init_machine_status (void);
177 static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
178 machine_mode,
179 secondary_reload_info *);
180 static void pa_extra_live_on_entry (bitmap);
181 static machine_mode pa_promote_function_mode (const_tree,
182 machine_mode, int *,
183 const_tree, int);
184
185 static void pa_asm_trampoline_template (FILE *);
186 static void pa_trampoline_init (rtx, tree, rtx);
187 static rtx pa_trampoline_adjust_address (rtx);
188 static rtx pa_delegitimize_address (rtx);
189 static bool pa_print_operand_punct_valid_p (unsigned char);
190 static rtx pa_internal_arg_pointer (void);
191 static bool pa_can_eliminate (const int, const int);
192 static void pa_conditional_register_usage (void);
193 static machine_mode pa_c_mode_for_suffix (char);
194 static section *pa_function_section (tree, enum node_frequency, bool, bool);
195 static bool pa_cannot_force_const_mem (machine_mode, rtx);
196 static bool pa_legitimate_constant_p (machine_mode, rtx);
197 static unsigned int pa_section_type_flags (tree, const char *, int);
198 static bool pa_legitimate_address_p (machine_mode, rtx, bool);
199 static bool pa_callee_copies (cumulative_args_t, machine_mode,
200 const_tree, bool);
201
202 /* The following extra sections are only used for SOM. */
203 static GTY(()) section *som_readonly_data_section;
204 static GTY(()) section *som_one_only_readonly_data_section;
205 static GTY(()) section *som_one_only_data_section;
206 static GTY(()) section *som_tm_clone_table_section;
207
    208 /* Counts of the callee-saved general and floating point registers
    209    saved by the current function's prologue.  */
210 static int gr_saved, fr_saved;
211
212 /* Boolean indicating whether the return pointer was saved by the
213 current function's prologue. */
214 static bool rp_saved;
215
216 static rtx find_addr_reg (rtx);
217
218 /* Keep track of the number of bytes we have output in the CODE subspace
219 during this compilation so we'll know when to emit inline long-calls. */
220 unsigned long total_code_bytes;
221
222 /* The last address of the previous function plus the number of bytes in
223 associated thunks that have been output. This is used to determine if
224 a thunk can use an IA-relative branch to reach its target function. */
225 static unsigned int last_address;
226
227 /* Variables to handle plabels that we discover are necessary at assembly
228 output time. They are output after the current function. */
229 struct GTY(()) deferred_plabel
230 {
231 rtx internal_label;
232 rtx symbol;
233 };
234 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
235 deferred_plabels;
236 static size_t n_deferred_plabels = 0;
237 \f
238 /* Initialize the GCC target structure. */
239
240 #undef TARGET_OPTION_OVERRIDE
241 #define TARGET_OPTION_OVERRIDE pa_option_override
242
243 #undef TARGET_ASM_ALIGNED_HI_OP
244 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
245 #undef TARGET_ASM_ALIGNED_SI_OP
246 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
247 #undef TARGET_ASM_ALIGNED_DI_OP
248 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
249 #undef TARGET_ASM_UNALIGNED_HI_OP
250 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
251 #undef TARGET_ASM_UNALIGNED_SI_OP
252 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
253 #undef TARGET_ASM_UNALIGNED_DI_OP
254 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
255 #undef TARGET_ASM_INTEGER
256 #define TARGET_ASM_INTEGER pa_assemble_integer
257
258 #undef TARGET_ASM_FUNCTION_PROLOGUE
259 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
260 #undef TARGET_ASM_FUNCTION_EPILOGUE
261 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
262
263 #undef TARGET_FUNCTION_VALUE
264 #define TARGET_FUNCTION_VALUE pa_function_value
265 #undef TARGET_LIBCALL_VALUE
266 #define TARGET_LIBCALL_VALUE pa_libcall_value
267 #undef TARGET_FUNCTION_VALUE_REGNO_P
268 #define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p
269
270 #undef TARGET_LEGITIMIZE_ADDRESS
271 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
272
273 #undef TARGET_SCHED_ADJUST_COST
274 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
275 #undef TARGET_SCHED_ADJUST_PRIORITY
276 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
277 #undef TARGET_SCHED_ISSUE_RATE
278 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
279
280 #undef TARGET_ENCODE_SECTION_INFO
281 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
282 #undef TARGET_STRIP_NAME_ENCODING
283 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
284
285 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
286 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
287
288 #undef TARGET_COMMUTATIVE_P
289 #define TARGET_COMMUTATIVE_P pa_commutative_p
290
291 #undef TARGET_ASM_OUTPUT_MI_THUNK
292 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
293 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
294 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
295
296 #undef TARGET_ASM_FILE_END
297 #ifdef ASM_OUTPUT_EXTERNAL_REAL
298 #define TARGET_ASM_FILE_END pa_hpux_file_end
299 #else
300 #define TARGET_ASM_FILE_END output_deferred_plabels
301 #endif
302
303 #undef TARGET_ASM_RELOC_RW_MASK
304 #define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask
305
306 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
307 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p
308
309 #if !defined(USE_COLLECT2)
310 #undef TARGET_ASM_CONSTRUCTOR
311 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
312 #undef TARGET_ASM_DESTRUCTOR
313 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
314 #endif
315
316 #undef TARGET_INIT_BUILTINS
317 #define TARGET_INIT_BUILTINS pa_init_builtins
318
319 #undef TARGET_EXPAND_BUILTIN
320 #define TARGET_EXPAND_BUILTIN pa_expand_builtin
321
322 #undef TARGET_REGISTER_MOVE_COST
323 #define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
324 #undef TARGET_RTX_COSTS
325 #define TARGET_RTX_COSTS hppa_rtx_costs
326 #undef TARGET_ADDRESS_COST
327 #define TARGET_ADDRESS_COST hppa_address_cost
328
329 #undef TARGET_MACHINE_DEPENDENT_REORG
330 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
331
332 #undef TARGET_INIT_LIBFUNCS
333 #define TARGET_INIT_LIBFUNCS pa_init_libfuncs
334
335 #undef TARGET_PROMOTE_FUNCTION_MODE
336 #define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
337 #undef TARGET_PROMOTE_PROTOTYPES
338 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
339
340 #undef TARGET_STRUCT_VALUE_RTX
341 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
342 #undef TARGET_RETURN_IN_MEMORY
343 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
344 #undef TARGET_MUST_PASS_IN_STACK
345 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
346 #undef TARGET_PASS_BY_REFERENCE
347 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
348 #undef TARGET_CALLEE_COPIES
349 #define TARGET_CALLEE_COPIES pa_callee_copies
350 #undef TARGET_ARG_PARTIAL_BYTES
351 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
352 #undef TARGET_FUNCTION_ARG
353 #define TARGET_FUNCTION_ARG pa_function_arg
354 #undef TARGET_FUNCTION_ARG_ADVANCE
355 #define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
356 #undef TARGET_FUNCTION_ARG_BOUNDARY
357 #define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary
358
359 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
360 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
361 #undef TARGET_EXPAND_BUILTIN_VA_START
362 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
363 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
364 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
365
366 #undef TARGET_SCALAR_MODE_SUPPORTED_P
367 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
368
369 #undef TARGET_CANNOT_FORCE_CONST_MEM
370 #define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem
371
372 #undef TARGET_SECONDARY_RELOAD
373 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
374
375 #undef TARGET_EXTRA_LIVE_ON_ENTRY
376 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
377
378 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
379 #define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
380 #undef TARGET_TRAMPOLINE_INIT
381 #define TARGET_TRAMPOLINE_INIT pa_trampoline_init
382 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
383 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
384 #undef TARGET_DELEGITIMIZE_ADDRESS
385 #define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
386 #undef TARGET_INTERNAL_ARG_POINTER
387 #define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
388 #undef TARGET_CAN_ELIMINATE
389 #define TARGET_CAN_ELIMINATE pa_can_eliminate
390 #undef TARGET_CONDITIONAL_REGISTER_USAGE
391 #define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
392 #undef TARGET_C_MODE_FOR_SUFFIX
393 #define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
394 #undef TARGET_ASM_FUNCTION_SECTION
395 #define TARGET_ASM_FUNCTION_SECTION pa_function_section
396
397 #undef TARGET_LEGITIMATE_CONSTANT_P
398 #define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
399 #undef TARGET_SECTION_TYPE_FLAGS
400 #define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
401 #undef TARGET_LEGITIMATE_ADDRESS_P
402 #define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p
403
404 #undef TARGET_LRA_P
405 #define TARGET_LRA_P hook_bool_void_false
406
407 struct gcc_target targetm = TARGET_INITIALIZER;
408 \f
409 /* Parse the -mfixed-range= option string. */
410
411 static void
412 fix_range (const char *const_str)
413 {
414 int i, first, last;
415 char *str, *dash, *comma;
416
    417   /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
418 REG2 are either register names or register numbers. The effect
419 of this option is to mark the registers in the range from REG1 to
420 REG2 as ``fixed'' so they won't be used by the compiler. This is
421 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
422
423 i = strlen (const_str);
424 str = (char *) alloca (i + 1);
425 memcpy (str, const_str, i + 1);
426
427 while (1)
428 {
429 dash = strchr (str, '-');
430 if (!dash)
431 {
432 warning (0, "value of -mfixed-range must have form REG1-REG2");
433 return;
434 }
435 *dash = '\0';
436
437 comma = strchr (dash + 1, ',');
438 if (comma)
439 *comma = '\0';
440
441 first = decode_reg_name (str);
442 if (first < 0)
443 {
444 warning (0, "unknown register name: %s", str);
445 return;
446 }
447
448 last = decode_reg_name (dash + 1);
449 if (last < 0)
450 {
451 warning (0, "unknown register name: %s", dash + 1);
452 return;
453 }
454
455 *dash = '-';
456
457 if (first > last)
458 {
459 warning (0, "%s-%s is an empty range", str, dash + 1);
460 return;
461 }
462
463 for (i = first; i <= last; ++i)
464 fixed_regs[i] = call_used_regs[i] = 1;
465
466 if (!comma)
467 break;
468
469 *comma = ',';
470 str = comma + 1;
471 }
472
473 /* Check if all floating point registers have been fixed. */
474 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
475 if (!fixed_regs[i])
476 break;
477
478 if (i > FP_REG_LAST)
479 target_flags |= MASK_DISABLE_FPREGS;
480 }
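/* For illustration, an invocation such as

     -mfixed-range=%fr12-%fr15

   (spelling the registers as in reg_names[], which decode_reg_name
   matches against) marks those FP registers as fixed.  If every
   register in FP_REG_FIRST..FP_REG_LAST ends up fixed, the final loop
   above turns on MASK_DISABLE_FPREGS as well.  */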
481
482 /* Implement the TARGET_OPTION_OVERRIDE hook. */
483
484 static void
485 pa_option_override (void)
486 {
487 unsigned int i;
488 cl_deferred_option *opt;
489 vec<cl_deferred_option> *v
490 = (vec<cl_deferred_option> *) pa_deferred_options;
491
492 if (v)
493 FOR_EACH_VEC_ELT (*v, i, opt)
494 {
495 switch (opt->opt_index)
496 {
497 case OPT_mfixed_range_:
498 fix_range (opt->arg);
499 break;
500
501 default:
502 gcc_unreachable ();
503 }
504 }
505
506 if (flag_pic && TARGET_PORTABLE_RUNTIME)
507 {
508 warning (0, "PIC code generation is not supported in the portable runtime model");
509 }
510
511 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
512 {
513 warning (0, "PIC code generation is not compatible with fast indirect calls");
514 }
515
516 if (! TARGET_GAS && write_symbols != NO_DEBUG)
517 {
518 warning (0, "-g is only supported when using GAS on this processor,");
519 warning (0, "-g option disabled");
520 write_symbols = NO_DEBUG;
521 }
522
523 /* We only support the "big PIC" model now. And we always generate PIC
    524    code when in 64-bit mode.  */
525 if (flag_pic == 1 || TARGET_64BIT)
526 flag_pic = 2;
527
528 /* Disable -freorder-blocks-and-partition as we don't support hot and
529 cold partitioning. */
530 if (flag_reorder_blocks_and_partition)
531 {
532 inform (input_location,
533 "-freorder-blocks-and-partition does not work "
534 "on this architecture");
535 flag_reorder_blocks_and_partition = 0;
536 flag_reorder_blocks = 1;
537 }
538
539 /* We can't guarantee that .dword is available for 32-bit targets. */
540 if (UNITS_PER_WORD == 4)
541 targetm.asm_out.aligned_op.di = NULL;
542
543 /* The unaligned ops are only available when using GAS. */
544 if (!TARGET_GAS)
545 {
546 targetm.asm_out.unaligned_op.hi = NULL;
547 targetm.asm_out.unaligned_op.si = NULL;
548 targetm.asm_out.unaligned_op.di = NULL;
549 }
550
551 init_machine_status = pa_init_machine_status;
552 }
553
554 enum pa_builtins
555 {
556 PA_BUILTIN_COPYSIGNQ,
557 PA_BUILTIN_FABSQ,
558 PA_BUILTIN_INFQ,
559 PA_BUILTIN_HUGE_VALQ,
560 PA_BUILTIN_max
561 };
562
563 static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
564
565 static void
566 pa_init_builtins (void)
567 {
568 #ifdef DONT_HAVE_FPUTC_UNLOCKED
569 {
570 tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
571 set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
572 builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
573 }
574 #endif
575 #if TARGET_HPUX_11
576 {
577 tree decl;
578
579 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
580 set_user_assembler_name (decl, "_Isfinite");
581 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
582 set_user_assembler_name (decl, "_Isfinitef");
583 }
584 #endif
585
586 if (HPUX_LONG_DOUBLE_LIBRARY)
587 {
588 tree decl, ftype;
589
590 /* Under HPUX, the __float128 type is a synonym for "long double". */
591 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
592 "__float128");
593
594 /* TFmode support builtins. */
595 ftype = build_function_type_list (long_double_type_node,
596 long_double_type_node,
597 NULL_TREE);
598 decl = add_builtin_function ("__builtin_fabsq", ftype,
599 PA_BUILTIN_FABSQ, BUILT_IN_MD,
600 "_U_Qfabs", NULL_TREE);
601 TREE_READONLY (decl) = 1;
602 pa_builtins[PA_BUILTIN_FABSQ] = decl;
603
604 ftype = build_function_type_list (long_double_type_node,
605 long_double_type_node,
606 long_double_type_node,
607 NULL_TREE);
608 decl = add_builtin_function ("__builtin_copysignq", ftype,
609 PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
610 "_U_Qfcopysign", NULL_TREE);
611 TREE_READONLY (decl) = 1;
612 pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;
613
614 ftype = build_function_type_list (long_double_type_node, NULL_TREE);
615 decl = add_builtin_function ("__builtin_infq", ftype,
616 PA_BUILTIN_INFQ, BUILT_IN_MD,
617 NULL, NULL_TREE);
618 pa_builtins[PA_BUILTIN_INFQ] = decl;
619
620 decl = add_builtin_function ("__builtin_huge_valq", ftype,
621 PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
622 NULL, NULL_TREE);
623 pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
624 }
625 }
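/* A sketch of how the TFmode builtins registered above could be used
   from user code on HP-UX (hypothetical variables; __float128 is the
   synonym for "long double" registered above):

     __float128 x = __builtin_infq ();
     __float128 y = __builtin_fabsq (x);
     __float128 z = __builtin_copysignq (y, (__float128) -1.0);

   __builtin_fabsq and __builtin_copysignq expand to calls to _U_Qfabs
   and _U_Qfcopysign; __builtin_infq and __builtin_huge_valq expand
   inline to a TFmode infinity (see pa_expand_builtin below).  */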
626
627 static rtx
628 pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
629 machine_mode mode ATTRIBUTE_UNUSED,
630 int ignore ATTRIBUTE_UNUSED)
631 {
632 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
633 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
634
635 switch (fcode)
636 {
637 case PA_BUILTIN_FABSQ:
638 case PA_BUILTIN_COPYSIGNQ:
639 return expand_call (exp, target, ignore);
640
641 case PA_BUILTIN_INFQ:
642 case PA_BUILTIN_HUGE_VALQ:
643 {
644 machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
645 REAL_VALUE_TYPE inf;
646 rtx tmp;
647
648 real_inf (&inf);
649 tmp = const_double_from_real_value (inf, target_mode);
650
651 tmp = validize_mem (force_const_mem (target_mode, tmp));
652
653 if (target == 0)
654 target = gen_reg_rtx (target_mode);
655
656 emit_move_insn (target, tmp);
657 return target;
658 }
659
660 default:
661 gcc_unreachable ();
662 }
663
664 return NULL_RTX;
665 }
666
667 /* Function to init struct machine_function.
668 This will be called, via a pointer variable,
669 from push_function_context. */
670
671 static struct machine_function *
672 pa_init_machine_status (void)
673 {
674 return ggc_cleared_alloc<machine_function> ();
675 }
676
677 /* If FROM is a probable pointer register, mark TO as a probable
678 pointer register with the same pointer alignment as FROM. */
679
680 static void
681 copy_reg_pointer (rtx to, rtx from)
682 {
683 if (REG_POINTER (from))
684 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
685 }
686
687 /* Return 1 if X contains a symbolic expression. We know these
688 expressions will have one of a few well defined forms, so
689 we need only check those forms. */
690 int
691 pa_symbolic_expression_p (rtx x)
692 {
693
694 /* Strip off any HIGH. */
695 if (GET_CODE (x) == HIGH)
696 x = XEXP (x, 0);
697
698 return symbolic_operand (x, VOIDmode);
699 }
700
701 /* Accept any constant that can be moved in one instruction into a
702 general register. */
703 int
704 pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
705 {
706 /* OK if ldo, ldil, or zdepi, can be used. */
707 return (VAL_14_BITS_P (ival)
708 || pa_ldil_cint_p (ival)
709 || pa_zdepi_cint_p (ival));
710 }
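/* Examples, with values chosen for illustration: 0x1234 fits in 14
   bits, so ldo can materialize it; 0x12345000 has its low 11 bits
   clear, so ldil can (see pa_ldil_cint_p below); 0x00ff0000 is a
   contiguous run of ones, so zdepi can (see pa_zdepi_cint_p below).  */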
711 \f
712 /* True iff ldil can be used to load this CONST_INT. The least
713 significant 11 bits of the value must be zero and the value must
714 not change sign when extended from 32 to 64 bits. */
715 int
716 pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
717 {
718 unsigned HOST_WIDE_INT x;
719
720 x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
721 return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
722 }
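/* Worked example: ival == 0x12345000 has bits 0-10 clear and bits
   31-63 zero, so x == 0 and ldil applies.  ival == 0x12345001 leaves
   x == 1 (bit 0 survives the mask) and is rejected.  The sign-extended
   negative value 0xffffffff80001000 masks to 0xffffffff80000000, the
   other accepted result.  */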
723
724 /* True iff zdepi can be used to generate this CONST_INT.
725 zdepi first sign extends a 5-bit signed number to a given field
726 length, then places this field anywhere in a zero. */
727 int
728 pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
729 {
730 unsigned HOST_WIDE_INT lsb_mask, t;
731
732 /* This might not be obvious, but it's at least fast.
    733      This function is critical; we don't have the time that loops would take.  */
734 lsb_mask = x & -x;
735 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
736 /* Return true iff t is a power of two. */
737 return ((t & (t - 1)) == 0);
738 }
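/* Worked example: x == 0x00ff0000 (eight contiguous ones) gives
   lsb_mask == 0x10000 and t == 0x100000, a power of two, so it is
   accepted; zdepi can produce it by sign extending the 5-bit value -1
   to an 8-bit field of ones.  x == 0x210 has field bits 100001, gives
   t == 0x30, not a power of two, and is rejected: no sign-extended
   5-bit value yields that 6-bit field.  */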
739
740 /* True iff depi or extru can be used to compute (reg & mask).
741 Accept bit pattern like these:
742 0....01....1
743 1....10....0
744 1..10..01..1 */
745 int
746 pa_and_mask_p (unsigned HOST_WIDE_INT mask)
747 {
748 mask = ~mask;
749 mask += mask & -mask;
750 return (mask & (mask - 1)) == 0;
751 }
752
753 /* True iff depi can be used to compute (reg | MASK). */
754 int
755 pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
756 {
757 mask += mask & -mask;
758 return (mask & (mask - 1)) == 0;
759 }
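/* Worked example for pa_ior_mask_p: mask == 0x0f0 has
   mask & -mask == 0x10, and 0x0f0 + 0x10 == 0x100 is a power of two,
   so the single run of ones is accepted.  mask == 0x0f1 yields 0xf2,
   not a power of two, and is rejected.  pa_and_mask_p applies the
   same single-run test to ~mask, which also admits the wrap-around
   pattern 1..10..01..1.  */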
760 \f
761 /* Legitimize PIC addresses. If the address is already
762 position-independent, we return ORIG. Newly generated
763 position-independent addresses go to REG. If we need more
764 than one register, we lose. */
765
766 static rtx
767 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
768 {
769 rtx pic_ref = orig;
770
771 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
772
773 /* Labels need special handling. */
774 if (pic_label_operand (orig, mode))
775 {
776 rtx_insn *insn;
777
778 /* We do not want to go through the movXX expanders here since that
779 would create recursion.
780
781 Nor do we really want to call a generator for a named pattern
782 since that requires multiple patterns if we want to support
783 multiple word sizes.
784
785 So instead we just emit the raw set, which avoids the movXX
786 expanders completely. */
787 mark_reg_pointer (reg, BITS_PER_UNIT);
788 insn = emit_insn (gen_rtx_SET (reg, orig));
789
790 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
791 add_reg_note (insn, REG_EQUAL, orig);
792
793 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
794 and update LABEL_NUSES because this is not done automatically. */
795 if (reload_in_progress || reload_completed)
796 {
797 /* Extract LABEL_REF. */
798 if (GET_CODE (orig) == CONST)
799 orig = XEXP (XEXP (orig, 0), 0);
800 /* Extract CODE_LABEL. */
801 orig = XEXP (orig, 0);
802 add_reg_note (insn, REG_LABEL_OPERAND, orig);
    803 	  /* Make sure we have a label and not a note.  */
804 if (LABEL_P (orig))
805 LABEL_NUSES (orig)++;
806 }
807 crtl->uses_pic_offset_table = 1;
808 return reg;
809 }
810 if (GET_CODE (orig) == SYMBOL_REF)
811 {
812 rtx_insn *insn;
813 rtx tmp_reg;
814
815 gcc_assert (reg);
816
817 /* Before reload, allocate a temporary register for the intermediate
818 result. This allows the sequence to be deleted when the final
819 result is unused and the insns are trivially dead. */
820 tmp_reg = ((reload_in_progress || reload_completed)
821 ? reg : gen_reg_rtx (Pmode));
822
823 if (function_label_operand (orig, VOIDmode))
824 {
825 /* Force function label into memory in word mode. */
826 orig = XEXP (force_const_mem (word_mode, orig), 0);
827 /* Load plabel address from DLT. */
828 emit_move_insn (tmp_reg,
829 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
830 gen_rtx_HIGH (word_mode, orig)));
831 pic_ref
832 = gen_const_mem (Pmode,
833 gen_rtx_LO_SUM (Pmode, tmp_reg,
834 gen_rtx_UNSPEC (Pmode,
835 gen_rtvec (1, orig),
836 UNSPEC_DLTIND14R)));
837 emit_move_insn (reg, pic_ref);
838 /* Now load address of function descriptor. */
839 pic_ref = gen_rtx_MEM (Pmode, reg);
840 }
841 else
842 {
843 /* Load symbol reference from DLT. */
844 emit_move_insn (tmp_reg,
845 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
846 gen_rtx_HIGH (word_mode, orig)));
847 pic_ref
848 = gen_const_mem (Pmode,
849 gen_rtx_LO_SUM (Pmode, tmp_reg,
850 gen_rtx_UNSPEC (Pmode,
851 gen_rtvec (1, orig),
852 UNSPEC_DLTIND14R)));
853 }
854
855 crtl->uses_pic_offset_table = 1;
856 mark_reg_pointer (reg, BITS_PER_UNIT);
857 insn = emit_move_insn (reg, pic_ref);
858
859 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
860 set_unique_reg_note (insn, REG_EQUAL, orig);
861
862 return reg;
863 }
864 else if (GET_CODE (orig) == CONST)
865 {
866 rtx base;
867
868 if (GET_CODE (XEXP (orig, 0)) == PLUS
869 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
870 return orig;
871
872 gcc_assert (reg);
873 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
874
875 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
876 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
877 base == reg ? 0 : reg);
878
879 if (GET_CODE (orig) == CONST_INT)
880 {
881 if (INT_14_BITS (orig))
882 return plus_constant (Pmode, base, INTVAL (orig));
883 orig = force_reg (Pmode, orig);
884 }
885 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
886 /* Likewise, should we set special REG_NOTEs here? */
887 }
888
889 return pic_ref;
890 }
891
892 static GTY(()) rtx gen_tls_tga;
893
894 static rtx
895 gen_tls_get_addr (void)
896 {
897 if (!gen_tls_tga)
898 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
899 return gen_tls_tga;
900 }
901
902 static rtx
903 hppa_tls_call (rtx arg)
904 {
905 rtx ret;
906
907 ret = gen_reg_rtx (Pmode);
908 emit_library_call_value (gen_tls_get_addr (), ret,
909 LCT_CONST, Pmode, 1, arg, Pmode);
910
911 return ret;
912 }
913
914 static rtx
915 legitimize_tls_address (rtx addr)
916 {
917 rtx ret, tmp, t1, t2, tp;
918 rtx_insn *insn;
919
920 /* Currently, we can't handle anything but a SYMBOL_REF. */
921 if (GET_CODE (addr) != SYMBOL_REF)
922 return addr;
923
924 switch (SYMBOL_REF_TLS_MODEL (addr))
925 {
926 case TLS_MODEL_GLOBAL_DYNAMIC:
927 tmp = gen_reg_rtx (Pmode);
928 if (flag_pic)
929 emit_insn (gen_tgd_load_pic (tmp, addr));
930 else
931 emit_insn (gen_tgd_load (tmp, addr));
932 ret = hppa_tls_call (tmp);
933 break;
934
935 case TLS_MODEL_LOCAL_DYNAMIC:
936 ret = gen_reg_rtx (Pmode);
937 tmp = gen_reg_rtx (Pmode);
938 start_sequence ();
939 if (flag_pic)
940 emit_insn (gen_tld_load_pic (tmp, addr));
941 else
942 emit_insn (gen_tld_load (tmp, addr));
943 t1 = hppa_tls_call (tmp);
944 insn = get_insns ();
945 end_sequence ();
946 t2 = gen_reg_rtx (Pmode);
947 emit_libcall_block (insn, t2, t1,
948 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
949 UNSPEC_TLSLDBASE));
950 emit_insn (gen_tld_offset_load (ret, addr, t2));
951 break;
952
953 case TLS_MODEL_INITIAL_EXEC:
954 tp = gen_reg_rtx (Pmode);
955 tmp = gen_reg_rtx (Pmode);
956 ret = gen_reg_rtx (Pmode);
957 emit_insn (gen_tp_load (tp));
958 if (flag_pic)
959 emit_insn (gen_tie_load_pic (tmp, addr));
960 else
961 emit_insn (gen_tie_load (tmp, addr));
962 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
963 break;
964
965 case TLS_MODEL_LOCAL_EXEC:
966 tp = gen_reg_rtx (Pmode);
967 ret = gen_reg_rtx (Pmode);
968 emit_insn (gen_tp_load (tp));
969 emit_insn (gen_tle_load (ret, addr, tp));
970 break;
971
972 default:
973 gcc_unreachable ();
974 }
975
976 return ret;
977 }
978
979 /* Helper for hppa_legitimize_address. Given X, return true if it
980 is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
981
    982    These respectively represent canonical shift-add rtxs or scaled
983 memory addresses. */
984 static bool
985 mem_shadd_or_shadd_rtx_p (rtx x)
986 {
987 return ((GET_CODE (x) == ASHIFT
988 || GET_CODE (x) == MULT)
989 && GET_CODE (XEXP (x, 1)) == CONST_INT
990 && ((GET_CODE (x) == ASHIFT
991 && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
992 || (GET_CODE (x) == MULT
993 && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
994 }
995
996 /* Try machine-dependent ways of modifying an illegitimate address
997 to be legitimate. If we find one, return the new, valid address.
998 This macro is used in only one place: `memory_address' in explow.c.
999
1000 OLDX is the address as it was before break_out_memory_refs was called.
1001 In some cases it is useful to look at this to decide what needs to be done.
1002
1003 It is always safe for this macro to do nothing. It exists to recognize
1004 opportunities to optimize the output.
1005
1006 For the PA, transform:
1007
1008 memory(X + <large int>)
1009
1010 into:
1011
1012 if (<large int> & mask) >= 16
1013 Y = (<large int> & ~mask) + mask + 1 Round up.
1014 else
1015 Y = (<large int> & ~mask) Round down.
1016 Z = X + Y
1017 memory (Z + (<large int> - Y));
1018
1019 This is for CSE to find several similar references, and only use one Z.
1020
1021 X can either be a SYMBOL_REF or REG, but because combine cannot
1022 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
1023 D will not fit in 14 bits.
1024
1025 MODE_FLOAT references allow displacements which fit in 5 bits, so use
1026 0x1f as the mask.
1027
1028 MODE_INT references allow displacements which fit in 14 bits, so use
1029 0x3fff as the mask.
1030
1031 This relies on the fact that most mode MODE_FLOAT references will use FP
1032 registers and most mode MODE_INT references will use integer registers.
1033 (In the rare case of an FP register used in an integer MODE, we depend
1034 on secondary reloads to clean things up.)
1035
1036
1037 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
1038 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
1039 addressing modes to be used).
1040
1041 Note that the addresses passed into hppa_legitimize_address always
1042 come from a MEM, so we only have to match the MULT form on incoming
1043 addresses. But to be future proof we also match the ASHIFT form.
1044
1045 However, this routine always places those shift-add sequences into
1046 registers, so we have to generate the ASHIFT form as our output.
1047
1048 Put X and Z into registers. Then put the entire expression into
1049 a register. */
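/* Worked example of the transformation above for a MODE_INT reference
   (mask == 0x3fff): for memory (X + 0x12345), the masked offset
   0x2345 is >= 0x2000, so we round up and Y == (0x12345 & ~0x3fff)
   + 0x3fff + 1 == 0x14000.  Z == X + Y is computed once, and the
   reference becomes memory (Z + (0x12345 - 0x14000)), i.e.
   memory (Z - 0x1cbb), whose displacement fits in 14 bits.  A nearby
   reference such as X + 0x12349 rounds to the same Z, which CSE can
   then share.  */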
1050
1051 rtx
1052 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1053 machine_mode mode)
1054 {
1055 rtx orig = x;
1056
1057 /* We need to canonicalize the order of operands in unscaled indexed
1058 addresses since the code that checks if an address is valid doesn't
1059 always try both orders. */
1060 if (!TARGET_NO_SPACE_REGS
1061 && GET_CODE (x) == PLUS
1062 && GET_MODE (x) == Pmode
1063 && REG_P (XEXP (x, 0))
1064 && REG_P (XEXP (x, 1))
1065 && REG_POINTER (XEXP (x, 0))
1066 && !REG_POINTER (XEXP (x, 1)))
1067 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
1068
1069 if (tls_referenced_p (x))
1070 return legitimize_tls_address (x);
1071 else if (flag_pic)
1072 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
1073
1074 /* Strip off CONST. */
1075 if (GET_CODE (x) == CONST)
1076 x = XEXP (x, 0);
1077
1078 /* Special case. Get the SYMBOL_REF into a register and use indexing.
1079 That should always be safe. */
1080 if (GET_CODE (x) == PLUS
1081 && GET_CODE (XEXP (x, 0)) == REG
1082 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
1083 {
1084 rtx reg = force_reg (Pmode, XEXP (x, 1));
1085 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
1086 }
1087
1088 /* Note we must reject symbols which represent function addresses
1089 since the assembler/linker can't handle arithmetic on plabels. */
1090 if (GET_CODE (x) == PLUS
1091 && GET_CODE (XEXP (x, 1)) == CONST_INT
1092 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1093 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
1094 || GET_CODE (XEXP (x, 0)) == REG))
1095 {
1096 rtx int_part, ptr_reg;
1097 int newoffset;
1098 int offset = INTVAL (XEXP (x, 1));
1099 int mask;
1100
1101 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
1102 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
1103
1104 /* Choose which way to round the offset. Round up if we
1105 are >= halfway to the next boundary. */
1106 if ((offset & mask) >= ((mask + 1) / 2))
1107 newoffset = (offset & ~ mask) + mask + 1;
1108 else
1109 newoffset = (offset & ~ mask);
1110
1111 /* If the newoffset will not fit in 14 bits (ldo), then
1112 handling this would take 4 or 5 instructions (2 to load
1113 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
   1114 	 add the new offset and the SYMBOL_REF).  Combine cannot
   1115 	 handle 4->2 or 5->2 combinations, so do not create
1116 them. */
1117 if (! VAL_14_BITS_P (newoffset)
1118 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
1119 {
1120 rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
1121 rtx tmp_reg
1122 = force_reg (Pmode,
1123 gen_rtx_HIGH (Pmode, const_part));
1124 ptr_reg
1125 = force_reg (Pmode,
1126 gen_rtx_LO_SUM (Pmode,
1127 tmp_reg, const_part));
1128 }
1129 else
1130 {
1131 if (! VAL_14_BITS_P (newoffset))
1132 int_part = force_reg (Pmode, GEN_INT (newoffset));
1133 else
1134 int_part = GEN_INT (newoffset);
1135
1136 ptr_reg = force_reg (Pmode,
1137 gen_rtx_PLUS (Pmode,
1138 force_reg (Pmode, XEXP (x, 0)),
1139 int_part));
1140 }
1141 return plus_constant (Pmode, ptr_reg, offset - newoffset);
1142 }
1143
1144 /* Handle (plus (mult (a) (mem_shadd_constant)) (b)). */
1145
1146 if (GET_CODE (x) == PLUS
1147 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1148 && (OBJECT_P (XEXP (x, 1))
1149 || GET_CODE (XEXP (x, 1)) == SUBREG)
1150 && GET_CODE (XEXP (x, 1)) != CONST)
1151 {
1152 /* If we were given a MULT, we must fix the constant
1153 as we're going to create the ASHIFT form. */
1154 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1155 if (GET_CODE (XEXP (x, 0)) == MULT)
1156 shift_val = exact_log2 (shift_val);
1157
1158 rtx reg1, reg2;
1159 reg1 = XEXP (x, 1);
1160 if (GET_CODE (reg1) != REG)
1161 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1162
1163 reg2 = XEXP (XEXP (x, 0), 0);
1164 if (GET_CODE (reg2) != REG)
1165 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1166
1167 return force_reg (Pmode,
1168 gen_rtx_PLUS (Pmode,
1169 gen_rtx_ASHIFT (Pmode, reg2,
1170 GEN_INT (shift_val)),
1171 reg1));
1172 }
1173
1174 /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).
1175
1176 Only do so for floating point modes since this is more speculative
1177 and we lose if it's an integer store. */
1178 if (GET_CODE (x) == PLUS
1179 && GET_CODE (XEXP (x, 0)) == PLUS
1180 && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
1181 && (mode == SFmode || mode == DFmode))
1182 {
1183 int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
1184
1185 /* If we were given a MULT, we must fix the constant
1186 as we're going to create the ASHIFT form. */
1187 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
1188 shift_val = exact_log2 (shift_val);
1189
1190 /* Try and figure out what to use as a base register. */
1191 rtx reg1, reg2, base, idx;
1192
1193 reg1 = XEXP (XEXP (x, 0), 1);
1194 reg2 = XEXP (x, 1);
1195 base = NULL_RTX;
1196 idx = NULL_RTX;
1197
1198 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1199 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
1200 it's a base register below. */
1201 if (GET_CODE (reg1) != REG)
1202 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1203
1204 if (GET_CODE (reg2) != REG)
1205 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1206
1207 /* Figure out what the base and index are. */
1208
1209 if (GET_CODE (reg1) == REG
1210 && REG_POINTER (reg1))
1211 {
1212 base = reg1;
1213 idx = gen_rtx_PLUS (Pmode,
1214 gen_rtx_ASHIFT (Pmode,
1215 XEXP (XEXP (XEXP (x, 0), 0), 0),
1216 GEN_INT (shift_val)),
1217 XEXP (x, 1));
1218 }
1219 else if (GET_CODE (reg2) == REG
1220 && REG_POINTER (reg2))
1221 {
1222 base = reg2;
1223 idx = XEXP (x, 0);
1224 }
1225
1226 if (base == 0)
1227 return orig;
1228
1229 /* If the index adds a large constant, try to scale the
1230 constant so that it can be loaded with only one insn. */
1231 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1232 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1233 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1234 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1235 {
1236 /* Divide the CONST_INT by the scale factor, then add it to A. */
1237 int val = INTVAL (XEXP (idx, 1));
1238 val /= (1 << shift_val);
1239
1240 reg1 = XEXP (XEXP (idx, 0), 0);
1241 if (GET_CODE (reg1) != REG)
1242 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1243
1244 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1245
1246 /* We can now generate a simple scaled indexed address. */
1247 return
1248 force_reg
1249 (Pmode, gen_rtx_PLUS (Pmode,
1250 gen_rtx_ASHIFT (Pmode, reg1,
1251 GEN_INT (shift_val)),
1252 base));
1253 }
1254
1255 /* If B + C is still a valid base register, then add them. */
1256 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1257 && INTVAL (XEXP (idx, 1)) <= 4096
1258 && INTVAL (XEXP (idx, 1)) >= -4096)
1259 {
1260 rtx reg1, reg2;
1261
1262 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1263
1264 reg2 = XEXP (XEXP (idx, 0), 0);
1265 if (GET_CODE (reg2) != CONST_INT)
1266 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1267
1268 return force_reg (Pmode,
1269 gen_rtx_PLUS (Pmode,
1270 gen_rtx_ASHIFT (Pmode, reg2,
1271 GEN_INT (shift_val)),
1272 reg1));
1273 }
1274
1275 /* Get the index into a register, then add the base + index and
1276 return a register holding the result. */
1277
1278 /* First get A into a register. */
1279 reg1 = XEXP (XEXP (idx, 0), 0);
1280 if (GET_CODE (reg1) != REG)
1281 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1282
1283 /* And get B into a register. */
1284 reg2 = XEXP (idx, 1);
1285 if (GET_CODE (reg2) != REG)
1286 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1287
1288 reg1 = force_reg (Pmode,
1289 gen_rtx_PLUS (Pmode,
1290 gen_rtx_ASHIFT (Pmode, reg1,
1291 GEN_INT (shift_val)),
1292 reg2));
1293
1294 /* Add the result to our base register and return. */
1295 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1296
1297 }
1298
1299 /* Uh-oh. We might have an address for x[n-100000]. This needs
1300 special handling to avoid creating an indexed memory address
1301 with x-100000 as the base.
1302
1303 If the constant part is small enough, then it's still safe because
1304 there is a guard page at the beginning and end of the data segment.
1305
1306 Scaled references are common enough that we want to try and rearrange the
1307 terms so that we can use indexing for these addresses too. Only
   1308      do the optimization for floating point modes.  */
1309
1310 if (GET_CODE (x) == PLUS
1311 && pa_symbolic_expression_p (XEXP (x, 1)))
1312 {
1313 /* Ugly. We modify things here so that the address offset specified
1314 by the index expression is computed first, then added to x to form
1315 the entire address. */
1316
1317 rtx regx1, regx2, regy1, regy2, y;
1318
1319 /* Strip off any CONST. */
1320 y = XEXP (x, 1);
1321 if (GET_CODE (y) == CONST)
1322 y = XEXP (y, 0);
1323
1324 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1325 {
1326 /* See if this looks like
1327 (plus (mult (reg) (mem_shadd_const))
1328 (const (plus (symbol_ref) (const_int))))
1329
1330 Where const_int is small. In that case the const
1331 expression is a valid pointer for indexing.
1332
   1333 	     If const_int is big but divides evenly by shadd_const, scale it
   1334 	     and add it to (reg).  This allows more scaled indexed addresses.  */
1335 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1336 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1337 && GET_CODE (XEXP (y, 1)) == CONST_INT
1338 && INTVAL (XEXP (y, 1)) >= -4096
1339 && INTVAL (XEXP (y, 1)) <= 4095)
1340 {
1341 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1342
1343 /* If we were given a MULT, we must fix the constant
1344 as we're going to create the ASHIFT form. */
1345 if (GET_CODE (XEXP (x, 0)) == MULT)
1346 shift_val = exact_log2 (shift_val);
1347
1348 rtx reg1, reg2;
1349
1350 reg1 = XEXP (x, 1);
1351 if (GET_CODE (reg1) != REG)
1352 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1353
1354 reg2 = XEXP (XEXP (x, 0), 0);
1355 if (GET_CODE (reg2) != REG)
1356 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1357
1358 return
1359 force_reg (Pmode,
1360 gen_rtx_PLUS (Pmode,
1361 gen_rtx_ASHIFT (Pmode,
1362 reg2,
1363 GEN_INT (shift_val)),
1364 reg1));
1365 }
1366 else if ((mode == DFmode || mode == SFmode)
1367 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1368 && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
1369 && GET_CODE (XEXP (y, 1)) == CONST_INT
1370 && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
1371 {
1372 int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
1373
1374 /* If we were given a MULT, we must fix the constant
1375 as we're going to create the ASHIFT form. */
1376 if (GET_CODE (XEXP (x, 0)) == MULT)
1377 shift_val = exact_log2 (shift_val);
1378
1379 regx1
1380 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1381 / INTVAL (XEXP (XEXP (x, 0), 1))));
1382 regx2 = XEXP (XEXP (x, 0), 0);
1383 if (GET_CODE (regx2) != REG)
1384 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1385 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1386 regx2, regx1));
1387 return
1388 force_reg (Pmode,
1389 gen_rtx_PLUS (Pmode,
1390 gen_rtx_ASHIFT (Pmode, regx2,
1391 GEN_INT (shift_val)),
1392 force_reg (Pmode, XEXP (y, 0))));
1393 }
1394 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1395 && INTVAL (XEXP (y, 1)) >= -4096
1396 && INTVAL (XEXP (y, 1)) <= 4095)
1397 {
1398 /* This is safe because of the guard page at the
1399 beginning and end of the data space. Just
1400 return the original address. */
1401 return orig;
1402 }
1403 else
1404 {
1405 /* Doesn't look like one we can optimize. */
1406 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1407 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1408 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1409 regx1 = force_reg (Pmode,
1410 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1411 regx1, regy2));
1412 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1413 }
1414 }
1415 }
1416
1417 return orig;
1418 }
1419
1420 /* Implement the TARGET_REGISTER_MOVE_COST hook.
1421
1422 Compute extra cost of moving data between one register class
1423 and another.
1424
1425 Make moves from SAR so expensive they should never happen. We used to
1426 have 0xffff here, but that generates overflow in rare cases.
1427
1428 Copies involving a FP register and a non-FP register are relatively
1429 expensive because they must go through memory.
1430
1431 Other copies are reasonably cheap. */
1432
1433 static int
1434 hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
1435 reg_class_t from, reg_class_t to)
1436 {
1437 if (from == SHIFT_REGS)
1438 return 0x100;
1439 else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
1440 return 18;
1441 else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
1442 || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
1443 return 16;
1444 else
1445 return 2;
1446 }
1447
   1448 /* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1,
   1449    and addresses involving symbolic constants (HIGH) cost 2.
1450
1451 PIC addresses are very expensive.
1452
1453 It is no coincidence that this has the same structure
1454 as pa_legitimate_address_p. */
1455
1456 static int
1457 hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
1458 addr_space_t as ATTRIBUTE_UNUSED,
1459 bool speed ATTRIBUTE_UNUSED)
1460 {
1461 switch (GET_CODE (X))
1462 {
1463 case REG:
1464 case PLUS:
1465 case LO_SUM:
1466 return 1;
1467 case HIGH:
1468 return 2;
1469 default:
1470 return 4;
1471 }
1472 }
1473
1474 /* Compute a (partial) cost for rtx X. Return true if the complete
1475 cost has been computed, and false if subexpressions should be
1476 scanned. In either case, *TOTAL contains the cost result. */
1477
1478 static bool
1479 hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
1480 int opno ATTRIBUTE_UNUSED,
1481 int *total, bool speed ATTRIBUTE_UNUSED)
1482 {
1483 int factor;
1484 int code = GET_CODE (x);
1485
1486 switch (code)
1487 {
1488 case CONST_INT:
1489 if (INTVAL (x) == 0)
1490 *total = 0;
1491 else if (INT_14_BITS (x))
1492 *total = 1;
1493 else
1494 *total = 2;
1495 return true;
1496
1497 case HIGH:
1498 *total = 2;
1499 return true;
1500
1501 case CONST:
1502 case LABEL_REF:
1503 case SYMBOL_REF:
1504 *total = 4;
1505 return true;
1506
1507 case CONST_DOUBLE:
1508 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1509 && outer_code != SET)
1510 *total = 0;
1511 else
1512 *total = 8;
1513 return true;
1514
1515 case MULT:
1516 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1517 {
1518 *total = COSTS_N_INSNS (3);
1519 return true;
1520 }
1521
1522 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1523 factor = GET_MODE_SIZE (mode) / 4;
1524 if (factor == 0)
1525 factor = 1;
1526
1527 if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1528 *total = factor * factor * COSTS_N_INSNS (8);
1529 else
1530 *total = factor * factor * COSTS_N_INSNS (20);
1531 return true;
1532
1533 case DIV:
1534 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1535 {
1536 *total = COSTS_N_INSNS (14);
1537 return true;
1538 }
1539 /* FALLTHRU */
1540
1541 case UDIV:
1542 case MOD:
1543 case UMOD:
1544 /* A mode size N times larger than SImode needs O(N*N) more insns. */
1545 factor = GET_MODE_SIZE (mode) / 4;
1546 if (factor == 0)
1547 factor = 1;
1548
1549 *total = factor * factor * COSTS_N_INSNS (60);
1550 return true;
1551
1552 case PLUS: /* this includes shNadd insns */
1553 case MINUS:
1554 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1555 {
1556 *total = COSTS_N_INSNS (3);
1557 return true;
1558 }
1559
1560 /* A size N times larger than UNITS_PER_WORD needs N times as
1561 many insns, taking N times as long. */
1562 factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
1563 if (factor == 0)
1564 factor = 1;
1565 *total = factor * COSTS_N_INSNS (1);
1566 return true;
1567
1568 case ASHIFT:
1569 case ASHIFTRT:
1570 case LSHIFTRT:
1571 *total = COSTS_N_INSNS (1);
1572 return true;
1573
1574 default:
1575 return false;
1576 }
1577 }
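/* Example of the size scaling above: a DImode multiply on a 32-bit
   target has factor == 8/4 == 2, so it costs
   2 * 2 * COSTS_N_INSNS (8) when the PA 1.1 FP multiplier is usable
   and 2 * 2 * COSTS_N_INSNS (20) otherwise.  */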
1578
1579 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1580 new rtx with the correct mode. */
1581 static inline rtx
1582 force_mode (machine_mode mode, rtx orig)
1583 {
1584 if (mode == GET_MODE (orig))
1585 return orig;
1586
1587 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1588
1589 return gen_rtx_REG (mode, REGNO (orig));
1590 }
1591
1592 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1593
1594 static bool
1595 pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1596 {
1597 return tls_referenced_p (x);
1598 }
1599
1600 /* Emit insns to move operands[1] into operands[0].
1601
1602 Return 1 if we have written out everything that needs to be done to
1603 do the move. Otherwise, return 0 and the caller will emit the move
1604 normally.
1605
1606 Note SCRATCH_REG may not be in the proper mode depending on how it
1607 will be used. This routine is responsible for creating a new copy
1608 of SCRATCH_REG in the proper mode. */
1609
1610 int
1611 pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
1612 {
1613 register rtx operand0 = operands[0];
1614 register rtx operand1 = operands[1];
1615 register rtx tem;
1616
1617 /* We can only handle indexed addresses in the destination operand
1618 of floating point stores. Thus, we need to break out indexed
1619 addresses from the destination operand. */
1620 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1621 {
1622 gcc_assert (can_create_pseudo_p ());
1623
1624 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1625 operand0 = replace_equiv_address (operand0, tem);
1626 }
1627
1628 /* On targets with non-equivalent space registers, break out unscaled
1629 indexed addresses from the source operand before the final CSE.
1630 We have to do this because the REG_POINTER flag is not correctly
1631 carried through various optimization passes and CSE may substitute
1632 a pseudo without the pointer set for one with the pointer set. As
   1633    a result, we lose various opportunities to create insns with
1634 unscaled indexed addresses. */
1635 if (!TARGET_NO_SPACE_REGS
1636 && !cse_not_expected
1637 && GET_CODE (operand1) == MEM
1638 && GET_CODE (XEXP (operand1, 0)) == PLUS
1639 && REG_P (XEXP (XEXP (operand1, 0), 0))
1640 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1641 operand1
1642 = replace_equiv_address (operand1,
1643 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1644
1645 if (scratch_reg
1646 && reload_in_progress && GET_CODE (operand0) == REG
1647 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1648 operand0 = reg_equiv_mem (REGNO (operand0));
1649 else if (scratch_reg
1650 && reload_in_progress && GET_CODE (operand0) == SUBREG
1651 && GET_CODE (SUBREG_REG (operand0)) == REG
1652 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1653 {
1654 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1655 the code which tracks sets/uses for delete_output_reload. */
1656 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1657 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1658 SUBREG_BYTE (operand0));
1659 operand0 = alter_subreg (&temp, true);
1660 }
1661
1662 if (scratch_reg
1663 && reload_in_progress && GET_CODE (operand1) == REG
1664 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1665 operand1 = reg_equiv_mem (REGNO (operand1));
1666 else if (scratch_reg
1667 && reload_in_progress && GET_CODE (operand1) == SUBREG
1668 && GET_CODE (SUBREG_REG (operand1)) == REG
1669 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1670 {
   1671       /* We must not alter SUBREG_BYTE (operand1) since that would confuse
1672 the code which tracks sets/uses for delete_output_reload. */
1673 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1674 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1675 SUBREG_BYTE (operand1));
1676 operand1 = alter_subreg (&temp, true);
1677 }
1678
1679 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1680 && ((tem = find_replacement (&XEXP (operand0, 0)))
1681 != XEXP (operand0, 0)))
1682 operand0 = replace_equiv_address (operand0, tem);
1683
1684 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1685 && ((tem = find_replacement (&XEXP (operand1, 0)))
1686 != XEXP (operand1, 0)))
1687 operand1 = replace_equiv_address (operand1, tem);
1688
1689 /* Handle secondary reloads for loads/stores of FP registers from
1690 REG+D addresses where D does not fit in 5 or 14 bits, including
1691 (subreg (mem (addr))) cases, and reloads for other unsupported
1692 memory operands. */
1693 if (scratch_reg
1694 && FP_REG_P (operand0)
1695 && (MEM_P (operand1)
1696 || (GET_CODE (operand1) == SUBREG
1697 && MEM_P (XEXP (operand1, 0)))))
1698 {
1699 rtx op1 = operand1;
1700
1701 if (GET_CODE (op1) == SUBREG)
1702 op1 = XEXP (op1, 0);
1703
1704 if (reg_plus_base_memory_operand (op1, GET_MODE (op1)))
1705 {
1706 if (!(TARGET_PA_20
1707 && !TARGET_ELF32
1708 && INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1709 && !INT_5_BITS (XEXP (XEXP (op1, 0), 1)))
1710 {
1711 /* SCRATCH_REG will hold an address and maybe the actual data.
1712 We want it in WORD_MODE regardless of what mode it was
1713 originally given to us. */
1714 scratch_reg = force_mode (word_mode, scratch_reg);
1715
1716 /* D might not fit in 14 bits either; for such cases load D
1717 into scratch reg. */
1718 if (!INT_14_BITS (XEXP (XEXP (op1, 0), 1)))
1719 {
1720 emit_move_insn (scratch_reg, XEXP (XEXP (op1, 0), 1));
1721 emit_move_insn (scratch_reg,
1722 gen_rtx_fmt_ee (GET_CODE (XEXP (op1, 0)),
1723 Pmode,
1724 XEXP (XEXP (op1, 0), 0),
1725 scratch_reg));
1726 }
1727 else
1728 emit_move_insn (scratch_reg, XEXP (op1, 0));
1729 emit_insn (gen_rtx_SET (operand0,
1730 replace_equiv_address (op1, scratch_reg)));
1731 return 1;
1732 }
1733 }
1734 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op1, VOIDmode))
1735 || IS_LO_SUM_DLT_ADDR_P (XEXP (op1, 0))
1736 || IS_INDEX_ADDR_P (XEXP (op1, 0)))
1737 {
1738 /* Load memory address into SCRATCH_REG. */
1739 scratch_reg = force_mode (word_mode, scratch_reg);
1740 emit_move_insn (scratch_reg, XEXP (op1, 0));
1741 emit_insn (gen_rtx_SET (operand0,
1742 replace_equiv_address (op1, scratch_reg)));
1743 return 1;
1744 }
1745 }
1746 else if (scratch_reg
1747 && FP_REG_P (operand1)
1748 && (MEM_P (operand0)
1749 || (GET_CODE (operand0) == SUBREG
1750 && MEM_P (XEXP (operand0, 0)))))
1751 {
1752 rtx op0 = operand0;
1753
1754 if (GET_CODE (op0) == SUBREG)
1755 op0 = XEXP (op0, 0);
1756
1757 if (reg_plus_base_memory_operand (op0, GET_MODE (op0)))
1758 {
1759 if (!(TARGET_PA_20
1760 && !TARGET_ELF32
1761 && INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1762 && !INT_5_BITS (XEXP (XEXP (op0, 0), 1)))
1763 {
1764 /* SCRATCH_REG will hold an address and maybe the actual data.
1765 We want it in WORD_MODE regardless of what mode it was
1766 originally given to us. */
1767 scratch_reg = force_mode (word_mode, scratch_reg);
1768
1769 /* D might not fit in 14 bits either; for such cases load D
1770 into scratch reg. */
1771 if (!INT_14_BITS (XEXP (XEXP (op0, 0), 1)))
1772 {
1773 emit_move_insn (scratch_reg, XEXP (XEXP (op0, 0), 1));
1774 emit_move_insn (scratch_reg,
1775 gen_rtx_fmt_ee (GET_CODE (XEXP (op0, 0)),
1776 Pmode,
1777 XEXP (XEXP (op0, 0), 0),
1778 scratch_reg));
1779 }
1780 else
1781 emit_move_insn (scratch_reg, XEXP (op0, 0));
1782 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1783 operand1));
1784 return 1;
1785 }
1786 }
1787 else if ((!INT14_OK_STRICT && symbolic_memory_operand (op0, VOIDmode))
1788 || IS_LO_SUM_DLT_ADDR_P (XEXP (op0, 0))
1789 || IS_INDEX_ADDR_P (XEXP (op0, 0)))
1790 {
1791 /* Load memory address into SCRATCH_REG. */
1792 scratch_reg = force_mode (word_mode, scratch_reg);
1793 emit_move_insn (scratch_reg, XEXP (op0, 0));
1794 emit_insn (gen_rtx_SET (replace_equiv_address (op0, scratch_reg),
1795 operand1));
1796 return 1;
1797 }
1798 }
1799 /* Handle secondary reloads for loads of FP registers from constant
1800 expressions by forcing the constant into memory. For the most part,
1801 this is only necessary for SImode and DImode.
1802
1803 Use scratch_reg to hold the address of the memory location. */
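/* A sketch of what this produces (assumed, not verbatim): for an SImode
   constant reloaded into %fr22 with scratch %r1, the constant lands in
   the constant pool at some label, say L$C0, and we emit roughly

   ldil L'L$C0,%r1           ; pool entry address into the scratch
   ldo R'L$C0(%r1),%r1
   fldw 0(%r1),%fr22         ; load the FP register from the pool  */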
1804 else if (scratch_reg
1805 && CONSTANT_P (operand1)
1806 && FP_REG_P (operand0))
1807 {
1808 rtx const_mem, xoperands[2];
1809
1810 if (operand1 == CONST0_RTX (mode))
1811 {
1812 emit_insn (gen_rtx_SET (operand0, operand1));
1813 return 1;
1814 }
1815
1816 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1817 it in WORD_MODE regardless of what mode it was originally given
1818 to us. */
1819 scratch_reg = force_mode (word_mode, scratch_reg);
1820
1821 /* Force the constant into memory and put the address of the
1822 memory location into scratch_reg. */
1823 const_mem = force_const_mem (mode, operand1);
1824 xoperands[0] = scratch_reg;
1825 xoperands[1] = XEXP (const_mem, 0);
1826 pa_emit_move_sequence (xoperands, Pmode, 0);
1827
1828 /* Now load the destination register. */
1829 emit_insn (gen_rtx_SET (operand0,
1830 replace_equiv_address (const_mem, scratch_reg)));
1831 return 1;
1832 }
1833 /* Handle secondary reloads for SAR. These occur when trying to load
1834 the SAR from memory or a constant. */
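/* A sketch (register numbers illustrative): reloading (const_int 5)
   into the shift amount register with scratch %r1 emits roughly

   ldi 5,%r1                 ; constant into a general scratch first
   mtsar %r1                 ; then copy it into %sar

   since there is no direct way to load SAR from a constant or from
   memory.  */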
1835 else if (scratch_reg
1836 && GET_CODE (operand0) == REG
1837 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1838 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1839 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1840 {
1841 /* D might not fit in 14 bits either; for such cases load D into
1842 scratch reg. */
1843 if (GET_CODE (operand1) == MEM
1844 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1845 {
1846 /* We are reloading the address into the scratch register, so we
1847 want to make sure the scratch register is a full register. */
1848 scratch_reg = force_mode (word_mode, scratch_reg);
1849
1850 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1851 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1852 0)),
1853 Pmode,
1854 XEXP (XEXP (operand1, 0),
1855 0),
1856 scratch_reg));
1857
1858 /* Now we are going to load the scratch register from memory,
1859 we want to load it in the same width as the original MEM,
1860 which must be the same as the width of the ultimate destination,
1861 OPERAND0. */
1862 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1863
1864 emit_move_insn (scratch_reg,
1865 replace_equiv_address (operand1, scratch_reg));
1866 }
1867 else
1868 {
1869 /* We want to load the scratch register using the same mode as
1870 the ultimate destination. */
1871 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1872
1873 emit_move_insn (scratch_reg, operand1);
1874 }
1875
1876 /* And emit the insn to set the ultimate destination. We know that
1877 the scratch register has the same mode as the destination at this
1878 point. */
1879 emit_move_insn (operand0, scratch_reg);
1880 return 1;
1881 }
1882
1883 /* Handle the most common case: storing into a register. */
1884 if (register_operand (operand0, mode))
1885 {
1886 /* Legitimize TLS symbol references. This happens for references
1887 that aren't a legitimate constant. */
1888 if (PA_SYMBOL_REF_TLS_P (operand1))
1889 operand1 = legitimize_tls_address (operand1);
1890
1891 if (register_operand (operand1, mode)
1892 || (GET_CODE (operand1) == CONST_INT
1893 && pa_cint_ok_for_move (UINTVAL (operand1)))
1894 || (operand1 == CONST0_RTX (mode))
1895 || (GET_CODE (operand1) == HIGH
1896 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1897 /* Only `general_operands' can come here, so MEM is ok. */
1898 || GET_CODE (operand1) == MEM)
1899 {
1900 /* Various sets are created during RTL generation which don't
1901 have the REG_POINTER flag correctly set. After the CSE pass,
1902 instruction recognition can fail if we don't consistently
1903 set this flag when performing register copies. This should
1904 also improve the opportunities for creating insns that use
1905 unscaled indexing. */
1906 if (REG_P (operand0) && REG_P (operand1))
1907 {
1908 if (REG_POINTER (operand1)
1909 && !REG_POINTER (operand0)
1910 && !HARD_REGISTER_P (operand0))
1911 copy_reg_pointer (operand0, operand1);
1912 }
1913
1914 /* When MEMs are broken out, the REG_POINTER flag doesn't
1915 get set. In some cases, we can set the REG_POINTER flag
1916 from the declaration for the MEM. */
1917 if (REG_P (operand0)
1918 && GET_CODE (operand1) == MEM
1919 && !REG_POINTER (operand0))
1920 {
1921 tree decl = MEM_EXPR (operand1);
1922
1923 /* Set the register pointer flag and register alignment
1924 if the declaration for this memory reference is a
1925 pointer type. */
1926 if (decl)
1927 {
1928 tree type;
1929
1930 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1931 tree operand 1. */
1932 if (TREE_CODE (decl) == COMPONENT_REF)
1933 decl = TREE_OPERAND (decl, 1);
1934
1935 type = TREE_TYPE (decl);
1936 type = strip_array_types (type);
1937
1938 if (POINTER_TYPE_P (type))
1939 mark_reg_pointer (operand0, BITS_PER_UNIT);
1940 }
1941 }
1942
1943 emit_insn (gen_rtx_SET (operand0, operand1));
1944 return 1;
1945 }
1946 }
1947 else if (GET_CODE (operand0) == MEM)
1948 {
1949 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1950 && !(reload_in_progress || reload_completed))
1951 {
1952 rtx temp = gen_reg_rtx (DFmode);
1953
1954 emit_insn (gen_rtx_SET (temp, operand1));
1955 emit_insn (gen_rtx_SET (operand0, temp));
1956 return 1;
1957 }
1958 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1959 {
1960 /* Run this case quickly. */
1961 emit_insn (gen_rtx_SET (operand0, operand1));
1962 return 1;
1963 }
1964 if (! (reload_in_progress || reload_completed))
1965 {
1966 operands[0] = validize_mem (operand0);
1967 operands[1] = operand1 = force_reg (mode, operand1);
1968 }
1969 }
1970
1971 /* Simplify the source if we need to.
1972 Note we do have to handle function labels here, even though we do
1973 not consider them legitimate constants. Loop optimizations can
1974 call the emit_move_xxx with one as a source. */
1975 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1976 || (GET_CODE (operand1) == HIGH
1977 && symbolic_operand (XEXP (operand1, 0), mode))
1978 || function_label_operand (operand1, VOIDmode)
1979 || tls_referenced_p (operand1))
1980 {
1981 int ishighonly = 0;
1982
1983 if (GET_CODE (operand1) == HIGH)
1984 {
1985 ishighonly = 1;
1986 operand1 = XEXP (operand1, 0);
1987 }
1988 if (symbolic_operand (operand1, mode))
1989 {
1990 /* Argh. The assembler and linker can't handle arithmetic
1991 involving plabels.
1992
1993 So we force the plabel into memory, load operand0 from
1994 the memory location, then add in the constant part. */
1995 if ((GET_CODE (operand1) == CONST
1996 && GET_CODE (XEXP (operand1, 0)) == PLUS
1997 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1998 VOIDmode))
1999 || function_label_operand (operand1, VOIDmode))
2000 {
2001 rtx temp, const_part;
2002
2003 /* Figure out what (if any) scratch register to use. */
2004 if (reload_in_progress || reload_completed)
2005 {
2006 scratch_reg = scratch_reg ? scratch_reg : operand0;
2007 /* SCRATCH_REG will hold an address and maybe the actual
2008 data. We want it in WORD_MODE regardless of what mode it
2009 was originally given to us. */
2010 scratch_reg = force_mode (word_mode, scratch_reg);
2011 }
2012 else if (flag_pic)
2013 scratch_reg = gen_reg_rtx (Pmode);
2014
2015 if (GET_CODE (operand1) == CONST)
2016 {
2017 /* Save away the constant part of the expression. */
2018 const_part = XEXP (XEXP (operand1, 0), 1);
2019 gcc_assert (GET_CODE (const_part) == CONST_INT);
2020
2021 /* Force the function label into memory. */
2022 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
2023 }
2024 else
2025 {
2026 /* No constant part. */
2027 const_part = NULL_RTX;
2028
2029 /* Force the function label into memory. */
2030 temp = force_const_mem (mode, operand1);
2031 }
2032
2034 /* Get the address of the memory location. PIC-ify it if
2035 necessary. */
2036 temp = XEXP (temp, 0);
2037 if (flag_pic)
2038 temp = legitimize_pic_address (temp, mode, scratch_reg);
2039
2040 /* Put the address of the memory location into our destination
2041 register. */
2042 operands[1] = temp;
2043 pa_emit_move_sequence (operands, mode, scratch_reg);
2044
2045 /* Now load from the memory location into our destination
2046 register. */
2047 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2048 pa_emit_move_sequence (operands, mode, scratch_reg);
2049
2050 /* And add back in the constant part. */
2051 if (const_part != NULL_RTX)
2052 expand_inc (operand0, const_part);
2053
2054 return 1;
2055 }
2056
2057 if (flag_pic)
2058 {
2059 rtx_insn *insn;
2060 rtx temp;
2061
2062 if (reload_in_progress || reload_completed)
2063 {
2064 temp = scratch_reg ? scratch_reg : operand0;
2065 /* TEMP will hold an address and maybe the actual
2066 data. We want it in WORD_MODE regardless of what mode it
2067 was originally given to us. */
2068 temp = force_mode (word_mode, temp);
2069 }
2070 else
2071 temp = gen_reg_rtx (Pmode);
2072
2073 /* Force (const (plus (symbol) (const_int))) to memory
2074 if the const_int will not fit in 14 bits. Although
2075 this requires a relocation, the instruction sequence
2076 needed to load the value is shorter. */
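/* For instance (a sketch): with `sym + 0x10000', the 0x10000 offset is
   not 14-bit representable, so instead of the longer sequence that
   materializes `sym' and adds the offset, the whole sum is spilled to
   the constant pool and loaded back PIC-relative, one memory load
   instead of several address-arithmetic insns.  */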
2077 if (GET_CODE (operand1) == CONST
2078 && GET_CODE (XEXP (operand1, 0)) == PLUS
2079 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2080 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2081 {
2082 rtx x, m = force_const_mem (mode, operand1);
2083
2084 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2085 x = replace_equiv_address (m, x);
2086 insn = emit_move_insn (operand0, x);
2087 }
2088 else
2089 {
2090 operands[1] = legitimize_pic_address (operand1, mode, temp);
2091 if (REG_P (operand0) && REG_P (operands[1]))
2092 copy_reg_pointer (operand0, operands[1]);
2093 insn = emit_move_insn (operand0, operands[1]);
2094 }
2095
2096 /* Put a REG_EQUAL note on this insn. */
2097 set_unique_reg_note (insn, REG_EQUAL, operand1);
2098 }
2099 /* On the HPPA, references to data space are supposed to use dp,
2100 register 27, but showing it in the RTL inhibits various cse
2101 and loop optimizations. */
2102 else
2103 {
2104 rtx temp, set;
2105
2106 if (reload_in_progress || reload_completed)
2107 {
2108 temp = scratch_reg ? scratch_reg : operand0;
2109 /* TEMP will hold an address and maybe the actual
2110 data. We want it in WORD_MODE regardless of what mode it
2111 was originally given to us. */
2112 temp = force_mode (word_mode, temp);
2113 }
2114 else
2115 temp = gen_reg_rtx (mode);
2116
2117 /* Loading a SYMBOL_REF into a register makes that register
2118 safe to be used as the base in an indexed address.
2119
2120 Don't mark hard registers though. That loses. */
2121 if (GET_CODE (operand0) == REG
2122 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2123 mark_reg_pointer (operand0, BITS_PER_UNIT);
2124 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2125 mark_reg_pointer (temp, BITS_PER_UNIT);
2126
2127 if (ishighonly)
2128 set = gen_rtx_SET (operand0, temp);
2129 else
2130 set = gen_rtx_SET (operand0,
2131 gen_rtx_LO_SUM (mode, temp, operand1));
2132
2133 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2134 emit_insn (set);
2136 }
2137 return 1;
2138 }
2139 else if (tls_referenced_p (operand1))
2140 {
2141 rtx tmp = operand1;
2142 rtx addend = NULL;
2143
2144 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2145 {
2146 addend = XEXP (XEXP (tmp, 0), 1);
2147 tmp = XEXP (XEXP (tmp, 0), 0);
2148 }
2149
2150 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2151 tmp = legitimize_tls_address (tmp);
2152 if (addend)
2153 {
2154 tmp = gen_rtx_PLUS (mode, tmp, addend);
2155 tmp = force_operand (tmp, operands[0]);
2156 }
2157 operands[1] = tmp;
2158 }
2159 else if (GET_CODE (operand1) != CONST_INT
2160 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2161 {
2162 rtx temp;
2163 rtx_insn *insn;
2164 rtx op1 = operand1;
2165 HOST_WIDE_INT value = 0;
2166 HOST_WIDE_INT insv = 0;
2167 int insert = 0;
2168
2169 if (GET_CODE (operand1) == CONST_INT)
2170 value = INTVAL (operand1);
2171
2172 if (TARGET_64BIT
2173 && GET_CODE (operand1) == CONST_INT
2174 && HOST_BITS_PER_WIDE_INT > 32
2175 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2176 {
2177 HOST_WIDE_INT nval;
2178
2179 /* Extract the low order 32 bits of the value and sign extend.
2180 If the new value is the same as the original value, we can
2181 use the original value as-is. If the new value is
2182 different, we use it and insert the most-significant 32-bits
2183 of the original value into the final result. */
2184 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2185 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2186 if (value != nval)
2187 {
2188 #if HOST_BITS_PER_WIDE_INT > 32
2189 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2190 #endif
2191 insert = 1;
2192 value = nval;
2193 operand1 = GEN_INT (nval);
2194 }
2195 }
2196
2197 if (reload_in_progress || reload_completed)
2198 temp = scratch_reg ? scratch_reg : operand0;
2199 else
2200 temp = gen_reg_rtx (mode);
2201
2202 /* We don't directly split DImode constants on 32-bit targets
2203 because PLUS uses an 11-bit immediate and the insn sequence
2204 generated is not as efficient as the one using HIGH/LO_SUM. */
2205 if (GET_CODE (operand1) == CONST_INT
2206 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2207 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2208 && !insert)
2209 {
2210 /* Directly break constant into high and low parts. This
2211 provides better optimization opportunities because various
2212 passes recognize constants split with PLUS but not LO_SUM.
2213 We use a 14-bit signed low part except when the addition
2214 of 0x4000 to the high part might change the sign of the
2215 high part. */
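/* Worked example: value == 0x7fff gives low == 0x3fff and
   high == 0x4000; since low >= 0x2000, high becomes 0x8000 and low
   becomes 0x7fff - 0x8000 == -1, i.e. roughly
   `ldil L'0x8000,%r1; ldo -1(%r1),%r1' (register illustrative).
   The 0x7fffc000 check keeps the adjusted high part from changing
   sign.  */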
2216 HOST_WIDE_INT low = value & 0x3fff;
2217 HOST_WIDE_INT high = value & ~ 0x3fff;
2218
2219 if (low >= 0x2000)
2220 {
2221 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2222 high += 0x2000;
2223 else
2224 high += 0x4000;
2225 }
2226
2227 low = value - high;
2228
2229 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2230 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2231 }
2232 else
2233 {
2234 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2235 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2236 }
2237
2238 insn = emit_move_insn (operands[0], operands[1]);
2239
2240 /* Now insert the most significant 32 bits of the value
2241 into the register. When we don't have a second register
2242 available, it could take up to nine instructions to load
2243 a 64-bit integer constant. Prior to reload, we force
2244 constants that would take more than three instructions
2245 to load to the constant pool. During and after reload,
2246 we have to handle all possible values. */
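/* A sketch (assumed, not verbatim): for the DImode constant
   0x100000005, the sign-extended low word (5) is loaded first, and the
   remaining high word (INSV == 1) is then deposited into the upper 32
   bits with a `depdi'-style insn; denser bit patterns in INSV may take
   several depdi steps, as the loop below shows.  */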
2247 if (insert)
2248 {
2249 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2250 register and the value to be inserted is outside the
2251 range that can be loaded with three depdi instructions. */
2252 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2253 {
2254 operand1 = GEN_INT (insv);
2255
2256 emit_insn (gen_rtx_SET (temp,
2257 gen_rtx_HIGH (mode, operand1)));
2258 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2259 if (mode == DImode)
2260 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2261 const0_rtx, temp));
2262 else
2263 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2264 const0_rtx, temp));
2265 }
2266 else
2267 {
2268 int len = 5, pos = 27;
2269
2270 /* Insert the bits using the depdi instruction. */
2271 while (pos >= 0)
2272 {
2273 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2274 HOST_WIDE_INT sign = v5 < 0;
2275
2276 /* Left extend the insertion. */
2277 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2278 while (pos > 0 && (insv & 1) == sign)
2279 {
2280 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2281 len += 1;
2282 pos -= 1;
2283 }
2284
2285 if (mode == DImode)
2286 insn = emit_insn (gen_insvdi (operand0,
2287 GEN_INT (len),
2288 GEN_INT (pos),
2289 GEN_INT (v5)));
2290 else
2291 insn = emit_insn (gen_insvsi (operand0,
2292 GEN_INT (len),
2293 GEN_INT (pos),
2294 GEN_INT (v5)));
2295
2296 len = pos > 0 && pos < 5 ? pos : 5;
2297 pos -= len;
2298 }
2299 }
2300 }
2301
2302 set_unique_reg_note (insn, REG_EQUAL, op1);
2303
2304 return 1;
2305 }
2306 }
2307 /* Now have insn-emit do whatever it normally does. */
2308 return 0;
2309 }
2310
2311 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2312 it will need a link/runtime reloc). */
2313
2314 int
2315 pa_reloc_needed (tree exp)
2316 {
2317 int reloc = 0;
2318
2319 switch (TREE_CODE (exp))
2320 {
2321 case ADDR_EXPR:
2322 return 1;
2323
2324 case POINTER_PLUS_EXPR:
2325 case PLUS_EXPR:
2326 case MINUS_EXPR:
2327 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2328 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2329 break;
2330
2331 CASE_CONVERT:
2332 case NON_LVALUE_EXPR:
2333 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2334 break;
2335
2336 case CONSTRUCTOR:
2337 {
2338 tree value;
2339 unsigned HOST_WIDE_INT ix;
2340
2341 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2342 if (value)
2343 reloc |= pa_reloc_needed (value);
2344 }
2345 break;
2346
2347 case ERROR_MARK:
2348 break;
2349
2350 default:
2351 break;
2352 }
2353 return reloc;
2354 }
2355
2356 \f
2357 /* Return the best assembler insn template
2358 for moving operands[1] into operands[0] as a fullword. */
2359 const char *
2360 pa_singlemove_string (rtx *operands)
2361 {
2362 HOST_WIDE_INT intval;
2363
2364 if (GET_CODE (operands[0]) == MEM)
2365 return "stw %r1,%0";
2366 if (GET_CODE (operands[1]) == MEM)
2367 return "ldw %1,%0";
2368 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2369 {
2370 long i;
2371
2372 gcc_assert (GET_MODE (operands[1]) == SFmode);
2373
2374 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2375 bit pattern. */
2376 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2377
2378 operands[1] = GEN_INT (i);
2379 /* Fall through to CONST_INT case. */
2380 }
2381 if (GET_CODE (operands[1]) == CONST_INT)
2382 {
2383 intval = INTVAL (operands[1]);
2384
2385 if (VAL_14_BITS_P (intval))
2386 return "ldi %1,%0";
2387 else if ((intval & 0x7ff) == 0)
2388 return "ldil L'%1,%0";
2389 else if (pa_zdepi_cint_p (intval))
2390 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2391 else
2392 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2393 }
2394 return "copy %1,%0";
2395 }
2396 \f
2397
2398 /* Compute position (in OP[1]) and width (in OP[2])
2399 useful for copying IMM to a register using the zdepi
2400 instructions. Store the immediate value to insert in OP[0]. */
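/* Worked example: IMM == 0x00ff0000 has its least significant set bit
   at 16 and an 8-bit-wide string of ones whose low 5 bits have bit 4
   set, so we get OP[0] == -1 (the 5-bit sign extension),
   OP[1] == 31 - 16 == 15 and OP[2] == 8, i.e. `zdepi -1,15,8'
   regenerates 0x00ff0000.  */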
2401 static void
2402 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2403 {
2404 int lsb, len;
2405
2406 /* Find the least significant set bit in IMM. */
2407 for (lsb = 0; lsb < 32; lsb++)
2408 {
2409 if ((imm & 1) != 0)
2410 break;
2411 imm >>= 1;
2412 }
2413
2414 /* Choose variants based on *sign* of the 5-bit field. */
2415 if ((imm & 0x10) == 0)
2416 len = (lsb <= 28) ? 4 : 32 - lsb;
2417 else
2418 {
2419 /* Find the width of the bitstring in IMM. */
2420 for (len = 5; len < 32 - lsb; len++)
2421 {
2422 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2423 break;
2424 }
2425
2426 /* Sign extend IMM as a 5-bit value. */
2427 imm = (imm & 0xf) - 0x10;
2428 }
2429
2430 op[0] = imm;
2431 op[1] = 31 - lsb;
2432 op[2] = len;
2433 }
2434
2435 /* Compute position (in OP[1]) and width (in OP[2])
2436 useful for copying IMM to a register using the depdi,z
2437 instructions. Store the immediate value to insert in OP[0]. */
2438
2439 static void
2440 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2441 {
2442 int lsb, len, maxlen;
2443
2444 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2445
2446 /* Find the least significant set bit in IMM. */
2447 for (lsb = 0; lsb < maxlen; lsb++)
2448 {
2449 if ((imm & 1) != 0)
2450 break;
2451 imm >>= 1;
2452 }
2453
2454 /* Choose variants based on *sign* of the 5-bit field. */
2455 if ((imm & 0x10) == 0)
2456 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2457 else
2458 {
2459 /* Find the width of the bitstring in IMM. */
2460 for (len = 5; len < maxlen - lsb; len++)
2461 {
2462 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2463 break;
2464 }
2465
2466 /* Extend length if host is narrow and IMM is negative. */
2467 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2468 len += 32;
2469
2470 /* Sign extend IMM as a 5-bit value. */
2471 imm = (imm & 0xf) - 0x10;
2472 }
2473
2474 op[0] = imm;
2475 op[1] = 63 - lsb;
2476 op[2] = len;
2477 }
2478
2479 /* Output assembler code to perform a doubleword move insn
2480 with operands OPERANDS. */
2481
2482 const char *
2483 pa_output_move_double (rtx *operands)
2484 {
2485 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2486 rtx latehalf[2];
2487 rtx addreg0 = 0, addreg1 = 0;
2488 int highonly = 0;
2489
2490 /* First classify both operands. */
2491
2492 if (REG_P (operands[0]))
2493 optype0 = REGOP;
2494 else if (offsettable_memref_p (operands[0]))
2495 optype0 = OFFSOP;
2496 else if (GET_CODE (operands[0]) == MEM)
2497 optype0 = MEMOP;
2498 else
2499 optype0 = RNDOP;
2500
2501 if (REG_P (operands[1]))
2502 optype1 = REGOP;
2503 else if (CONSTANT_P (operands[1]))
2504 optype1 = CNSTOP;
2505 else if (offsettable_memref_p (operands[1]))
2506 optype1 = OFFSOP;
2507 else if (GET_CODE (operands[1]) == MEM)
2508 optype1 = MEMOP;
2509 else
2510 optype1 = RNDOP;
2511
2512 /* Check for cases that the operand constraints are not
2513 supposed to allow. */
2514 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2515
2516 /* Handle copies between general and floating registers. */
2517
2518 if (optype0 == REGOP && optype1 == REGOP
2519 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2520 {
2521 if (FP_REG_P (operands[0]))
2522 {
2523 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2524 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2525 return "{fldds|fldd} -16(%%sp),%0";
2526 }
2527 else
2528 {
2529 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2530 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2531 return "{ldws|ldw} -12(%%sp),%R0";
2532 }
2533 }
2534
2535 /* Handle auto decrementing and incrementing loads and stores
2536 specifically, since the structure of the function doesn't work
2537 for them without major modification. Do it better when we teach
2538 this port about the general inc/dec addressing of PA.
2539 (This was written by tege. Chide him if it doesn't work.) */
2540
2541 if (optype0 == MEMOP)
2542 {
2543 /* We have to output the address syntax ourselves, since print_operand
2544 doesn't deal with the addresses we want to use. Fix this later. */
2545
2546 rtx addr = XEXP (operands[0], 0);
2547 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2548 {
2549 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2550
2551 operands[0] = XEXP (addr, 0);
2552 gcc_assert (GET_CODE (operands[1]) == REG
2553 && GET_CODE (operands[0]) == REG);
2554
2555 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2556
2557 /* No overlap between high target register and address
2558 register. (We do this in a non-obvious way to
2559 save a register file writeback) */
2560 if (GET_CODE (addr) == POST_INC)
2561 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2562 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2563 }
2564 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2565 {
2566 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2567
2568 operands[0] = XEXP (addr, 0);
2569 gcc_assert (GET_CODE (operands[1]) == REG
2570 && GET_CODE (operands[0]) == REG);
2571
2572 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2573 /* No overlap between high target register and address
2574 register. (We do this in a non-obvious way to save a
2575 register file writeback) */
2576 if (GET_CODE (addr) == PRE_INC)
2577 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2578 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2579 }
2580 }
2581 if (optype1 == MEMOP)
2582 {
2583 /* We have to output the address syntax ourselves, since print_operand
2584 doesn't deal with the addresses we want to use. Fix this later. */
2585
2586 rtx addr = XEXP (operands[1], 0);
2587 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2588 {
2589 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2590
2591 operands[1] = XEXP (addr, 0);
2592 gcc_assert (GET_CODE (operands[0]) == REG
2593 && GET_CODE (operands[1]) == REG);
2594
2595 if (!reg_overlap_mentioned_p (high_reg, addr))
2596 {
2597 /* No overlap between high target register and address
2598 register. (We do this in a non-obvious way to
2599 save a register file writeback) */
2600 if (GET_CODE (addr) == POST_INC)
2601 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2602 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2603 }
2604 else
2605 {
2606 /* This is an undefined situation. We should load into the
2607 address register *and* update that register. Probably
2608 we don't need to handle this at all. */
2609 if (GET_CODE (addr) == POST_INC)
2610 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2611 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2612 }
2613 }
2614 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2615 {
2616 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2617
2618 operands[1] = XEXP (addr, 0);
2619 gcc_assert (GET_CODE (operands[0]) == REG
2620 && GET_CODE (operands[1]) == REG);
2621
2622 if (!reg_overlap_mentioned_p (high_reg, addr))
2623 {
2624 /* No overlap between high target register and address
2625 register. (We do this in a non-obvious way to
2626 save a register file writeback) */
2627 if (GET_CODE (addr) == PRE_INC)
2628 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2629 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2630 }
2631 else
2632 {
2633 /* This is an undefined situation. We should load into the
2634 address register *and* update that register. Probably
2635 we don't need to handle this at all. */
2636 if (GET_CODE (addr) == PRE_INC)
2637 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2638 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2639 }
2640 }
2641 else if (GET_CODE (addr) == PLUS
2642 && GET_CODE (XEXP (addr, 0)) == MULT)
2643 {
2644 rtx xoperands[4];
2645
2646 /* Load address into left half of destination register. */
2647 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2648 xoperands[1] = XEXP (addr, 1);
2649 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2650 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2651 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2652 xoperands);
2653 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2654 }
2655 else if (GET_CODE (addr) == PLUS
2656 && REG_P (XEXP (addr, 0))
2657 && REG_P (XEXP (addr, 1)))
2658 {
2659 rtx xoperands[3];
2660
2661 /* Load address into left half of destination register. */
2662 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2663 xoperands[1] = XEXP (addr, 0);
2664 xoperands[2] = XEXP (addr, 1);
2665 output_asm_insn ("{addl|add,l} %1,%2,%0",
2666 xoperands);
2667 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2668 }
2669 }
2670
2671 /* If an operand is an unoffsettable memory ref, find a register
2672 we can increment temporarily to make it refer to the second word. */
2673
2674 if (optype0 == MEMOP)
2675 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2676
2677 if (optype1 == MEMOP)
2678 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2679
2680 /* Ok, we can do one word at a time.
2681 Normally we do the low-numbered word first.
2682
2683 In either case, set up in LATEHALF the operands to use
2684 for the high-numbered word and in some cases alter the
2685 operands in OPERANDS to be suitable for the low-numbered word. */
2686
2687 if (optype0 == REGOP)
2688 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2689 else if (optype0 == OFFSOP)
2690 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2691 else
2692 latehalf[0] = operands[0];
2693
2694 if (optype1 == REGOP)
2695 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2696 else if (optype1 == OFFSOP)
2697 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2698 else if (optype1 == CNSTOP)
2699 {
2700 if (GET_CODE (operands[1]) == HIGH)
2701 {
2702 operands[1] = XEXP (operands[1], 0);
2703 highonly = 1;
2704 }
2705 split_double (operands[1], &operands[1], &latehalf[1]);
2706 }
2707 else
2708 latehalf[1] = operands[1];
2709
2710 /* If the first move would clobber the source of the second one,
2711 do them in the other order.
2712
2713 This can happen in two cases:
2714
2715 mem -> register where the first half of the destination register
2716 is the same register used in the memory's address. Reload
2717 can create such insns.
2718
2719 mem in this case will be either register indirect or register
2720 indirect plus a valid offset.
2721
2722 register -> register move where REGNO(dst) == REGNO(src + 1).
2723 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2724
2725 Handle mem -> register case first. */
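/* For example (a sketch), given
   (set (reg:DI %r4) (mem:DI (plus (reg:SI %r4) (const_int 8))))
   copying the low word first would clobber %r4 while it is still
   needed to address the high word, so the high word moves first.  */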
2726 if (optype0 == REGOP
2727 && (optype1 == MEMOP || optype1 == OFFSOP)
2728 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2729 {
2730 /* Do the late half first. */
2731 if (addreg1)
2732 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2733 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2734
2735 /* Then clobber. */
2736 if (addreg1)
2737 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2738 return pa_singlemove_string (operands);
2739 }
2740
2741 /* Now handle register -> register case. */
2742 if (optype0 == REGOP && optype1 == REGOP
2743 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2744 {
2745 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2746 return pa_singlemove_string (operands);
2747 }
2748
2749 /* Normal case: do the two words, low-numbered first. */
2750
2751 output_asm_insn (pa_singlemove_string (operands), operands);
2752
2753 /* Make any unoffsettable addresses point at high-numbered word. */
2754 if (addreg0)
2755 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2756 if (addreg1)
2757 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2758
2759 /* Do high-numbered word. */
2760 if (highonly)
2761 output_asm_insn ("ldil L'%1,%0", latehalf);
2762 else
2763 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2764
2765 /* Undo the adds we just did. */
2766 if (addreg0)
2767 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2768 if (addreg1)
2769 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2770
2771 return "";
2772 }
2773 \f
2774 const char *
2775 pa_output_fp_move_double (rtx *operands)
2776 {
2777 if (FP_REG_P (operands[0]))
2778 {
2779 if (FP_REG_P (operands[1])
2780 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2781 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2782 else
2783 output_asm_insn ("fldd%F1 %1,%0", operands);
2784 }
2785 else if (FP_REG_P (operands[1]))
2786 {
2787 output_asm_insn ("fstd%F0 %1,%0", operands);
2788 }
2789 else
2790 {
2791 rtx xoperands[2];
2792
2793 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2794
2795 /* This is a pain. You have to be prepared to deal with an
2796 arbitrary address here including pre/post increment/decrement.
2797
2798 So avoid this in the MD. */
2799 gcc_assert (GET_CODE (operands[0]) == REG);
2800
2801 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2802 xoperands[0] = operands[0];
2803 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2804 }
2805 return "";
2806 }
2807 \f
2808 /* Return a REG that occurs in ADDR with coefficient 1.
2809 ADDR can be effectively incremented by incrementing REG. */
2810
2811 static rtx
2812 find_addr_reg (rtx addr)
2813 {
2814 while (GET_CODE (addr) == PLUS)
2815 {
2816 if (GET_CODE (XEXP (addr, 0)) == REG)
2817 addr = XEXP (addr, 0);
2818 else if (GET_CODE (XEXP (addr, 1)) == REG)
2819 addr = XEXP (addr, 1);
2820 else if (CONSTANT_P (XEXP (addr, 0)))
2821 addr = XEXP (addr, 1);
2822 else if (CONSTANT_P (XEXP (addr, 1)))
2823 addr = XEXP (addr, 0);
2824 else
2825 gcc_unreachable ();
2826 }
2827 gcc_assert (GET_CODE (addr) == REG);
2828 return addr;
2829 }
2830
2831 /* Emit code to perform a block move.
2832
2833 OPERANDS[0] is the destination pointer as a REG, clobbered.
2834 OPERANDS[1] is the source pointer as a REG, clobbered.
2835 OPERANDS[2] is a register for temporary storage.
2836 OPERANDS[3] is a register for temporary storage.
2837 OPERANDS[4] is the size as a CONST_INT
2838 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2839 OPERANDS[6] is another temporary register. */
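/* A sketch of the output (assumed; 32-bit, align == 4, n_bytes == 16,
   register numbers illustrative):

   ldi 8,%r22                ; pre-adjusted loop counter
   ldw,ma 4(%r25),%r20       ; copying loop, two words per iteration
   ldw,ma 4(%r25),%r19
   stw,ma %r20,4(%r26)
   addib,>= -8,%r22,.-12
   stw,ma %r19,4(%r26)       ; no residual since 16 % 8 == 0  */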
2840
2841 const char *
2842 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2843 {
2844 int align = INTVAL (operands[5]);
2845 unsigned long n_bytes = INTVAL (operands[4]);
2846
2847 /* We can't move more than a word at a time because the PA
2848 has no integer move insns wider than a word. (Could use fp mem ops?) */
2849 if (align > (TARGET_64BIT ? 8 : 4))
2850 align = (TARGET_64BIT ? 8 : 4);
2851
2852 /* Note that we know each loop below will execute at least twice
2853 (else we would have open-coded the copy). */
2854 switch (align)
2855 {
2856 case 8:
2857 /* Pre-adjust the loop counter. */
2858 operands[4] = GEN_INT (n_bytes - 16);
2859 output_asm_insn ("ldi %4,%2", operands);
2860
2861 /* Copying loop. */
2862 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2863 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2864 output_asm_insn ("std,ma %3,8(%0)", operands);
2865 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2866 output_asm_insn ("std,ma %6,8(%0)", operands);
2867
2868 /* Handle the residual. There could be up to 15 bytes of
2869 residual to copy! */
2870 if (n_bytes % 16 != 0)
2871 {
2872 operands[4] = GEN_INT (n_bytes % 8);
2873 if (n_bytes % 16 >= 8)
2874 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2875 if (n_bytes % 8 != 0)
2876 output_asm_insn ("ldd 0(%1),%6", operands);
2877 if (n_bytes % 16 >= 8)
2878 output_asm_insn ("std,ma %3,8(%0)", operands);
2879 if (n_bytes % 8 != 0)
2880 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2881 }
2882 return "";
2883
2884 case 4:
2885 /* Pre-adjust the loop counter. */
2886 operands[4] = GEN_INT (n_bytes - 8);
2887 output_asm_insn ("ldi %4,%2", operands);
2888
2889 /* Copying loop. */
2890 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2891 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2892 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2893 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2894 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2895
2896 /* Handle the residual. There could be up to 7 bytes of
2897 residual to copy! */
2898 if (n_bytes % 8 != 0)
2899 {
2900 operands[4] = GEN_INT (n_bytes % 4);
2901 if (n_bytes % 8 >= 4)
2902 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2903 if (n_bytes % 4 != 0)
2904 output_asm_insn ("ldw 0(%1),%6", operands);
2905 if (n_bytes % 8 >= 4)
2906 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2907 if (n_bytes % 4 != 0)
2908 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2909 }
2910 return "";
2911
2912 case 2:
2913 /* Pre-adjust the loop counter. */
2914 operands[4] = GEN_INT (n_bytes - 4);
2915 output_asm_insn ("ldi %4,%2", operands);
2916
2917 /* Copying loop. */
2918 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2919 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2920 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2921 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2922 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2923
2924 /* Handle the residual. */
2925 if (n_bytes % 4 != 0)
2926 {
2927 if (n_bytes % 4 >= 2)
2928 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2929 if (n_bytes % 2 != 0)
2930 output_asm_insn ("ldb 0(%1),%6", operands);
2931 if (n_bytes % 4 >= 2)
2932 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2933 if (n_bytes % 2 != 0)
2934 output_asm_insn ("stb %6,0(%0)", operands);
2935 }
2936 return "";
2937
2938 case 1:
2939 /* Pre-adjust the loop counter. */
2940 operands[4] = GEN_INT (n_bytes - 2);
2941 output_asm_insn ("ldi %4,%2", operands);
2942
2943 /* Copying loop. */
2944 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2945 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2946 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2947 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2948 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2949
2950 /* Handle the residual. */
2951 if (n_bytes % 2 != 0)
2952 {
2953 output_asm_insn ("ldb 0(%1),%3", operands);
2954 output_asm_insn ("stb %3,0(%0)", operands);
2955 }
2956 return "";
2957
2958 default:
2959 gcc_unreachable ();
2960 }
2961 }
2962
2963 /* Count the number of insns necessary to handle this block move.
2964
2965 Basic structure is the same as emit_block_move, except that we
2966 count insns rather than emit them. */
2967
2968 static int
2969 compute_movmem_length (rtx_insn *insn)
2970 {
2971 rtx pat = PATTERN (insn);
2972 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2973 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2974 unsigned int n_insns = 0;
2975
2976 /* We can't move more than a word at a time because the PA
2977 has no integer move insns wider than a word. (Could use fp mem ops?) */
2978 if (align > (TARGET_64BIT ? 8 : 4))
2979 align = (TARGET_64BIT ? 8 : 4);
2980
2981 /* The basic copying loop. */
2982 n_insns = 6;
2983
2984 /* Residuals. */
2985 if (n_bytes % (2 * align) != 0)
2986 {
2987 if ((n_bytes % (2 * align)) >= align)
2988 n_insns += 2;
2989
2990 if ((n_bytes % align) != 0)
2991 n_insns += 2;
2992 }
2993
2994 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2995 return n_insns * 4;
2996 }
2997
2998 /* Emit code to perform a block clear.
2999
3000 OPERANDS[0] is the destination pointer as a REG, clobbered.
3001 OPERANDS[1] is a register for temporary storage.
3002 OPERANDS[2] is the size as a CONST_INT
3003 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
3004
3005 const char *
3006 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
3007 {
3008 int align = INTVAL (operands[3]);
3009 unsigned long n_bytes = INTVAL (operands[2]);
3010
3011 /* We can't clear more than a word at a time because the PA
3012 has no integer move insns wider than a word. */
3013 if (align > (TARGET_64BIT ? 8 : 4))
3014 align = (TARGET_64BIT ? 8 : 4);
3015
3016 /* Note that we know each loop below will execute at least twice
3017 (else we would have open-coded the copy). */
3018 switch (align)
3019 {
3020 case 8:
3021 /* Pre-adjust the loop counter. */
3022 operands[2] = GEN_INT (n_bytes - 16);
3023 output_asm_insn ("ldi %2,%1", operands);
3024
3025 /* Loop. */
3026 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3027 output_asm_insn ("addib,>= -16,%1,.-4", operands);
3028 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3029
3030 /* Handle the residual. There could be up to 15 bytes of
3031 residual to clear! */
3032 if (n_bytes % 16 != 0)
3033 {
3034 operands[2] = GEN_INT (n_bytes % 8);
3035 if (n_bytes % 16 >= 8)
3036 output_asm_insn ("std,ma %%r0,8(%0)", operands);
3037 if (n_bytes % 8 != 0)
3038 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
3039 }
3040 return "";
3041
3042 case 4:
3043 /* Pre-adjust the loop counter. */
3044 operands[2] = GEN_INT (n_bytes - 8);
3045 output_asm_insn ("ldi %2,%1", operands);
3046
3047 /* Loop. */
3048 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3049 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3050 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3051
3052 /* Handle the residual. There could be up to 7 bytes of
3053 residual to clear! */
3054 if (n_bytes % 8 != 0)
3055 {
3056 operands[2] = GEN_INT (n_bytes % 4);
3057 if (n_bytes % 8 >= 4)
3058 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3059 if (n_bytes % 4 != 0)
3060 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3061 }
3062 return "";
3063
3064 case 2:
3065 /* Pre-adjust the loop counter. */
3066 operands[2] = GEN_INT (n_bytes - 4);
3067 output_asm_insn ("ldi %2,%1", operands);
3068
3069 /* Loop. */
3070 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3071 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3072 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3073
3074 /* Handle the residual. */
3075 if (n_bytes % 4 != 0)
3076 {
3077 if (n_bytes % 4 >= 2)
3078 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3079 if (n_bytes % 2 != 0)
3080 output_asm_insn ("stb %%r0,0(%0)", operands);
3081 }
3082 return "";
3083
3084 case 1:
3085 /* Pre-adjust the loop counter. */
3086 operands[2] = GEN_INT (n_bytes - 2);
3087 output_asm_insn ("ldi %2,%1", operands);
3088
3089 /* Loop. */
3090 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3091 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3092 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3093
3094 /* Handle the residual. */
3095 if (n_bytes % 2 != 0)
3096 output_asm_insn ("stb %%r0,0(%0)", operands);
3097
3098 return "";
3099
3100 default:
3101 gcc_unreachable ();
3102 }
3103 }
3104
3105 /* Count the number of insns necessary to handle this block clear.
3106
3107 Basic structure is the same as the block clear above, except that
3108 we count insns rather than emit them. */
3109
3110 static int
3111 compute_clrmem_length (rtx_insn *insn)
3112 {
3113 rtx pat = PATTERN (insn);
3114 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3115 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3116 unsigned int n_insns = 0;
3117
3118 /* We can't clear more than a word at a time because the PA
3119 has no integer move insns wider than a word. */
3120 if (align > (TARGET_64BIT ? 8 : 4))
3121 align = (TARGET_64BIT ? 8 : 4);
3122
3123 /* The basic loop. */
3124 n_insns = 4;
3125
3126 /* Residuals. */
3127 if (n_bytes % (2 * align) != 0)
3128 {
3129 if ((n_bytes % (2 * align)) >= align)
3130 n_insns++;
3131
3132 if ((n_bytes % align) != 0)
3133 n_insns++;
3134 }
3135
3136 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3137 return n_insns * 4;
3138 }
3139 \f
3140
3141 const char *
3142 pa_output_and (rtx *operands)
3143 {
3144 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3145 {
3146 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3147 int ls0, ls1, ms0, p, len;
3148
3149 for (ls0 = 0; ls0 < 32; ls0++)
3150 if ((mask & (1 << ls0)) == 0)
3151 break;
3152
3153 for (ls1 = ls0; ls1 < 32; ls1++)
3154 if ((mask & (1 << ls1)) != 0)
3155 break;
3156
3157 for (ms0 = ls1; ms0 < 32; ms0++)
3158 if ((mask & (1 << ms0)) == 0)
3159 break;
3160
3161 gcc_assert (ms0 == 32);
3162
3163 if (ls1 == 32)
3164 {
3165 len = ls0;
3166
3167 gcc_assert (len);
3168
3169 operands[2] = GEN_INT (len);
3170 return "{extru|extrw,u} %1,31,%2,%0";
3171 }
3172 else
3173 {
3174 /* We could use this `depi' for the case above as well, but `depi'
3175 requires one more register file access than an `extru'. */
3176
3177 p = 31 - ls0;
3178 len = ls1 - ls0;
3179
3180 operands[2] = GEN_INT (p);
3181 operands[3] = GEN_INT (len);
3182 return "{depi|depwi} 0,%2,%3,%0";
3183 }
3184 }
3185 else
3186 return "and %1,%2,%0";
3187 }
3188
3189 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3190 storing the result in operands[0]. */
3191 const char *
3192 pa_output_64bit_and (rtx *operands)
3193 {
3194 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3195 {
3196 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3197 int ls0, ls1, ms0, p, len;
3198
3199 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3200 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3201 break;
3202
3203 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3204 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3205 break;
3206
3207 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3208 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3209 break;
3210
3211 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3212
3213 if (ls1 == HOST_BITS_PER_WIDE_INT)
3214 {
3215 len = ls0;
3216
3217 gcc_assert (len);
3218
3219 operands[2] = GEN_INT (len);
3220 return "extrd,u %1,63,%2,%0";
3221 }
3222 else
3223 {
3224 /* We could use this `depdi' for the case above as well, but `depdi'
3225 requires one more register file access than an `extrd,u'. */
3226
3227 p = 63 - ls0;
3228 len = ls1 - ls0;
3229
3230 operands[2] = GEN_INT (p);
3231 operands[3] = GEN_INT (len);
3232 return "depdi 0,%2,%3,%0";
3233 }
3234 }
3235 else
3236 return "and %1,%2,%0";
3237 }
3238
3239 const char *
3240 pa_output_ior (rtx *operands)
3241 {
3242 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3243 int bs0, bs1, p, len;
3244
3245 if (INTVAL (operands[2]) == 0)
3246 return "copy %1,%0";
3247
3248 for (bs0 = 0; bs0 < 32; bs0++)
3249 if ((mask & (1 << bs0)) != 0)
3250 break;
3251
3252 for (bs1 = bs0; bs1 < 32; bs1++)
3253 if ((mask & (1 << bs1)) == 0)
3254 break;
3255
3256 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3257
3258 p = 31 - bs0;
3259 len = bs1 - bs0;
3260
3261 operands[2] = GEN_INT (p);
3262 operands[3] = GEN_INT (len);
3263 return "{depi|depwi} -1,%2,%3,%0";
3264 }
3265
3266 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3267 storing the result in operands[0]. */
3268 const char *
3269 pa_output_64bit_ior (rtx *operands)
3270 {
3271 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3272 int bs0, bs1, p, len;
3273
3274 if (INTVAL (operands[2]) == 0)
3275 return "copy %1,%0";
3276
3277 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3278 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3279 break;
3280
3281 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3282 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3283 break;
3284
3285 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3286 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3287
3288 p = 63 - bs0;
3289 len = bs1 - bs0;
3290
3291 operands[2] = GEN_INT (p);
3292 operands[3] = GEN_INT (len);
3293 return "depdi -1,%2,%3,%0";
3294 }
3295 \f
3296 /* Target hook for assembling integer objects. This code handles
3297 aligned SI and DI integers specially since function references
3298 must be preceded by P%. */
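/* For example (a sketch), an aligned word-sized reference to a
   function `foo' comes out as `.word P%foo' (`.dword P%foo' on 64-bit
   targets), the P% requesting a plabel rather than the raw code
   address; everything else falls through to
   default_assemble_integer.  */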
3299
3300 static bool
3301 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3302 {
3303 bool result;
3304 tree decl = NULL;
3305
3306 /* When we have a SYMBOL_REF with a SYMBOL_REF_DECL, we need to
3307 call assemble_external and set the SYMBOL_REF_DECL to NULL before
3308 calling output_addr_const. Otherwise, it may call assemble_external
3309 in the midst of outputting the assembler code for the SYMBOL_REF.
3310 We restore the SYMBOL_REF_DECL after the output is done. */
3311 if (GET_CODE (x) == SYMBOL_REF)
3312 {
3313 decl = SYMBOL_REF_DECL (x);
3314 if (decl)
3315 {
3316 assemble_external (decl);
3317 SET_SYMBOL_REF_DECL (x, NULL);
3318 }
3319 }
3320
3321 if (size == UNITS_PER_WORD
3322 && aligned_p
3323 && function_label_operand (x, VOIDmode))
3324 {
3325 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3326
3327 /* We don't want an OPD when generating fast indirect calls. */
3328 if (!TARGET_FAST_INDIRECT_CALLS)
3329 fputs ("P%", asm_out_file);
3330
3331 output_addr_const (asm_out_file, x);
3332 fputc ('\n', asm_out_file);
3333 result = true;
3334 }
3335 else
3336 result = default_assemble_integer (x, size, aligned_p);
3337
3338 if (decl)
3339 SET_SYMBOL_REF_DECL (x, decl);
3340
3341 return result;
3342 }
3343 \f
3344 /* Output an ascii string. */
3345 void
3346 pa_output_ascii (FILE *file, const char *p, int size)
3347 {
3348 int i;
3349 int chars_output;
3350 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3351
3352 /* The HP assembler can only take strings of 256 characters at one
3353 time. This is a limitation on input line length, *not* the
3354 length of the string. Sigh. Even worse, it seems that the
3355 restriction is in number of input characters (see \xnn &
3356 \whatever). So we have to do this very carefully. */
3357
3358 fputs ("\t.STRING \"", file);
3359
3360 chars_output = 0;
3361 for (i = 0; i < size; i += 4)
3362 {
3363 int co = 0;
3364 int io = 0;
3365 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3366 {
3367 register unsigned int c = (unsigned char) p[i + io];
3368
3369 if (c == '\"' || c == '\\')
3370 partial_output[co++] = '\\';
3371 if (c >= ' ' && c < 0177)
3372 partial_output[co++] = c;
3373 else
3374 {
3375 unsigned int hexd;
3376 partial_output[co++] = '\\';
3377 partial_output[co++] = 'x';
3378 hexd = c / 16 - 0 + '0';
3379 if (hexd > '9')
3380 hexd -= '9' - 'a' + 1;
3381 partial_output[co++] = hexd;
3382 hexd = c % 16 - 0 + '0';
3383 if (hexd > '9')
3384 hexd -= '9' - 'a' + 1;
3385 partial_output[co++] = hexd;
3386 }
3387 }
3388 if (chars_output + co > 243)
3389 {
3390 fputs ("\"\n\t.STRING \"", file);
3391 chars_output = 0;
3392 }
3393 fwrite (partial_output, 1, (size_t) co, file);
3394 chars_output += co;
3395 co = 0;
3396 }
3397 fputs ("\"\n", file);
3398 }
3399
3400 /* Try to rewrite floating point comparisons & branches to avoid
3401 useless add,tr insns.
3402
3403 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3404 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3405 first attempt to remove useless add,tr insns. It is zero
3406 for the second pass as reorg sometimes leaves bogus REG_DEAD
3407 notes lying around.
3408
3409 When CHECK_NOTES is zero we can only eliminate add,tr insns
3410 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3411 instructions. */
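/* Roughly (a sketch): a reversed FP branch costs an extra
   `add,tr %r0,%r0,%r0' skip insn after the `ftest', whereas the
   natural sense needs only `ftest' plus the branch. Reversing the
   fcmp condition (via reverse_condition_maybe_unordered) and swapping
   the branch arms turns the former into the latter.  */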
3412 static void
3413 remove_useless_addtr_insns (int check_notes)
3414 {
3415 rtx_insn *insn;
3416 static int pass = 0;
3417
3418 /* This is fairly cheap, so always run it when optimizing. */
3419 if (optimize > 0)
3420 {
3421 int fcmp_count = 0;
3422 int fbranch_count = 0;
3423
3424 /* Walk all the insns in this function looking for fcmp & fbranch
3425 instructions. Keep track of how many of each we find. */
3426 for (insn = get_insns (); insn; insn = next_insn (insn))
3427 {
3428 rtx tmp;
3429
3430 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3431 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3432 continue;
3433
3434 tmp = PATTERN (insn);
3435
3436 /* It must be a set. */
3437 if (GET_CODE (tmp) != SET)
3438 continue;
3439
3440 /* If the destination is CCFP, then we've found an fcmp insn. */
3441 tmp = SET_DEST (tmp);
3442 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3443 {
3444 fcmp_count++;
3445 continue;
3446 }
3447
3448 tmp = PATTERN (insn);
3449 /* If this is an fbranch instruction, bump the fbranch counter. */
3450 if (GET_CODE (tmp) == SET
3451 && SET_DEST (tmp) == pc_rtx
3452 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3453 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3454 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3455 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3456 {
3457 fbranch_count++;
3458 continue;
3459 }
3460 }
3461
3463 /* Find all floating point compare + branch insns. If possible,
3464 reverse the comparison & the branch to avoid add,tr insns. */
3465 for (insn = get_insns (); insn; insn = next_insn (insn))
3466 {
3467 rtx tmp;
3468 rtx_insn *next;
3469
3470 /* Ignore anything that isn't an INSN. */
3471 if (! NONJUMP_INSN_P (insn))
3472 continue;
3473
3474 tmp = PATTERN (insn);
3475
3476 /* It must be a set. */
3477 if (GET_CODE (tmp) != SET)
3478 continue;
3479
3480 /* The destination must be CCFP, which is register zero. */
3481 tmp = SET_DEST (tmp);
3482 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3483 continue;
3484
3485 /* INSN should be a set of CCFP.
3486
3487 See if the result of this insn is used in a reversed FP
3488 conditional branch. If so, reverse our condition and
3489 the branch. Doing so avoids useless add,tr insns. */
3490 next = next_insn (insn);
3491 while (next)
3492 {
3493 /* Jumps, calls and labels stop our search. */
3494 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3495 break;
3496
3497 /* As does another fcmp insn. */
3498 if (NONJUMP_INSN_P (next)
3499 && GET_CODE (PATTERN (next)) == SET
3500 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3501 && REGNO (SET_DEST (PATTERN (next))) == 0)
3502 break;
3503
3504 next = next_insn (next);
3505 }
3506
3507 /* Is NEXT a branch? */
3508 if (next && JUMP_P (next))
3509 {
3510 rtx pattern = PATTERN (next);
3511
3512 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3513 and CCFP dies, then reverse our conditional and the branch
3514 to avoid the add,tr. */
3515 if (GET_CODE (pattern) == SET
3516 && SET_DEST (pattern) == pc_rtx
3517 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3518 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3519 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3520 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3521 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3522 && (fcmp_count == fbranch_count
3523 || (check_notes
3524 && find_regno_note (next, REG_DEAD, 0))))
3525 {
3526 /* Reverse the branch. */
3527 tmp = XEXP (SET_SRC (pattern), 1);
3528 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3529 XEXP (SET_SRC (pattern), 2) = tmp;
3530 INSN_CODE (next) = -1;
3531
3532 /* Reverse our condition. */
3533 tmp = PATTERN (insn);
3534 PUT_CODE (XEXP (tmp, 1),
3535 (reverse_condition_maybe_unordered
3536 (GET_CODE (XEXP (tmp, 1)))));
3537 }
3538 }
3539 }
3540 }
3541
3542 pass = !pass;
3544 }
3545 \f
3546 /* You may have trouble believing this, but this is the 32 bit HP-PA
3547 stack layout. Wow.
3548
3549 Offset Contents
3550
3551 Variable arguments (optional; any number may be allocated)
3552
3553 SP-(4*(N+9)) arg word N
3554 : :
3555 SP-56 arg word 5
3556 SP-52 arg word 4
3557
3558 Fixed arguments (must be allocated; may remain unused)
3559
3560 SP-48 arg word 3
3561 SP-44 arg word 2
3562 SP-40 arg word 1
3563 SP-36 arg word 0
3564
3565 Frame Marker
3566
3567 SP-32 External Data Pointer (DP)
3568 SP-28 External sr4
3569 SP-24 External/stub RP (RP')
3570 SP-20 Current RP
3571 SP-16 Static Link
3572 SP-12 Clean up
3573 SP-8 Calling Stub RP (RP'')
3574 SP-4 Previous SP
3575
3576 Top of Frame
3577
3578 SP-0 Stack Pointer (points to next available address)
3579
3580 */
3581
3582 /* This function saves registers as follows. Registers marked with ' are
3583 this function's registers (as opposed to the previous function's).
3584 If a frame_pointer isn't needed, r4 is saved as a general register;
3585 the space for the frame pointer is still allocated, though, to keep
3586 things simple.
3587
3588
3589 Top of Frame
3590
3591 SP (FP') Previous FP
3592 SP + 4 Alignment filler (sigh)
3593 SP + 8 Space for locals reserved here.
3594 .
3595 .
3596 .
3597 SP + n All call saved register used.
3598 .
3599 .
3600 .
3601 SP + o All call saved fp registers used.
3602 .
3603 .
3604 .
3605 SP + p (SP') points to next available address.
3606
3607 */
3608
3609 /* Global variables set by output_function_prologue(). */
3610 /* Size of frame. Need to know this to emit return insns from
3611 leaf procedures. */
3612 static HOST_WIDE_INT actual_fsize, local_fsize;
3613 static int save_fregs;
3614
3615 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3616 Handle case where DISP > 8k by using the add_high_const patterns.
3617
3618 Note that in the DISP > 8k case, we will leave the high part of the address
3619 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3620
3621 static void
3622 store_reg (int reg, HOST_WIDE_INT disp, int base)
3623 {
3624 rtx dest, src, basereg;
3625 rtx_insn *insn;
3626
3627 src = gen_rtx_REG (word_mode, reg);
3628 basereg = gen_rtx_REG (Pmode, base);
3629 if (VAL_14_BITS_P (disp))
3630 {
3631 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3632 insn = emit_move_insn (dest, src);
3633 }
3634 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3635 {
3636 rtx delta = GEN_INT (disp);
3637 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3638
3639 emit_move_insn (tmpreg, delta);
3640 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3641 if (DO_FRAME_NOTES)
3642 {
3643 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3644 gen_rtx_SET (tmpreg,
3645 gen_rtx_PLUS (Pmode, basereg, delta)));
3646 RTX_FRAME_RELATED_P (insn) = 1;
3647 }
3648 dest = gen_rtx_MEM (word_mode, tmpreg);
3649 insn = emit_move_insn (dest, src);
3650 }
3651 else
3652 {
3653 rtx delta = GEN_INT (disp);
3654 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3655 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3656
3657 emit_move_insn (tmpreg, high);
3658 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3659 insn = emit_move_insn (dest, src);
3660 if (DO_FRAME_NOTES)
3661 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3662 gen_rtx_SET (gen_rtx_MEM (word_mode,
3663 gen_rtx_PLUS (word_mode,
3664 basereg,
3665 delta)),
3666 src));
3667 }
3668
3669 if (DO_FRAME_NOTES)
3670 RTX_FRAME_RELATED_P (insn) = 1;
3671 }
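/* The three cases above correspond roughly to these sequences
   (illustrative sketch, not the exact emitted assembly):

     stw %reg,disp(%base)      ; disp fits in 14 bits

     addil L'disp,%base        ; else: high part of disp into %r1
     stw %reg,R'disp(%r1)      ; low part folded into the store

   plus a 64-bit-only variant that first materializes a full 64-bit
   displacement in %r1 when disp does not even fit in 32 bits. */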
3672
3673 /* Emit RTL to store REG at the memory location specified by BASE and then
3674 add MOD to BASE. MOD must be <= 8k. */
3675
3676 static void
3677 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3678 {
3679 rtx basereg, srcreg, delta;
3680 rtx_insn *insn;
3681
3682 gcc_assert (VAL_14_BITS_P (mod));
3683
3684 basereg = gen_rtx_REG (Pmode, base);
3685 srcreg = gen_rtx_REG (word_mode, reg);
3686 delta = GEN_INT (mod);
3687
3688 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3689 if (DO_FRAME_NOTES)
3690 {
3691 RTX_FRAME_RELATED_P (insn) = 1;
3692
3693 /* RTX_FRAME_RELATED_P must be set on each frame related set
3694 in a parallel with more than one element. */
3695 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3696 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3697 }
3698 }
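/* On 32-bit targets the post_store pattern above is, roughly, a
   single "stwm %reg,mod(%base)" (illustrative): it stores the
   register at *base and bumps base by mod in one instruction, so
   there is no window where the stack pointer has moved but the
   save has not yet happened. */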
3699
3700 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3701 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3702 whether to add a frame note or not.
3703
3704 In the DISP > 8k case, we leave the high part of the address in %r1.
3705 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3706
3707 static void
3708 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3709 {
3710 rtx_insn *insn;
3711
3712 if (VAL_14_BITS_P (disp))
3713 {
3714 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3715 plus_constant (Pmode,
3716 gen_rtx_REG (Pmode, base), disp));
3717 }
3718 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3719 {
3720 rtx basereg = gen_rtx_REG (Pmode, base);
3721 rtx delta = GEN_INT (disp);
3722 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3723
3724 emit_move_insn (tmpreg, delta);
3725 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3726 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3727 if (DO_FRAME_NOTES)
3728 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3729 gen_rtx_SET (tmpreg,
3730 gen_rtx_PLUS (Pmode, basereg, delta)));
3731 }
3732 else
3733 {
3734 rtx basereg = gen_rtx_REG (Pmode, base);
3735 rtx delta = GEN_INT (disp);
3736 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3737
3738 emit_move_insn (tmpreg,
3739 gen_rtx_PLUS (Pmode, basereg,
3740 gen_rtx_HIGH (Pmode, delta)));
3741 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3742 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3743 }
3744
3745 if (DO_FRAME_NOTES && note)
3746 RTX_FRAME_RELATED_P (insn) = 1;
3747 }
3748
3749 HOST_WIDE_INT
3750 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3751 {
3752 int freg_saved = 0;
3753 int i, j;
3754
3755 /* The code in pa_expand_prologue and pa_expand_epilogue must
3756 be consistent with the rounding and size calculation done here.
3757 Change them at the same time. */
3758
3759 /* We do our own stack alignment. First, round the size of the
3760 stack locals up to a word boundary. */
3761 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3762
3763 /* Space for previous frame pointer + filler. If any frame is
3764 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3765 waste some space here for the sake of HP compatibility. The
3766 first slot is only used when the frame pointer is needed. */
3767 if (size || frame_pointer_needed)
3768 size += STARTING_FRAME_OFFSET;
3769
3770 /* If the current function calls __builtin_eh_return, then we need
3771 to allocate stack space for registers that will hold data for
3772 the exception handler. */
3773 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3774 {
3775 unsigned int i;
3776
3777 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3778 continue;
3779 size += i * UNITS_PER_WORD;
3780 }
3781
3782 /* Account for space used by the callee general register saves. */
3783 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3784 if (df_regs_ever_live_p (i))
3785 size += UNITS_PER_WORD;
3786
3787 /* Account for space used by the callee floating point register saves. */
3788 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3789 if (df_regs_ever_live_p (i)
3790 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3791 {
3792 freg_saved = 1;
3793
3794 /* We always save both halves of the FP register, so always
3795 increment the frame size by 8 bytes. */
3796 size += 8;
3797 }
3798
3799 /* If any of the floating registers are saved, account for the
3800 alignment needed for the floating point register save block. */
3801 if (freg_saved)
3802 {
3803 size = (size + 7) & ~7;
3804 if (fregs_live)
3805 *fregs_live = 1;
3806 }
3807
3808 /* The various ABIs include space for the outgoing parameters in the
3809 size of the current function's stack frame. We don't need to align
3810 for the outgoing arguments as their alignment is set by the final
3811 rounding for the frame as a whole. */
3812 size += crtl->outgoing_args_size;
3813
3814 /* Allocate space for the fixed frame marker. This space must be
3815 allocated for any function that makes calls or allocates
3816 stack space. */
3817 if (!crtl->is_leaf || size)
3818 size += TARGET_64BIT ? 48 : 32;
3819
3820 /* Finally, round to the preferred stack boundary. */
3821 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3822 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3823 }
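/* A worked example (illustrative, assuming the usual 32-bit
   parameters: STARTING_FRAME_OFFSET == 8 and a 64-byte preferred
   stack boundary): 40 bytes of locals stay 40 after word rounding,
   +8 gives 48; two callee GR saves add 8 (56); 16 bytes of outgoing
   arguments give 72; the 32-byte frame marker gives 104; and the
   final rounding yields a 128-byte frame. */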
3824
3825 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3826 of memory. If any fpu reg is used in the function, we allocate
3827 such a block here, at the bottom of the frame, just in case it's needed.
3828
3829 If this function is a leaf procedure, then we may choose not
3830 to do a "save" insn. The decision about whether or not
3831 to do this is made in regclass.c. */
3832
3833 static void
3834 pa_output_function_prologue (FILE *file)
3835 {
3836 /* The function's label and associated .PROC must never be
3837 separated and must be output *after* any profiling declarations
3838 to avoid changing spaces/subspaces within a procedure. */
3839 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3840 fputs ("\t.PROC\n", file);
3841
3842 /* pa_expand_prologue does the dirty work now. We just need
3843 to output the assembler directives which denote the start
3844 of a function. */
3845 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3846 if (crtl->is_leaf)
3847 fputs (",NO_CALLS", file);
3848 else
3849 fputs (",CALLS", file);
3850 if (rp_saved)
3851 fputs (",SAVE_RP", file);
3852
3853 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3854 at the beginning of the frame and that it is used as the frame
3855 pointer for the frame. We do this because our current frame
3856 layout doesn't conform to that specified in the HP runtime
3857 documentation and we need a way to indicate to programs such as
3858 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3859 isn't used by HP compilers but is supported by the assembler.
3860 However, SAVE_SP is supposed to indicate that the previous stack
3861 pointer has been saved in the frame marker. */
3862 if (frame_pointer_needed)
3863 fputs (",SAVE_SP", file);
3864
3865 /* Pass on information about the number of callee register saves
3866 performed in the prologue.
3867
3868 The compiler is supposed to pass the highest register number
3869 saved, the assembler then has to adjust that number before
3870 entering it into the unwind descriptor (to account for any
3871 caller saved registers with lower register numbers than the
3872 first callee saved register). */
3873 if (gr_saved)
3874 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3875
3876 if (fr_saved)
3877 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3878
3879 fputs ("\n\t.ENTRY\n", file);
3880
3881 remove_useless_addtr_insns (0);
3882 }
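/* For a typical small non-leaf function, the directives emitted
   above look roughly like this (illustrative sketch; the exact
   flags depend on the frame):

   foo:
           .PROC
           .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=4
           .ENTRY
 */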
3883
3884 void
3885 pa_expand_prologue (void)
3886 {
3887 int merge_sp_adjust_with_store = 0;
3888 HOST_WIDE_INT size = get_frame_size ();
3889 HOST_WIDE_INT offset;
3890 int i;
3891 rtx tmpreg;
3892 rtx_insn *insn;
3893
3894 gr_saved = 0;
3895 fr_saved = 0;
3896 save_fregs = 0;
3897
3898 /* Compute total size for frame pointer, filler, locals and rounding to
3899 the next word boundary. Similar code appears in pa_compute_frame_size
3900 and must be changed in tandem with this code. */
3901 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3902 if (local_fsize || frame_pointer_needed)
3903 local_fsize += STARTING_FRAME_OFFSET;
3904
3905 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3906 if (flag_stack_usage_info)
3907 current_function_static_stack_size = actual_fsize;
3908
3909 /* Compute a few things we will use often. */
3910 tmpreg = gen_rtx_REG (word_mode, 1);
3911
3912 /* Save RP first. The calling conventions manual states RP will
3913 always be stored into the caller's frame at sp - 20 or sp - 16
3914 depending on which ABI is in use. */
3915 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3916 {
3917 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3918 rp_saved = true;
3919 }
3920 else
3921 rp_saved = false;
3922
3923 /* Allocate the local frame and set up the frame pointer if needed. */
3924 if (actual_fsize != 0)
3925 {
3926 if (frame_pointer_needed)
3927 {
3928 /* Copy the old frame pointer temporarily into %r1. Set up the
3929 new stack pointer, then store away the saved old frame pointer
3930 into the stack at sp and at the same time update the stack
3931 pointer by actual_fsize bytes. Two versions: the first
3932 handles small (<8k) frames, the second handles large (>=8k)
3933 frames. */
3934 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3935 if (DO_FRAME_NOTES)
3936 RTX_FRAME_RELATED_P (insn) = 1;
3937
3938 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3939 if (DO_FRAME_NOTES)
3940 RTX_FRAME_RELATED_P (insn) = 1;
3941
3942 if (VAL_14_BITS_P (actual_fsize))
3943 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3944 else
3945 {
3946 /* It is incorrect to store the saved frame pointer at *sp,
3947 then increment sp (writes beyond the current stack boundary).
3948
3949 So instead use stwm to store at *sp and post-increment the
3950 stack pointer as an atomic operation. Then increment sp to
3951 finish allocating the new frame. */
3952 HOST_WIDE_INT adjust1 = 8192 - 64;
3953 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3954
3955 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3956 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3957 adjust2, 1);
3958 }
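/* Illustrative numbers for the split above: with actual_fsize
   == 16384, adjust1 == 8192 - 64 == 8128 (presumably the largest
   64-byte-aligned value that still fits a 14-bit displacement) is
   applied by the post-modify store, and adjust2 == 8256 finishes
   the allocation. */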
3959
3960 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3961 we need to store the previous stack pointer (frame pointer)
3962 into the frame marker on targets that use the HP unwind
3963 library. This allows the HP unwind library to be used to
3964 unwind GCC frames. However, we are not fully compatible
3965 with the HP library because our frame layout differs from
3966 that specified in the HP runtime specification.
3967
3968 We don't want a frame note on this instruction as the frame
3969 marker moves during dynamic stack allocation.
3970
3971 This instruction also serves as a blockage to prevent
3972 register spills from being scheduled before the stack
3973 pointer is raised. This is necessary as we store
3974 registers using the frame pointer as a base register,
3975 and the frame pointer is set before sp is raised. */
3976 if (TARGET_HPUX_UNWIND_LIBRARY)
3977 {
3978 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3979 GEN_INT (TARGET_64BIT ? -8 : -4));
3980
3981 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3982 hard_frame_pointer_rtx);
3983 }
3984 else
3985 emit_insn (gen_blockage ());
3986 }
3987 /* No frame pointer needed. */
3988 else
3989 {
3990 /* In some cases we can perform the first callee register save
3991 and allocating the stack frame at the same time. If so, just
3992 make a note of it and defer allocating the frame until saving
3993 the callee registers. */
3994 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3995 merge_sp_adjust_with_store = 1;
3996 /* Cannot optimize. Adjust the stack frame by actual_fsize
3997 bytes. */
3998 else
3999 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4000 actual_fsize, 1);
4001 }
4002 }
4003
4004 /* Normal register save.
4005
4006 Do not save the frame pointer in the frame_pointer_needed case. It
4007 was done earlier. */
4008 if (frame_pointer_needed)
4009 {
4010 offset = local_fsize;
4011
4012 /* Saving the EH return data registers in the frame is the simplest
4013 way to get the frame unwind information emitted. We put them
4014 just before the general registers. */
4015 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4016 {
4017 unsigned int i, regno;
4018
4019 for (i = 0; ; ++i)
4020 {
4021 regno = EH_RETURN_DATA_REGNO (i);
4022 if (regno == INVALID_REGNUM)
4023 break;
4024
4025 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4026 offset += UNITS_PER_WORD;
4027 }
4028 }
4029
4030 for (i = 18; i >= 4; i--)
4031 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4032 {
4033 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4034 offset += UNITS_PER_WORD;
4035 gr_saved++;
4036 }
4037 /* Account for %r3 which is saved in a special place. */
4038 gr_saved++;
4039 }
4040 /* No frame pointer needed. */
4041 else
4042 {
4043 offset = local_fsize - actual_fsize;
4044
4045 /* Saving the EH return data registers in the frame is the simplest
4046 way to get the frame unwind information emitted. */
4047 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4048 {
4049 unsigned int i, regno;
4050
4051 for (i = 0; ; ++i)
4052 {
4053 regno = EH_RETURN_DATA_REGNO (i);
4054 if (regno == INVALID_REGNUM)
4055 break;
4056
4057 /* If merge_sp_adjust_with_store is nonzero, then we can
4058 optimize the first save. */
4059 if (merge_sp_adjust_with_store)
4060 {
4061 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4062 merge_sp_adjust_with_store = 0;
4063 }
4064 else
4065 store_reg (regno, offset, STACK_POINTER_REGNUM);
4066 offset += UNITS_PER_WORD;
4067 }
4068 }
4069
4070 for (i = 18; i >= 3; i--)
4071 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4072 {
4073 /* If merge_sp_adjust_with_store is nonzero, then we can
4074 optimize the first GR save. */
4075 if (merge_sp_adjust_with_store)
4076 {
4077 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4078 merge_sp_adjust_with_store = 0;
4079 }
4080 else
4081 store_reg (i, offset, STACK_POINTER_REGNUM);
4082 offset += UNITS_PER_WORD;
4083 gr_saved++;
4084 }
4085
4086 /* If we wanted to merge the SP adjustment with a GR save, but we never
4087 did any GR saves, then just emit the adjustment here. */
4088 if (merge_sp_adjust_with_store)
4089 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4090 actual_fsize, 1);
4091 }
4092
4093 /* The hppa calling conventions say that %r19, the pic offset
4094 register, is saved at sp - 32 (in this function's frame)
4095 when generating PIC code. FIXME: What is the correct thing
4096 to do for functions which make no calls and allocate no
4097 frame? Do we need to allocate a frame, or can we just omit
4098 the save? For now we'll just omit the save.
4099
4100 We don't want a note on this insn as the frame marker can
4101 move if there is a dynamic stack allocation. */
4102 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4103 {
4104 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4105
4106 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4107
4108 }
4109
4110 /* Align pointer properly (doubleword boundary). */
4111 offset = (offset + 7) & ~7;
4112
4113 /* Floating point register store. */
4114 if (save_fregs)
4115 {
4116 rtx base;
4117
4118 /* First get the frame or stack pointer to the start of the FP register
4119 save area. */
4120 if (frame_pointer_needed)
4121 {
4122 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4123 base = hard_frame_pointer_rtx;
4124 }
4125 else
4126 {
4127 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4128 base = stack_pointer_rtx;
4129 }
4130
4131 /* Now actually save the FP registers. */
4132 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4133 {
4134 if (df_regs_ever_live_p (i)
4135 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4136 {
4137 rtx addr, reg;
4138 rtx_insn *insn;
4139 addr = gen_rtx_MEM (DFmode,
4140 gen_rtx_POST_INC (word_mode, tmpreg));
4141 reg = gen_rtx_REG (DFmode, i);
4142 insn = emit_move_insn (addr, reg);
4143 if (DO_FRAME_NOTES)
4144 {
4145 RTX_FRAME_RELATED_P (insn) = 1;
4146 if (TARGET_64BIT)
4147 {
4148 rtx mem = gen_rtx_MEM (DFmode,
4149 plus_constant (Pmode, base,
4150 offset));
4151 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4152 gen_rtx_SET (mem, reg));
4153 }
4154 else
4155 {
4156 rtx meml = gen_rtx_MEM (SFmode,
4157 plus_constant (Pmode, base,
4158 offset));
4159 rtx memr = gen_rtx_MEM (SFmode,
4160 plus_constant (Pmode, base,
4161 offset + 4));
4162 rtx regl = gen_rtx_REG (SFmode, i);
4163 rtx regr = gen_rtx_REG (SFmode, i + 1);
4164 rtx setl = gen_rtx_SET (meml, regl);
4165 rtx setr = gen_rtx_SET (memr, regr);
4166 rtvec vec;
4167
4168 RTX_FRAME_RELATED_P (setl) = 1;
4169 RTX_FRAME_RELATED_P (setr) = 1;
4170 vec = gen_rtvec (2, setl, setr);
4171 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4172 gen_rtx_SEQUENCE (VOIDmode, vec));
4173 }
4174 }
4175 offset += GET_MODE_SIZE (DFmode);
4176 fr_saved++;
4177 }
4178 }
4179 }
4180 }
4181
4182 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4183 Handle case where DISP > 8k by using the add_high_const patterns. */
4184
4185 static void
4186 load_reg (int reg, HOST_WIDE_INT disp, int base)
4187 {
4188 rtx dest = gen_rtx_REG (word_mode, reg);
4189 rtx basereg = gen_rtx_REG (Pmode, base);
4190 rtx src;
4191
4192 if (VAL_14_BITS_P (disp))
4193 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4194 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4195 {
4196 rtx delta = GEN_INT (disp);
4197 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4198
4199 emit_move_insn (tmpreg, delta);
4200 if (TARGET_DISABLE_INDEXING)
4201 {
4202 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4203 src = gen_rtx_MEM (word_mode, tmpreg);
4204 }
4205 else
4206 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4207 }
4208 else
4209 {
4210 rtx delta = GEN_INT (disp);
4211 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4212 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4213
4214 emit_move_insn (tmpreg, high);
4215 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4216 }
4217
4218 emit_move_insn (dest, src);
4219 }
4220
4221 /* Update the total code bytes output to the text section. */
4222
4223 static void
4224 update_total_code_bytes (unsigned int nbytes)
4225 {
4226 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4227 && !IN_NAMED_SECTION_P (cfun->decl))
4228 {
4229 unsigned int old_total = total_code_bytes;
4230
4231 total_code_bytes += nbytes;
4232
4233 /* Be prepared to handle overflows. */
4234 if (old_total > total_code_bytes)
4235 total_code_bytes = UINT_MAX;
4236 }
4237 }
4238
4239 /* This function generates the assembly code for function exit.
4240 Args are as for output_function_prologue ().
4241
4242 The function epilogue should not depend on the current stack
4243 pointer! It should use the frame pointer only. This is mandatory
4244 because of alloca; we also take advantage of it to omit stack
4245 adjustments before returning. */
4246
4247 static void
4248 pa_output_function_epilogue (FILE *file)
4249 {
4250 rtx_insn *insn = get_last_insn ();
4251 bool extra_nop;
4252
4253 /* pa_expand_epilogue does the dirty work now. We just need
4254 to output the assembler directives which denote the end
4255 of a function.
4256
4257 To make debuggers happy, emit a nop if the epilogue was completely
4258 eliminated due to a volatile call as the last insn in the
4259 current function. That way the return address (in %r2) will
4260 always point to a valid instruction in the current function. */
4261
4262 /* Get the last real insn. */
4263 if (NOTE_P (insn))
4264 insn = prev_real_insn (insn);
4265
4266 /* If it is a sequence, then look inside. */
4267 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4268 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4269
4270 /* If insn is a CALL_INSN, then it must be a call to a volatile
4271 function (otherwise there would be epilogue insns). */
4272 if (insn && CALL_P (insn))
4273 {
4274 fputs ("\tnop\n", file);
4275 extra_nop = true;
4276 }
4277 else
4278 extra_nop = false;
4279
4280 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4281
4282 if (TARGET_SOM && TARGET_GAS)
4283 {
4284 /* We are done with this subspace except possibly for some additional
4285 debug information. Forget that we are in this subspace to ensure
4286 that the next function is output in its own subspace. */
4287 in_section = NULL;
4288 cfun->machine->in_nsubspa = 2;
4289 }
4290
4291 /* Thunks do their own insn accounting. */
4292 if (cfun->is_thunk)
4293 return;
4294
4295 if (INSN_ADDRESSES_SET_P ())
4296 {
4297 last_address = extra_nop ? 4 : 0;
4298 insn = get_last_nonnote_insn ();
4299 if (insn)
4300 {
4301 last_address += INSN_ADDRESSES (INSN_UID (insn));
4302 if (INSN_P (insn))
4303 last_address += insn_default_length (insn);
4304 }
4305 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4306 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4307 }
4308 else
4309 last_address = UINT_MAX;
4310
4311 /* Finally, update the total number of code bytes output so far. */
4312 update_total_code_bytes (last_address);
4313 }
4314
4315 void
4316 pa_expand_epilogue (void)
4317 {
4318 rtx tmpreg;
4319 HOST_WIDE_INT offset;
4320 HOST_WIDE_INT ret_off = 0;
4321 int i;
4322 int merge_sp_adjust_with_load = 0;
4323
4324 /* We will use this often. */
4325 tmpreg = gen_rtx_REG (word_mode, 1);
4326
4327 /* Try to restore RP early to avoid load/use interlocks when
4328 RP gets used in the return (bv) instruction. This appears to still
4329 be necessary even when we schedule the prologue and epilogue. */
4330 if (rp_saved)
4331 {
4332 ret_off = TARGET_64BIT ? -16 : -20;
4333 if (frame_pointer_needed)
4334 {
4335 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4336 ret_off = 0;
4337 }
4338 else
4339 {
4340 /* No frame pointer, and stack is smaller than 8k. */
4341 if (VAL_14_BITS_P (ret_off - actual_fsize))
4342 {
4343 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4344 ret_off = 0;
4345 }
4346 }
4347 }
4348
4349 /* General register restores. */
4350 if (frame_pointer_needed)
4351 {
4352 offset = local_fsize;
4353
4354 /* If the current function calls __builtin_eh_return, then we need
4355 to restore the saved EH data registers. */
4356 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4357 {
4358 unsigned int i, regno;
4359
4360 for (i = 0; ; ++i)
4361 {
4362 regno = EH_RETURN_DATA_REGNO (i);
4363 if (regno == INVALID_REGNUM)
4364 break;
4365
4366 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4367 offset += UNITS_PER_WORD;
4368 }
4369 }
4370
4371 for (i = 18; i >= 4; i--)
4372 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4373 {
4374 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4375 offset += UNITS_PER_WORD;
4376 }
4377 }
4378 else
4379 {
4380 offset = local_fsize - actual_fsize;
4381
4382 /* If the current function calls __builtin_eh_return, then we need
4383 to restore the saved EH data registers. */
4384 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4385 {
4386 unsigned int i, regno;
4387
4388 for (i = 0; ; ++i)
4389 {
4390 regno = EH_RETURN_DATA_REGNO (i);
4391 if (regno == INVALID_REGNUM)
4392 break;
4393
4394 /* Only for the first load.
4395 merge_sp_adjust_with_load holds the register load
4396 with which we will merge the sp adjustment. */
4397 if (merge_sp_adjust_with_load == 0
4398 && local_fsize == 0
4399 && VAL_14_BITS_P (-actual_fsize))
4400 merge_sp_adjust_with_load = regno;
4401 else
4402 load_reg (regno, offset, STACK_POINTER_REGNUM);
4403 offset += UNITS_PER_WORD;
4404 }
4405 }
4406
4407 for (i = 18; i >= 3; i--)
4408 {
4409 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4410 {
4411 /* Only for the first load.
4412 merge_sp_adjust_with_load holds the register load
4413 with which we will merge the sp adjustment. */
4414 if (merge_sp_adjust_with_load == 0
4415 && local_fsize == 0
4416 && VAL_14_BITS_P (-actual_fsize))
4417 merge_sp_adjust_with_load = i;
4418 else
4419 load_reg (i, offset, STACK_POINTER_REGNUM);
4420 offset += UNITS_PER_WORD;
4421 }
4422 }
4423 }
4424
4425 /* Align pointer properly (doubleword boundary). */
4426 offset = (offset + 7) & ~7;
4427
4428 /* FP register restores. */
4429 if (save_fregs)
4430 {
4431 /* Adjust the register to index off of. */
4432 if (frame_pointer_needed)
4433 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4434 else
4435 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4436
4437 /* Actually do the restores now. */
4438 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4439 if (df_regs_ever_live_p (i)
4440 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4441 {
4442 rtx src = gen_rtx_MEM (DFmode,
4443 gen_rtx_POST_INC (word_mode, tmpreg));
4444 rtx dest = gen_rtx_REG (DFmode, i);
4445 emit_move_insn (dest, src);
4446 }
4447 }
4448
4449 /* Emit a blockage insn here to keep these insns from being moved to
4450 an earlier spot in the epilogue, or into the main instruction stream.
4451
4452 This is necessary as we must not cut the stack back before all the
4453 restores are finished. */
4454 emit_insn (gen_blockage ());
4455
4456 /* Reset stack pointer (and possibly frame pointer). The stack
4457 pointer is initially set to fp + 64 to avoid a race condition. */
4458 if (frame_pointer_needed)
4459 {
4460 rtx delta = GEN_INT (-64);
4461
4462 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4463 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4464 stack_pointer_rtx, delta));
4465 }
4466 /* If we were deferring a callee register restore, do it now. */
4467 else if (merge_sp_adjust_with_load)
4468 {
4469 rtx delta = GEN_INT (-actual_fsize);
4470 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4471
4472 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4473 }
4474 else if (actual_fsize != 0)
4475 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4476 - actual_fsize, 0);
4477
4478 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4479 frame greater than 8k), do so now. */
4480 if (ret_off != 0)
4481 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4482
4483 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4484 {
4485 rtx sa = EH_RETURN_STACKADJ_RTX;
4486
4487 emit_insn (gen_blockage ());
4488 emit_insn (TARGET_64BIT
4489 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4490 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4491 }
4492 }
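/* The frame-pointer reset in pa_expand_epilogue above is, roughly,
   "ldo 64(%fp),%sp" followed by the pre-modify load
   "ldw,mb -64(%sp),%fp" (illustrative sketch): %sp first lands 64
   bytes above the old frame pointer, and the single load then both
   restores %fp and drops %sp onto the old frame pointer, so there
   is no window where %sp points below live data. */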
4493
4494 bool
4495 pa_can_use_return_insn (void)
4496 {
4497 if (!reload_completed)
4498 return false;
4499
4500 if (frame_pointer_needed)
4501 return false;
4502
4503 if (df_regs_ever_live_p (2))
4504 return false;
4505
4506 if (crtl->profile)
4507 return false;
4508
4509 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4510 }
4511
4512 rtx
4513 hppa_pic_save_rtx (void)
4514 {
4515 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4516 }
4517
4518 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4519 #define NO_DEFERRED_PROFILE_COUNTERS 0
4520 #endif
4521
4522
4523 /* Vector of funcdef numbers. */
4524 static vec<int> funcdef_nos;
4525
4526 /* Output deferred profile counters. */
4527 static void
4528 output_deferred_profile_counters (void)
4529 {
4530 unsigned int i;
4531 int align, n;
4532
4533 if (funcdef_nos.is_empty ())
4534 return;
4535
4536 switch_to_section (data_section);
4537 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4538 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4539
4540 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4541 {
4542 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4543 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4544 }
4545
4546 funcdef_nos.release ();
4547 }
4548
4549 void
4550 hppa_profile_hook (int label_no)
4551 {
4552 /* We use SImode for the address of the function in both 32 and
4553 64-bit code to avoid having to provide DImode versions of the
4554 lcla2 and load_offset_label_address insn patterns. */
4555 rtx reg = gen_reg_rtx (SImode);
4556 rtx_code_label *label_rtx = gen_label_rtx ();
4557 rtx mcount = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "_mcount"));
4558 int reg_parm_stack_space = REG_PARM_STACK_SPACE (NULL_TREE);
4559 rtx arg_bytes, begin_label_rtx;
4560 rtx_insn *call_insn;
4561 char begin_label_name[16];
4562 bool use_mcount_pcrel_call;
4563
4564 /* If we can reach _mcount with a pc-relative call, we can optimize
4565 loading the address of the current function. This requires linker
4566 long branch stub support. */
4567 if (!TARGET_PORTABLE_RUNTIME
4568 && !TARGET_LONG_CALLS
4569 && (TARGET_SOM || flag_function_sections))
4570 use_mcount_pcrel_call = true;
4571 else
4572 use_mcount_pcrel_call = false;
4573
4574 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4575 label_no);
4576 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4577
4578 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4579
4580 if (!use_mcount_pcrel_call)
4581 {
4582 /* The address of the function is loaded into %r25 with an instruction-
4583 relative sequence that avoids the use of relocations. The sequence
4584 is split so that the load_offset_label_address instruction can
4585 occupy the delay slot of the call to _mcount. */
4586 if (TARGET_PA_20)
4587 emit_insn (gen_lcla2 (reg, label_rtx));
4588 else
4589 emit_insn (gen_lcla1 (reg, label_rtx));
4590
4591 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4592 reg,
4593 begin_label_rtx,
4594 label_rtx));
4595 }
4596
4597 if (!NO_DEFERRED_PROFILE_COUNTERS)
4598 {
4599 rtx count_label_rtx, addr, r24;
4600 char count_label_name[16];
4601
4602 funcdef_nos.safe_push (label_no);
4603 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4604 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode,
4605 ggc_strdup (count_label_name));
4606
4607 addr = force_reg (Pmode, count_label_rtx);
4608 r24 = gen_rtx_REG (Pmode, 24);
4609 emit_move_insn (r24, addr);
4610
4611 arg_bytes = GEN_INT (TARGET_64BIT ? 24 : 12);
4612 if (use_mcount_pcrel_call)
4613 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4614 begin_label_rtx));
4615 else
4616 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4617
4618 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4619 }
4620 else
4621 {
4622 arg_bytes = GEN_INT (TARGET_64BIT ? 16 : 8);
4623 if (use_mcount_pcrel_call)
4624 call_insn = emit_call_insn (gen_call_mcount (mcount, arg_bytes,
4625 begin_label_rtx));
4626 else
4627 call_insn = emit_call_insn (gen_call (mcount, arg_bytes));
4628 }
4629
4630 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4631 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4632
4633 /* Indicate the _mcount call cannot throw, nor will it execute a
4634 non-local goto. */
4635 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4636
4637 /* Allocate space for fixed arguments. */
4638 if (reg_parm_stack_space > crtl->outgoing_args_size)
4639 crtl->outgoing_args_size = reg_parm_stack_space;
4640 }
4641
4642 /* Fetch the return address for the frame COUNT steps up from
4643 the current frame, after the prologue. FRAMEADDR is the
4644 frame pointer of the COUNT frame.
4645
4646 We want to ignore any export stub remnants here. To handle this,
4647 we examine the code at the return address, and if it is an export
4648 stub, we return a memory rtx for the stub return address stored
4649 at frame-24.
4650
4651 The value returned is used in two different ways:
4652
4653 1. To find a function's caller.
4654
4655 2. To change the return address for a function.
4656
4657 This function handles most instances of case 1; however, it will
4658 fail if there are two levels of stubs to execute on the return
4659 path. The only way I believe that can happen is if the return value
4660 needs a parameter relocation, which never happens for C code.
4661
4662 This function handles most instances of case 2; however, it will
4663 fail if we did not originally have stub code on the return path
4664 but will need stub code on the new return path. This can happen if
4665 the caller & callee are both in the main program, but the new
4666 return location is in a shared library. */
4667
4668 rtx
4669 pa_return_addr_rtx (int count, rtx frameaddr)
4670 {
4671 rtx label;
4672 rtx rp;
4673 rtx saved_rp;
4674 rtx ins;
4675
4676 /* The instruction stream at the return address of a PA1.X export stub is:
4677
4678 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4679 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4680 0x00011820 | stub+16: mtsp r1,sr0
4681 0xe0400002 | stub+20: be,n 0(sr0,rp)
4682
4683 0xe0400002 must be specified as -532676606 so that it won't be
4684 rejected as an invalid immediate operand on 64-bit hosts.
4685
4686 The instruction stream at the return address of a PA2.0 export stub is:
4687
4688 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4689 0xe840d002 | stub+12: bve,n (rp)
4690 */
4691
4692 HOST_WIDE_INT insns[4];
4693 int i, len;
4694
4695 if (count != 0)
4696 return NULL_RTX;
4697
4698 rp = get_hard_reg_initial_val (Pmode, 2);
4699
4700 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4701 return rp;
4702
4703 /* If there is no export stub then just use the value saved from
4704 the return pointer register. */
4705
4706 saved_rp = gen_reg_rtx (Pmode);
4707 emit_move_insn (saved_rp, rp);
4708
4709 /* Get pointer to the instruction stream. We have to mask out the
4710 privilege level from the two low order bits of the return address
4711 pointer here so that ins will point to the start of the first
4712 instruction that would have been executed if we returned. */
4713 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4714 label = gen_label_rtx ();
4715
4716 if (TARGET_PA_20)
4717 {
4718 insns[0] = 0x4bc23fd1;
4719 insns[1] = -398405630;
4720 len = 2;
4721 }
4722 else
4723 {
4724 insns[0] = 0x4bc23fd1;
4725 insns[1] = 0x004010a1;
4726 insns[2] = 0x00011820;
4727 insns[3] = -532676606;
4728 len = 4;
4729 }
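/* Like 0xe0400002 above, the PA2.0 terminator must be written as a
   negative decimal: -398405630 is 0xe840d002 ("bve,n (rp)")
   reinterpreted as a signed 32-bit value. */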
4730
4731 /* Check the instruction stream at the normal return address for the
4732 export stub. If it is an export stub, then our return address is
4733 really in -24[frameaddr]. */
4734
4735 for (i = 0; i < len; i++)
4736 {
4737 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4738 rtx op1 = GEN_INT (insns[i]);
4739 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4740 }
4741
4742 /* Here we know that our return address points into an export
4743 stub. We don't want to return the address within the stub,
4744 but rather the original return address the stub saved. That
4745 return address is stored at -24[frameaddr]. */
4746
4747 emit_move_insn (saved_rp,
4748 gen_rtx_MEM (Pmode,
4749 memory_address (Pmode,
4750 plus_constant (Pmode, frameaddr,
4751 -24))));
4752
4753 emit_label (label);
4754
4755 return saved_rp;
4756 }
4757
4758 void
4759 pa_emit_bcond_fp (rtx operands[])
4760 {
4761 enum rtx_code code = GET_CODE (operands[0]);
4762 rtx operand0 = operands[1];
4763 rtx operand1 = operands[2];
4764 rtx label = operands[3];
4765
4766 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4767 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4768
4769 emit_jump_insn (gen_rtx_SET (pc_rtx,
4770 gen_rtx_IF_THEN_ELSE (VOIDmode,
4771 gen_rtx_fmt_ee (NE,
4772 VOIDmode,
4773 gen_rtx_REG (CCFPmode, 0),
4774 const0_rtx),
4775 gen_rtx_LABEL_REF (VOIDmode, label),
4776 pc_rtx)));
4777
4778 }
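/* The two emits above produce RTL of roughly this shape
   (illustrative sketch):

     (set (reg:CCFP 0) (code:CCFP operand0 operand1))
     (set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
                             (label_ref label)
                             (pc)))

   i.e. a compare into CCFP register zero followed by a conditional
   jump on its result. */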
4779
4780 /* Adjust the cost of a scheduling dependency. Return the new cost of
4781 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4782
4783 static int
4784 pa_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4785 unsigned int)
4786 {
4787 enum attr_type attr_type;
4788
4789 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4790 true dependencies, as they are now described with bypasses. */
4791 if (pa_cpu >= PROCESSOR_8000 || dep_type == 0)
4792 return cost;
4793
4794 if (! recog_memoized (insn))
4795 return 0;
4796
4797 attr_type = get_attr_type (insn);
4798
4799 switch (dep_type)
4800 {
4801 case REG_DEP_ANTI:
4802 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4803 cycles later. */
4804
4805 if (attr_type == TYPE_FPLOAD)
4806 {
4807 rtx pat = PATTERN (insn);
4808 rtx dep_pat = PATTERN (dep_insn);
4809 if (GET_CODE (pat) == PARALLEL)
4810 {
4811 /* This happens for the fldXs,mb patterns. */
4812 pat = XVECEXP (pat, 0, 0);
4813 }
4814 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4815 /* If this happens, we have to extend this to schedule
4816 optimally. Return 0 for now. */
4817 return 0;
4818
4819 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4820 {
4821 if (! recog_memoized (dep_insn))
4822 return 0;
4823 switch (get_attr_type (dep_insn))
4824 {
4825 case TYPE_FPALU:
4826 case TYPE_FPMULSGL:
4827 case TYPE_FPMULDBL:
4828 case TYPE_FPDIVSGL:
4829 case TYPE_FPDIVDBL:
4830 case TYPE_FPSQRTSGL:
4831 case TYPE_FPSQRTDBL:
4832 /* A fpload can't be issued until one cycle before a
4833 preceding arithmetic operation has finished if
4834 the target of the fpload is any of the sources
4835 (or destination) of the arithmetic operation. */
4836 return insn_default_latency (dep_insn) - 1;
4837
4838 default:
4839 return 0;
4840 }
4841 }
4842 }
4843 else if (attr_type == TYPE_FPALU)
4844 {
4845 rtx pat = PATTERN (insn);
4846 rtx dep_pat = PATTERN (dep_insn);
4847 if (GET_CODE (pat) == PARALLEL)
4848 {
4849 /* This happens for the fldXs,mb patterns. */
4850 pat = XVECEXP (pat, 0, 0);
4851 }
4852 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4853 /* If this happens, we have to extend this to schedule
4854 optimally. Return 0 for now. */
4855 return 0;
4856
4857 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4858 {
4859 if (! recog_memoized (dep_insn))
4860 return 0;
4861 switch (get_attr_type (dep_insn))
4862 {
4863 case TYPE_FPDIVSGL:
4864 case TYPE_FPDIVDBL:
4865 case TYPE_FPSQRTSGL:
4866 case TYPE_FPSQRTDBL:
4867 /* An ALU flop can't be issued until two cycles before a
4868 preceding divide or sqrt operation has finished if
4869 the target of the ALU flop is any of the sources
4870 (or destination) of the divide or sqrt operation. */
4871 return insn_default_latency (dep_insn) - 2;
4872
4873 default:
4874 return 0;
4875 }
4876 }
4877 }
4878
4879 /* For other anti dependencies, the cost is 0. */
4880 return 0;
4881
4882 case REG_DEP_OUTPUT:
4883 /* Output dependency; DEP_INSN writes a register that INSN writes some
4884 cycles later. */
4885 if (attr_type == TYPE_FPLOAD)
4886 {
4887 rtx pat = PATTERN (insn);
4888 rtx dep_pat = PATTERN (dep_insn);
4889 if (GET_CODE (pat) == PARALLEL)
4890 {
4891 /* This happens for the fldXs,mb patterns. */
4892 pat = XVECEXP (pat, 0, 0);
4893 }
4894 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4895 /* If this happens, we have to extend this to schedule
4896 optimally. Return 0 for now. */
4897 return 0;
4898
4899 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4900 {
4901 if (! recog_memoized (dep_insn))
4902 return 0;
4903 switch (get_attr_type (dep_insn))
4904 {
4905 case TYPE_FPALU:
4906 case TYPE_FPMULSGL:
4907 case TYPE_FPMULDBL:
4908 case TYPE_FPDIVSGL:
4909 case TYPE_FPDIVDBL:
4910 case TYPE_FPSQRTSGL:
4911 case TYPE_FPSQRTDBL:
4912 /* A fpload can't be issued until one cycle before a
4913 preceding arithmetic operation has finished if
4914 the target of the fpload is the destination of the
4915 arithmetic operation.
4916
4917 Exception: For PA7100LC, PA7200 and PA7300, the cost
4918 is 3 cycles, unless they bundle together. We also
4919 pay the penalty if the second insn is a fpload. */
4920 return insn_default_latency (dep_insn) - 1;
4921
4922 default:
4923 return 0;
4924 }
4925 }
4926 }
4927 else if (attr_type == TYPE_FPALU)
4928 {
4929 rtx pat = PATTERN (insn);
4930 rtx dep_pat = PATTERN (dep_insn);
4931 if (GET_CODE (pat) == PARALLEL)
4932 {
4933 /* This happens for the fldXs,mb patterns. */
4934 pat = XVECEXP (pat, 0, 0);
4935 }
4936 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4937 /* If this happens, we have to extend this to schedule
4938 optimally. Return 0 for now. */
4939 return 0;
4940
4941 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4942 {
4943 if (! recog_memoized (dep_insn))
4944 return 0;
4945 switch (get_attr_type (dep_insn))
4946 {
4947 case TYPE_FPDIVSGL:
4948 case TYPE_FPDIVDBL:
4949 case TYPE_FPSQRTSGL:
4950 case TYPE_FPSQRTDBL:
4951 /* An ALU flop can't be issued until two cycles before a
4952 preceding divide or sqrt operation has finished if
4953 the target of the ALU flop is also the target of
4954 the divide or sqrt operation. */
4955 return insn_default_latency (dep_insn) - 2;
4956
4957 default:
4958 return 0;
4959 }
4960 }
4961 }
4962
4963 /* For other output dependencies, the cost is 0. */
4964 return 0;
4965
4966 default:
4967 gcc_unreachable ();
4968 }
4969 }
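/* A worked example (illustrative): if DEP_INSN is an fpdiv with a
   default latency of 8 cycles and INSN is an anti-dependent fpload
   whose target overlaps the divide's operands, the code above
   returns a cost of 8 - 1 = 7, letting the load issue one cycle
   before the divide finishes; the analogous FP ALU case uses
   latency - 2. */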
4970
4971 /* Adjust scheduling priorities. We use this to try and keep addil
4972 and the next use of %r1 close together. */
4973 static int
4974 pa_adjust_priority (rtx_insn *insn, int priority)
4975 {
4976 rtx set = single_set (insn);
4977 rtx src, dest;
4978 if (set)
4979 {
4980 src = SET_SRC (set);
4981 dest = SET_DEST (set);
4982 if (GET_CODE (src) == LO_SUM
4983 && symbolic_operand (XEXP (src, 1), VOIDmode)
4984 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4985 priority >>= 3;
4986
4987 else if (GET_CODE (src) == MEM
4988 && GET_CODE (XEXP (src, 0)) == LO_SUM
4989 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4990 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4991 priority >>= 1;
4992
4993 else if (GET_CODE (dest) == MEM
4994 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4995 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4996 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4997 priority >>= 3;
4998 }
4999 return priority;
5000 }
5001
5002 /* The 700 can only issue a single insn at a time.
5003 The 7XXX processors can issue two insns at a time.
5004 The 8000 can issue 4 insns at a time. */
5005 static int
5006 pa_issue_rate (void)
5007 {
5008 switch (pa_cpu)
5009 {
5010 case PROCESSOR_700: return 1;
5011 case PROCESSOR_7100: return 2;
5012 case PROCESSOR_7100LC: return 2;
5013 case PROCESSOR_7200: return 2;
5014 case PROCESSOR_7300: return 2;
5015 case PROCESSOR_8000: return 4;
5016
5017 default:
5018 gcc_unreachable ();
5019 }
5020 }
5021
5022
5023
5024 /* Return any length plus adjustment needed by INSN which already has
5025 its length computed as LENGTH. Return LENGTH if no adjustment is
5026 necessary.
5027
5028 Also compute the length of an inline block move here as it is too
5029 complicated to express as a length attribute in pa.md. */
5030 int
5031 pa_adjust_insn_length (rtx_insn *insn, int length)
5032 {
5033 rtx pat = PATTERN (insn);
5034
5035 /* If length is negative or undefined, provide initial length. */
5036 if ((unsigned int) length >= INT_MAX)
5037 {
5038 if (GET_CODE (pat) == SEQUENCE)
5039 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
5040
5041 switch (get_attr_type (insn))
5042 {
5043 case TYPE_MILLI:
5044 length = pa_attr_length_millicode_call (insn);
5045 break;
5046 case TYPE_CALL:
5047 length = pa_attr_length_call (insn, 0);
5048 break;
5049 case TYPE_SIBCALL:
5050 length = pa_attr_length_call (insn, 1);
5051 break;
5052 case TYPE_DYNCALL:
5053 length = pa_attr_length_indirect_call (insn);
5054 break;
5055 case TYPE_SH_FUNC_ADRS:
5056 length = pa_attr_length_millicode_call (insn) + 20;
5057 break;
5058 default:
5059 gcc_unreachable ();
5060 }
5061 }
5062
5063 /* Block move pattern. */
5064 if (NONJUMP_INSN_P (insn)
5065 && GET_CODE (pat) == PARALLEL
5066 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5067 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5068 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
5069 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
5070 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
5071 length += compute_movmem_length (insn) - 4;
5072 /* Block clear pattern. */
5073 else if (NONJUMP_INSN_P (insn)
5074 && GET_CODE (pat) == PARALLEL
5075 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5076 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5077 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5078 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5079 length += compute_clrmem_length (insn) - 4;
5080 /* Conditional branch with an unfilled delay slot. */
5081 else if (JUMP_P (insn) && ! simplejump_p (insn))
5082 {
5083 /* Adjust a short backwards conditional with an unfilled delay slot. */
5084 if (GET_CODE (pat) == SET
5085 && length == 4
5086 && JUMP_LABEL (insn) != NULL_RTX
5087 && ! forward_branch_p (insn))
5088 length += 4;
5089 else if (GET_CODE (pat) == PARALLEL
5090 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5091 && length == 4)
5092 length += 4;
5093 /* Adjust dbra insn with short backwards conditional branch with
5094 unfilled delay slot -- only for the case where the counter is
5095 in a general register. */
5096 else if (GET_CODE (pat) == PARALLEL
5097 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5098 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5099 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5100 && length == 4
5101 && ! forward_branch_p (insn))
5102 length += 4;
5103 }
5104 return length;
5105 }
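/* A worked example (illustrative): a short backward conditional
   branch with an unfilled delay slot enters with length 4 and
   leaves with length 8, reserving space for the extra instruction
   the output routines emit for the unfilled slot. */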
5106
5107 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5108
5109 static bool
5110 pa_print_operand_punct_valid_p (unsigned char code)
5111 {
5112 if (code == '@'
5113 || code == '#'
5114 || code == '*'
5115 || code == '^')
5116 return true;
5117
5118 return false;
5119 }
5120
5121 /* Print operand X (an rtx) in assembler syntax to file FILE.
5122 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5123 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5124
5125 void
5126 pa_print_operand (FILE *file, rtx x, int code)
5127 {
5128 switch (code)
5129 {
5130 case '#':
5131 /* Output a 'nop' if there's nothing for the delay slot. */
5132 if (dbr_sequence_length () == 0)
5133 fputs ("\n\tnop", file);
5134 return;
5135 case '*':
5136 /* Output a nullification completer if there's nothing for the
5137 delay slot or nullification is requested. */
5138 if (dbr_sequence_length () == 0
5139 || (final_sequence
5140 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5141 fputs (",n", file);
5142 return;
5143 case 'R':
5144 /* Print out the second register name of a register pair.
5145 I.e., R (6) => 7. */
5146 fputs (reg_names[REGNO (x) + 1], file);
5147 return;
5148 case 'r':
5149 /* A register or zero. */
5150 if (x == const0_rtx
5151 || (x == CONST0_RTX (DFmode))
5152 || (x == CONST0_RTX (SFmode)))
5153 {
5154 fputs ("%r0", file);
5155 return;
5156 }
5157 else
5158 break;
5159 case 'f':
5160 /* A register or zero (floating point). */
5161 if (x == const0_rtx
5162 || (x == CONST0_RTX (DFmode))
5163 || (x == CONST0_RTX (SFmode)))
5164 {
5165 fputs ("%fr0", file);
5166 return;
5167 }
5168 else
5169 break;
5170 case 'A':
5171 {
5172 rtx xoperands[2];
5173
5174 xoperands[0] = XEXP (XEXP (x, 0), 0);
5175 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5176 pa_output_global_address (file, xoperands[1], 0);
5177 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5178 return;
5179 }
5180
5181 case 'C': /* Plain (C)ondition */
5182 case 'X':
5183 switch (GET_CODE (x))
5184 {
5185 case EQ:
5186 fputs ("=", file); break;
5187 case NE:
5188 fputs ("<>", file); break;
5189 case GT:
5190 fputs (">", file); break;
5191 case GE:
5192 fputs (">=", file); break;
5193 case GEU:
5194 fputs (">>=", file); break;
5195 case GTU:
5196 fputs (">>", file); break;
5197 case LT:
5198 fputs ("<", file); break;
5199 case LE:
5200 fputs ("<=", file); break;
5201 case LEU:
5202 fputs ("<<=", file); break;
5203 case LTU:
5204 fputs ("<<", file); break;
5205 default:
5206 gcc_unreachable ();
5207 }
5208 return;
5209 case 'N': /* Condition, (N)egated */
5210 switch (GET_CODE (x))
5211 {
5212 case EQ:
5213 fputs ("<>", file); break;
5214 case NE:
5215 fputs ("=", file); break;
5216 case GT:
5217 fputs ("<=", file); break;
5218 case GE:
5219 fputs ("<", file); break;
5220 case GEU:
5221 fputs ("<<", file); break;
5222 case GTU:
5223 fputs ("<<=", file); break;
5224 case LT:
5225 fputs (">=", file); break;
5226 case LE:
5227 fputs (">", file); break;
5228 case LEU:
5229 fputs (">>", file); break;
5230 case LTU:
5231 fputs (">>=", file); break;
5232 default:
5233 gcc_unreachable ();
5234 }
5235 return;
5236 /* For floating point comparisons. Note that the output
5237 predicates are the complement of the desired mode. The
5238 conditions for GT, GE, LT, LE and LTGT cause an invalid
5239 operation exception if the result is unordered and this
5240 exception is enabled in the floating-point status register. */
5241 case 'Y':
5242 switch (GET_CODE (x))
5243 {
5244 case EQ:
5245 fputs ("!=", file); break;
5246 case NE:
5247 fputs ("=", file); break;
5248 case GT:
5249 fputs ("!>", file); break;
5250 case GE:
5251 fputs ("!>=", file); break;
5252 case LT:
5253 fputs ("!<", file); break;
5254 case LE:
5255 fputs ("!<=", file); break;
5256 case LTGT:
5257 fputs ("!<>", file); break;
5258 case UNLE:
5259 fputs ("!?<=", file); break;
5260 case UNLT:
5261 fputs ("!?<", file); break;
5262 case UNGE:
5263 fputs ("!?>=", file); break;
5264 case UNGT:
5265 fputs ("!?>", file); break;
5266 case UNEQ:
5267 fputs ("!?=", file); break;
5268 case UNORDERED:
5269 fputs ("!?", file); break;
5270 case ORDERED:
5271 fputs ("?", file); break;
5272 default:
5273 gcc_unreachable ();
5274 }
5275 return;
5276 case 'S': /* Condition, operands are (S)wapped. */
5277 switch (GET_CODE (x))
5278 {
5279 case EQ:
5280 fputs ("=", file); break;
5281 case NE:
5282 fputs ("<>", file); break;
5283 case GT:
5284 fputs ("<", file); break;
5285 case GE:
5286 fputs ("<=", file); break;
5287 case GEU:
5288 fputs ("<<=", file); break;
5289 case GTU:
5290 fputs ("<<", file); break;
5291 case LT:
5292 fputs (">", file); break;
5293 case LE:
5294 fputs (">=", file); break;
5295 case LEU:
5296 fputs (">>=", file); break;
5297 case LTU:
5298 fputs (">>", file); break;
5299 default:
5300 gcc_unreachable ();
5301 }
5302 return;
5303 case 'B': /* Condition, (B)oth swapped and negate. */
5304 switch (GET_CODE (x))
5305 {
5306 case EQ:
5307 fputs ("<>", file); break;
5308 case NE:
5309 fputs ("=", file); break;
5310 case GT:
5311 fputs (">=", file); break;
5312 case GE:
5313 fputs (">", file); break;
5314 case GEU:
5315 fputs (">>", file); break;
5316 case GTU:
5317 fputs (">>=", file); break;
5318 case LT:
5319 fputs ("<=", file); break;
5320 case LE:
5321 fputs ("<", file); break;
5322 case LEU:
5323 fputs ("<<", file); break;
5324 case LTU:
5325 fputs ("<<=", file); break;
5326 default:
5327 gcc_unreachable ();
5328 }
5329 return;
5330 case 'k':
5331 gcc_assert (GET_CODE (x) == CONST_INT);
5332 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5333 return;
5334 case 'Q':
5335 gcc_assert (GET_CODE (x) == CONST_INT);
5336 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5337 return;
5338 case 'L':
5339 gcc_assert (GET_CODE (x) == CONST_INT);
5340 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5341 return;
5342 case 'o':
5343 gcc_assert (GET_CODE (x) == CONST_INT
5344 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5345 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5346 return;
5347 case 'O':
5348 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5349 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5350 return;
5351 case 'p':
5352 gcc_assert (GET_CODE (x) == CONST_INT);
5353 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5354 return;
5355 case 'P':
5356 gcc_assert (GET_CODE (x) == CONST_INT);
5357 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5358 return;
5359 case 'I':
5360 if (GET_CODE (x) == CONST_INT)
5361 fputs ("i", file);
5362 return;
5363 case 'M':
5364 case 'F':
5365 switch (GET_CODE (XEXP (x, 0)))
5366 {
5367 case PRE_DEC:
5368 case PRE_INC:
5369 if (ASSEMBLER_DIALECT == 0)
5370 fputs ("s,mb", file);
5371 else
5372 fputs (",mb", file);
5373 break;
5374 case POST_DEC:
5375 case POST_INC:
5376 if (ASSEMBLER_DIALECT == 0)
5377 fputs ("s,ma", file);
5378 else
5379 fputs (",ma", file);
5380 break;
5381 case PLUS:
5382 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5383 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5384 {
5385 if (ASSEMBLER_DIALECT == 0)
5386 fputs ("x", file);
5387 }
5388 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5389 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5390 {
5391 if (ASSEMBLER_DIALECT == 0)
5392 fputs ("x,s", file);
5393 else
5394 fputs (",s", file);
5395 }
5396 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5397 fputs ("s", file);
5398 break;
5399 default:
5400 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5401 fputs ("s", file);
5402 break;
5403 }
5404 return;
5405 case 'G':
5406 pa_output_global_address (file, x, 0);
5407 return;
5408 case 'H':
5409 pa_output_global_address (file, x, 1);
5410 return;
5411 case 0: /* Don't do anything special */
5412 break;
5413 case 'Z':
5414 {
5415 unsigned op[3];
5416 compute_zdepwi_operands (INTVAL (x), op);
5417 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5418 return;
5419 }
5420 case 'z':
5421 {
5422 unsigned op[3];
5423 compute_zdepdi_operands (INTVAL (x), op);
5424 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5425 return;
5426 }
5427 case 'c':
5428 /* We can get here from a .vtable_inherit due to our
5429 CONSTANT_ADDRESS_P rejecting perfectly good constant
5430 addresses. */
5431 break;
5432 default:
5433 gcc_unreachable ();
5434 }
5435 if (GET_CODE (x) == REG)
5436 {
5437 fputs (reg_names [REGNO (x)], file);
5438 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5439 {
5440 fputs ("R", file);
5441 return;
5442 }
5443 if (FP_REG_P (x)
5444 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5445 && (REGNO (x) & 1) == 0)
5446 fputs ("L", file);
5447 }
5448 else if (GET_CODE (x) == MEM)
5449 {
5450 int size = GET_MODE_SIZE (GET_MODE (x));
5451 rtx base = NULL_RTX;
5452 switch (GET_CODE (XEXP (x, 0)))
5453 {
5454 case PRE_DEC:
5455 case POST_DEC:
5456 base = XEXP (XEXP (x, 0), 0);
5457 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5458 break;
5459 case PRE_INC:
5460 case POST_INC:
5461 base = XEXP (XEXP (x, 0), 0);
5462 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5463 break;
5464 case PLUS:
5465 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5466 fprintf (file, "%s(%s)",
5467 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5468 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5469 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5470 fprintf (file, "%s(%s)",
5471 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5472 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5473 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5474 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5475 {
5476 /* Because the REG_POINTER flag can get lost during reload,
5477 pa_legitimate_address_p canonicalizes the order of the
5478 index and base registers in the combined move patterns. */
5479 rtx base = XEXP (XEXP (x, 0), 1);
5480 rtx index = XEXP (XEXP (x, 0), 0);
5481
5482 fprintf (file, "%s(%s)",
5483 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5484 }
5485 else
5486 output_address (GET_MODE (x), XEXP (x, 0));
5487 break;
5488 default:
5489 output_address (GET_MODE (x), XEXP (x, 0));
5490 break;
5491 }
5492 }
5493 else
5494 output_addr_const (file, x);
5495 }
5496
5497 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5498
5499 void
5500 pa_output_global_address (FILE *file, rtx x, int round_constant)
5501 {
5502
5503 /* Imagine (high (const (plus ...))). */
5504 if (GET_CODE (x) == HIGH)
5505 x = XEXP (x, 0);
5506
5507 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5508 output_addr_const (file, x);
5509 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5510 {
5511 output_addr_const (file, x);
5512 fputs ("-$global$", file);
5513 }
5514 else if (GET_CODE (x) == CONST)
5515 {
5516 const char *sep = "";
5517 int offset = 0; /* assembler wants -$global$ at end */
5518 rtx base = NULL_RTX;
5519
5520 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5521 {
5522 case LABEL_REF:
5523 case SYMBOL_REF:
5524 base = XEXP (XEXP (x, 0), 0);
5525 output_addr_const (file, base);
5526 break;
5527 case CONST_INT:
5528 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5529 break;
5530 default:
5531 gcc_unreachable ();
5532 }
5533
5534 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5535 {
5536 case LABEL_REF:
5537 case SYMBOL_REF:
5538 base = XEXP (XEXP (x, 0), 1);
5539 output_addr_const (file, base);
5540 break;
5541 case CONST_INT:
5542 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5543 break;
5544 default:
5545 gcc_unreachable ();
5546 }
5547
5548 /* How bogus. The compiler is apparently responsible for
5549 rounding the constant if it uses an LR field selector.
5550
5551 The linker and/or assembler seem a better place since
5552 they have to do this kind of thing already.
5553
5554 If we fail to do this, HP's optimizing linker may eliminate
5555 an addil, but not update the ldw/stw/ldo instruction that
5556 uses the result of the addil. */
5557 if (round_constant)
5558 offset = ((offset + 0x1000) & ~0x1fff);
5559
5560 switch (GET_CODE (XEXP (x, 0)))
5561 {
5562 case PLUS:
5563 if (offset < 0)
5564 {
5565 offset = -offset;
5566 sep = "-";
5567 }
5568 else
5569 sep = "+";
5570 break;
5571
5572 case MINUS:
5573 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5574 sep = "-";
5575 break;
5576
5577 default:
5578 gcc_unreachable ();
5579 }
5580
5581 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5582 fputs ("-$global$", file);
5583 if (offset)
5584 fprintf (file, "%s%d", sep, offset);
5585 }
5586 else
5587 output_addr_const (file, x);
5588 }
5589
5590 /* Output boilerplate text to appear at the beginning of the file.
5591 There are several possible versions. */
5592 #define aputs(x) fputs(x, asm_out_file)
5593 static inline void
5594 pa_file_start_level (void)
5595 {
5596 if (TARGET_64BIT)
5597 aputs ("\t.LEVEL 2.0w\n");
5598 else if (TARGET_PA_20)
5599 aputs ("\t.LEVEL 2.0\n");
5600 else if (TARGET_PA_11)
5601 aputs ("\t.LEVEL 1.1\n");
5602 else
5603 aputs ("\t.LEVEL 1.0\n");
5604 }
5605
5606 static inline void
5607 pa_file_start_space (int sortspace)
5608 {
5609 aputs ("\t.SPACE $PRIVATE$");
5610 if (sortspace)
5611 aputs (",SORT=16");
5612 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5613 if (flag_tm)
5614 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5615 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5616 "\n\t.SPACE $TEXT$");
5617 if (sortspace)
5618 aputs (",SORT=8");
5619 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5620 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5621 }
5622
5623 static inline void
5624 pa_file_start_file (int want_version)
5625 {
5626 if (write_symbols != NO_DEBUG)
5627 {
5628 output_file_directive (asm_out_file, main_input_filename);
5629 if (want_version)
5630 aputs ("\t.version\t\"01.01\"\n");
5631 }
5632 }
5633
5634 static inline void
5635 pa_file_start_mcount (const char *aswhat)
5636 {
5637 if (profile_flag)
5638 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5639 }
5640
5641 static void
5642 pa_elf_file_start (void)
5643 {
5644 pa_file_start_level ();
5645 pa_file_start_mcount ("ENTRY");
5646 pa_file_start_file (0);
5647 }
5648
5649 static void
5650 pa_som_file_start (void)
5651 {
5652 pa_file_start_level ();
5653 pa_file_start_space (0);
5654 aputs ("\t.IMPORT $global$,DATA\n"
5655 "\t.IMPORT $$dyncall,MILLICODE\n");
5656 pa_file_start_mcount ("CODE");
5657 pa_file_start_file (0);
5658 }
5659
5660 static void
5661 pa_linux_file_start (void)
5662 {
5663 pa_file_start_file (1);
5664 pa_file_start_level ();
5665 pa_file_start_mcount ("CODE");
5666 }
5667
5668 static void
5669 pa_hpux64_gas_file_start (void)
5670 {
5671 pa_file_start_level ();
5672 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5673 if (profile_flag)
5674 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5675 #endif
5676 pa_file_start_file (1);
5677 }
5678
5679 static void
5680 pa_hpux64_hpas_file_start (void)
5681 {
5682 pa_file_start_level ();
5683 pa_file_start_space (1);
5684 pa_file_start_mcount ("CODE");
5685 pa_file_start_file (0);
5686 }
5687 #undef aputs
5688
5689 /* Search the deferred plabel list for SYMBOL and return its internal
5690 label. If an entry for SYMBOL is not found, a new entry is created. */
5691
5692 rtx
5693 pa_get_deferred_plabel (rtx symbol)
5694 {
5695 const char *fname = XSTR (symbol, 0);
5696 size_t i;
5697
5698 /* See if we have already put this function on the list of deferred
5699 plabels. This list is generally small, so a linear search is not
5700 too ugly. If it proves too slow, replace it with something faster. */
5701 for (i = 0; i < n_deferred_plabels; i++)
5702 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5703 break;
5704
5705 /* If the deferred plabel list is empty, or this entry was not found
5706 on the list, create a new entry on the list. */
5707 if (deferred_plabels == NULL || i == n_deferred_plabels)
5708 {
5709 tree id;
5710
5711 if (deferred_plabels == 0)
5712 deferred_plabels = ggc_alloc<deferred_plabel> ();
5713 else
5714 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5715 deferred_plabels,
5716 n_deferred_plabels + 1);
5717
5718 i = n_deferred_plabels++;
5719 deferred_plabels[i].internal_label = gen_label_rtx ();
5720 deferred_plabels[i].symbol = symbol;
5721
5722 /* Gross. We have just implicitly taken the address of this
5723 function. Mark it in the same manner as assemble_name. */
5724 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5725 if (id)
5726 mark_referenced (id);
5727 }
5728
5729 return deferred_plabels[i].internal_label;
5730 }
5731
5732 static void
5733 output_deferred_plabels (void)
5734 {
5735 size_t i;
5736
5737 /* If we have some deferred plabels, then we need to switch into the
5738 data or readonly data section, and align it to a 4 byte (32-bit)
5739 or 8 byte (64-bit) boundary before outputting the deferred plabels. */
5740 if (n_deferred_plabels)
5741 {
5742 switch_to_section (flag_pic ? data_section : readonly_data_section);
5743 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5744 }
5745
5746 /* Now output the deferred plabels. */
5747 for (i = 0; i < n_deferred_plabels; i++)
5748 {
5749 targetm.asm_out.internal_label (asm_out_file, "L",
5750 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5751 assemble_integer (deferred_plabels[i].symbol,
5752 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5753 }
5754 }
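
/* Editor's note: an illustrative sketch (not part of the source) of the
   assembly the routine above emits for a single deferred plabel on the
   32-bit runtime, assuming label number 42 and a deferred call to "foo"
   (both hypothetical, and the exact label syntax depends on the object
   format):

        .align 4                ; ASM_OUTPUT_ALIGN, log2 alignment 2
   L$42:
        .word foo               ; assemble_integer, 4 bytes

   With flag_pic the words go to the data section so they can be
   relocated at load time; otherwise they are placed in read-only data. */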
5755
5756 /* Initialize optabs to point to emulation routines. */
5757
5758 static void
5759 pa_init_libfuncs (void)
5760 {
5761 if (HPUX_LONG_DOUBLE_LIBRARY)
5762 {
5763 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5764 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5765 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5766 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5767 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5768 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5769 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5770 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5771 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5772
5773 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5774 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5775 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5776 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5777 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5778 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5779 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5780
5781 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5782 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5783 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5784 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5785
5786 set_conv_libfunc (sfix_optab, SImode, TFmode,
5787 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5788 : "_U_Qfcnvfxt_quad_to_sgl");
5789 set_conv_libfunc (sfix_optab, DImode, TFmode,
5790 "_U_Qfcnvfxt_quad_to_dbl");
5791 set_conv_libfunc (ufix_optab, SImode, TFmode,
5792 "_U_Qfcnvfxt_quad_to_usgl");
5793 set_conv_libfunc (ufix_optab, DImode, TFmode,
5794 "_U_Qfcnvfxt_quad_to_udbl");
5795
5796 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5797 "_U_Qfcnvxf_sgl_to_quad");
5798 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5799 "_U_Qfcnvxf_dbl_to_quad");
5800 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5801 "_U_Qfcnvxf_usgl_to_quad");
5802 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5803 "_U_Qfcnvxf_udbl_to_quad");
5804 }
5805
5806 if (TARGET_SYNC_LIBCALL)
5807 init_sync_libfuncs (8);
5808 }
5809
5810 /* HP's millicode routines mean something special to the assembler.
5811 Keep track of which ones we have used. */
5812
5813 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5814 static void import_milli (enum millicodes);
5815 static char imported[(int) end1000];
5816 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5817 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5818 #define MILLI_START 10
5819
5820 static void
5821 import_milli (enum millicodes code)
5822 {
5823 char str[sizeof (import_string)];
5824
5825 if (!imported[(int) code])
5826 {
5827 imported[(int) code] = 1;
5828 strcpy (str, import_string);
5829 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5830 output_asm_insn (str, 0);
5831 }
5832 }
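
/* Editor's note: a sketch of the string splice performed above.
   MILLI_START is 10 because ".IMPORT $$" is exactly ten characters, so
   the strncpy overwrites the four-character "...." placeholder in place:

        ".IMPORT $$....,MILLICODE"
                  ^^^^
        import_milli (mulI)  =>  ".IMPORT $$mulI,MILLICODE"

   Every entry in milli_names[] is exactly four characters long, which
   is what makes the fixed-width copy correct. */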
5833
5834 /* The register constraints have put the operands and return value in
5835 the proper registers. */
5836
5837 const char *
5838 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5839 {
5840 import_milli (mulI);
5841 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5842 }
5843
5844 /* Emit the rtl for doing a division by a constant. */
5845
5846 /* Do magic division millicodes exist for this value? */
5847 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5848
5849 /* We'll use an array to keep track of the magic millicodes and
5850 whether or not we've used them already. [n][0] is signed, [n][1] is
5851 unsigned. */
5852
5853 static int div_milli[16][2];
5854
5855 int
5856 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5857 {
5858 if (GET_CODE (operands[2]) == CONST_INT
5859 && INTVAL (operands[2]) > 0
5860 && INTVAL (operands[2]) < 16
5861 && pa_magic_milli[INTVAL (operands[2])])
5862 {
5863 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5864
5865 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5866 emit
5867 (gen_rtx_PARALLEL
5868 (VOIDmode,
5869 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5870 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5871 SImode,
5872 gen_rtx_REG (SImode, 26),
5873 operands[2])),
5874 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5875 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5876 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5877 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5878 gen_rtx_CLOBBER (VOIDmode, ret))));
5879 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5880 return 1;
5881 }
5882 return 0;
5883 }
5884
5885 const char *
5886 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5887 {
5888 int divisor;
5889
5890 /* If the divisor is a constant, try to use one of the special
5891 opcodes. */
5892 if (GET_CODE (operands[0]) == CONST_INT)
5893 {
5894 static char buf[100];
5895 divisor = INTVAL (operands[0]);
5896 if (!div_milli[divisor][unsignedp])
5897 {
5898 div_milli[divisor][unsignedp] = 1;
5899 if (unsignedp)
5900 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5901 else
5902 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5903 }
5904 if (unsignedp)
5905 {
5906 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5907 INTVAL (operands[0]));
5908 return pa_output_millicode_call (insn,
5909 gen_rtx_SYMBOL_REF (SImode, buf));
5910 }
5911 else
5912 {
5913 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5914 INTVAL (operands[0]));
5915 return pa_output_millicode_call (insn,
5916 gen_rtx_SYMBOL_REF (SImode, buf));
5917 }
5918 }
5919 /* Divisor isn't a special constant. */
5920 else
5921 {
5922 if (unsignedp)
5923 {
5924 import_milli (divU);
5925 return pa_output_millicode_call (insn,
5926 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5927 }
5928 else
5929 {
5930 import_milli (divI);
5931 return pa_output_millicode_call (insn,
5932 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5933 }
5934 }
5935 }
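
/* Editor's note: an illustrative trace of the constant-divisor path
   above. pa_magic_milli[] is nonzero for divisors 3, 5, 6, 7, 9, 10,
   12, 14 and 15, so a signed division by 3, for instance, imports and
   calls a dedicated millicode routine:

        .IMPORT $$divI_3,MILLICODE
        ...
        b,l $$divI_3,%r31       ; hypothetical call form; the actual
                                ; branch is produced by
                                ; pa_output_millicode_call

   Any other divisor falls back to the generic $$divI or $$divU entry. */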
5936
5937 /* Output a $$rem millicode to do mod. */
5938
5939 const char *
5940 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5941 {
5942 if (unsignedp)
5943 {
5944 import_milli (remU);
5945 return pa_output_millicode_call (insn,
5946 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5947 }
5948 else
5949 {
5950 import_milli (remI);
5951 return pa_output_millicode_call (insn,
5952 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5953 }
5954 }
5955
5956 void
5957 pa_output_arg_descriptor (rtx_insn *call_insn)
5958 {
5959 const char *arg_regs[4];
5960 machine_mode arg_mode;
5961 rtx link;
5962 int i, output_flag = 0;
5963 int regno;
5964
5965 /* We neither need nor want argument location descriptors for the
5966 64-bit runtime environment or the ELF32 environment. */
5967 if (TARGET_64BIT || TARGET_ELF32)
5968 return;
5969
5970 for (i = 0; i < 4; i++)
5971 arg_regs[i] = 0;
5972
5973 /* Specify explicitly that no argument relocations should take place
5974 if using the portable runtime calling conventions. */
5975 if (TARGET_PORTABLE_RUNTIME)
5976 {
5977 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5978 asm_out_file);
5979 return;
5980 }
5981
5982 gcc_assert (CALL_P (call_insn));
5983 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5984 link; link = XEXP (link, 1))
5985 {
5986 rtx use = XEXP (link, 0);
5987
5988 if (! (GET_CODE (use) == USE
5989 && GET_CODE (XEXP (use, 0)) == REG
5990 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5991 continue;
5992
5993 arg_mode = GET_MODE (XEXP (use, 0));
5994 regno = REGNO (XEXP (use, 0));
5995 if (regno >= 23 && regno <= 26)
5996 {
5997 arg_regs[26 - regno] = "GR";
5998 if (arg_mode == DImode)
5999 arg_regs[25 - regno] = "GR";
6000 }
6001 else if (regno >= 32 && regno <= 39)
6002 {
6003 if (arg_mode == SFmode)
6004 arg_regs[(regno - 32) / 2] = "FR";
6005 else
6006 {
6007 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
6008 arg_regs[(regno - 34) / 2] = "FR";
6009 arg_regs[(regno - 34) / 2 + 1] = "FU";
6010 #else
6011 arg_regs[(regno - 34) / 2] = "FU";
6012 arg_regs[(regno - 34) / 2 + 1] = "FR";
6013 #endif
6014 }
6015 }
6016 }
6017 fputs ("\t.CALL ", asm_out_file);
6018 for (i = 0; i < 4; i++)
6019 {
6020 if (arg_regs[i])
6021 {
6022 if (output_flag++)
6023 fputc (',', asm_out_file);
6024 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
6025 }
6026 }
6027 fputc ('\n', asm_out_file);
6028 }
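
/* Editor's note: examples of the descriptors the loop above produces,
   derived from its register numbering (general argument registers
   %r26..%r23, floating argument registers with regnos 32..39):

        int arg in %r26 (SImode)        ->  .CALL ARGW0=GR
        long long in %r25 (DImode)      ->  .CALL ARGW0=GR,ARGW1=GR
        float in regno 32 (SFmode)      ->  .CALL ARGW0=FR

   A double additionally marks the adjacent argument word FU (or with
   FR/FU swapped, per HP_FP_ARG_DESCRIPTOR_REVERSED). */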
6029 \f
6030 /* Inform reload about cases where moving X with a mode MODE to or from
6031 a register in RCLASS requires an extra scratch or immediate register.
6032 Return the class needed for the immediate register. */
6033
6034 static reg_class_t
6035 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
6036 machine_mode mode, secondary_reload_info *sri)
6037 {
6038 int regno;
6039 enum reg_class rclass = (enum reg_class) rclass_i;
6040
6041 /* Handle the easy stuff first. */
6042 if (rclass == R1_REGS)
6043 return NO_REGS;
6044
6045 if (REG_P (x))
6046 {
6047 regno = REGNO (x);
6048 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
6049 return NO_REGS;
6050 }
6051 else
6052 regno = -1;
6053
6054 /* If we have something like (mem (mem (...))), we can safely assume the
6055 inner MEM will end up in a general register after reloading, so there's
6056 no need for a secondary reload. */
6057 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
6058 return NO_REGS;
6059
6060 /* Trying to load a constant into a FP register during PIC code
6061 generation requires %r1 as a scratch register. For float modes,
6062 the only legitimate constant is CONST0_RTX. However, there are
6063 a few patterns that accept constant double operands. */
6064 if (flag_pic
6065 && FP_REG_CLASS_P (rclass)
6066 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
6067 {
6068 switch (mode)
6069 {
6070 case E_SImode:
6071 sri->icode = CODE_FOR_reload_insi_r1;
6072 break;
6073
6074 case E_DImode:
6075 sri->icode = CODE_FOR_reload_indi_r1;
6076 break;
6077
6078 case E_SFmode:
6079 sri->icode = CODE_FOR_reload_insf_r1;
6080 break;
6081
6082 case E_DFmode:
6083 sri->icode = CODE_FOR_reload_indf_r1;
6084 break;
6085
6086 default:
6087 gcc_unreachable ();
6088 }
6089 return NO_REGS;
6090 }
6091
6092 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6093 register when we're generating PIC code or when the operand isn't
6094 readonly. */
6095 if (pa_symbolic_expression_p (x))
6096 {
6097 if (GET_CODE (x) == HIGH)
6098 x = XEXP (x, 0);
6099
6100 if (flag_pic || !read_only_operand (x, VOIDmode))
6101 {
6102 switch (mode)
6103 {
6104 case E_SImode:
6105 sri->icode = CODE_FOR_reload_insi_r1;
6106 break;
6107
6108 case E_DImode:
6109 sri->icode = CODE_FOR_reload_indi_r1;
6110 break;
6111
6112 default:
6113 gcc_unreachable ();
6114 }
6115 return NO_REGS;
6116 }
6117 }
6118
6119 /* Profiling showed the PA port spends about 1.3% of its compilation
6120 time in true_regnum from calls inside pa_secondary_reload_class. */
6121 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6122 regno = true_regnum (x);
6123
6124 /* Handle reloads for floating point loads and stores. */
6125 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6126 && FP_REG_CLASS_P (rclass))
6127 {
6128 if (MEM_P (x))
6129 {
6130 x = XEXP (x, 0);
6131
6132 /* We don't need a secondary reload for indexed memory addresses.
6133
6134 When INT14_OK_STRICT is true, it might appear that we could
6135 directly allow register indirect memory addresses. However,
6136 this doesn't work because we don't support SUBREGs in
6137 floating-point register copies and reload doesn't tell us
6138 when it's going to use a SUBREG. */
6139 if (IS_INDEX_ADDR_P (x))
6140 return NO_REGS;
6141 }
6142
6143 /* Request a secondary reload with a general scratch register
6144 for everything else. ??? Could symbolic operands be handled
6145 directly when generating non-pic PA 2.0 code? */
6146 sri->icode = (in_p
6147 ? direct_optab_handler (reload_in_optab, mode)
6148 : direct_optab_handler (reload_out_optab, mode));
6149 return NO_REGS;
6150 }
6151
6152 /* A SAR<->FP register copy requires an intermediate general register
6153 and secondary memory. We need a secondary reload with a general
6154 scratch register for spills. */
6155 if (rclass == SHIFT_REGS)
6156 {
6157 /* Handle spill. */
6158 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6159 {
6160 sri->icode = (in_p
6161 ? direct_optab_handler (reload_in_optab, mode)
6162 : direct_optab_handler (reload_out_optab, mode));
6163 return NO_REGS;
6164 }
6165
6166 /* Handle FP copy. */
6167 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6168 return GENERAL_REGS;
6169 }
6170
6171 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6172 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6173 && FP_REG_CLASS_P (rclass))
6174 return GENERAL_REGS;
6175
6176 return NO_REGS;
6177 }
6178
6179 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6180 is only marked as live on entry by df-scan when it is a fixed
6181 register. It isn't a fixed register in the 64-bit runtime,
6182 so we need to mark it here. */
6183
6184 static void
6185 pa_extra_live_on_entry (bitmap regs)
6186 {
6187 if (TARGET_64BIT)
6188 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6189 }
6190
6191 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6192 to prevent it from being deleted. */
6193
6194 rtx
6195 pa_eh_return_handler_rtx (void)
6196 {
6197 rtx tmp;
6198
6199 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6200 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6201 tmp = gen_rtx_MEM (word_mode, tmp);
6202 tmp->volatil = 1;
6203 return tmp;
6204 }
6205
6206 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6207 by invisible reference. As a GCC extension, we also pass anything
6208 with a zero or variable size by reference.
6209
6210 The 64-bit runtime does not describe passing any types by invisible
6211 reference. The internals of GCC can't currently handle passing
6212 empty structures, or zero or variable length arrays, when they are
6213 not passed entirely on the stack or by reference. Thus, as a GCC
6214 extension, we pass these types by reference. The HP compiler doesn't
6215 support these types, so hopefully there shouldn't be any compatibility
6216 issues. This may have to be revisited when HP releases a C99 compiler
6217 or updates the ABI. */
6218
6219 static bool
6220 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6221 machine_mode mode, const_tree type,
6222 bool named ATTRIBUTE_UNUSED)
6223 {
6224 HOST_WIDE_INT size;
6225
6226 if (type)
6227 size = int_size_in_bytes (type);
6228 else
6229 size = GET_MODE_SIZE (mode);
6230
6231 if (TARGET_64BIT)
6232 return size <= 0;
6233 else
6234 return size <= 0 || size > 8;
6235 }
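
/* Editor's note: concrete consequences of the predicate above, assuming
   int_size_in_bytes returns the natural sizes and -1 for variable-size
   types:

        struct { char c[16]; }    32-bit: by reference (size > 8)
                                  64-bit: by value
        double (size 8)           both runtimes: by value
        variable length array     both runtimes: by reference (size -1) */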
6236
6237 enum direction
6238 pa_function_arg_padding (machine_mode mode, const_tree type)
6239 {
6240 if (mode == BLKmode
6241 || (TARGET_64BIT
6242 && type
6243 && (AGGREGATE_TYPE_P (type)
6244 || TREE_CODE (type) == COMPLEX_TYPE
6245 || TREE_CODE (type) == VECTOR_TYPE)))
6246 {
6247 /* Return none if justification is not required. */
6248 if (type
6249 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6250 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6251 return none;
6252
6253 /* The directions set here are ignored when a BLKmode argument larger
6254 than a word is placed in a register. Different code is used for
6255 the stack and registers. This makes it difficult to have a
6256 consistent data representation for both the stack and registers.
6257 For both runtimes, the justification and padding for arguments on
6258 the stack and in registers should be identical. */
6259 if (TARGET_64BIT)
6260 /* The 64-bit runtime specifies left justification for aggregates. */
6261 return upward;
6262 else
6263 /* The 32-bit runtime architecture specifies right justification.
6264 When the argument is passed on the stack, the argument is padded
6265 with garbage on the left. The HP compiler pads with zeros. */
6266 return downward;
6267 }
6268
6269 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6270 return downward;
6271 else
6272 return none;
6273 }
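
/* Editor's note: a worked instance of the rules above, assuming
   PARM_BOUNDARY equals the word size (an assumption about this
   configuration). A 5-byte BLKmode aggregate is not a multiple of
   PARM_BOUNDARY, so the 32-bit runtime pads it downward (right
   justified, garbage in the leading bytes) while the 64-bit runtime
   pads it upward (left justified). A 4-byte aggregate on the 32-bit
   runtime satisfies the INTEGER_CST size check and needs no padding. */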
6274
6275 \f
6276 /* Do what is necessary for `va_start'. We look at the current function
6277 to determine if stdargs or varargs is used and fill in an initial
6278 va_list. A pointer to this constructor is returned. */
6279
6280 static rtx
6281 hppa_builtin_saveregs (void)
6282 {
6283 rtx offset, dest;
6284 tree fntype = TREE_TYPE (current_function_decl);
6285 int argadj = ((!stdarg_p (fntype))
6286 ? UNITS_PER_WORD : 0);
6287
6288 if (argadj)
6289 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6290 else
6291 offset = crtl->args.arg_offset_rtx;
6292
6293 if (TARGET_64BIT)
6294 {
6295 int i, off;
6296
6297 /* Adjust for varargs/stdarg differences. */
6298 if (argadj)
6299 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6300 else
6301 offset = crtl->args.arg_offset_rtx;
6302
6303 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6304 from the incoming arg pointer and growing to larger addresses. */
6305 for (i = 26, off = -64; i >= 19; i--, off += 8)
6306 emit_move_insn (gen_rtx_MEM (word_mode,
6307 plus_constant (Pmode,
6308 arg_pointer_rtx, off)),
6309 gen_rtx_REG (word_mode, i));
6310
6311 /* The incoming args pointer points just beyond the flushback area;
6312 normally this is not a serious concern. However, when we are doing
6313 varargs/stdargs we want to make the arg pointer point to the start
6314 of the incoming argument area. */
6315 emit_move_insn (virtual_incoming_args_rtx,
6316 plus_constant (Pmode, arg_pointer_rtx, -64));
6317
6318 /* Now return a pointer to the first anonymous argument. */
6319 return copy_to_reg (expand_binop (Pmode, add_optab,
6320 virtual_incoming_args_rtx,
6321 offset, 0, 0, OPTAB_LIB_WIDEN));
6322 }
6323
6324 /* Store general registers on the stack. */
6325 dest = gen_rtx_MEM (BLKmode,
6326 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6327 -16));
6328 set_mem_alias_set (dest, get_varargs_alias_set ());
6329 set_mem_align (dest, BITS_PER_WORD);
6330 move_block_from_reg (23, dest, 4);
6331
6332 /* move_block_from_reg will emit code to store the argument registers
6333 individually as scalar stores.
6334
6335 However, other insns may later load from the same addresses for
6336 a structure load (passing a struct to a varargs routine).
6337
6338 The alias code assumes that such aliasing can never happen, so we
6339 have to keep memory referencing insns from moving up beyond the
6340 last argument register store. So we emit a blockage insn here. */
6341 emit_insn (gen_blockage ());
6342
6343 return copy_to_reg (expand_binop (Pmode, add_optab,
6344 crtl->args.internal_arg_pointer,
6345 offset, 0, 0, OPTAB_LIB_WIDEN));
6346 }
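
/* Editor's note: the 64-bit register-save loop above walks the
   registers down while the offsets walk up:

        %r26 -> -64(arg ptr)    %r22 -> -32(arg ptr)
        %r25 -> -56(arg ptr)    %r21 -> -24(arg ptr)
        %r24 -> -48(arg ptr)    %r20 -> -16(arg ptr)
        %r23 -> -40(arg ptr)    %r19 ->  -8(arg ptr)

   so the eight argument registers form a contiguous, in-order argument
   image immediately below the incoming arg pointer. */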
6347
6348 static void
6349 hppa_va_start (tree valist, rtx nextarg)
6350 {
6351 nextarg = expand_builtin_saveregs ();
6352 std_expand_builtin_va_start (valist, nextarg);
6353 }
6354
6355 static tree
6356 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6357 gimple_seq *post_p)
6358 {
6359 if (TARGET_64BIT)
6360 {
6361 /* Args grow upward. We can use the generic routines. */
6362 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6363 }
6364 else /* !TARGET_64BIT */
6365 {
6366 tree ptr = build_pointer_type (type);
6367 tree valist_type;
6368 tree t, u;
6369 unsigned int size, ofs;
6370 bool indirect;
6371
6372 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6373 if (indirect)
6374 {
6375 type = ptr;
6376 ptr = build_pointer_type (type);
6377 }
6378 size = int_size_in_bytes (type);
6379 valist_type = TREE_TYPE (valist);
6380
6381 /* Args grow down. Not handled by generic routines. */
6382
6383 u = fold_convert (sizetype, size_in_bytes (type));
6384 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6385 t = fold_build_pointer_plus (valist, u);
6386
6387 /* Align to 4 or 8 byte boundary depending on argument size. */
6388
6389 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6390 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6391 t = fold_convert (valist_type, t);
6392
6393 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6394
6395 ofs = (8 - size) % 4;
6396 if (ofs != 0)
6397 t = fold_build_pointer_plus_hwi (t, ofs);
6398
6399 t = fold_convert (ptr, t);
6400 t = build_va_arg_indirect_ref (t);
6401
6402 if (indirect)
6403 t = build_va_arg_indirect_ref (t);
6404
6405 return t;
6406 }
6407 }
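
/* Editor's note: a worked instance of the 32-bit va_arg arithmetic
   above. For a 1-byte char (size = 1, passed right justified in its
   word):

        t    = valist - 1              ; fold_build_pointer_plus
        t   &= -4                      ; size <= 4 selects 4-byte align
        ofs  = (8 - 1) % 4 = 3         ; skip the left padding bytes
        result = t + 3                 ; address of the value itself

   For an 8-byte double, size > 4 selects the -8 mask and
   ofs = (8 - 8) % 4 = 0, so the aligned pointer is used directly. */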
6408
6409 /* True if MODE is valid for the target. By "valid", we mean able to
6410 be manipulated in non-trivial ways. In particular, this means all
6411 the arithmetic is supported.
6412
6413 Currently, TImode is not valid as the HP 64-bit runtime documentation
6414 doesn't document the alignment and calling conventions for this type.
6415 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6416 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6417
6418 static bool
6419 pa_scalar_mode_supported_p (scalar_mode mode)
6420 {
6421 int precision = GET_MODE_PRECISION (mode);
6422
6423 switch (GET_MODE_CLASS (mode))
6424 {
6425 case MODE_PARTIAL_INT:
6426 case MODE_INT:
6427 if (precision == CHAR_TYPE_SIZE)
6428 return true;
6429 if (precision == SHORT_TYPE_SIZE)
6430 return true;
6431 if (precision == INT_TYPE_SIZE)
6432 return true;
6433 if (precision == LONG_TYPE_SIZE)
6434 return true;
6435 if (precision == LONG_LONG_TYPE_SIZE)
6436 return true;
6437 return false;
6438
6439 case MODE_FLOAT:
6440 if (precision == FLOAT_TYPE_SIZE)
6441 return true;
6442 if (precision == DOUBLE_TYPE_SIZE)
6443 return true;
6444 if (precision == LONG_DOUBLE_TYPE_SIZE)
6445 return true;
6446 return false;
6447
6448 case MODE_DECIMAL_FLOAT:
6449 return false;
6450
6451 default:
6452 gcc_unreachable ();
6453 }
6454 }
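
/* Editor's note: with the usual 32-bit HP-UX type sizes (an assumption;
   char 8, short 16, int and long 32, long long 64, float 32, double 64,
   long double 128), the function above accepts QImode, HImode, SImode,
   DImode, SFmode, DFmode and TFmode, and rejects TImode because no
   integer type is 128 bits wide. */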
6455
6456 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6457 it branches into the delay slot. Otherwise, return FALSE. */
6458
6459 static bool
6460 branch_to_delay_slot_p (rtx_insn *insn)
6461 {
6462 rtx_insn *jump_insn;
6463
6464 if (dbr_sequence_length ())
6465 return FALSE;
6466
6467 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6468 while (insn)
6469 {
6470 insn = next_active_insn (insn);
6471 if (jump_insn == insn)
6472 return TRUE;
6473
6474 /* We can't rely on the length of asms. So, we return FALSE when
6475 the branch is followed by an asm. */
6476 if (!insn
6477 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6478 || asm_noperands (PATTERN (insn)) >= 0
6479 || get_attr_length (insn) > 0)
6480 break;
6481 }
6482
6483 return FALSE;
6484 }
6485
6486 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6487
6488 This occurs when INSN has an unfilled delay slot and is followed
6489 by an asm. Disaster can occur if the asm is empty and the jump
6490 branches into the delay slot. So, we add a nop in the delay slot
6491 when this occurs. */
6492
6493 static bool
6494 branch_needs_nop_p (rtx_insn *insn)
6495 {
6496 rtx_insn *jump_insn;
6497
6498 if (dbr_sequence_length ())
6499 return FALSE;
6500
6501 jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6502 while (insn)
6503 {
6504 insn = next_active_insn (insn);
6505 if (!insn || jump_insn == insn)
6506 return TRUE;
6507
6508 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6509 || asm_noperands (PATTERN (insn)) >= 0)
6510 && get_attr_length (insn) > 0)
6511 break;
6512 }
6513
6514 return FALSE;
6515 }
6516
6517 /* Return TRUE if INSN, a forward jump insn, can use nullification
6518 to skip the following instruction. This avoids an extra cycle due
6519 to a mis-predicted branch when we fall through. */
6520
6521 static bool
6522 use_skip_p (rtx_insn *insn)
6523 {
6524 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL_AS_INSN (insn));
6525
6526 while (insn)
6527 {
6528 insn = next_active_insn (insn);
6529
6530 /* We can't rely on the length of asms, so we can't skip asms. */
6531 if (!insn
6532 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6533 || asm_noperands (PATTERN (insn)) >= 0)
6534 break;
6535 if (get_attr_length (insn) == 4
6536 && jump_insn == next_active_insn (insn))
6537 return TRUE;
6538 if (get_attr_length (insn) > 0)
6539 break;
6540 }
6541
6542 return FALSE;
6543 }
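
/* Editor's note: the transformation that use_skip_p enables replaces a
   branch over a single insn with a nullifying compare. Schematically,
   with hypothetical operands:

        comb,=   %r4,%r5,L$skip   ; branch over one insn if equal
        add      %r6,%r7,%r8
   L$skip:

   becomes

        comclr,= %r4,%r5,%r0      ; nullify the next insn if equal
        add      %r6,%r7,%r8

   avoiding the taken-branch penalty on the fall-through path. */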
6544
6545 /* This routine handles all the normal conditional branch sequences we
6546 might need to generate. It handles compare immediate vs compare
6547 register, nullification of delay slots, varying length branches,
6548 negated branches, and all combinations of the above. It returns the
6549 output appropriate to emit the branch corresponding to all given
6550 parameters. */
6551
6552 const char *
6553 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6554 {
6555 static char buf[100];
6556 bool useskip;
6557 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6558 int length = get_attr_length (insn);
6559 int xdelay;
6560
6561 /* A conditional branch to the following instruction (e.g. the delay slot)
6562 is asking for a disaster. This can happen when not optimizing and
6563 when jump optimization fails.
6564
6565 While it is usually safe to emit nothing, this can fail if the
6566 preceding instruction is a nullified branch with an empty delay
6567 slot and the same branch target as this branch. We could check
6568 for this but jump optimization should eliminate nop jumps. It
6569 is always safe to emit a nop. */
6570 if (branch_to_delay_slot_p (insn))
6571 return "nop";
6572
6573 /* The doubleword form of the cmpib instruction doesn't have the LEU
6574 and GTU conditions while the cmpb instruction does. Since we accept
6575 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6576 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6577 operands[2] = gen_rtx_REG (DImode, 0);
6578 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6579 operands[1] = gen_rtx_REG (DImode, 0);
6580
6581 /* If this is a long branch with its delay slot unfilled, set `nullify'
6582 as it can nullify the delay slot and save a nop. */
6583 if (length == 8 && dbr_sequence_length () == 0)
6584 nullify = 1;
6585
6586 /* If this is a short forward conditional branch which did not get
6587 its delay slot filled, the delay slot can still be nullified. */
6588 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6589 nullify = forward_branch_p (insn);
6590
6591 /* A forward branch over a single nullified insn can be done with a
6592 comclr instruction. This avoids a single cycle penalty due to a
6593 mis-predicted branch if we fall through (branch not taken). */
6594 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6595
6596 switch (length)
6597 {
6598 /* All short conditional branches except backwards with an unfilled
6599 delay slot. */
6600 case 4:
6601 if (useskip)
6602 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6603 else
6604 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6605 if (GET_MODE (operands[1]) == DImode)
6606 strcat (buf, "*");
6607 if (negated)
6608 strcat (buf, "%B3");
6609 else
6610 strcat (buf, "%S3");
6611 if (useskip)
6612 strcat (buf, " %2,%r1,%%r0");
6613 else if (nullify)
6614 {
6615 if (branch_needs_nop_p (insn))
6616 strcat (buf, ",n %2,%r1,%0%#");
6617 else
6618 strcat (buf, ",n %2,%r1,%0");
6619 }
6620 else
6621 strcat (buf, " %2,%r1,%0");
6622 break;
6623
6624 /* All long conditionals. Note a short backward branch with an
6625 unfilled delay slot is treated just like a long backward branch
6626 with an unfilled delay slot. */
6627 case 8:
6628 /* Handle weird backwards branch with a filled delay slot
6629 which is nullified. */
6630 if (dbr_sequence_length () != 0
6631 && ! forward_branch_p (insn)
6632 && nullify)
6633 {
6634 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6635 if (GET_MODE (operands[1]) == DImode)
6636 strcat (buf, "*");
6637 if (negated)
6638 strcat (buf, "%S3");
6639 else
6640 strcat (buf, "%B3");
6641 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6642 }
6643 /* Handle short backwards branch with an unfilled delay slot.
6644 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6645 taken and untaken branches. */
6646 else if (dbr_sequence_length () == 0
6647 && ! forward_branch_p (insn)
6648 && INSN_ADDRESSES_SET_P ()
6649 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6650 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6651 {
6652 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6653 if (GET_MODE (operands[1]) == DImode)
6654 strcat (buf, "*");
6655 if (negated)
6656 strcat (buf, "%B3 %2,%r1,%0%#");
6657 else
6658 strcat (buf, "%S3 %2,%r1,%0%#");
6659 }
6660 else
6661 {
6662 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6663 if (GET_MODE (operands[1]) == DImode)
6664 strcat (buf, "*");
6665 if (negated)
6666 strcat (buf, "%S3");
6667 else
6668 strcat (buf, "%B3");
6669 if (nullify)
6670 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6671 else
6672 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6673 }
6674 break;
6675
6676 default:
6677 /* The reversed conditional branch must branch over one additional
6678 instruction if the delay slot is filled and needs to be extracted
6679 by pa_output_lbranch. If the delay slot is empty or this is a
6680 nullified forward branch, the instruction after the reversed
6681 condition branch must be nullified. */
6682 if (dbr_sequence_length () == 0
6683 || (nullify && forward_branch_p (insn)))
6684 {
6685 nullify = 1;
6686 xdelay = 0;
6687 operands[4] = GEN_INT (length);
6688 }
6689 else
6690 {
6691 xdelay = 1;
6692 operands[4] = GEN_INT (length + 4);
6693 }
6694
6695 /* Create a reversed conditional branch which branches around
6696 the following insns. */
6697 if (GET_MODE (operands[1]) != DImode)
6698 {
6699 if (nullify)
6700 {
6701 if (negated)
6702 strcpy (buf,
6703 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6704 else
6705 strcpy (buf,
6706 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6707 }
6708 else
6709 {
6710 if (negated)
6711 strcpy (buf,
6712 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6713 else
6714 strcpy (buf,
6715 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6716 }
6717 }
6718 else
6719 {
6720 if (nullify)
6721 {
6722 if (negated)
6723 strcpy (buf,
6724 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6725 else
6726 strcpy (buf,
6727 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6728 }
6729 else
6730 {
6731 if (negated)
6732 strcpy (buf,
6733 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6734 else
6735 strcpy (buf,
6736 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6737 }
6738 }
6739
6740 output_asm_insn (buf, operands);
6741 return pa_output_lbranch (operands[0], insn, xdelay);
6742 }
6743 return buf;
6744 }
6745
6746 /* Output a PIC pc-relative instruction sequence to load the address of
6747 OPERANDS[0] to register OPERANDS[2]. OPERANDS[0] is a symbol ref
6748 or a code label. OPERANDS[1] specifies the register to use to load
6749 the program counter. OPERANDS[3] may be used for label generation
6750 The sequence is always three instructions in length. The program
6751 counter recorded for PA 1.X is eight bytes more than that for PA 2.0.
6752 Register %r1 is clobbered. */
6753
6754 static void
6755 pa_output_pic_pcrel_sequence (rtx *operands)
6756 {
6757 gcc_assert (SYMBOL_REF_P (operands[0]) || LABEL_P (operands[0]));
6758 if (TARGET_PA_20)
6759 {
6760 /* We can use mfia to determine the current program counter. */
6761 if (TARGET_SOM || !TARGET_GAS)
6762 {
6763 operands[3] = gen_label_rtx ();
6764 targetm.asm_out.internal_label (asm_out_file, "L",
6765 CODE_LABEL_NUMBER (operands[3]));
6766 output_asm_insn ("mfia %1", operands);
6767 output_asm_insn ("addil L'%0-%l3,%1", operands);
6768 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6769 }
6770 else
6771 {
6772 output_asm_insn ("mfia %1", operands);
6773 output_asm_insn ("addil L'%0-$PIC_pcrel$0+12,%1", operands);
6774 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+16(%%r1),%2", operands);
6775 }
6776 }
6777 else
6778 {
6779 /* We need to use a branch to determine the current program counter. */
6780 output_asm_insn ("{bl|b,l} .+8,%1", operands);
6781 if (TARGET_SOM || !TARGET_GAS)
6782 {
6783 operands[3] = gen_label_rtx ();
6784 output_asm_insn ("addil L'%0-%l3,%1", operands);
6785 targetm.asm_out.internal_label (asm_out_file, "L",
6786 CODE_LABEL_NUMBER (operands[3]));
6787 output_asm_insn ("ldo R'%0-%l3(%%r1),%2", operands);
6788 }
6789 else
6790 {
6791 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%1", operands);
6792 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%2", operands);
6793 }
6794 }
6795 }
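
/* Editor's note: a sketch of the PA 2.0 GAS variant emitted above for a
   hypothetical symbol "sym", with %r1 as the program counter register
   and %r2 as the destination:

        mfia  %r1                             ; current pc
        addil L'sym-$PIC_pcrel$0+12,%r1       ; high part (result in %r1)
        ldo   R'sym-$PIC_pcrel$0+16(%r1),%r2  ; low part

   The +12/+16 offsets account for the distance from the mfia to the
   instructions that consume the pc. */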
6796
6797 /* This routine handles output of long unconditional branches that
6798 exceed the maximum range of a simple branch instruction. Since
6799 we don't have a register available for the branch, we save register
6800 %r1 in the frame marker, load the branch destination DEST into %r1,
6801 execute the branch, and restore %r1 in the delay slot of the branch.
6802
6803 Since long branches may have an insn in the delay slot and the
6804 delay slot is used to restore %r1, we in general need to extract
6805 this insn and execute it before the branch. However, to facilitate
6806 use of this function by conditional branches, we also provide an
6807 option to not extract the delay insn so that it will be emitted
6808 after the long branch. So, if there is an insn in the delay slot,
6809 it is extracted if XDELAY is nonzero.
6810
6811 The lengths of the various long-branch sequences are 20, 16 and 24
6812 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6813
6814 const char *
6815 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6816 {
6817 rtx xoperands[4];
6818
6819 xoperands[0] = dest;
6820
6821 /* First, free up the delay slot. */
6822 if (xdelay && dbr_sequence_length () != 0)
6823 {
6824 /* We can't handle a jump in the delay slot. */
6825 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6826
6827 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6828 optimize, 0, NULL);
6829
6830 /* Now delete the delay insn. */
6831 SET_INSN_DELETED (NEXT_INSN (insn));
6832 }
6833
6834 /* Output an insn to save %r1. The runtime documentation doesn't
6835 specify whether the "Clean Up" slot in the caller's frame can
6836 be clobbered by the callee. It isn't copied by HP's builtin
6837 alloca, so this suggests that it can be clobbered if necessary.
6838 The "Static Link" location is copied by HP builtin alloca, so
6839 we avoid using it. Using the cleanup slot might be a problem
6840 if we have to interoperate with languages that pass cleanup
6841 information. However, it should be possible to handle these
6842 situations with GCC's asm feature.
6843
6844 The "Current RP" slot is reserved for the called procedure, so
6845 we try to use it when we don't have a frame of our own. It's
6846 rather unlikely that we won't have a frame when we need to emit
6847 a very long branch.
6848
6849 Really the way to go long term is a register scavenger; go to
6850 the target of the jump and find a register which we can use
6851 as a scratch to hold the value in %r1. Then, we wouldn't have
6852 to free up the delay slot or clobber a slot that may be needed
6853 for other purposes. */
6854 if (TARGET_64BIT)
6855 {
6856 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6857 /* Use the return pointer slot in the frame marker. */
6858 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6859 else
6860 /* Use the slot at -40 in the frame marker since HP builtin
6861 alloca doesn't copy it. */
6862 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6863 }
6864 else
6865 {
6866 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6867 /* Use the return pointer slot in the frame marker. */
6868 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6869 else
6870 /* Use the "Clean Up" slot in the frame marker. In GCC,
6871 the only other use of this location is for copying a
6872 floating point double argument from a floating-point
6873 register to two general registers. The copy is done
6874 as an "atomic" operation when outputting a call, so it
6875 won't interfere with our using the location here. */
6876 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6877 }
6878
6879 if (TARGET_PORTABLE_RUNTIME)
6880 {
6881 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6882 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6883 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6884 }
6885 else if (flag_pic)
6886 {
6887 xoperands[1] = gen_rtx_REG (Pmode, 1);
6888 xoperands[2] = xoperands[1];
6889 pa_output_pic_pcrel_sequence (xoperands);
6890 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6891 }
6892 else
6893 /* Now output a very long branch to the original target. */
6894 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6895
6896 /* Now restore the value of %r1 in the delay slot. */
6897 if (TARGET_64BIT)
6898 {
6899 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6900 return "ldd -16(%%r30),%%r1";
6901 else
6902 return "ldd -40(%%r30),%%r1";
6903 }
6904 else
6905 {
6906 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6907 return "ldw -20(%%r30),%%r1";
6908 else
6909 return "ldw -12(%%r30),%%r1";
6910 }
6911 }
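
/* Editor's note: the full 32-bit non-PIC sequence assembled by the
   routine above when the function has a frame (so the "Clean Up" slot
   at -12 is used), for a hypothetical target L$dest:

        stw  %r1,-12(%r30)        ; save %r1 in the frame marker
        ldil L'L$dest,%r1         ; high 21 bits of the target
        be   R'L$dest(%sr4,%r1)   ; interspace branch through %r1
        ldw  -12(%r30),%r1        ; delay slot: restore %r1

   Four 4-byte insns, matching the 16-byte non-PIC length quoted in the
   header comment. */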
6912
6913 /* This routine handles all the branch-on-bit conditional branch sequences we
6914 might need to generate. It handles nullification of delay slots,
6915 varying length branches, negated branches and all combinations of the
6916 above. It returns the appropriate output template to emit the branch. */
6917
6918 const char *
6919 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6920 {
6921 static char buf[100];
6922 bool useskip;
6923 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6924 int length = get_attr_length (insn);
6925 int xdelay;
6926
6927 /* A conditional branch to the following instruction (e.g. the delay slot) is
6928 asking for a disaster. I do not think this can happen as this pattern
6929 is only used when optimizing; jump optimization should eliminate the
6930 jump. But be prepared just in case. */
6931
6932 if (branch_to_delay_slot_p (insn))
6933 return "nop";
6934
6935 /* If this is a long branch with its delay slot unfilled, set `nullify'
6936 as it can nullify the delay slot and save a nop. */
6937 if (length == 8 && dbr_sequence_length () == 0)
6938 nullify = 1;
6939
6940 /* If this is a short forward conditional branch which did not get
6941 its delay slot filled, the delay slot can still be nullified. */
6942 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6943 nullify = forward_branch_p (insn);
6944
6945 /* A forward branch over a single nullified insn can be done with an
6946 extrs instruction. This avoids a single cycle penalty due to a
6947 mis-predicted branch if we fall through (branch not taken). */
6948 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6949
6950 switch (length)
6951 {
6952
6953 /* All short conditional branches except backwards with an unfilled
6954 delay slot. */
6955 case 4:
6956 if (useskip)
6957 strcpy (buf, "{extrs,|extrw,s,}");
6958 else
6959 strcpy (buf, "bb,");
6960 if (useskip && GET_MODE (operands[0]) == DImode)
6961 strcpy (buf, "extrd,s,*");
6962 else if (GET_MODE (operands[0]) == DImode)
6963 strcpy (buf, "bb,*");
6964 if ((which == 0 && negated)
6965 || (which == 1 && ! negated))
6966 strcat (buf, ">=");
6967 else
6968 strcat (buf, "<");
6969 if (useskip)
6970 strcat (buf, " %0,%1,1,%%r0");
6971 else if (nullify && negated)
6972 {
6973 if (branch_needs_nop_p (insn))
6974 strcat (buf, ",n %0,%1,%3%#");
6975 else
6976 strcat (buf, ",n %0,%1,%3");
6977 }
6978 else if (nullify && ! negated)
6979 {
6980 if (branch_needs_nop_p (insn))
6981 strcat (buf, ",n %0,%1,%2%#");
6982 else
6983 strcat (buf, ",n %0,%1,%2");
6984 }
6985 else if (! nullify && negated)
6986 strcat (buf, " %0,%1,%3");
6987 else if (! nullify && ! negated)
6988 strcat (buf, " %0,%1,%2");
6989 break;
6990
6991 /* All long conditionals. Note a short backward branch with an
6992 unfilled delay slot is treated just like a long backward branch
6993 with an unfilled delay slot. */
6994 case 8:
6995 /* Handle weird backwards branch with a filled delay slot
6996 which is nullified. */
6997 if (dbr_sequence_length () != 0
6998 && ! forward_branch_p (insn)
6999 && nullify)
7000 {
7001 strcpy (buf, "bb,");
7002 if (GET_MODE (operands[0]) == DImode)
7003 strcat (buf, "*");
7004 if ((which == 0 && negated)
7005 || (which == 1 && ! negated))
7006 strcat (buf, "<");
7007 else
7008 strcat (buf, ">=");
7009 if (negated)
7010 strcat (buf, ",n %0,%1,.+12\n\tb %3");
7011 else
7012 strcat (buf, ",n %0,%1,.+12\n\tb %2");
7013 }
7014 /* Handle short backwards branch with an unfilled delay slot.
7015 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7016 taken and untaken branches. */
7017 else if (dbr_sequence_length () == 0
7018 && ! forward_branch_p (insn)
7019 && INSN_ADDRESSES_SET_P ()
7020 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7021 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7022 {
7023 strcpy (buf, "bb,");
7024 if (GET_MODE (operands[0]) == DImode)
7025 strcat (buf, "*");
7026 if ((which == 0 && negated)
7027 || (which == 1 && ! negated))
7028 strcat (buf, ">=");
7029 else
7030 strcat (buf, "<");
7031 if (negated)
7032 strcat (buf, " %0,%1,%3%#");
7033 else
7034 strcat (buf, " %0,%1,%2%#");
7035 }
7036 else
7037 {
7038 if (GET_MODE (operands[0]) == DImode)
7039 strcpy (buf, "extrd,s,*");
7040 else
7041 strcpy (buf, "{extrs,|extrw,s,}");
7042 if ((which == 0 && negated)
7043 || (which == 1 && ! negated))
7044 strcat (buf, "<");
7045 else
7046 strcat (buf, ">=");
7047 if (nullify && negated)
7048 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
7049 else if (nullify && ! negated)
7050 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
7051 else if (negated)
7052 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
7053 else
7054 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
7055 }
7056 break;
7057
7058 default:
7059 /* The reversed conditional branch must branch over one additional
7060 instruction if the delay slot is filled and needs to be extracted
7061 by pa_output_lbranch. If the delay slot is empty or this is a
7062 nullified forward branch, the instruction after the reversed
7063 condition branch must be nullified. */
7064 if (dbr_sequence_length () == 0
7065 || (nullify && forward_branch_p (insn)))
7066 {
7067 nullify = 1;
7068 xdelay = 0;
7069 operands[4] = GEN_INT (length);
7070 }
7071 else
7072 {
7073 xdelay = 1;
7074 operands[4] = GEN_INT (length + 4);
7075 }
7076
7077 if (GET_MODE (operands[0]) == DImode)
7078 strcpy (buf, "bb,*");
7079 else
7080 strcpy (buf, "bb,");
7081 if ((which == 0 && negated)
7082 || (which == 1 && !negated))
7083 strcat (buf, "<");
7084 else
7085 strcat (buf, ">=");
7086 if (nullify)
7087 strcat (buf, ",n %0,%1,.+%4");
7088 else
7089 strcat (buf, " %0,%1,.+%4");
7090 output_asm_insn (buf, operands);
7091 return pa_output_lbranch (negated ? operands[3] : operands[2],
7092 insn, xdelay);
7093 }
7094 return buf;
7095 }
7096
7097 /* This routine handles all the branch-on-variable-bit conditional branch
7098 sequences we might need to generate. It handles nullification of delay
7099 slots, varying length branches, negated branches and all combinations
7100 of the above. It returns the appropriate output template to emit the
7101 branch. */
7102
7103 const char *
7104 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
7105 int which)
7106 {
7107 static char buf[100];
7108 bool useskip;
7109 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7110 int length = get_attr_length (insn);
7111 int xdelay;
7112
7113 /* A conditional branch to the following instruction (e.g. the delay slot) is
7114 asking for a disaster. I do not think this can happen as this pattern
7115 is only used when optimizing; jump optimization should eliminate the
7116 jump. But be prepared just in case. */
7117
7118 if (branch_to_delay_slot_p (insn))
7119 return "nop";
7120
7121 /* If this is a long branch with its delay slot unfilled, set `nullify'
7122 as it can nullify the delay slot and save a nop. */
7123 if (length == 8 && dbr_sequence_length () == 0)
7124 nullify = 1;
7125
7126 /* If this is a short forward conditional branch which did not get
7127 its delay slot filled, the delay slot can still be nullified. */
7128 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7129 nullify = forward_branch_p (insn);
7130
7131 /* A forward branch over a single nullified insn can be done with an
7132 extrs instruction. This avoids a single cycle penalty due to a
7133 mis-predicted branch if we fall through (branch not taken). */
7134 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7135
7136 switch (length)
7137 {
7138
7139 /* All short conditional branches except backwards with an unfilled
7140 delay slot. */
7141 case 4:
7142 if (useskip)
7143 strcpy (buf, "{vextrs,|extrw,s,}");
7144 else
7145 strcpy (buf, "{bvb,|bb,}");
7146 if (useskip && GET_MODE (operands[0]) == DImode)
7147 strcpy (buf, "extrd,s,*");
7148 else if (GET_MODE (operands[0]) == DImode)
7149 strcpy (buf, "bb,*");
7150 if ((which == 0 && negated)
7151 || (which == 1 && ! negated))
7152 strcat (buf, ">=");
7153 else
7154 strcat (buf, "<");
7155 if (useskip)
7156 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7157 else if (nullify && negated)
7158 {
7159 if (branch_needs_nop_p (insn))
7160 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7161 else
7162 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7163 }
7164 else if (nullify && ! negated)
7165 {
7166 if (branch_needs_nop_p (insn))
7167 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7168 else
7169 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7170 }
7171 else if (! nullify && negated)
7172 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7173 else if (! nullify && ! negated)
7174 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7175 break;
7176
7177 /* All long conditionals. Note a short backward branch with an
7178 unfilled delay slot is treated just like a long backward branch
7179 with an unfilled delay slot. */
7180 case 8:
7181 /* Handle weird backwards branch with a filled delay slot
7182 which is nullified. */
7183 if (dbr_sequence_length () != 0
7184 && ! forward_branch_p (insn)
7185 && nullify)
7186 {
7187 strcpy (buf, "{bvb,|bb,}");
7188 if (GET_MODE (operands[0]) == DImode)
7189 strcat (buf, "*");
7190 if ((which == 0 && negated)
7191 || (which == 1 && ! negated))
7192 strcat (buf, "<");
7193 else
7194 strcat (buf, ">=");
7195 if (negated)
7196 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7197 else
7198 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7199 }
7200 /* Handle short backwards branch with an unfilled delay slot.
7201 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7202 taken and untaken branches. */
7203 else if (dbr_sequence_length () == 0
7204 && ! forward_branch_p (insn)
7205 && INSN_ADDRESSES_SET_P ()
7206 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7207 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7208 {
7209 strcpy (buf, "{bvb,|bb,}");
7210 if (GET_MODE (operands[0]) == DImode)
7211 strcat (buf, "*");
7212 if ((which == 0 && negated)
7213 || (which == 1 && ! negated))
7214 strcat (buf, ">=");
7215 else
7216 strcat (buf, "<");
7217 if (negated)
7218 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7219 else
7220 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7221 }
7222 else
7223 {
7224 strcpy (buf, "{vextrs,|extrw,s,}");
7225 if (GET_MODE (operands[0]) == DImode)
7226 strcpy (buf, "extrd,s,*");
7227 if ((which == 0 && negated)
7228 || (which == 1 && ! negated))
7229 strcat (buf, "<");
7230 else
7231 strcat (buf, ">=");
7232 if (nullify && negated)
7233 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7234 else if (nullify && ! negated)
7235 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7236 else if (negated)
7237 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7238 else
7239 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7240 }
7241 break;
7242
7243 default:
7244 /* The reversed conditional branch must branch over one additional
7245 instruction if the delay slot is filled and needs to be extracted
7246 by pa_output_lbranch. If the delay slot is empty or this is a
7247 nullified forward branch, the instruction after the reversed
7248 condition branch must be nullified. */
7249 if (dbr_sequence_length () == 0
7250 || (nullify && forward_branch_p (insn)))
7251 {
7252 nullify = 1;
7253 xdelay = 0;
7254 operands[4] = GEN_INT (length);
7255 }
7256 else
7257 {
7258 xdelay = 1;
7259 operands[4] = GEN_INT (length + 4);
7260 }
7261
7262 if (GET_MODE (operands[0]) == DImode)
7263 strcpy (buf, "bb,*");
7264 else
7265 strcpy (buf, "{bvb,|bb,}");
7266 if ((which == 0 && negated)
7267 || (which == 1 && !negated))
7268 strcat (buf, "<");
7269 else
7270 strcat (buf, ">=");
7271 if (nullify)
7272 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7273 else
7274 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7275 output_asm_insn (buf, operands);
7276 return pa_output_lbranch (negated ? operands[3] : operands[2],
7277 insn, xdelay);
7278 }
7279 return buf;
7280 }
7281
7282 /* Return the output template for emitting a dbra type insn.
7283
7284 Note it may perform some output operations on its own before
7285 returning the final output string. */
7286 const char *
7287 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7288 {
7289 int length = get_attr_length (insn);
7290
7291 /* A conditional branch to the following instruction (e.g. the delay slot) is
7292 asking for a disaster. Be prepared! */
7293
7294 if (branch_to_delay_slot_p (insn))
7295 {
7296 if (which_alternative == 0)
7297 return "ldo %1(%0),%0";
7298 else if (which_alternative == 1)
7299 {
7300 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7301 output_asm_insn ("ldw -16(%%r30),%4", operands);
7302 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7303 return "{fldws|fldw} -16(%%r30),%0";
7304 }
7305 else
7306 {
7307 output_asm_insn ("ldw %0,%4", operands);
7308 return "ldo %1(%4),%4\n\tstw %4,%0";
7309 }
7310 }
7311
7312 if (which_alternative == 0)
7313 {
7314 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7315 int xdelay;
7316
7317 /* If this is a long branch with its delay slot unfilled, set `nullify'
7318 as it can nullify the delay slot and save a nop. */
7319 if (length == 8 && dbr_sequence_length () == 0)
7320 nullify = 1;
7321
7322 /* If this is a short forward conditional branch which did not get
7323 its delay slot filled, the delay slot can still be nullified. */
7324 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7325 nullify = forward_branch_p (insn);
7326
7327 switch (length)
7328 {
7329 case 4:
7330 if (nullify)
7331 {
7332 if (branch_needs_nop_p (insn))
7333 return "addib,%C2,n %1,%0,%3%#";
7334 else
7335 return "addib,%C2,n %1,%0,%3";
7336 }
7337 else
7338 return "addib,%C2 %1,%0,%3";
7339
7340 case 8:
7341 /* Handle weird backwards branch with a filled delay slot
7342 which is nullified. */
7343 if (dbr_sequence_length () != 0
7344 && ! forward_branch_p (insn)
7345 && nullify)
7346 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7347 /* Handle short backwards branch with an unfilled delay slot.
7348 Using an addb;nop rather than addi;bl saves 1 cycle for both
7349 taken and untaken branches. */
7350 else if (dbr_sequence_length () == 0
7351 && ! forward_branch_p (insn)
7352 && INSN_ADDRESSES_SET_P ()
7353 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7354 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7355 return "addib,%C2 %1,%0,%3%#";
7356
7357 /* Handle normal cases. */
7358 if (nullify)
7359 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7360 else
7361 return "addi,%N2 %1,%0,%0\n\tb %3";
7362
7363 default:
7364 /* The reversed conditional branch must branch over one additional
7365 instruction if the delay slot is filled and needs to be extracted
7366 by pa_output_lbranch. If the delay slot is empty or this is a
7367 nullified forward branch, the instruction after the reversed
7368 conditional branch must be nullified. */
7369 if (dbr_sequence_length () == 0
7370 || (nullify && forward_branch_p (insn)))
7371 {
7372 nullify = 1;
7373 xdelay = 0;
7374 operands[4] = GEN_INT (length);
7375 }
7376 else
7377 {
7378 xdelay = 1;
7379 operands[4] = GEN_INT (length + 4);
7380 }
7381
7382 if (nullify)
7383 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7384 else
7385 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7386
7387 return pa_output_lbranch (operands[3], insn, xdelay);
7388 }
7389
7390 }
7391 /* Deal with gross reload from FP register case. */
7392 else if (which_alternative == 1)
7393 {
7394 /* Move loop counter from FP register to MEM then into a GR,
7395 increment the GR, store the GR into MEM, and finally reload
7396 the FP register from MEM from within the branch's delay slot. */
7397 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7398 operands);
7399 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7400 if (length == 24)
7401 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7402 else if (length == 28)
7403 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7404 else
7405 {
7406 operands[5] = GEN_INT (length - 16);
7407 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7408 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7409 return pa_output_lbranch (operands[3], insn, 0);
7410 }
7411 }
7412 /* Deal with gross reload from memory case. */
7413 else
7414 {
7415 /* Reload loop counter from memory, the store back to memory
7416 happens in the branch's delay slot. */
7417 output_asm_insn ("ldw %0,%4", operands);
7418 if (length == 12)
7419 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7420 else if (length == 16)
7421 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7422 else
7423 {
7424 operands[5] = GEN_INT (length - 4);
7425 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7426 return pa_output_lbranch (operands[3], insn, 0);
7427 }
7428 }
7429 }
7430
7431 /* Return the output template for emitting a movb type insn.
7432
7433 Note it may perform some output operations on its own before
7434 returning the final output string. */
7435 const char *
7436 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7437 int reverse_comparison)
7438 {
7439 int length = get_attr_length (insn);
7440
7441 /* A conditional branch to the following instruction (e.g. the delay slot) is
7442 asking for a disaster. Be prepared! */
7443
7444 if (branch_to_delay_slot_p (insn))
7445 {
7446 if (which_alternative == 0)
7447 return "copy %1,%0";
7448 else if (which_alternative == 1)
7449 {
7450 output_asm_insn ("stw %1,-16(%%r30)", operands);
7451 return "{fldws|fldw} -16(%%r30),%0";
7452 }
7453 else if (which_alternative == 2)
7454 return "stw %1,%0";
7455 else
7456 return "mtsar %r1";
7457 }
7458
7459 /* Support the second variant: reverse the sense of the comparison in place. */
7460 if (reverse_comparison)
7461 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7462
7463 if (which_alternative == 0)
7464 {
7465 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7466 int xdelay;
7467
7468 /* If this is a long branch with its delay slot unfilled, set `nullify'
7469 as it can nullify the delay slot and save a nop. */
7470 if (length == 8 && dbr_sequence_length () == 0)
7471 nullify = 1;
7472
7473 /* If this is a short forward conditional branch which did not get
7474 its delay slot filled, the delay slot can still be nullified. */
7475 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7476 nullify = forward_branch_p (insn);
7477
7478 switch (length)
7479 {
7480 case 4:
7481 if (nullify)
7482 {
7483 if (branch_needs_nop_p (insn))
7484 return "movb,%C2,n %1,%0,%3%#";
7485 else
7486 return "movb,%C2,n %1,%0,%3";
7487 }
7488 else
7489 return "movb,%C2 %1,%0,%3";
7490
7491 case 8:
7492 /* Handle weird backwards branch with a filled delay slot
7493 which is nullified. */
7494 if (dbr_sequence_length () != 0
7495 && ! forward_branch_p (insn)
7496 && nullify)
7497 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7498
7499 /* Handle short backwards branch with an unfilled delay slot.
7500 Using a movb;nop rather than or;bl saves 1 cycle for both
7501 taken and untaken branches. */
7502 else if (dbr_sequence_length () == 0
7503 && ! forward_branch_p (insn)
7504 && INSN_ADDRESSES_SET_P ()
7505 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7506 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7507 return "movb,%C2 %1,%0,%3%#";
7508 /* Handle normal cases. */
7509 if (nullify)
7510 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7511 else
7512 return "or,%N2 %1,%%r0,%0\n\tb %3";
7513
7514 default:
7515 /* The reversed conditional branch must branch over one additional
7516 instruction if the delay slot is filled and needs to be extracted
7517 by pa_output_lbranch. If the delay slot is empty or this is a
7518 nullified forward branch, the instruction after the reversed
7519 conditional branch must be nullified. */
7520 if (dbr_sequence_length () == 0
7521 || (nullify && forward_branch_p (insn)))
7522 {
7523 nullify = 1;
7524 xdelay = 0;
7525 operands[4] = GEN_INT (length);
7526 }
7527 else
7528 {
7529 xdelay = 1;
7530 operands[4] = GEN_INT (length + 4);
7531 }
7532
7533 if (nullify)
7534 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7535 else
7536 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7537
7538 return pa_output_lbranch (operands[3], insn, xdelay);
7539 }
7540 }
7541 /* Deal with gross reload for FP destination register case. */
7542 else if (which_alternative == 1)
7543 {
7544 /* Move source register to MEM, perform the branch test, then
7545 finally load the FP register from MEM from within the branch's
7546 delay slot. */
7547 output_asm_insn ("stw %1,-16(%%r30)", operands);
7548 if (length == 12)
7549 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7550 else if (length == 16)
7551 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7552 else
7553 {
7554 operands[4] = GEN_INT (length - 4);
7555 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7556 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7557 return pa_output_lbranch (operands[3], insn, 0);
7558 }
7559 }
7560 /* Deal with gross reload from memory case. */
7561 else if (which_alternative == 2)
7562 {
7563 /* Reload loop counter from memory, the store back to memory
7564 happens in the branch's delay slot. */
7565 if (length == 8)
7566 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7567 else if (length == 12)
7568 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7569 else
7570 {
7571 operands[4] = GEN_INT (length);
7572 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7573 operands);
7574 return pa_output_lbranch (operands[3], insn, 0);
7575 }
7576 }
7577 /* Handle SAR as a destination. */
7578 else
7579 {
7580 if (length == 8)
7581 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7582 else if (length == 12)
7583 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7584 else
7585 {
7586 operands[4] = GEN_INT (length);
7587 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7588 operands);
7589 return pa_output_lbranch (operands[3], insn, 0);
7590 }
7591 }
7592 }
7593
7594 /* Copy any FP arguments in INSN into integer registers. */
7595 static void
7596 copy_fp_args (rtx_insn *insn)
7597 {
7598 rtx link;
7599 rtx xoperands[2];
7600
7601 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7602 {
7603 int arg_mode, regno;
7604 rtx use = XEXP (link, 0);
7605
7606 if (! (GET_CODE (use) == USE
7607 && GET_CODE (XEXP (use, 0)) == REG
7608 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7609 continue;
7610
7611 arg_mode = GET_MODE (XEXP (use, 0));
7612 regno = REGNO (XEXP (use, 0));
7613
7614 /* Is it a floating point register? */
7615 if (regno >= 32 && regno <= 39)
7616 {
7617 /* Copy the FP register into an integer register via memory. */
7618 if (arg_mode == SFmode)
7619 {
7620 xoperands[0] = XEXP (use, 0);
7621 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7622 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7623 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7624 }
7625 else
7626 {
7627 xoperands[0] = XEXP (use, 0);
7628 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7629 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7630 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7631 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7632 }
7633 }
7634 }
7635 }
7636
7637 /* Compute length of the FP argument copy sequence for INSN. */
7638 static int
7639 length_fp_args (rtx_insn *insn)
7640 {
7641 int length = 0;
7642 rtx link;
7643
7644 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7645 {
7646 int arg_mode, regno;
7647 rtx use = XEXP (link, 0);
7648
7649 if (! (GET_CODE (use) == USE
7650 && GET_CODE (XEXP (use, 0)) == REG
7651 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7652 continue;
7653
7654 arg_mode = GET_MODE (XEXP (use, 0));
7655 regno = REGNO (XEXP (use, 0));
7656
7657 /* Is it a floating point register? */
7658 if (regno >= 32 && regno <= 39)
7659 {
7660 if (arg_mode == SFmode)
7661 length += 8;
7662 else
7663 length += 12;
7664 }
7665 }
7666
7667 return length;
7668 }
7669
7670 /* Return the attribute length for the millicode call instruction INSN.
7671 The length must match the code generated by pa_output_millicode_call.
7672 We include the delay slot in the returned length as it is better to
7673 overestimate the length than to underestimate it. */
7674
7675 int
7676 pa_attr_length_millicode_call (rtx_insn *insn)
7677 {
7678 unsigned long distance = -1;
7679 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7680
7681 if (INSN_ADDRESSES_SET_P ())
7682 {
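/* If the sum wraps around, the distance is unknown or too large; the maximal value forces the long call sequences below.  */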
7683 distance = (total + insn_current_reference_address (insn));
7684 if (distance < total)
7685 distance = -1;
7686 }
7687
7688 if (TARGET_64BIT)
7689 {
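/* The 7600000 byte limit presumably leaves headroom below the roughly +-8MB reach of a PA 2.0 22-bit pc-relative branch.  */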
7690 if (!TARGET_LONG_CALLS && distance < 7600000)
7691 return 8;
7692
7693 return 20;
7694 }
7695 else if (TARGET_PORTABLE_RUNTIME)
7696 return 24;
7697 else
7698 {
7699 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7700 return 8;
7701
7702 if (!flag_pic)
7703 return 12;
7704
7705 return 24;
7706 }
7707 }
7708
7709 /* INSN is a function call.
7710
7711 CALL_DEST is the routine we are calling. */
7712
7713 const char *
7714 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7715 {
7716 int attr_length = get_attr_length (insn);
7717 int seq_length = dbr_sequence_length ();
7718 rtx xoperands[4];
7719
7720 xoperands[0] = call_dest;
7721
7722 /* Handle the common case where we are sure that the branch will
7723 reach the beginning of the $CODE$ subspace. The within reach
7724 form of the $$sh_func_adrs call has a length of 28. Because it
7725 has an attribute type of sh_func_adrs, it never has a nonzero
7726 sequence length (i.e., the delay slot is never filled). */
7727 if (!TARGET_LONG_CALLS
7728 && (attr_length == 8
7729 || (attr_length == 28
7730 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7731 {
7732 xoperands[1] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7733 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7734 }
7735 else
7736 {
7737 if (TARGET_64BIT)
7738 {
7739 /* It might seem that one insn could be saved by accessing
7740 the millicode function using the linkage table. However,
7741 this doesn't work in shared libraries and other dynamically
7742 loaded objects. Using a pc-relative sequence also avoids
7743 problems related to the implicit use of the gp register. */
7744 xoperands[1] = gen_rtx_REG (Pmode, 1);
7745 xoperands[2] = xoperands[1];
7746 pa_output_pic_pcrel_sequence (xoperands);
7747 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7748 }
7749 else if (TARGET_PORTABLE_RUNTIME)
7750 {
7751 /* Pure portable runtime doesn't allow be/ble; we also don't
7752 have PIC support in the assembler/linker, so this sequence
7753 is needed. */
7754
7755 /* Get the address of our target into %r1. */
7756 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7757 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7758
7759 /* Get our return address into %r31. */
7760 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7761 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7762
7763 /* Jump to our target address in %r1. */
7764 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7765 }
7766 else if (!flag_pic)
7767 {
7768 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7769 if (TARGET_PA_20)
7770 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7771 else
7772 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7773 }
7774 else
7775 {
7776 xoperands[1] = gen_rtx_REG (Pmode, 31);
7777 xoperands[2] = gen_rtx_REG (Pmode, 1);
7778 pa_output_pic_pcrel_sequence (xoperands);
7779
7780 /* Adjust return address. */
7781 output_asm_insn ("ldo {16|24}(%%r31),%%r31", xoperands);
7782
7783 /* Jump to our target address in %r1. */
7784 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7785 }
7786 }
7787
7788 if (seq_length == 0)
7789 output_asm_insn ("nop", xoperands);
7790
7791 return "";
7792 }
7793
7794 /* Return the attribute length of the call instruction INSN. The SIBCALL
7795 flag indicates whether INSN is a regular call or a sibling call. The
7796 length returned must be longer than the code actually generated by
7797 pa_output_call. Since branch shortening is done before delay branch
7798 sequencing, there is no way to determine whether or not the delay
7799 slot will be filled during branch shortening. Even when the delay
7800 slot is filled, we may have to add a nop if the delay slot contains
7801 a branch that can't reach its target. Thus, we always have to include
7802 the delay slot in the length estimate. This used to be done in
7803 pa_adjust_insn_length but we do it here now as some sequences always
7804 fill the delay slot and we can save four bytes in the estimate for
7805 these sequences. */
7806
7807 int
7808 pa_attr_length_call (rtx_insn *insn, int sibcall)
7809 {
7810 int local_call;
7811 rtx call, call_dest;
7812 tree call_decl;
7813 int length = 0;
7814 rtx pat = PATTERN (insn);
7815 unsigned long distance = -1;
7816
7817 gcc_assert (CALL_P (insn));
7818
7819 if (INSN_ADDRESSES_SET_P ())
7820 {
7821 unsigned long total;
7822
7823 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7824 distance = (total + insn_current_reference_address (insn));
7825 if (distance < total)
7826 distance = -1;
7827 }
7828
7829 gcc_assert (GET_CODE (pat) == PARALLEL);
7830
7831 /* Get the call rtx. */
7832 call = XVECEXP (pat, 0, 0);
7833 if (GET_CODE (call) == SET)
7834 call = SET_SRC (call);
7835
7836 gcc_assert (GET_CODE (call) == CALL);
7837
7838 /* Determine if this is a local call. */
7839 call_dest = XEXP (XEXP (call, 0), 0);
7840 call_decl = SYMBOL_REF_DECL (call_dest);
7841 local_call = call_decl && targetm.binds_local_p (call_decl);
7842
7843 /* pc-relative branch. */
7844 if (!TARGET_LONG_CALLS
7845 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7846 || distance < MAX_PCREL17F_OFFSET))
7847 length += 8;
7848
7849 /* 64-bit plabel sequence. */
7850 else if (TARGET_64BIT && !local_call)
7851 length += sibcall ? 28 : 24;
7852
7853 /* non-pic long absolute branch sequence. */
7854 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7855 length += 12;
7856
7857 /* long pc-relative branch sequence. */
7858 else if (TARGET_LONG_PIC_SDIFF_CALL
7859 || (TARGET_GAS && !TARGET_SOM && local_call))
7860 {
7861 length += 20;
7862
7863 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7864 length += 8;
7865 }
7866
7867 /* 32-bit plabel sequence. */
7868 else
7869 {
7870 length += 32;
7871
7872 if (TARGET_SOM)
7873 length += length_fp_args (insn);
7874
7875 if (flag_pic)
7876 length += 4;
7877
7878 if (!TARGET_PA_20)
7879 {
7880 if (!sibcall)
7881 length += 8;
7882
7883 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7884 length += 8;
7885 }
7886 }
7887
7888 return length;
7889 }
7890
7891 /* INSN is a function call.
7892
7893 CALL_DEST is the routine we are calling. */
7894
7895 const char *
7896 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7897 {
7898 int seq_length = dbr_sequence_length ();
7899 tree call_decl = SYMBOL_REF_DECL (call_dest);
7900 int local_call = call_decl && targetm.binds_local_p (call_decl);
7901 rtx xoperands[4];
7902
7903 xoperands[0] = call_dest;
7904
7905 /* Handle the common case where we're sure that the branch will reach
7906 the beginning of the "$CODE$" subspace. This is the beginning of
7907 the current function if we are in a named section. */
7908 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7909 {
7910 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7911 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7912 }
7913 else
7914 {
7915 if (TARGET_64BIT && !local_call)
7916 {
7917 /* ??? As far as I can tell, the HP linker doesn't support the
7918 long pc-relative sequence described in the 64-bit runtime
7919 architecture. So, we use a slightly longer indirect call. */
7920 xoperands[0] = pa_get_deferred_plabel (call_dest);
7921 xoperands[1] = gen_label_rtx ();
7922
7923 /* If this isn't a sibcall, we put the load of %r27 into the
7924 delay slot. We can't do this in a sibcall as we don't
7925 have a second call-clobbered scratch register available.
7926 We don't need to do anything when generating fast indirect
7927 calls. */
7928 if (seq_length != 0 && !sibcall)
7929 {
7930 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7931 optimize, 0, NULL);
7932
7933 /* Now delete the delay insn. */
7934 SET_INSN_DELETED (NEXT_INSN (insn));
7935 seq_length = 0;
7936 }
7937
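/* Load the plabel from the DLT; the descriptor it points to holds the entry point at offset 16 and the gp at offset 24.  */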
7938 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7939 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7940 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7941
7942 if (sibcall)
7943 {
7944 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7945 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7946 output_asm_insn ("bve (%%r1)", xoperands);
7947 }
7948 else
7949 {
7950 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7951 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7952 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7953 seq_length = 1;
7954 }
7955 }
7956 else
7957 {
7958 int indirect_call = 0;
7959
7960 /* Emit a long call. There are several different sequences
7961 of increasing length and complexity. In most cases,
7962 they don't allow an instruction in the delay slot. */
7963 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7964 && !TARGET_LONG_PIC_SDIFF_CALL
7965 && !(TARGET_GAS && !TARGET_SOM && local_call)
7966 && !TARGET_64BIT)
7967 indirect_call = 1;
7968
7969 if (seq_length != 0
7970 && !sibcall
7971 && (!TARGET_PA_20
7972 || indirect_call
7973 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7974 {
7975 /* A non-jump insn in the delay slot. By definition we can
7976 emit this insn before the call (and in fact before argument
7977 relocating). */
7978 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7979 NULL);
7980
7981 /* Now delete the delay insn. */
7982 SET_INSN_DELETED (NEXT_INSN (insn));
7983 seq_length = 0;
7984 }
7985
7986 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7987 {
7988 /* This is the best sequence for making long calls in
7989 non-pic code. Unfortunately, GNU ld doesn't provide
7990 the stub needed for external calls, and GAS's support
7991 for this with the SOM linker is buggy. It is safe
7992 to use this for local calls. */
7993 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7994 if (sibcall)
7995 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7996 else
7997 {
7998 if (TARGET_PA_20)
7999 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
8000 xoperands);
8001 else
8002 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
8003
8004 output_asm_insn ("copy %%r31,%%r2", xoperands);
8005 seq_length = 1;
8006 }
8007 }
8008 else
8009 {
8010 /* The HP assembler and linker can handle relocations for
8011 the difference of two symbols. The HP assembler
8012 recognizes the sequence as a pc-relative call and
8013 the linker provides stubs when needed. */
8014
8015 /* GAS currently can't generate the relocations that
8016 are needed for the SOM linker under HP-UX using this
8017 sequence. The GNU linker doesn't generate the stubs
8018 that are needed for external calls on TARGET_ELF32
8019 with this sequence. For now, we have to use a longer
8020 plabel sequence when using GAS for non-local calls. */
8021 if (TARGET_LONG_PIC_SDIFF_CALL
8022 || (TARGET_GAS && !TARGET_SOM && local_call))
8023 {
8024 xoperands[1] = gen_rtx_REG (Pmode, 1);
8025 xoperands[2] = xoperands[1];
8026 pa_output_pic_pcrel_sequence (xoperands);
8027 }
8028 else
8029 {
8030 /* Emit a long plabel-based call sequence. This is
8031 essentially an inline implementation of $$dyncall.
8032 We don't actually try to call $$dyncall as this is
8033 as difficult as calling the function itself. */
8034 xoperands[0] = pa_get_deferred_plabel (call_dest);
8035 xoperands[1] = gen_label_rtx ();
8036
8037 /* Since the call is indirect, FP arguments in registers
8038 need to be copied to the general registers. Then, the
8039 argument relocation stub will copy them back. */
8040 if (TARGET_SOM)
8041 copy_fp_args (insn);
8042
8043 if (flag_pic)
8044 {
8045 output_asm_insn ("addil LT'%0,%%r19", xoperands);
8046 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
8047 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
8048 }
8049 else
8050 {
8051 output_asm_insn ("addil LR'%0-$global$,%%r27",
8052 xoperands);
8053 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
8054 xoperands);
8055 }
8056
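/* If the plabel bit is set, %r1 points to a function descriptor rather than code: clear the low bits, then load the gp and the real entry point from the descriptor.  */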
8057 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
8058 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
8059 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
8060 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
8061
8062 if (!sibcall && !TARGET_PA_20)
8063 {
8064 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8065 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8066 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
8067 else
8068 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
8069 }
8070 }
8071
8072 if (TARGET_PA_20)
8073 {
8074 if (sibcall)
8075 output_asm_insn ("bve (%%r1)", xoperands);
8076 else
8077 {
8078 if (indirect_call)
8079 {
8080 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8081 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8082 seq_length = 1;
8083 }
8084 else
8085 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8086 }
8087 }
8088 else
8089 {
8090 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8091 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8092 xoperands);
8093
8094 if (sibcall)
8095 {
8096 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8097 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8098 else
8099 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8100 }
8101 else
8102 {
8103 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8104 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8105 else
8106 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8107
8108 if (indirect_call)
8109 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8110 else
8111 output_asm_insn ("copy %%r31,%%r2", xoperands);
8112 seq_length = 1;
8113 }
8114 }
8115 }
8116 }
8117 }
8118
8119 if (seq_length == 0)
8120 output_asm_insn ("nop", xoperands);
8121
8122 return "";
8123 }
8124
8125 /* Return the attribute length of the indirect call instruction INSN.
8126 The length must match the code generated by pa_output_indirect_call.
8127 The returned length includes the delay slot. Currently, the delay
8128 slot of an indirect call sequence is not exposed and it is used by
8129 the sequence itself. */
8130
8131 int
8132 pa_attr_length_indirect_call (rtx_insn *insn)
8133 {
8134 unsigned long distance = -1;
8135 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8136
8137 if (INSN_ADDRESSES_SET_P ())
8138 {
8139 distance = (total + insn_current_reference_address (insn));
8140 if (distance < total)
8141 distance = -1;
8142 }
8143
8144 if (TARGET_64BIT)
8145 return 12;
8146
8147 if (TARGET_FAST_INDIRECT_CALLS)
8148 return 8;
8149
8150 if (TARGET_PORTABLE_RUNTIME)
8151 return 16;
8152
8153 /* Inline version of $$dyncall. */
8154 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8155 return 20;
8156
8157 if (!TARGET_LONG_CALLS
8158 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8159 || distance < MAX_PCREL17F_OFFSET))
8160 return 8;
8161
8162 /* Out of reach, can use ble. */
8163 if (!flag_pic)
8164 return 12;
8165
8166 /* Inline version of $$dyncall. */
8167 if (TARGET_NO_SPACE_REGS || TARGET_PA_20)
8168 return 20;
8169
8170 if (!optimize_size)
8171 return 36;
8172
8173 /* Long PIC pc-relative call. */
8174 return 20;
8175 }
8176
8177 const char *
8178 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8179 {
8180 rtx xoperands[4];
8181 int length;
8182
8183 if (TARGET_64BIT)
8184 {
8185 xoperands[0] = call_dest;
8186 output_asm_insn ("ldd 16(%0),%%r2\n\t"
8187 "bve,l (%%r2),%%r2\n\t"
8188 "ldd 24(%0),%%r27", xoperands);
8189 return "";
8190 }
8191
8192 /* First the special case for kernels, level 0 systems, etc. */
8193 if (TARGET_FAST_INDIRECT_CALLS)
8194 {
8195 pa_output_arg_descriptor (insn);
8196 if (TARGET_PA_20)
8197 return "bve,l,n (%%r22),%%r2\n\tnop";
8198 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8199 }
8200
8201 if (TARGET_PORTABLE_RUNTIME)
8202 {
8203 output_asm_insn ("ldil L'$$dyncall,%%r31\n\t"
8204 "ldo R'$$dyncall(%%r31),%%r31", xoperands);
8205 pa_output_arg_descriptor (insn);
8206 return "blr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8207 }
8208
8209 /* Maybe emit a fast inline version of $$dyncall. */
8210 if ((TARGET_NO_SPACE_REGS || TARGET_PA_20) && !optimize_size)
8211 {
8212 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8213 "ldw 2(%%r22),%%r19\n\t"
8214 "ldw -2(%%r22),%%r22", xoperands);
8215 pa_output_arg_descriptor (insn);
8216 if (TARGET_NO_SPACE_REGS)
8217 {
8218 if (TARGET_PA_20)
8219 return "bve,l,n (%%r22),%%r2\n\tnop";
8220 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8221 }
8222 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8223 }
8224
8225 /* Now the normal case -- we can reach $$dyncall directly or
8226 we're sure that we can get there via a long-branch stub.
8227
8228 No need to check target flags as the length uniquely identifies
8229 the remaining cases. */
8230 length = pa_attr_length_indirect_call (insn);
8231 if (length == 8)
8232 {
8233 pa_output_arg_descriptor (insn);
8234
8235 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8236 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8237 variant of the B,L instruction can't be used on the SOM target. */
8238 if (TARGET_PA_20 && !TARGET_SOM)
8239 return "b,l,n $$dyncall,%%r2\n\tnop";
8240 else
8241 return "bl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8242 }
8243
8244 /* Long millicode call, but we are not generating PIC or portable runtime
8245 code. */
8246 if (length == 12)
8247 {
8248 output_asm_insn ("ldil L'$$dyncall,%%r2", xoperands);
8249 pa_output_arg_descriptor (insn);
8250 return "ble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8251 }
8252
8253 /* Maybe emit a fast inline version of $$dyncall. The long PIC
8254 pc-relative call sequence is five instructions. The inline PA 2.0
8255 version of $$dyncall is also five instructions. The PA 1.X versions
8256 are longer but still an overall win. */
8257 if (TARGET_NO_SPACE_REGS || TARGET_PA_20 || !optimize_size)
8258 {
8259 output_asm_insn ("bb,>=,n %%r22,30,.+12\n\t"
8260 "ldw 2(%%r22),%%r19\n\t"
8261 "ldw -2(%%r22),%%r22", xoperands);
8262 if (TARGET_NO_SPACE_REGS)
8263 {
8264 pa_output_arg_descriptor (insn);
8265 if (TARGET_PA_20)
8266 return "bve,l,n (%%r22),%%r2\n\tnop";
8267 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8268 }
8269 if (TARGET_PA_20)
8270 {
8271 pa_output_arg_descriptor (insn);
8272 return "bve,l (%%r22),%%r2\n\tstw %%r2,-24(%%sp)";
8273 }
8274 output_asm_insn ("bl .+8,%%r2\n\t"
8275 "ldo 16(%%r2),%%r2\n\t"
8276 "ldsid (%%r22),%%r1\n\t"
8277 "mtsp %%r1,%%sr0", xoperands);
8278 pa_output_arg_descriptor (insn);
8279 return "be 0(%%sr0,%%r22)\n\tstw %%r2,-24(%%sp)";
8280 }
8281
8282 /* We need a long PIC call to $$dyncall. */
8283 xoperands[0] = gen_rtx_SYMBOL_REF (Pmode, "$$dyncall");
8284 xoperands[1] = gen_rtx_REG (Pmode, 2);
8285 xoperands[2] = gen_rtx_REG (Pmode, 1);
8286 pa_output_pic_pcrel_sequence (xoperands);
8287 pa_output_arg_descriptor (insn);
8288 return "bv %%r0(%%r1)\n\tldo {12|20}(%%r2),%%r2";
8289 }
8290
8291 /* In HPUX 8.0's shared library scheme, special relocations are needed
8292 for function labels if they might be passed to a function
8293 in a shared library (because shared libraries don't live in code
8294 space), and special magic is needed to construct their address. */
8295
8296 void
8297 pa_encode_label (rtx sym)
8298 {
8299 const char *str = XSTR (sym, 0);
8300 int len = strlen (str) + 1;
8301 char *newstr, *p;
8302
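/* LEN already counts the terminating null, so LEN + 1 leaves room for the '@' prefix.  */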
8303 p = newstr = XALLOCAVEC (char, len + 1);
8304 *p++ = '@';
8305 strcpy (p, str);
8306
8307 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8308 }
8309
8310 static void
8311 pa_encode_section_info (tree decl, rtx rtl, int first)
8312 {
8313 int old_referenced = 0;
8314
8315 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8316 old_referenced
8317 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8318
8319 default_encode_section_info (decl, rtl, first);
8320
8321 if (first && TEXT_SPACE_P (decl))
8322 {
8323 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8324 if (TREE_CODE (decl) == FUNCTION_DECL)
8325 pa_encode_label (XEXP (rtl, 0));
8326 }
8327 else if (old_referenced)
8328 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8329 }
8330
8331 /* This is sort of inverse to pa_encode_section_info. */
8332
8333 static const char *
8334 pa_strip_name_encoding (const char *str)
8335 {
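/* Drop the '@' prefix added by pa_encode_label and any leading '*' verbatim-name marker.  */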
8336 str += (*str == '@');
8337 str += (*str == '*');
8338 return str;
8339 }
8340
8341 /* Returns 1 if OP is a function label involved in a simple addition
8342 with a constant. Used to keep certain patterns from matching
8343 during instruction combination. */
8344 int
8345 pa_is_function_label_plus_const (rtx op)
8346 {
8347 /* Strip off any CONST. */
8348 if (GET_CODE (op) == CONST)
8349 op = XEXP (op, 0);
8350
8351 return (GET_CODE (op) == PLUS
8352 && function_label_operand (XEXP (op, 0), VOIDmode)
8353 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8354 }
8355
8356 /* Output assembly code for a thunk to FUNCTION. */
8357
8358 static void
8359 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8360 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8361 tree function)
8362 {
8363 static unsigned int current_thunk_number;
8364 int val_14 = VAL_14_BITS_P (delta);
8365 unsigned int old_last_address = last_address, nbytes = 0;
8366 char label[17];
8367 rtx xoperands[4];
8368
8369 xoperands[0] = XEXP (DECL_RTL (function), 0);
8370 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8371 xoperands[2] = GEN_INT (delta);
8372
8373 final_start_function (emit_barrier (), file, 1);
8374
8375 /* Output the thunk. We know that the function is in the same
8376 translation unit (i.e., the same space) as the thunk, and that
8377 thunks are output after their method. Thus, we don't need an
8378 external branch to reach the function. With SOM and GAS,
8379 functions and thunks are effectively in different sections.
8380 Thus, we can always use an IA-relative branch and the linker
8381 will add a long branch stub if necessary.
8382
8383 However, we have to be careful when generating PIC code on the
8384 SOM port to ensure that the sequence does not transfer to an
8385 import stub for the target function as this could clobber the
8386 return value saved at SP-24. This would also apply to the
8387 32-bit linux port if the multi-space model is implemented. */
8388 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8389 && !(flag_pic && TREE_PUBLIC (function))
8390 && (TARGET_GAS || last_address < 262132))
8391 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8392 && ((targetm_common.have_named_sections
8393 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8394 /* The GNU 64-bit linker has rather poor stub management.
8395 So, we use a long branch from thunks that aren't in
8396 the same section as the target function. */
8397 && ((!TARGET_64BIT
8398 && (DECL_SECTION_NAME (thunk_fndecl)
8399 != DECL_SECTION_NAME (function)))
8400 || ((DECL_SECTION_NAME (thunk_fndecl)
8401 == DECL_SECTION_NAME (function))
8402 && last_address < 262132)))
8403 /* In this case, we need to be able to reach the start of
8404 the stub table even though the function is likely closer
8405 and can be jumped to directly. */
8406 || (targetm_common.have_named_sections
8407 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8408 && DECL_SECTION_NAME (function) == NULL
8409 && total_code_bytes < MAX_PCREL17F_OFFSET)
8410 /* Likewise. */
8411 || (!targetm_common.have_named_sections
8412 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8413 {
8414 if (!val_14)
8415 output_asm_insn ("addil L'%2,%%r26", xoperands);
8416
8417 output_asm_insn ("b %0", xoperands);
8418
8419 if (val_14)
8420 {
8421 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8422 nbytes += 8;
8423 }
8424 else
8425 {
8426 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8427 nbytes += 12;
8428 }
8429 }
8430 else if (TARGET_64BIT)
8431 {
8432 rtx xop[4];
8433
8434 /* We only have one call-clobbered scratch register, so we can't
8435 make use of the delay slot if delta doesn't fit in 14 bits. */
8436 if (!val_14)
8437 {
8438 output_asm_insn ("addil L'%2,%%r26", xoperands);
8439 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8440 }
8441
8442 /* Load function address into %r1. */
8443 xop[0] = xoperands[0];
8444 xop[1] = gen_rtx_REG (Pmode, 1);
8445 xop[2] = xop[1];
8446 pa_output_pic_pcrel_sequence (xop);
8447
8448 if (val_14)
8449 {
8450 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8451 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8452 nbytes += 20;
8453 }
8454 else
8455 {
8456 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8457 nbytes += 24;
8458 }
8459 }
8460 else if (TARGET_PORTABLE_RUNTIME)
8461 {
8462 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8463 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8464
8465 if (!val_14)
8466 output_asm_insn ("ldil L'%2,%%r26", xoperands);
8467
8468 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8469
8470 if (val_14)
8471 {
8472 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8473 nbytes += 16;
8474 }
8475 else
8476 {
8477 output_asm_insn ("ldo R'%2(%%r26),%%r26", xoperands);
8478 nbytes += 20;
8479 }
8480 }
8481 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8482 {
8483 /* The function is accessible from outside this module. The only
8484 way to avoid an import stub between the thunk and function is to
8485 call the function directly with an indirect sequence similar to
8486 that used by $$dyncall. This is possible because $$dyncall acts
8487 as the import stub in an indirect call. */
8488 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8489 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8490 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8491 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8492 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8493 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8494 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8495 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8496 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8497
8498 if (!val_14)
8499 {
8500 output_asm_insn ("addil L'%2,%%r26", xoperands);
8501 nbytes += 4;
8502 }
8503
8504 if (TARGET_PA_20)
8505 {
8506 output_asm_insn ("bve (%%r22)", xoperands);
8507 nbytes += 36;
8508 }
8509 else if (TARGET_NO_SPACE_REGS)
8510 {
8511 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8512 nbytes += 36;
8513 }
8514 else
8515 {
8516 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8517 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8518 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8519 nbytes += 44;
8520 }
8521
8522 if (val_14)
8523 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8524 else
8525 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8526 }
8527 else if (flag_pic)
8528 {
8529 rtx xop[4];
8530
8531 /* Load function address into %r22. */
8532 xop[0] = xoperands[0];
8533 xop[1] = gen_rtx_REG (Pmode, 1);
8534 xop[2] = gen_rtx_REG (Pmode, 22);
8535 pa_output_pic_pcrel_sequence (xop);
8536
8537 if (!val_14)
8538 output_asm_insn ("addil L'%2,%%r26", xoperands);
8539
8540 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8541
8542 if (val_14)
8543 {
8544 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8545 nbytes += 20;
8546 }
8547 else
8548 {
8549 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8550 nbytes += 24;
8551 }
8552 }
8553 else
8554 {
8555 if (!val_14)
8556 output_asm_insn ("addil L'%2,%%r26", xoperands);
8557
8558 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8559 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8560
8561 if (val_14)
8562 {
8563 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8564 nbytes += 12;
8565 }
8566 else
8567 {
8568 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8569 nbytes += 16;
8570 }
8571 }
8572
8573 final_end_function ();
8574
8575 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8576 {
8577 switch_to_section (data_section);
8578 output_asm_insn (".align 4", xoperands);
8579 ASM_OUTPUT_LABEL (file, label);
8580 output_asm_insn (".word P'%0", xoperands);
8581 }
8582
8583 current_thunk_number++;
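/* Round the thunk size up to a multiple of the function alignment.  */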
8584 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8585 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8586 last_address += nbytes;
8587 if (old_last_address > last_address)
8588 last_address = UINT_MAX;
8589 update_total_code_bytes (nbytes);
8590 }
8591
8592 /* Only direct calls to static functions are allowed to be sibling (tail)
8593 call optimized.
8594
8595 This restriction is necessary because some linker-generated stubs will
8596 store return pointers into rp' in some cases, which might clobber a
8597 live value already in rp'.
8598
8599 In a sibcall the current function and the target function share stack
8600 space. Thus if the path to the current function and the path to the
8601 target function save a value in rp', they save the value into the
8602 same stack slot, which has undesirable consequences.
8603
8604 Because of the deferred binding nature of shared libraries any function
8605 with external scope could be in a different load module and thus require
8606 rp' to be saved when calling that function. So sibcall optimizations
8607 can only be safe for static functions.
8608
8609 Note that GCC never needs return value relocations, so we don't have to
8610 worry about static calls with return value relocations (which require
8611 saving rp').
8612
8613 It is safe to perform a sibcall optimization when the target function
8614 will never return. */
8615 static bool
8616 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8617 {
8618 if (TARGET_PORTABLE_RUNTIME)
8619 return false;
8620
8621 /* Sibcalls are not ok because the arg pointer register is not a fixed
8622 register. This prevents the sibcall optimization from occurring. In
8623 addition, there are problems with stub placement using GNU ld. This
8624 is because a normal sibcall branch uses a 17-bit relocation while
8625 a regular call branch uses a 22-bit relocation. As a result, more
8626 care needs to be taken in the placement of long-branch stubs. */
8627 if (TARGET_64BIT)
8628 return false;
8629
8630 /* Sibcalls are only ok within a translation unit. */
8631 return (decl && !TREE_PUBLIC (decl));
8632 }
8633
8634 /* ??? Addition is not commutative on the PA due to the weird implicit
8635 space register selection rules for memory addresses. Therefore, we
8636 don't consider a + b == b + a, as this might be inside a MEM. */
8637 static bool
8638 pa_commutative_p (const_rtx x, int outer_code)
8639 {
8640 return (COMMUTATIVE_P (x)
8641 && (TARGET_NO_SPACE_REGS
8642 || (outer_code != UNKNOWN && outer_code != MEM)
8643 || GET_CODE (x) != PLUS));
8644 }
8645
8646 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8647 use in fmpyadd instructions. */
8648 int
8649 pa_fmpyaddoperands (rtx *operands)
8650 {
8651 machine_mode mode = GET_MODE (operands[0]);
8652
8653 /* Must be a floating point mode. */
8654 if (mode != SFmode && mode != DFmode)
8655 return 0;
8656
8657 /* All modes must be the same. */
8658 if (! (mode == GET_MODE (operands[1])
8659 && mode == GET_MODE (operands[2])
8660 && mode == GET_MODE (operands[3])
8661 && mode == GET_MODE (operands[4])
8662 && mode == GET_MODE (operands[5])))
8663 return 0;
8664
8665 /* All operands must be registers. */
8666 if (! (GET_CODE (operands[1]) == REG
8667 && GET_CODE (operands[2]) == REG
8668 && GET_CODE (operands[3]) == REG
8669 && GET_CODE (operands[4]) == REG
8670 && GET_CODE (operands[5]) == REG))
8671 return 0;
8672
8673 /* Only 2 real operands to the addition. One of the input operands must
8674 be the same as the output operand. */
8675 if (! rtx_equal_p (operands[3], operands[4])
8676 && ! rtx_equal_p (operands[3], operands[5]))
8677 return 0;
8678
8679 /* Inout operand of add cannot conflict with any operands from multiply. */
8680 if (rtx_equal_p (operands[3], operands[0])
8681 || rtx_equal_p (operands[3], operands[1])
8682 || rtx_equal_p (operands[3], operands[2]))
8683 return 0;
8684
8685 /* Multiply cannot feed into addition operands. */
8686 if (rtx_equal_p (operands[4], operands[0])
8687 || rtx_equal_p (operands[5], operands[0]))
8688 return 0;
8689
8690 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8691 if (mode == SFmode
8692 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8693 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8694 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8695 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8696 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8697 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8698 return 0;
8699
8700 /* Passed. Operands are suitable for fmpyadd. */
8701 return 1;
8702 }
8703
8704 #if !defined(USE_COLLECT2)
8705 static void
8706 pa_asm_out_constructor (rtx symbol, int priority)
8707 {
8708 if (!function_label_operand (symbol, VOIDmode))
8709 pa_encode_label (symbol);
8710
8711 #ifdef CTORS_SECTION_ASM_OP
8712 default_ctor_section_asm_out_constructor (symbol, priority);
8713 #else
8714 # ifdef TARGET_ASM_NAMED_SECTION
8715 default_named_section_asm_out_constructor (symbol, priority);
8716 # else
8717 default_stabs_asm_out_constructor (symbol, priority);
8718 # endif
8719 #endif
8720 }
8721
8722 static void
8723 pa_asm_out_destructor (rtx symbol, int priority)
8724 {
8725 if (!function_label_operand (symbol, VOIDmode))
8726 pa_encode_label (symbol);
8727
8728 #ifdef DTORS_SECTION_ASM_OP
8729 default_dtor_section_asm_out_destructor (symbol, priority);
8730 #else
8731 # ifdef TARGET_ASM_NAMED_SECTION
8732 default_named_section_asm_out_destructor (symbol, priority);
8733 # else
8734 default_stabs_asm_out_destructor (symbol, priority);
8735 # endif
8736 #endif
8737 }
8738 #endif
8739
8740 /* This function places uninitialized global data in the bss section.
8741 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8742 function on the SOM port to prevent uninitialized global data from
8743 being placed in the data section. */
8744
8745 void
8746 pa_asm_output_aligned_bss (FILE *stream,
8747 const char *name,
8748 unsigned HOST_WIDE_INT size,
8749 unsigned int align)
8750 {
8751 switch_to_section (bss_section);
8752 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8753
8754 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8755 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8756 #endif
8757
8758 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8759 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8760 #endif
8761
8762 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8763 ASM_OUTPUT_LABEL (stream, name);
8764 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8765 }
8766
8767 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8768 that doesn't allow the alignment of global common storage to be directly
8769 specified. The SOM linker aligns common storage based on the rounded
8770 value of the NUM_BYTES parameter in the .comm directive. It's not
8771 possible to use the .align directive as it doesn't affect the alignment
8772 of the label associated with a .comm directive. */
8773
8774 void
8775 pa_asm_output_aligned_common (FILE *stream,
8776 const char *name,
8777 unsigned HOST_WIDE_INT size,
8778 unsigned int align)
8779 {
8780 unsigned int max_common_align;
8781
8782 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8783 if (align > max_common_align)
8784 {
8785 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8786 "for global common data. Using %u",
8787 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8788 align = max_common_align;
8789 }
8790
8791 switch_to_section (bss_section);
8792
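/* Request at least ALIGN bytes; the SOM linker derives the alignment of common storage from the rounded size.  */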
8793 assemble_name (stream, name);
8794 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8795 MAX (size, align / BITS_PER_UNIT));
8796 }
8797
8798 /* We can't use .comm for local common storage as the SOM linker effectively
8799 treats the symbol as universal and uses the same storage for local symbols
8800 with the same name in different object files. The .block directive
8801 reserves an uninitialized block of storage. However, it's not common
8802 storage. Fortunately, GCC never requests common storage with the same
8803 name in any given translation unit. */
8804
8805 void
8806 pa_asm_output_aligned_local (FILE *stream,
8807 const char *name,
8808 unsigned HOST_WIDE_INT size,
8809 unsigned int align)
8810 {
8811 switch_to_section (bss_section);
8812 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8813
8814 #ifdef LOCAL_ASM_OP
8815 fprintf (stream, "%s", LOCAL_ASM_OP);
8816 assemble_name (stream, name);
8817 fprintf (stream, "\n");
8818 #endif
8819
8820 ASM_OUTPUT_LABEL (stream, name);
8821 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8822 }
8823
8824 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8825 use in fmpysub instructions. */
8826 int
8827 pa_fmpysuboperands (rtx *operands)
8828 {
8829 machine_mode mode = GET_MODE (operands[0]);
8830
8831 /* Must be a floating point mode. */
8832 if (mode != SFmode && mode != DFmode)
8833 return 0;
8834
8835 /* All modes must be the same. */
8836 if (! (mode == GET_MODE (operands[1])
8837 && mode == GET_MODE (operands[2])
8838 && mode == GET_MODE (operands[3])
8839 && mode == GET_MODE (operands[4])
8840 && mode == GET_MODE (operands[5])))
8841 return 0;
8842
8843 /* All operands must be registers. */
8844 if (! (GET_CODE (operands[1]) == REG
8845 && GET_CODE (operands[2]) == REG
8846 && GET_CODE (operands[3]) == REG
8847 && GET_CODE (operands[4]) == REG
8848 && GET_CODE (operands[5]) == REG))
8849 return 0;
8850
8851 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8852 operation, so operands[4] must be the same as operands[3]. */
8853 if (! rtx_equal_p (operands[3], operands[4]))
8854 return 0;
8855
8856 /* Multiply cannot feed into subtraction. */
8857 if (rtx_equal_p (operands[5], operands[0]))
8858 return 0;
8859
8860 /* Inout operand of sub cannot conflict with any operands from multiply. */
8861 if (rtx_equal_p (operands[3], operands[0])
8862 || rtx_equal_p (operands[3], operands[1])
8863 || rtx_equal_p (operands[3], operands[2]))
8864 return 0;
8865
8866 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8867 if (mode == SFmode
8868 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8869 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8870 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8871 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8872 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8873 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8874 return 0;
8875
8876 /* Passed. Operands are suitable for fmpysub. */
8877 return 1;
8878 }
8879
8880 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8881 constants for a MULT embedded inside a memory address. */
8882 int
8883 pa_mem_shadd_constant_p (int val)
8884 {
8885 if (val == 2 || val == 4 || val == 8)
8886 return 1;
8887 else
8888 return 0;
8889 }
8890
8891 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8892 constants for shadd instructions. */
8893 int
8894 pa_shadd_constant_p (int val)
8895 {
8896 if (val == 1 || val == 2 || val == 3)
8897 return 1;
8898 else
8899 return 0;
8900 }
8901
8902 /* Return TRUE if INSN branches forward. */
8903
8904 static bool
8905 forward_branch_p (rtx_insn *insn)
8906 {
8907 rtx lab = JUMP_LABEL (insn);
8908
8909 /* The INSN must have a jump label. */
8910 gcc_assert (lab != NULL_RTX);
8911
8912 if (INSN_ADDRESSES_SET_P ())
8913 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8914
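/* Insn addresses are not available; scan forward from INSN and see whether we reach the label, in which case the branch is forward.  */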
8915 while (insn)
8916 {
8917 if (insn == lab)
8918 return true;
8919 else
8920 insn = NEXT_INSN (insn);
8921 }
8922
8923 return false;
8924 }
8925
8926 /* Output an unconditional move and branch insn. */
8927
8928 const char *
8929 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8930 {
8931 int length = get_attr_length (insn);
8932
8933 /* These are the cases in which we win. */
8934 if (length == 4)
8935 return "mov%I1b,tr %1,%0,%2";
8936
8937 /* None of the following cases win, but they don't lose either. */
8938 if (length == 8)
8939 {
8940 if (dbr_sequence_length () == 0)
8941 {
8942 /* Nothing in the delay slot, fake it by putting the combined
8943 insn (the copy or add) in the delay slot of a bl. */
8944 if (GET_CODE (operands[1]) == CONST_INT)
8945 return "b %2\n\tldi %1,%0";
8946 else
8947 return "b %2\n\tcopy %1,%0";
8948 }
8949 else
8950 {
8951 /* Something in the delay slot, but we've got a long branch. */
8952 if (GET_CODE (operands[1]) == CONST_INT)
8953 return "ldi %1,%0\n\tb %2";
8954 else
8955 return "copy %1,%0\n\tb %2";
8956 }
8957 }
8958
8959 if (GET_CODE (operands[1]) == CONST_INT)
8960 output_asm_insn ("ldi %1,%0", operands);
8961 else
8962 output_asm_insn ("copy %1,%0", operands);
8963 return pa_output_lbranch (operands[2], insn, 1);
8964 }
8965
8966 /* Output an unconditional add and branch insn. */
8967
8968 const char *
8969 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8970 {
8971 int length = get_attr_length (insn);
8972
8973 /* To make life easy we want operand0 to be the shared input/output
8974 operand and operand1 to be the readonly operand. */
8975 if (operands[0] == operands[1])
8976 operands[1] = operands[2];
8977
8978 /* These are the cases in which we win. */
8979 if (length == 4)
8980 return "add%I1b,tr %1,%0,%3";
8981
8982 /* None of the following cases win, but they don't lose either. */
8983 if (length == 8)
8984 {
8985 if (dbr_sequence_length () == 0)
8986 /* Nothing in the delay slot, fake it by putting the combined
8987 insn (the copy or add) in the delay slot of a bl. */
8988 return "b %3\n\tadd%I1 %1,%0,%0";
8989 else
8990 /* Something in the delay slot, but we've got a long branch. */
8991 return "add%I1 %1,%0,%0\n\tb %3";
8992 }
8993
8994 output_asm_insn ("add%I1 %1,%0,%0", operands);
8995 return pa_output_lbranch (operands[3], insn, 1);
8996 }
8997
8998 /* We use this hook to perform a PA specific optimization which is difficult
8999 to do in earlier passes. */
9000
9001 static void
9002 pa_reorg (void)
9003 {
9004 remove_useless_addtr_insns (1);
9005
9006 if (pa_cpu < PROCESSOR_8000)
9007 pa_combine_instructions ();
9008 }
9009
9010 /* The PA has a number of odd instructions which can perform multiple
9011 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9012 it may be profitable to combine two instructions into one instruction
9013 with two outputs. It's not profitable on PA2.0 machines because the
9014 two outputs would take two slots in the reorder buffers.
9015
9016 This routine finds instructions which can be combined and combines
9017 them. We only support some of the potential combinations, and we
9018 only try common ways to find suitable instructions.
9019
9020 * addb can add two registers or a register and a small integer
9021 and jump to a nearby (+-8k) location. Normally the jump to the
9022 nearby location is conditional on the result of the add, but by
9023 using the "true" condition we can make the jump unconditional.
9024 Thus addb can perform two independent operations in one insn.
9025
9026 * movb is similar to addb in that it can perform a reg->reg
9027 or small immediate->reg copy and jump to a nearby (+-8k) location.
9028
9029 * fmpyadd and fmpysub can perform a FP multiply and either an
9030 FP add or FP sub if the operands of the multiply and add/sub are
9031 independent (there are other minor restrictions). Note both
9032 the fmpy and fadd/fsub can in theory move to better spots according
9033 to data dependencies, but for now we require the fmpy stay at a
9034 fixed location.
9035
9036 * Many of the memory operations can perform pre & post updates
9037 of index registers. GCC's pre/post increment/decrement addressing
9038 is far too simple to take advantage of all the possibilities. This
9039 pass may not be suitable since those insns may not be independent.
9040
9041 * comclr can compare two ints or an int and a register, nullify
9042 the following instruction and zero some other register. This
9043 is more difficult to use as it's harder to find an insn which
9044 will generate a comclr than to find something like an unconditional
9045 branch. (conditional moves & long branches create comclr insns).
9046
9047 * Most arithmetic operations can conditionally skip the next
9048 instruction. They can be viewed as "perform this operation
9049 and conditionally jump to this nearby location" (where nearby
9050 is an insn away). These are difficult to use due to the
9051 branch length restrictions. */
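
/* For illustration, on a PA1.1 target this pass can replace an
   independent FP multiply and FP add such as

       fmpy,dbl %fr4,%fr5,%fr6
       fadd,dbl %fr7,%fr8,%fr9

   with a single two-output fmpyadd instruction.  The register choices
   here are hypothetical; real opportunities are discovered by
   pa_can_combine_p below.  */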
9052
9053 static void
9054 pa_combine_instructions (void)
9055 {
9056 rtx_insn *anchor;
9057
9058 /* This can get expensive since the basic algorithm is on the
9059 order of O(n^2) (or worse). Only do it for -O2 or higher
9060 levels of optimization. */
9061 if (optimize < 2)
9062 return;
9063
9064 /* Walk down the list of insns looking for "anchor" insns which
9065 may be combined with "floating" insns. As the name implies,
9066 "anchor" instructions don't move, while "floating" insns may
9067 move around. */
9068 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9069 rtx_insn *new_rtx = make_insn_raw (par);
9070
9071 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9072 {
9073 enum attr_pa_combine_type anchor_attr;
9074 enum attr_pa_combine_type floater_attr;
9075
9076 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9077 Also ignore any special USE insns. */
9078 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
9079 || GET_CODE (PATTERN (anchor)) == USE
9080 || GET_CODE (PATTERN (anchor)) == CLOBBER)
9081 continue;
9082
9083 anchor_attr = get_attr_pa_combine_type (anchor);
9084 /* See if anchor is an insn suitable for combination. */
9085 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9086 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9087 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9088 && ! forward_branch_p (anchor)))
9089 {
9090 rtx_insn *floater;
9091
9092 for (floater = PREV_INSN (anchor);
9093 floater;
9094 floater = PREV_INSN (floater))
9095 {
9096 if (NOTE_P (floater)
9097 || (NONJUMP_INSN_P (floater)
9098 && (GET_CODE (PATTERN (floater)) == USE
9099 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9100 continue;
9101
9102 /* Anything except a regular INSN will stop our search. */
9103 if (! NONJUMP_INSN_P (floater))
9104 {
9105 floater = NULL;
9106 break;
9107 }
9108
9109 /* See if FLOATER is suitable for combination with the
9110 anchor. */
9111 floater_attr = get_attr_pa_combine_type (floater);
9112 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9113 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9114 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9115 && floater_attr == PA_COMBINE_TYPE_FMPY))
9116 {
9117 /* If ANCHOR and FLOATER can be combined, then we're
9118 done with this pass. */
9119 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9120 SET_DEST (PATTERN (floater)),
9121 XEXP (SET_SRC (PATTERN (floater)), 0),
9122 XEXP (SET_SRC (PATTERN (floater)), 1)))
9123 break;
9124 }
9125
9126 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9127 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9128 {
9129 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9130 {
9131 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9132 SET_DEST (PATTERN (floater)),
9133 XEXP (SET_SRC (PATTERN (floater)), 0),
9134 XEXP (SET_SRC (PATTERN (floater)), 1)))
9135 break;
9136 }
9137 else
9138 {
9139 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9140 SET_DEST (PATTERN (floater)),
9141 SET_SRC (PATTERN (floater)),
9142 SET_SRC (PATTERN (floater))))
9143 break;
9144 }
9145 }
9146 }
9147
9148 /* If we didn't find anything on the backwards scan, try forwards. */
9149 if (!floater
9150 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9151 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9152 {
9153 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9154 {
9155 if (NOTE_P (floater)
9156 || (NONJUMP_INSN_P (floater)
9157 && (GET_CODE (PATTERN (floater)) == USE
9158 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9160 continue;
9161
9162 /* Anything except a regular INSN will stop our search. */
9163 if (! NONJUMP_INSN_P (floater))
9164 {
9165 floater = NULL;
9166 break;
9167 }
9168
9169 /* See if FLOATER is suitable for combination with the
9170 anchor. */
9171 floater_attr = get_attr_pa_combine_type (floater);
9172 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9173 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9174 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9175 && floater_attr == PA_COMBINE_TYPE_FMPY))
9176 {
9177 /* If ANCHOR and FLOATER can be combined, then we're
9178 done with this pass. */
9179 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9180 SET_DEST (PATTERN (floater)),
9181 XEXP (SET_SRC (PATTERN (floater)),
9182 0),
9183 XEXP (SET_SRC (PATTERN (floater)),
9184 1)))
9185 break;
9186 }
9187 }
9188 }
9189
9190 /* FLOATER will be nonzero if we found a suitable floating
9191 insn for combination with ANCHOR. */
9192 if (floater
9193 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9194 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9195 {
9196 /* Emit the new instruction and delete the old anchor. */
9197 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9198 copy_rtx (PATTERN (floater)));
9199 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9200 emit_insn_before (temp, anchor);
9201
9202 SET_INSN_DELETED (anchor);
9203
9204 /* Emit a special USE insn for FLOATER, then delete
9205 the floating insn. */
9206 temp = copy_rtx (PATTERN (floater));
9207 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9208 delete_insn (floater);
9209
9210 continue;
9211 }
9212 else if (floater
9213 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9214 {
9215 /* Emit the new_jump instruction and delete the old anchor. */
9216 rtvec vtemp = gen_rtvec (2, copy_rtx (PATTERN (anchor)),
9217 copy_rtx (PATTERN (floater)));
9218 rtx temp = gen_rtx_PARALLEL (VOIDmode, vtemp);
9219 temp = emit_jump_insn_before (temp, anchor);
9220
9221 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9222 SET_INSN_DELETED (anchor);
9223
9224 /* Emit a special USE insn for FLOATER, then delete
9225 the floating insn. */
9226 temp = copy_rtx (PATTERN (floater));
9227 emit_insn_before (gen_rtx_USE (VOIDmode, temp), floater);
9228 delete_insn (floater);
9229 continue;
9230 }
9231 }
9232 }
9233 }
9234
9235 static int
9236 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9237 int reversed, rtx dest,
9238 rtx src1, rtx src2)
9239 {
9240 int insn_code_number;
9241 rtx_insn *start, *end;
9242
9243 /* Create a PARALLEL with the patterns of ANCHOR and
9244 FLOATER, try to recognize it, then test constraints
9245 for the resulting pattern.
9246
9247 If the pattern doesn't match or the constraints
9248 aren't met keep searching for a suitable floater
9249 insn. */
9250 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9251 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9252 INSN_CODE (new_rtx) = -1;
9253 insn_code_number = recog_memoized (new_rtx);
9254 basic_block bb = BLOCK_FOR_INSN (anchor);
9255 if (insn_code_number < 0
9256 || (extract_insn (new_rtx),
9257 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9258 return 0;
9259
9260 if (reversed)
9261 {
9262 start = anchor;
9263 end = floater;
9264 }
9265 else
9266 {
9267 start = floater;
9268 end = anchor;
9269 }
9270
9271 /* There are up to three operands to consider. One
9272 output and two inputs.
9273
9274 The output must not be used between FLOATER & ANCHOR
9275 exclusive. The inputs must not be set between
9276 FLOATER and ANCHOR exclusive. */
9277
9278 if (reg_used_between_p (dest, start, end))
9279 return 0;
9280
9281 if (reg_set_between_p (src1, start, end))
9282 return 0;
9283
9284 if (reg_set_between_p (src2, start, end))
9285 return 0;
9286
9287 /* If we get here, then everything is good. */
9288 return 1;
9289 }
9290
9291 /* Return nonzero if references for INSN are delayed.
9292
9293 Millicode insns are actually function calls with some special
9294 constraints on arguments and register usage.
9295
9296 Millicode calls always expect their arguments in the integer argument
9297 registers, and always return their result in %r29 (ret1). They
9298 are expected to clobber their arguments, %r1, %r29, and the return
9299 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9300
9301 This function tells reorg that the references to arguments and
9302 millicode calls do not appear to happen until after the millicode call.
9303 This allows reorg to put insns which set the argument registers into the
9304 delay slot of the millicode call -- thus they act more like traditional
9305 CALL_INSNs.
9306
9307 Note we cannot consider side effects of the insn to be delayed because
9308 the branch and link insn will clobber the return pointer. If we happened
9309 to use the return pointer in the delay slot of the call, then we lose.
9310
9311 get_attr_type will try to recognize the given insn, so make sure to
9312 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9313 in particular. */
9314 int
9315 pa_insn_refs_are_delayed (rtx_insn *insn)
9316 {
9317 return ((NONJUMP_INSN_P (insn)
9318 && GET_CODE (PATTERN (insn)) != SEQUENCE
9319 && GET_CODE (PATTERN (insn)) != USE
9320 && GET_CODE (PATTERN (insn)) != CLOBBER
9321 && get_attr_type (insn) == TYPE_MILLI));
9322 }
9323
9324 /* Promote the return value, but not the arguments. */
9325
9326 static machine_mode
9327 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9328 machine_mode mode,
9329 int *punsignedp ATTRIBUTE_UNUSED,
9330 const_tree fntype ATTRIBUTE_UNUSED,
9331 int for_return)
9332 {
9333 if (for_return == 0)
9334 return mode;
9335 return promote_mode (type, mode, punsignedp);
9336 }
9337
9338 /* On the HP-PA the value is found in register(s) 28(-29), unless
9339 the mode is SF or DF. Then the value is returned in fr4 (32).
9340
9341 This must perform the same promotions as PROMOTE_MODE, else promoting
9342 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9343
9344 Small structures must be returned in a PARALLEL on PA64 in order
9345 to match the HP Compiler ABI. */
9346
9347 static rtx
9348 pa_function_value (const_tree valtype,
9349 const_tree func ATTRIBUTE_UNUSED,
9350 bool outgoing ATTRIBUTE_UNUSED)
9351 {
9352 machine_mode valmode;
9353
9354 if (AGGREGATE_TYPE_P (valtype)
9355 || TREE_CODE (valtype) == COMPLEX_TYPE
9356 || TREE_CODE (valtype) == VECTOR_TYPE)
9357 {
9358 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9359
9360 /* Handle aggregates that fit exactly in a word or double word. */
9361 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9362 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9363
9364 if (TARGET_64BIT)
9365 {
9366 /* Aggregates with a size less than or equal to 128 bits are
9367 returned in GR 28(-29). They are left justified. The pad
9368 bits are undefined. Larger aggregates are returned in
9369 memory. */
9370 rtx loc[2];
9371 int i, offset = 0;
9372 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9373
9374 for (i = 0; i < ub; i++)
9375 {
9376 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9377 gen_rtx_REG (DImode, 28 + i),
9378 GEN_INT (offset));
9379 offset += 8;
9380 }
9381
9382 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9383 }
9384 else if (valsize > UNITS_PER_WORD)
9385 {
9386 /* Aggregates 5 to 8 bytes in size are returned in general
9387 registers r28-r29 in the same manner as other non
9388 floating-point objects. The data is right-justified and
9389 zero-extended to 64 bits. This is opposite to the normal
9390 justification used on big endian targets and requires
9391 special treatment. */
9392 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9393 gen_rtx_REG (DImode, 28), const0_rtx);
9394 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9395 }
9396 }
9397
9398 if ((INTEGRAL_TYPE_P (valtype)
9399 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9400 || POINTER_TYPE_P (valtype))
9401 valmode = word_mode;
9402 else
9403 valmode = TYPE_MODE (valtype);
9404
9405 if (TREE_CODE (valtype) == REAL_TYPE
9406 && !AGGREGATE_TYPE_P (valtype)
9407 && TYPE_MODE (valtype) != TFmode
9408 && !TARGET_SOFT_FLOAT)
9409 return gen_rtx_REG (valmode, 32);
9410
9411 return gen_rtx_REG (valmode, 28);
9412 }
9413
9414 /* Implement the TARGET_LIBCALL_VALUE hook. */
9415
9416 static rtx
9417 pa_libcall_value (machine_mode mode,
9418 const_rtx fun ATTRIBUTE_UNUSED)
9419 {
9420 if (! TARGET_SOFT_FLOAT
9421 && (mode == SFmode || mode == DFmode))
9422 return gen_rtx_REG (mode, 32);
9423 else
9424 return gen_rtx_REG (mode, 28);
9425 }
9426
9427 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9428
9429 static bool
9430 pa_function_value_regno_p (const unsigned int regno)
9431 {
9432 if (regno == 28
9433 || (! TARGET_SOFT_FLOAT && regno == 32))
9434 return true;
9435
9436 return false;
9437 }
9438
9439 /* Update the data in CUM to advance over an argument
9440 of mode MODE and data type TYPE.
9441 (TYPE is null for libcalls where that information may not be available.) */
9442
9443 static void
9444 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9445 const_tree type, bool named ATTRIBUTE_UNUSED)
9446 {
9447 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9448 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9449
9450 cum->nargs_prototype--;
9451 cum->words += (arg_size
9452 + ((cum->words & 01)
9453 && type != NULL_TREE
9454 && arg_size > 1));
9455 }
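
/* Illustrative arithmetic for the expression above: a double-word
   argument (arg_size == 2) that would start at an odd word offset
   receives one word of padding, so cum->words advances by three.
   Single-word arguments and libcall arguments (type == NULL_TREE)
   never receive the pad.  */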
9456
9457 /* Return the location of a parameter that is passed in a register or NULL
9458 if the parameter has any component that is passed in memory.
9459
9460 This is new code and will be pushed into the net sources after
9461 further testing.
9462
9463 ??? We might want to restructure this so that it looks more like other
9464 ports. */
9465 static rtx
9466 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9467 const_tree type, bool named ATTRIBUTE_UNUSED)
9468 {
9469 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9470 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9471 int alignment = 0;
9472 int arg_size;
9473 int fpr_reg_base;
9474 int gpr_reg_base;
9475 rtx retval;
9476
9477 if (mode == VOIDmode)
9478 return NULL_RTX;
9479
9480 arg_size = FUNCTION_ARG_SIZE (mode, type);
9481
9482 /* If this arg would be passed partially or totally on the stack, then
9483 this routine should return zero. pa_arg_partial_bytes will
9484 handle arguments which are split between regs and stack slots if
9485 the ABI mandates split arguments. */
9486 if (!TARGET_64BIT)
9487 {
9488 /* The 32-bit ABI does not split arguments. */
9489 if (cum->words + arg_size > max_arg_words)
9490 return NULL_RTX;
9491 }
9492 else
9493 {
9494 if (arg_size > 1)
9495 alignment = cum->words & 1;
9496 if (cum->words + alignment >= max_arg_words)
9497 return NULL_RTX;
9498 }
9499
9500 /* The 32-bit and 64-bit ABIs are rather different,
9501 particularly in their handling of FP registers. We might
9502 be able to cleverly share code between them, but I'm not
9503 going to bother in the hope that splitting them up results
9504 in code that is more easily understood. */
9505
9506 if (TARGET_64BIT)
9507 {
9508 /* Advance the base registers to their current locations.
9509
9510 Remember, gprs grow towards smaller register numbers while
9511 fprs grow to higher register numbers. Also remember that
9512 although FP regs are 32-bit addressable, we pretend that
9513 the registers are 64 bits wide. */
9514 gpr_reg_base = 26 - cum->words;
9515 fpr_reg_base = 32 + cum->words;
9516
9517 /* Arguments wider than one word and small aggregates need special
9518 treatment. */
9519 if (arg_size > 1
9520 || mode == BLKmode
9521 || (type && (AGGREGATE_TYPE_P (type)
9522 || TREE_CODE (type) == COMPLEX_TYPE
9523 || TREE_CODE (type) == VECTOR_TYPE)))
9524 {
9525 /* Double-extended precision (80-bit), quad-precision (128-bit)
9526 and aggregates including complex numbers are aligned on
9527 128-bit boundaries. The first eight 64-bit argument slots
9528 are associated one-to-one, with general registers r26
9529 through r19, and also with floating-point registers fr4
9530 through fr11. Arguments larger than one word are always
9531 passed in general registers.
9532
9533 Using a PARALLEL with a word mode register results in left
9534 justified data on a big-endian target. */
9535
9536 rtx loc[8];
9537 int i, offset = 0, ub = arg_size;
9538
9539 /* Align the base register. */
9540 gpr_reg_base -= alignment;
9541
9542 ub = MIN (ub, max_arg_words - cum->words - alignment);
9543 for (i = 0; i < ub; i++)
9544 {
9545 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9546 gen_rtx_REG (DImode, gpr_reg_base),
9547 GEN_INT (offset));
9548 gpr_reg_base -= 1;
9549 offset += 8;
9550 }
9551
9552 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9553 }
9554 }
9555 else
9556 {
9557 /* If the argument is larger than a word, then we know precisely
9558 which registers we must use. */
9559 if (arg_size > 1)
9560 {
9561 if (cum->words)
9562 {
9563 gpr_reg_base = 23;
9564 fpr_reg_base = 38;
9565 }
9566 else
9567 {
9568 gpr_reg_base = 25;
9569 fpr_reg_base = 34;
9570 }
9571
9572 /* Structures 5 to 8 bytes in size are passed in the general
9573 registers in the same manner as other non floating-point
9574 objects. The data is right-justified and zero-extended
9575 to 64 bits. This is opposite to the normal justification
9576 used on big endian targets and requires special treatment.
9577 We now define BLOCK_REG_PADDING to pad these objects.
9578 Aggregates, complex and vector types are passed in the same
9579 manner as structures. */
9580 if (mode == BLKmode
9581 || (type && (AGGREGATE_TYPE_P (type)
9582 || TREE_CODE (type) == COMPLEX_TYPE
9583 || TREE_CODE (type) == VECTOR_TYPE)))
9584 {
9585 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9586 gen_rtx_REG (DImode, gpr_reg_base),
9587 const0_rtx);
9588 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9589 }
9590 }
9591 else
9592 {
9593 /* We have a single word (32 bits). A simple computation
9594 will get us the register #s we need. */
9595 gpr_reg_base = 26 - cum->words;
9596 fpr_reg_base = 32 + 2 * cum->words;
9597 }
9598 }
9599
9600 /* Determine if the argument needs to be passed in both general and
9601 floating point registers. */
9602 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9603 /* If we are doing soft-float with portable runtime, then there
9604 is no need to worry about FP regs. */
9605 && !TARGET_SOFT_FLOAT
9606 /* The parameter must be some kind of scalar float, else we just
9607 pass it in integer registers. */
9608 && GET_MODE_CLASS (mode) == MODE_FLOAT
9609 /* The target function must not have a prototype. */
9610 && cum->nargs_prototype <= 0
9611 /* libcalls do not need to pass items in both FP and general
9612 registers. */
9613 && type != NULL_TREE
9614 /* All this hair applies to "outgoing" args only. This includes
9615 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9616 && !cum->incoming)
9617 /* Also pass outgoing floating arguments in both registers in indirect
9618 calls with the 32-bit ABI and the HP assembler since there is no
9619 way to specify the argument locations in static functions. */
9620 || (!TARGET_64BIT
9621 && !TARGET_GAS
9622 && !cum->incoming
9623 && cum->indirect
9624 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9625 {
9626 retval
9627 = gen_rtx_PARALLEL
9628 (mode,
9629 gen_rtvec (2,
9630 gen_rtx_EXPR_LIST (VOIDmode,
9631 gen_rtx_REG (mode, fpr_reg_base),
9632 const0_rtx),
9633 gen_rtx_EXPR_LIST (VOIDmode,
9634 gen_rtx_REG (mode, gpr_reg_base),
9635 const0_rtx)));
9636 }
9637 else
9638 {
9639 /* See if we should pass this parameter in a general register. */
9640 if (TARGET_SOFT_FLOAT
9641 /* Indirect calls in the normal 32bit ABI require all arguments
9642 to be passed in general registers. */
9643 || (!TARGET_PORTABLE_RUNTIME
9644 && !TARGET_64BIT
9645 && !TARGET_ELF32
9646 && cum->indirect)
9647 /* If the parameter is not a scalar floating-point parameter,
9648 then it belongs in GPRs. */
9649 || GET_MODE_CLASS (mode) != MODE_FLOAT
9650 /* Structure with single SFmode field belongs in GPR. */
9651 || (type && AGGREGATE_TYPE_P (type)))
9652 retval = gen_rtx_REG (mode, gpr_reg_base);
9653 else
9654 retval = gen_rtx_REG (mode, fpr_reg_base);
9655 }
9656 return retval;
9657 }
9658
9659 /* Arguments larger than one word are double word aligned. */
9660
9661 static unsigned int
9662 pa_function_arg_boundary (machine_mode mode, const_tree type)
9663 {
9664 bool singleword = (type
9665 ? (integer_zerop (TYPE_SIZE (type))
9666 || !TREE_CONSTANT (TYPE_SIZE (type))
9667 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9668 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9669
9670 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9671 }
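
/* For example, on the 32-bit runtime (UNITS_PER_WORD == 4) a 4-byte
   int is aligned to PARM_BOUNDARY while an 8-byte double is aligned
   to MAX_PARM_BOUNDARY; illustrative sizes only.  */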
9672
9673 /* If this arg would be passed totally in registers or totally on the stack,
9674 then this routine should return zero. */
9675
9676 static int
9677 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9678 tree type, bool named ATTRIBUTE_UNUSED)
9679 {
9680 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9681 unsigned int max_arg_words = 8;
9682 unsigned int offset = 0;
9683
9684 if (!TARGET_64BIT)
9685 return 0;
9686
9687 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9688 offset = 1;
9689
9690 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9691 /* Arg fits fully into registers. */
9692 return 0;
9693 else if (cum->words + offset >= max_arg_words)
9694 /* Arg fully on the stack. */
9695 return 0;
9696 else
9697 /* Arg is split. */
9698 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9699 }
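
/* Worked example for the 64-bit runtime: a 24-byte aggregate (three
   words) starting at cum->words == 6 fills the last two register slots
   and spills one word to the stack, so the function returns
   2 * UNITS_PER_WORD == 16.  The offsets are hypothetical.  */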
9700
9701
9702 /* A get_unnamed_section callback for switching to the text section.
9703
9704 This function is only used with SOM. Because we don't support
9705 named subspaces, we can only create a new subspace or switch back
9706 to the default text subspace. */
9707
9708 static void
9709 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9710 {
9711 gcc_assert (TARGET_SOM);
9712 if (TARGET_GAS)
9713 {
9714 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9715 {
9716 /* We only want to emit a .nsubspa directive once at the
9717 start of the function. */
9718 cfun->machine->in_nsubspa = 1;
9719
9720 /* Create a new subspace for the text. This provides
9721 better stub placement and one-only functions. */
9722 if (cfun->decl
9723 && DECL_ONE_ONLY (cfun->decl)
9724 && !DECL_WEAK (cfun->decl))
9725 {
9726 output_section_asm_op ("\t.SPACE $TEXT$\n"
9727 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9728 "ACCESS=44,SORT=24,COMDAT");
9729 return;
9730 }
9731 }
9732 else
9733 {
9734 /* There isn't a current function or the body of the current
9735 function has been completed. So, we are changing to the
9736 text section to output debugging information. Thus, we
9737 need to forget that we are in the text section so that
9738 varasm.c will call us when text_section is selected again. */
9739 gcc_assert (!cfun || !cfun->machine
9740 || cfun->machine->in_nsubspa == 2);
9741 in_section = NULL;
9742 }
9743 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9744 return;
9745 }
9746 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9747 }
9748
9749 /* A get_unnamed_section callback for switching to comdat data
9750 sections. This function is only used with SOM. */
9751
9752 static void
9753 som_output_comdat_data_section_asm_op (const void *data)
9754 {
9755 in_section = NULL;
9756 output_section_asm_op (data);
9757 }
9758
9759 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9760
9761 static void
9762 pa_som_asm_init_sections (void)
9763 {
9764 text_section
9765 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9766
9767 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9768 is not being generated. */
9769 som_readonly_data_section
9770 = get_unnamed_section (0, output_section_asm_op,
9771 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9772
9773 /* When secondary definitions are not supported, SOM makes readonly
9774 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9775 the comdat flag. */
9776 som_one_only_readonly_data_section
9777 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9778 "\t.SPACE $TEXT$\n"
9779 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9780 "ACCESS=0x2c,SORT=16,COMDAT");
9781
9782
9783 /* When secondary definitions are not supported, SOM makes data one-only
9784 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9785 som_one_only_data_section
9786 = get_unnamed_section (SECTION_WRITE,
9787 som_output_comdat_data_section_asm_op,
9788 "\t.SPACE $PRIVATE$\n"
9789 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9790 "ACCESS=31,SORT=24,COMDAT");
9791
9792 if (flag_tm)
9793 som_tm_clone_table_section
9794 = get_unnamed_section (0, output_section_asm_op,
9795 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9796
9797 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9798 which reference data within the $TEXT$ space (for example constant
9799 strings in the $LIT$ subspace).
9800
9801 The assemblers (GAS and HP as) both have problems with handling
9802 the difference of two symbols which is the other correct way to
9803 reference constant data during PIC code generation.
9804
9805 So, there's no way to reference constant data which is in the
9806 $TEXT$ space during PIC generation. Instead place all constant
9807 data into the $PRIVATE$ subspace (this reduces sharing, but it
9808 works correctly). */
9809 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9810
9811 /* We must not have a reference to an external symbol defined in a
9812 shared library in a readonly section, else the SOM linker will
9813 complain.
9814
9815 So, we force exception information into the data section. */
9816 exception_section = data_section;
9817 }
9818
9819 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9820
9821 static section *
9822 pa_som_tm_clone_table_section (void)
9823 {
9824 return som_tm_clone_table_section;
9825 }
9826
9827 /* On hpux10, the linker will give an error if we have a reference
9828 in the read-only data section to a symbol defined in a shared
9829 library. Therefore, expressions that might require a reloc can
9830 not be placed in the read-only data section. */
9831
9832 static section *
9833 pa_select_section (tree exp, int reloc,
9834 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9835 {
9836 if (TREE_CODE (exp) == VAR_DECL
9837 && TREE_READONLY (exp)
9838 && !TREE_THIS_VOLATILE (exp)
9839 && DECL_INITIAL (exp)
9840 && (DECL_INITIAL (exp) == error_mark_node
9841 || TREE_CONSTANT (DECL_INITIAL (exp)))
9842 && !reloc)
9843 {
9844 if (TARGET_SOM
9845 && DECL_ONE_ONLY (exp)
9846 && !DECL_WEAK (exp))
9847 return som_one_only_readonly_data_section;
9848 else
9849 return readonly_data_section;
9850 }
9851 else if (CONSTANT_CLASS_P (exp) && !reloc)
9852 return readonly_data_section;
9853 else if (TARGET_SOM
9854 && TREE_CODE (exp) == VAR_DECL
9855 && DECL_ONE_ONLY (exp)
9856 && !DECL_WEAK (exp))
9857 return som_one_only_data_section;
9858 else
9859 return data_section;
9860 }
9861
9862 /* Implement pa_reloc_rw_mask. */
9863
9864 static int
9865 pa_reloc_rw_mask (void)
9866 {
9867 /* We force (const (plus (symbol) (const_int))) to memory when the
9868 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9869 handle this construct in read-only memory and we want to avoid
9870 this for ELF. So, we always force an RTX needing relocation to
9871 the data section. */
9872 return 3;
9873 }
9874
9875 static void
9876 pa_globalize_label (FILE *stream, const char *name)
9877 {
9878 /* We only handle DATA objects here, functions are globalized in
9879 ASM_DECLARE_FUNCTION_NAME. */
9880 if (! FUNCTION_NAME_P (name))
9881 {
9882 fputs ("\t.EXPORT ", stream);
9883 assemble_name (stream, name);
9884 fputs (",DATA\n", stream);
9885 }
9886 }
9887
9888 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9889
9890 static rtx
9891 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9892 int incoming ATTRIBUTE_UNUSED)
9893 {
9894 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9895 }
9896
9897 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9898
9899 bool
9900 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9901 {
9902 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9903 PA64 ABI says that objects larger than 128 bits are returned in memory.
9904 Note, int_size_in_bytes can return -1 if the size of the object is
9905 variable or larger than the maximum value that can be expressed as
9906 a HOST_WIDE_INT. It can also return zero for an empty type. The
9907 simplest way to handle variable and empty types is to pass them in
9908 memory. This avoids problems in defining the boundaries of argument
9909 slots, allocating registers, etc. */
9910 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9911 || int_size_in_bytes (type) <= 0);
9912 }
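
/* For example, a 12-byte struct is returned in memory on SOM
   (12 > 8) but in registers with the PA64 ABI (12 <= 16), while a
   variable-sized or empty type is always returned in memory.  */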
9913
9914 /* Structure to hold declaration and name of external symbols that are
9915 emitted by GCC. We generate a vector of these symbols and output them
9916 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9917 This avoids putting out names that are never really used. */
9918
9919 typedef struct GTY(()) extern_symbol
9920 {
9921 tree decl;
9922 const char *name;
9923 } extern_symbol;
9924
9925 /* Define gc'd vector type for extern_symbol. */
9926
9927 /* Vector of extern_symbol entries. */
9928 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9929
9930 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9931 /* Mark DECL (name NAME) as an external reference (assembler output
9932 file FILE). This saves the names to output at the end of the file
9933 if actually referenced. */
9934
9935 void
9936 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9937 {
9938 gcc_assert (file == asm_out_file);
9939 extern_symbol p = {decl, name};
9940 vec_safe_push (extern_symbols, p);
9941 }
9942
9943 /* Output text required at the end of an assembler file.
9944 This includes deferred plabels and .import directives for
9945 all external symbols that were actually referenced. */
9946
9947 static void
9948 pa_hpux_file_end (void)
9949 {
9950 unsigned int i;
9951 extern_symbol *p;
9952
9953 if (!NO_DEFERRED_PROFILE_COUNTERS)
9954 output_deferred_profile_counters ();
9955
9956 output_deferred_plabels ();
9957
9958 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9959 {
9960 tree decl = p->decl;
9961
9962 if (!TREE_ASM_WRITTEN (decl)
9963 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9964 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9965 }
9966
9967 vec_free (extern_symbols);
9968 }
9969 #endif
9970
9971 /* Return true if a change from mode FROM to mode TO for a register
9972 in register class RCLASS is invalid. */
9973
9974 bool
9975 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9976 enum reg_class rclass)
9977 {
9978 if (from == to)
9979 return false;
9980
9981 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9982 return false;
9983
9984 /* Reject changes to/from modes with zero size. */
9985 if (!GET_MODE_SIZE (from) || !GET_MODE_SIZE (to))
9986 return true;
9987
9988 /* Reject changes to/from complex and vector modes. */
9989 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9990 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9991 return true;
9992
9993 /* There is no way to load QImode or HImode values directly from memory
9994 to a FP register. SImode loads to the FP registers are not zero
9995 extended. On the 64-bit target, this conflicts with the definition
9996 of LOAD_EXTEND_OP. Thus, we can't allow changing between modes with
9997 different sizes in the floating-point registers. */
9998 if (MAYBE_FP_REG_CLASS_P (rclass))
9999 return true;
10000
10001 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
10002 in specific sets of registers. Thus, we cannot allow changing
10003 to a larger mode when it's larger than a word. */
10004 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10005 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10006 return true;
10007
10008 return false;
10009 }
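
/* For example, a change from SImode to DFmode is rejected for a class
   containing floating-point registers because the mode sizes differ,
   while a change from SImode to SFmode is allowed for any class since
   the two modes have the same size.  */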
10010
10011 /* Returns TRUE if it is a good idea to tie two pseudo registers
10012 when one has mode MODE1 and one has mode MODE2.
10013 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
10014 for any hard reg, then this must be FALSE for correct output.
10015
10016 We should return FALSE for QImode and HImode because these modes
10017 are not ok in the floating-point registers. However, this prevents
10018 tying these modes to SImode and DImode in the general registers.
10019 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
10020 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10021 in the floating-point registers. */
10022
10023 bool
10024 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10025 {
10026 /* Don't tie modes in different classes. */
10027 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10028 return false;
10029
10030 return true;
10031 }
10032
10033 \f
10034 /* Length in units of the trampoline instruction code. */
10035
10036 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10037
10038
10039 /* Output assembler code for a block containing the constant parts
10040 of a trampoline, leaving space for the variable parts.
10041
10042 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10043 and then branches to the specified routine.
10044
10045 This code template is copied from the text segment to a stack
10046 location, patched by pa_trampoline_init to contain valid values,
10047 and then entered as a subroutine.
10048
10049 It is best to keep this as small as possible to avoid having to
10050 flush multiple lines in the cache. */
10051
10052 static void
10053 pa_asm_trampoline_template (FILE *f)
10054 {
10055 if (!TARGET_64BIT)
10056 {
10057 fputs ("\tldw 36(%r22),%r21\n", f);
10058 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10059 if (ASSEMBLER_DIALECT == 0)
10060 fputs ("\tdepi 0,31,2,%r21\n", f);
10061 else
10062 fputs ("\tdepwi 0,31,2,%r21\n", f);
10063 fputs ("\tldw 4(%r21),%r19\n", f);
10064 fputs ("\tldw 0(%r21),%r21\n", f);
10065 if (TARGET_PA_20)
10066 {
10067 fputs ("\tbve (%r21)\n", f);
10068 fputs ("\tldw 40(%r22),%r29\n", f);
10069 fputs ("\t.word 0\n", f);
10070 fputs ("\t.word 0\n", f);
10071 }
10072 else
10073 {
10074 fputs ("\tldsid (%r21),%r1\n", f);
10075 fputs ("\tmtsp %r1,%sr0\n", f);
10076 fputs ("\tbe 0(%sr0,%r21)\n", f);
10077 fputs ("\tldw 40(%r22),%r29\n", f);
10078 }
10079 fputs ("\t.word 0\n", f);
10080 fputs ("\t.word 0\n", f);
10081 fputs ("\t.word 0\n", f);
10082 fputs ("\t.word 0\n", f);
10083 }
10084 else
10085 {
10086 fputs ("\t.dword 0\n", f);
10087 fputs ("\t.dword 0\n", f);
10088 fputs ("\t.dword 0\n", f);
10089 fputs ("\t.dword 0\n", f);
10090 fputs ("\tmfia %r31\n", f);
10091 fputs ("\tldd 24(%r31),%r1\n", f);
10092 fputs ("\tldd 24(%r1),%r27\n", f);
10093 fputs ("\tldd 16(%r1),%r1\n", f);
10094 fputs ("\tbve (%r1)\n", f);
10095 fputs ("\tldd 32(%r31),%r31\n", f);
10096 fputs ("\t.dword 0 ; fptr\n", f);
10097 fputs ("\t.dword 0 ; static link\n", f);
10098 }
10099 }
10100
10101 /* Emit RTL insns to initialize the variable parts of a trampoline.
10102 FNADDR is an RTX for the address of the function's pure code.
10103 CXT is an RTX for the static chain value for the function.
10104
10105 Move the function address to the trampoline template at offset 36.
10106 Move the static chain value to the trampoline template at offset 40.
10107 Move the trampoline address to the trampoline template at offset 44.
10108 Move r19 to the trampoline template at offset 48. The latter two
10109 words create a plabel for the indirect call to the trampoline.
10110
10111 A similar sequence is used for the 64-bit port but the plabel is
10112 at the beginning of the trampoline.
10113
10114 Finally, the cache entries for the trampoline code are flushed.
10115 This is necessary to ensure that the trampoline instruction sequence
10116 is written to memory prior to any attempts at prefetching the code
10117 sequence. */
10118
10119 static void
10120 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10121 {
10122 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10123 rtx start_addr = gen_reg_rtx (Pmode);
10124 rtx end_addr = gen_reg_rtx (Pmode);
10125 rtx line_length = gen_reg_rtx (Pmode);
10126 rtx r_tramp, tmp;
10127
10128 emit_block_move (m_tramp, assemble_trampoline_template (),
10129 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10130 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10131
10132 if (!TARGET_64BIT)
10133 {
10134 tmp = adjust_address (m_tramp, Pmode, 36);
10135 emit_move_insn (tmp, fnaddr);
10136 tmp = adjust_address (m_tramp, Pmode, 40);
10137 emit_move_insn (tmp, chain_value);
10138
10139 /* Create a fat pointer for the trampoline. */
10140 tmp = adjust_address (m_tramp, Pmode, 44);
10141 emit_move_insn (tmp, r_tramp);
10142 tmp = adjust_address (m_tramp, Pmode, 48);
10143 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10144
10145 /* fdc and fic only use registers for the address to flush,
10146 they do not accept integer displacements. We align the
10147 start and end addresses to the beginning of their respective
10148 cache lines to minimize the number of lines flushed. */
10149 emit_insn (gen_andsi3 (start_addr, r_tramp,
10150 GEN_INT (-MIN_CACHELINE_SIZE)));
10151 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10152 TRAMPOLINE_CODE_SIZE-1));
10153 emit_insn (gen_andsi3 (end_addr, tmp,
10154 GEN_INT (-MIN_CACHELINE_SIZE)));
10155 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10156 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10157 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10158 gen_reg_rtx (Pmode),
10159 gen_reg_rtx (Pmode)));
10160 }
10161 else
10162 {
10163 tmp = adjust_address (m_tramp, Pmode, 56);
10164 emit_move_insn (tmp, fnaddr);
10165 tmp = adjust_address (m_tramp, Pmode, 64);
10166 emit_move_insn (tmp, chain_value);
10167
10168 /* Create a fat pointer for the trampoline. */
10169 tmp = adjust_address (m_tramp, Pmode, 16);
10170 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10171 r_tramp, 32)));
10172 tmp = adjust_address (m_tramp, Pmode, 24);
10173 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10174
10175 /* fdc and fic only use registers for the address to flush,
10176 they do not accept integer displacements. We align the
10177 start and end addresses to the beginning of their respective
10178 cache lines to minimize the number of lines flushed. */
10179 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10180 emit_insn (gen_anddi3 (start_addr, tmp,
10181 GEN_INT (-MIN_CACHELINE_SIZE)));
10182 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10183 TRAMPOLINE_CODE_SIZE - 1));
10184 emit_insn (gen_anddi3 (end_addr, tmp,
10185 GEN_INT (-MIN_CACHELINE_SIZE)));
10186 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10187 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10188 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10189 gen_reg_rtx (Pmode),
10190 gen_reg_rtx (Pmode)));
10191 }
10192
10193 #ifdef HAVE_ENABLE_EXECUTE_STACK
10194 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10195 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10196 #endif
10197 }
10198
10199 /* Perform any machine-specific adjustment in the address of the trampoline.
10200 ADDR contains the address that was passed to pa_trampoline_init.
10201 Adjust the trampoline address to point to the plabel at offset 44. */
10202
10203 static rtx
10204 pa_trampoline_adjust_address (rtx addr)
10205 {
10206 if (!TARGET_64BIT)
10207 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10208 return addr;
10209 }
10210
10211 static rtx
10212 pa_delegitimize_address (rtx orig_x)
10213 {
10214 rtx x = delegitimize_mem_from_attrs (orig_x);
10215
10216 if (GET_CODE (x) == LO_SUM
10217 && GET_CODE (XEXP (x, 1)) == UNSPEC
10218 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10219 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10220 return x;
10221 }
10222 \f
10223 static rtx
10224 pa_internal_arg_pointer (void)
10225 {
10226 /* The argument pointer and the hard frame pointer are the same in
10227 the 32-bit runtime, so we don't need a copy. */
10228 if (TARGET_64BIT)
10229 return copy_to_reg (virtual_incoming_args_rtx);
10230 else
10231 return virtual_incoming_args_rtx;
10232 }
10233
10234 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10235 Frame pointer elimination is automatically handled. */
10236
10237 static bool
10238 pa_can_eliminate (const int from, const int to)
10239 {
10240 /* The argument cannot be eliminated in the 64-bit runtime. */
10241 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10242 return false;
10243
10244 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10245 ? ! frame_pointer_needed
10246 : true);
10247 }
10248
10249 /* Define the offset between two registers, FROM to be eliminated and its
10250 replacement TO, at the start of a routine. */
10251 HOST_WIDE_INT
10252 pa_initial_elimination_offset (int from, int to)
10253 {
10254 HOST_WIDE_INT offset;
10255
10256 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10257 && to == STACK_POINTER_REGNUM)
10258 offset = -pa_compute_frame_size (get_frame_size (), 0);
10259 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10260 offset = 0;
10261 else
10262 gcc_unreachable ();
10263
10264 return offset;
10265 }
10266
10267 static void
10268 pa_conditional_register_usage (void)
10269 {
10270 int i;
10271
10272 if (!TARGET_64BIT && !TARGET_PA_11)
10273 {
10274 for (i = 56; i <= FP_REG_LAST; i++)
10275 fixed_regs[i] = call_used_regs[i] = 1;
10276 for (i = 33; i < 56; i += 2)
10277 fixed_regs[i] = call_used_regs[i] = 1;
10278 }
10279 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10280 {
10281 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10282 fixed_regs[i] = call_used_regs[i] = 1;
10283 }
10284 if (flag_pic)
10285 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10286 }
10287
10288 /* Target hook for c_mode_for_suffix. */
10289
10290 static machine_mode
10291 pa_c_mode_for_suffix (char suffix)
10292 {
10293 if (HPUX_LONG_DOUBLE_LIBRARY)
10294 {
10295 if (suffix == 'q')
10296 return TFmode;
10297 }
10298
10299 return VOIDmode;
10300 }
10301
10302 /* Target hook for function_section. */
10303
10304 static section *
10305 pa_function_section (tree decl, enum node_frequency freq,
10306 bool startup, bool exit)
10307 {
10308 /* Put functions in text section if target doesn't have named sections. */
10309 if (!targetm_common.have_named_sections)
10310 return text_section;
10311
10312 /* Force nested functions into the same section as the containing
10313 function. */
10314 if (decl
10315 && DECL_SECTION_NAME (decl) == NULL
10316 && DECL_CONTEXT (decl) != NULL_TREE
10317 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10318 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10319 return function_section (DECL_CONTEXT (decl));
10320
10321 /* Otherwise, use the default function section. */
10322 return default_function_section (decl, freq, startup, exit);
10323 }
10324
10325 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10326
10327 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10328 that need more than three instructions to load prior to reload. This
10329 limit is somewhat arbitrary. It takes three instructions to load a
10330 CONST_INT from memory but two are memory accesses. It may be better
10331 to increase the allowed range for CONST_INTS. We may also be able
10332 to handle CONST_DOUBLES. */
10333
10334 static bool
10335 pa_legitimate_constant_p (machine_mode mode, rtx x)
10336 {
10337 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10338 return false;
10339
10340 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10341 return false;
10342
10343 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10344 legitimate constants. The other variants can't be handled by
10345 the move patterns after reload starts. */
10346 if (tls_referenced_p (x))
10347 return false;
10348
10349 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10350 return false;
10351
10352 if (TARGET_64BIT
10353 && HOST_BITS_PER_WIDE_INT > 32
10354 && GET_CODE (x) == CONST_INT
10355 && !reload_in_progress
10356 && !reload_completed
10357 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10358 && !pa_cint_ok_for_move (UINTVAL (x)))
10359 return false;
10360
10361 if (function_label_operand (x, mode))
10362 return false;
10363
10364 return true;
10365 }
10366
10367 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10368
10369 static unsigned int
10370 pa_section_type_flags (tree decl, const char *name, int reloc)
10371 {
10372 unsigned int flags;
10373
10374 flags = default_section_type_flags (decl, name, reloc);
10375
10376 /* Function labels are placed in the constant pool. This can
10377 cause a section conflict if decls are put in ".data.rel.ro"
10378 or ".data.rel.ro.local" using the __attribute__ construct. */
10379 if (strcmp (name, ".data.rel.ro") == 0
10380 || strcmp (name, ".data.rel.ro.local") == 0)
10381 flags |= SECTION_WRITE | SECTION_RELRO;
10382
10383 return flags;
10384 }
10385
10386 /* pa_legitimate_address_p recognizes an RTL expression that is a
10387 valid memory address for an instruction. The MODE argument is the
10388 machine mode for the MEM expression that wants to use this address.
10389
10390 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10391 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10392 available with floating point loads and stores, and integer loads.
10393 We get better code by allowing indexed addresses in the initial
10394 RTL generation.
10395
10396 The acceptance of indexed addresses as legitimate implies that we
10397 must provide patterns for doing indexed integer stores, or the move
10398 expanders must force the address of an indexed store to a register.
10399 We have adopted the latter approach.
10400
10401 Another function of pa_legitimate_address_p is to ensure that
10402 the base register is a valid pointer for indexed instructions.
10403 On targets that have non-equivalent space registers, we have to
10404 know at the time of assembler output which register in a REG+REG
10405 pair is the base register. The REG_POINTER flag is sometimes lost
10406 in reload and the following passes, so it can't be relied on during
10407 code generation. Thus, we either have to canonicalize the order
10408 of the registers in REG+REG indexed addresses, or treat REG+REG
10409 addresses separately and provide patterns for both permutations.
10410
10411 The latter approach requires several hundred additional lines of
10412 code in pa.md. The downside to canonicalizing is that a PLUS
10413 in the wrong order can't combine to form a scaled indexed
10414 memory operand. As we won't need to canonicalize the operands if
10415 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10416
10417 We initially break out scaled indexed addresses in canonical order
10418 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10419 scaled indexed addresses during RTL generation. However, fold_rtx
10420 has its own opinion on how the operands of a PLUS should be ordered.
10421 If one of the operands is equivalent to a constant, it will make
10422 that operand the second operand. As the base register is likely to
10423 be equivalent to a SYMBOL_REF, we have made it the second operand.
10424
10425 pa_legitimate_address_p accepts REG+REG as legitimate when the
10426 operands are in the order INDEX+BASE on targets with non-equivalent
10427 space registers, and in any order on targets with equivalent space
10428 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10429
10430 We treat a SYMBOL_REF as legitimate if it is part of the current
10431 function's constant-pool, because such addresses can actually be
10432 output as REG+SMALLINT. */
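
/* A sketch of the accepted forms, with hypothetical registers and
   offsets:

       (plus (reg) (const_int 12))                 REG+SMALLINT
       (plus (reg) (reg))                          INDEX+BASE
       (plus (mult (reg) (const_int 4)) (reg))     scaled index, SFmode

   For scaled indexing, the constant must equal GET_MODE_SIZE (mode),
   as checked below.  */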
10433
10434 static bool
10435 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10436 {
10437 if ((REG_P (x)
10438 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10439 : REG_OK_FOR_BASE_P (x)))
10440 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10441 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10442 && REG_P (XEXP (x, 0))
10443 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10444 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10445 return true;
10446
10447 if (GET_CODE (x) == PLUS)
10448 {
10449 rtx base, index;
10450
10451 /* For REG+REG, the base register should be in XEXP (x, 1),
10452 so check it first. */
10453 if (REG_P (XEXP (x, 1))
10454 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10455 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10456 base = XEXP (x, 1), index = XEXP (x, 0);
10457 else if (REG_P (XEXP (x, 0))
10458 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10459 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10460 base = XEXP (x, 0), index = XEXP (x, 1);
10461 else
10462 return false;
10463
10464 if (GET_CODE (index) == CONST_INT)
10465 {
10466 if (INT_5_BITS (index))
10467 return true;
10468
10469 /* When INT14_OK_STRICT is false, a secondary reload is needed
10470 to adjust the displacement of SImode and DImode floating point
10471 instructions but this may fail when the register also needs
10472 reloading. So, we return false when STRICT is true. We
10473 also reject long displacements for float mode addresses since
10474 the majority of accesses will use floating point instructions
10475 that don't support 14-bit offsets. */
10476 if (!INT14_OK_STRICT
10477 && (strict || !(reload_in_progress || reload_completed))
10478 && mode != QImode
10479 && mode != HImode)
10480 return false;
10481
10482 return base14_operand (index, mode);
10483 }
10484
10485 if (!TARGET_DISABLE_INDEXING
10486 /* Only accept the "canonical" INDEX+BASE operand order
10487 on targets with non-equivalent space registers. */
10488 && (TARGET_NO_SPACE_REGS
10489 ? REG_P (index)
10490 : (base == XEXP (x, 1) && REG_P (index)
10491 && (reload_completed
10492 || (reload_in_progress && HARD_REGISTER_P (base))
10493 || REG_POINTER (base))
10494 && (reload_completed
10495 || (reload_in_progress && HARD_REGISTER_P (index))
10496 || !REG_POINTER (index))))
10497 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10498 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10499 : REG_OK_FOR_INDEX_P (index))
10500 && borx_reg_operand (base, Pmode)
10501 && borx_reg_operand (index, Pmode))
10502 return true;
10503
10504 if (!TARGET_DISABLE_INDEXING
10505 && GET_CODE (index) == MULT
10506 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10507 && REG_P (XEXP (index, 0))
10508 && GET_MODE (XEXP (index, 0)) == Pmode
10509 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10510 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10511 && GET_CODE (XEXP (index, 1)) == CONST_INT
10512 && INTVAL (XEXP (index, 1))
10513 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10514 && borx_reg_operand (base, Pmode))
10515 return true;
10516
10517 return false;
10518 }
10519
10520 if (GET_CODE (x) == LO_SUM)
10521 {
10522 rtx y = XEXP (x, 0);
10523
10524 if (GET_CODE (y) == SUBREG)
10525 y = SUBREG_REG (y);
10526
10527 if (REG_P (y)
10528 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10529 : REG_OK_FOR_BASE_P (y)))
10530 {
10531 /* Needed for -fPIC */
10532 if (mode == Pmode
10533 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10534 return true;
10535
10536 if (!INT14_OK_STRICT
10537 && (strict || !(reload_in_progress || reload_completed))
10538 && mode != QImode
10539 && mode != HImode)
10540 return false;
10541
10542 if (CONSTANT_P (XEXP (x, 1)))
10543 return true;
10544 }
10545 return false;
10546 }
10547
10548 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10549 return true;
10550
10551 return false;
10552 }
10553
10554 /* Look for machine dependent ways to make the invalid address AD a
10555 valid address.
10556
10557 For the PA, transform:
10558
10559 memory(X + <large int>)
10560
10561 into:
10562
10563 if (<large int> & mask) >= (mask + 1) / 2
10564 Y = (<large int> & ~mask) + mask + 1 Round up.
10565 else
10566 Y = (<large int> & ~mask) Round down.
10567 Z = X + Y
10568 memory (Z + (<large int> - Y));
10569
10570 This makes reload inheritance and reload_cse work better since Z
10571 can be reused.
10572
10573 There may be more opportunities to improve code with this hook. */
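
/* Worked example with mask == 0x1f (a float mode where INT14_OK_STRICT
   is false): for X + 0x1234, the low bits 0x14 are >= 0x10, so we round
   up to Y = 0x1240.  Z = X + 0x1240 is reloaded into a base register
   and the reference becomes memory (Z + (-0xc)), whose displacement
   fits the 5-bit range of the FP load and store instructions.  The
   constants are illustrative only.  */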
10574
10575 rtx
10576 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10577 int opnum, int type,
10578 int ind_levels ATTRIBUTE_UNUSED)
10579 {
10580 long offset, newoffset, mask;
10581 rtx new_rtx, temp = NULL_RTX;
10582
10583 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10584 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10585
10586 if (optimize && GET_CODE (ad) == PLUS)
10587 temp = simplify_binary_operation (PLUS, Pmode,
10588 XEXP (ad, 0), XEXP (ad, 1));
10589
10590 new_rtx = temp ? temp : ad;
10591
10592 if (optimize
10593 && GET_CODE (new_rtx) == PLUS
10594 && GET_CODE (XEXP (new_rtx, 0)) == REG
10595 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10596 {
10597 offset = INTVAL (XEXP ((new_rtx), 1));
10598
10599 /* Choose rounding direction. Round up if we are >= halfway. */
10600 if ((offset & mask) >= ((mask + 1) / 2))
10601 newoffset = (offset & ~mask) + mask + 1;
10602 else
10603 newoffset = offset & ~mask;
10604
10605 /* Ensure that long displacements are aligned. */
10606 if (mask == 0x3fff
10607 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10608 || (TARGET_64BIT && (mode) == DImode)))
10609 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10610
10611 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10612 {
10613 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10614 GEN_INT (newoffset));
10615 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10616 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10617 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10618 opnum, (enum reload_type) type);
10619 return ad;
10620 }
10621 }
10622
10623 return NULL_RTX;
10624 }
10625
10626 /* Output address vector. */
10627
10628 void
10629 pa_output_addr_vec (rtx lab, rtx body)
10630 {
10631 int idx, vlen = XVECLEN (body, 0);
10632
10633 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10634 if (TARGET_GAS)
10635 fputs ("\t.begin_brtab\n", asm_out_file);
10636 for (idx = 0; idx < vlen; idx++)
10637 {
10638 ASM_OUTPUT_ADDR_VEC_ELT
10639 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10640 }
10641 if (TARGET_GAS)
10642 fputs ("\t.end_brtab\n", asm_out_file);
10643 }
10644
10645 /* Output address difference vector. */
10646
10647 void
10648 pa_output_addr_diff_vec (rtx lab, rtx body)
10649 {
10650 rtx base = XEXP (XEXP (body, 0), 0);
10651 int idx, vlen = XVECLEN (body, 1);
10652
10653 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10654 if (TARGET_GAS)
10655 fputs ("\t.begin_brtab\n", asm_out_file);
10656 for (idx = 0; idx < vlen; idx++)
10657 {
10658 ASM_OUTPUT_ADDR_DIFF_ELT
10659 (asm_out_file,
10660 body,
10661 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10662 CODE_LABEL_NUMBER (base));
10663 }
10664 if (TARGET_GAS)
10665 fputs ("\t.end_brtab\n", asm_out_file);
10666 }
10667
10668 /* This is a helper function for the other atomic operations. This function
10669 emits a loop that contains SEQ that iterates until a compare-and-swap
10670 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10671 a set of instructions that takes a value from OLD_REG as an input and
10672 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10673 set to the current contents of MEM. After SEQ, a compare-and-swap will
10674 attempt to update MEM with NEW_REG. The function returns true when the
10675 loop was generated successfully. */
10676
10677 static bool
10678 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10679 {
10680 machine_mode mode = GET_MODE (mem);
10681 rtx_code_label *label;
10682 rtx cmp_reg, success, oldval;
10683
10684 /* The loop we want to generate looks like
10685
10686 cmp_reg = mem;
10687 label:
10688 old_reg = cmp_reg;
10689 seq;
10690 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10691 if (success)
10692 goto label;
10693
10694 Note that we only do the plain load from memory once. Subsequent
10695 iterations use the value loaded by the compare-and-swap pattern. */
10696
10697 label = gen_label_rtx ();
10698 cmp_reg = gen_reg_rtx (mode);
10699
10700 emit_move_insn (cmp_reg, mem);
10701 emit_label (label);
10702 emit_move_insn (old_reg, cmp_reg);
10703 if (seq)
10704 emit_insn (seq);
10705
10706 success = NULL_RTX;
10707 oldval = cmp_reg;
10708 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10709 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10710 MEMMODEL_RELAXED))
10711 return false;
10712
10713 if (oldval != cmp_reg)
10714 emit_move_insn (cmp_reg, oldval);
10715
10716 /* Mark this jump predicted not taken. */
10717 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10718 GET_MODE (success), 1, label,
10719 profile_probability::guessed_never ());
10720 return true;
10721 }
10722
10723 /* This function tries to implement an atomic exchange operation using a
10724 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10725 *MEM are returned, using TARGET if possible. No memory model is required
10726 since a compare_and_swap loop is seq-cst. */
10727
10728 rtx
10729 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10730 {
10731 machine_mode mode = GET_MODE (mem);
10732
10733 if (can_compare_and_swap_p (mode, true))
10734 {
10735 if (!target || !register_operand (target, mode))
10736 target = gen_reg_rtx (mode);
10737 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10738 return target;
10739 }
10740
10741 return NULL_RTX;
10742 }
10743
10744 /* Implement TARGET_CALLEE_COPIES. The callee is responsible for copying
10745 arguments passed by hidden reference in the 32-bit HP runtime. Users
10746 can override this behavior for better compatibility with OpenMP at the
10747 risk of library incompatibilities. Arguments are always passed by value
10748 in the 64-bit HP runtime. */
10749
10750 static bool
10751 pa_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
10752 machine_mode mode ATTRIBUTE_UNUSED,
10753 const_tree type ATTRIBUTE_UNUSED,
10754 bool named ATTRIBUTE_UNUSED)
10755 {
10756 return !TARGET_CALLER_COPIES;
10757 }
10758
10759 #include "gt-pa.h"