/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "hashtab.h"
#include "function.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "reload.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "df.h"
#include "opts.h"
#include "builtins.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
                                              machine_mode, int *,
                                              const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
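
/* Illustrative example (not from the original source): given
   -mfixed-range=fr4-fr31, the loop above marks each register in the
   range as fixed and call-used, so the allocator never uses fr4-fr31;
   this is the kernel-mode scenario mentioned in the comment at the
   top of this function.  */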

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
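
/* Worked example (illustrative, not from the original source):
   ival = 0x12345000 passes: its low 11 bits are zero and bit 31 is
   clear, so the masked value X above is 0 and ldil can materialize
   the constant.  ival = 0x12345678 fails: 0x678 survives the 0x7ff
   mask, so X is nonzero.  */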

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
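
/* Worked example (illustrative, not from the original source):
   x = 0x1f0 (1 1111 0000 in binary) is a sign-extended 5-bit field
   shifted left by 4: lsb_mask = 0x10 and t = (0x1f + 0x10) & ~0xf
   = 0x20, a power of two, so the test succeeds.  x = 0x410
   (100 0001 0000) needs more than a sign-extended 5-bit field:
   t = (0x41 + 0x10) & ~0xf = 0x50, not a power of two, so it fails.  */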

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
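
/* Worked example (illustrative, not from the original source): both
   predicates rely on the fact that adding the least significant set
   bit to a contiguous run of ones carries through the whole run,
   leaving a power of two (or zero).  For pa_ior_mask_p, mask = 0x0ff0
   gives 0x0ff0 + 0x10 = 0x1000, a power of two, so it is accepted;
   the non-contiguous mask 0x0505 gives 0x0505 + 0x1 = 0x0506, which
   is not, so it is rejected.  */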
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= (mask + 1) / 2
          Y = (<large int> & ~mask) + mask + 1    Round up.
        else
          Y = (<large int> & ~mask)               Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
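
/* Worked example of the transformation above (illustrative, not from
   the original source): for a MODE_INT reference memory (X + 0x4321),
   the mask is 0x3fff; 0x4321 & 0x3fff = 0x321 is below halfway, so we
   round down to Y = 0x4000, set Z = X + 0x4000, and emit
   memory (Z + 0x321), whose displacement fits in 14 bits.  A nearby
   reference such as memory (X + 0x4400) reuses the same Z.  */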

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
1613 unscaled indexed addresses. */
1614 if (!TARGET_NO_SPACE_REGS
1615 && !cse_not_expected
1616 && GET_CODE (operand1) == MEM
1617 && GET_CODE (XEXP (operand1, 0)) == PLUS
1618 && REG_P (XEXP (XEXP (operand1, 0), 0))
1619 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1620 operand1
1621 = replace_equiv_address (operand1,
1622 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1623
1624 if (scratch_reg
1625 && reload_in_progress && GET_CODE (operand0) == REG
1626 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1627 operand0 = reg_equiv_mem (REGNO (operand0));
1628 else if (scratch_reg
1629 && reload_in_progress && GET_CODE (operand0) == SUBREG
1630 && GET_CODE (SUBREG_REG (operand0)) == REG
1631 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1632 {
1633 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1634 the code which tracks sets/uses for delete_output_reload. */
1635 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1636 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
1637 SUBREG_BYTE (operand0));
1638 operand0 = alter_subreg (&temp, true);
1639 }
1640
1641 if (scratch_reg
1642 && reload_in_progress && GET_CODE (operand1) == REG
1643 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1644 operand1 = reg_equiv_mem (REGNO (operand1));
1645 else if (scratch_reg
1646 && reload_in_progress && GET_CODE (operand1) == SUBREG
1647 && GET_CODE (SUBREG_REG (operand1)) == REG
1648 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1649 {
1650 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1651 the code which tracks sets/uses for delete_output_reload. */
1652 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1653 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1654 SUBREG_BYTE (operand1));
1655 operand1 = alter_subreg (&temp, true);
1656 }
1657
1658 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1659 && ((tem = find_replacement (&XEXP (operand0, 0)))
1660 != XEXP (operand0, 0)))
1661 operand0 = replace_equiv_address (operand0, tem);
1662
1663 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1664 && ((tem = find_replacement (&XEXP (operand1, 0)))
1665 != XEXP (operand1, 0)))
1666 operand1 = replace_equiv_address (operand1, tem);
1667
1668 /* Handle secondary reloads for loads/stores of FP registers from
1669 REG+D addresses where D does not fit in 5 or 14 bits, including
1670 (subreg (mem (addr))) cases. */
1671 if (scratch_reg
1672 && fp_reg_operand (operand0, mode)
1673 && (MEM_P (operand1)
1674 || (GET_CODE (operand1) == SUBREG
1675 && MEM_P (XEXP (operand1, 0))))
1676 && !floating_point_store_memory_operand (operand1, mode))
1677 {
1678 if (GET_CODE (operand1) == SUBREG)
1679 operand1 = XEXP (operand1, 0);
1680
1681 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1682 it in WORD_MODE regardless of what mode it was originally given
1683 to us. */
1684 scratch_reg = force_mode (word_mode, scratch_reg);
1685
1686 /* D might not fit in 14 bits either; for such cases load D into
1687 scratch reg. */
1688 if (reg_plus_base_memory_operand (operand1, mode)
1689 && !(TARGET_PA_20
1690 && !TARGET_ELF32
1691 && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
1692 {
1693 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1694 emit_move_insn (scratch_reg,
1695 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1696 Pmode,
1697 XEXP (XEXP (operand1, 0), 0),
1698 scratch_reg));
1699 }
1700 else
1701 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1702 emit_insn (gen_rtx_SET (operand0,
1703 replace_equiv_address (operand1, scratch_reg)));
1704 return 1;
1705 }
1706 else if (scratch_reg
1707 && fp_reg_operand (operand1, mode)
1708 && (MEM_P (operand0)
1709 || (GET_CODE (operand0) == SUBREG
1710 && MEM_P (XEXP (operand0, 0))))
1711 && !floating_point_store_memory_operand (operand0, mode))
1712 {
1713 if (GET_CODE (operand0) == SUBREG)
1714 operand0 = XEXP (operand0, 0);
1715
1716 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1717 it in WORD_MODE regardless of what mode it was originally given
1718 to us. */
1719 scratch_reg = force_mode (word_mode, scratch_reg);
1720
1721 /* D might not fit in 14 bits either; for such cases load D into
1722 scratch reg. */
1723 if (reg_plus_base_memory_operand (operand0, mode)
1724 && !(TARGET_PA_20
1725 && !TARGET_ELF32
1726 && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
1727 {
1728 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1729 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1730 0)),
1731 Pmode,
1732 XEXP (XEXP (operand0, 0),
1733 0),
1734 scratch_reg));
1735 }
1736 else
1737 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1738 emit_insn (gen_rtx_SET (replace_equiv_address (operand0, scratch_reg),
1739 operand1));
1740 return 1;
1741 }
1742 /* Handle secondary reloads for loads of FP registers from constant
1743 expressions by forcing the constant into memory. For the most part,
1744 this is only necessary for SImode and DImode.
1745
1746 Use scratch_reg to hold the address of the memory location. */
1747 else if (scratch_reg
1748 && CONSTANT_P (operand1)
1749 && fp_reg_operand (operand0, mode))
1750 {
1751 rtx const_mem, xoperands[2];
1752
1753 if (operand1 == CONST0_RTX (mode))
1754 {
1755 emit_insn (gen_rtx_SET (operand0, operand1));
1756 return 1;
1757 }
1758
1759 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1760 it in WORD_MODE regardless of what mode it was originally given
1761 to us. */
1762 scratch_reg = force_mode (word_mode, scratch_reg);
1763
1764 /* Force the constant into memory and put the address of the
1765 memory location into scratch_reg. */
1766 const_mem = force_const_mem (mode, operand1);
1767 xoperands[0] = scratch_reg;
1768 xoperands[1] = XEXP (const_mem, 0);
1769 pa_emit_move_sequence (xoperands, Pmode, 0);
1770
1771 /* Now load the destination register. */
1772 emit_insn (gen_rtx_SET (operand0,
1773 replace_equiv_address (const_mem, scratch_reg)));
1774 return 1;
1775 }
1776 /* Handle secondary reloads for SAR. These occur when trying to load
1777 the SAR from memory or a constant. */
1778 else if (scratch_reg
1779 && GET_CODE (operand0) == REG
1780 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1781 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1782 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1783 {
1784 /* D might not fit in 14 bits either; for such cases load D into
1785 scratch reg. */
1786 if (GET_CODE (operand1) == MEM
1787 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1788 {
1789 /* We are reloading the address into the scratch register, so we
1790 want to make sure the scratch register is a full register. */
1791 scratch_reg = force_mode (word_mode, scratch_reg);
1792
1793 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1794 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1795 0)),
1796 Pmode,
1797 XEXP (XEXP (operand1, 0),
1798 0),
1799 scratch_reg));
1800
1801 /* Now we are going to load the scratch register from memory;
1802 we want to load it in the same width as the original MEM,
1803 which must be the same as the width of the ultimate destination,
1804 OPERAND0. */
1805 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1806
1807 emit_move_insn (scratch_reg,
1808 replace_equiv_address (operand1, scratch_reg));
1809 }
1810 else
1811 {
1812 /* We want to load the scratch register using the same mode as
1813 the ultimate destination. */
1814 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1815
1816 emit_move_insn (scratch_reg, operand1);
1817 }
1818
1819 /* And emit the insn to set the ultimate destination. We know that
1820 the scratch register has the same mode as the destination at this
1821 point. */
1822 emit_move_insn (operand0, scratch_reg);
1823 return 1;
1824 }
1825 /* Handle the most common case: storing into a register. */
1826 else if (register_operand (operand0, mode))
1827 {
1828 /* Legitimize TLS symbol references. This happens for references
1829 that aren't legitimate constants. */
1830 if (PA_SYMBOL_REF_TLS_P (operand1))
1831 operand1 = legitimize_tls_address (operand1);
1832
1833 if (register_operand (operand1, mode)
1834 || (GET_CODE (operand1) == CONST_INT
1835 && pa_cint_ok_for_move (INTVAL (operand1)))
1836 || (operand1 == CONST0_RTX (mode))
1837 || (GET_CODE (operand1) == HIGH
1838 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1839 /* Only `general_operands' can come here, so MEM is ok. */
1840 || GET_CODE (operand1) == MEM)
1841 {
1842 /* Various sets are created during RTL generation which don't
1843 have the REG_POINTER flag correctly set. After the CSE pass,
1844 instruction recognition can fail if we don't consistently
1845 set this flag when performing register copies. This should
1846 also improve the opportunities for creating insns that use
1847 unscaled indexing. */
1848 if (REG_P (operand0) && REG_P (operand1))
1849 {
1850 if (REG_POINTER (operand1)
1851 && !REG_POINTER (operand0)
1852 && !HARD_REGISTER_P (operand0))
1853 copy_reg_pointer (operand0, operand1);
1854 }
1855
1856 /* When MEMs are broken out, the REG_POINTER flag doesn't
1857 get set. In some cases, we can set the REG_POINTER flag
1858 from the declaration for the MEM. */
1859 if (REG_P (operand0)
1860 && GET_CODE (operand1) == MEM
1861 && !REG_POINTER (operand0))
1862 {
1863 tree decl = MEM_EXPR (operand1);
1864
1865 /* Set the register pointer flag and register alignment
1866 if the declaration for this memory reference is a
1867 pointer type. */
1868 if (decl)
1869 {
1870 tree type;
1871
1872 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1873 tree operand 1. */
1874 if (TREE_CODE (decl) == COMPONENT_REF)
1875 decl = TREE_OPERAND (decl, 1);
1876
1877 type = TREE_TYPE (decl);
1878 type = strip_array_types (type);
1879
1880 if (POINTER_TYPE_P (type))
1881 {
1882 int align;
1883
1884 type = TREE_TYPE (type);
1885 /* Using TYPE_ALIGN_OK is rather conservative as
1886 only the Ada front end actually sets it. */
1887 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1888 : BITS_PER_UNIT);
1889 mark_reg_pointer (operand0, align);
1890 }
1891 }
1892 }
1893
1894 emit_insn (gen_rtx_SET (operand0, operand1));
1895 return 1;
1896 }
1897 }
1898 else if (GET_CODE (operand0) == MEM)
1899 {
1900 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1901 && !(reload_in_progress || reload_completed))
1902 {
1903 rtx temp = gen_reg_rtx (DFmode);
1904
1905 emit_insn (gen_rtx_SET (temp, operand1));
1906 emit_insn (gen_rtx_SET (operand0, temp));
1907 return 1;
1908 }
1909 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1910 {
1911 /* Run this case quickly. */
1912 emit_insn (gen_rtx_SET (operand0, operand1));
1913 return 1;
1914 }
1915 if (! (reload_in_progress || reload_completed))
1916 {
1917 operands[0] = validize_mem (operand0);
1918 operands[1] = operand1 = force_reg (mode, operand1);
1919 }
1920 }
1921
1922 /* Simplify the source if we need to.
1923 Note we do have to handle function labels here, even though we do
1924 not consider them legitimate constants. Loop optimizations can
1925 call emit_move_xxx with one as a source. */
1926 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1927 || (GET_CODE (operand1) == HIGH
1928 && symbolic_operand (XEXP (operand1, 0), mode))
1929 || function_label_operand (operand1, VOIDmode)
1930 || tls_referenced_p (operand1))
1931 {
1932 int ishighonly = 0;
1933
1934 if (GET_CODE (operand1) == HIGH)
1935 {
1936 ishighonly = 1;
1937 operand1 = XEXP (operand1, 0);
1938 }
1939 if (symbolic_operand (operand1, mode))
1940 {
1941 /* Argh. The assembler and linker can't handle arithmetic
1942 involving plabels.
1943
1944 So we force the plabel into memory, load operand0 from
1945 the memory location, then add in the constant part. */
1946 if ((GET_CODE (operand1) == CONST
1947 && GET_CODE (XEXP (operand1, 0)) == PLUS
1948 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1949 VOIDmode))
1950 || function_label_operand (operand1, VOIDmode))
1951 {
1952 rtx temp, const_part;
1953
1954 /* Figure out what (if any) scratch register to use. */
1955 if (reload_in_progress || reload_completed)
1956 {
1957 scratch_reg = scratch_reg ? scratch_reg : operand0;
1958 /* SCRATCH_REG will hold an address and maybe the actual
1959 data. We want it in WORD_MODE regardless of what mode it
1960 was originally given to us. */
1961 scratch_reg = force_mode (word_mode, scratch_reg);
1962 }
1963 else if (flag_pic)
1964 scratch_reg = gen_reg_rtx (Pmode);
1965
1966 if (GET_CODE (operand1) == CONST)
1967 {
1968 /* Save away the constant part of the expression. */
1969 const_part = XEXP (XEXP (operand1, 0), 1);
1970 gcc_assert (GET_CODE (const_part) == CONST_INT);
1971
1972 /* Force the function label into memory. */
1973 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1974 }
1975 else
1976 {
1977 /* No constant part. */
1978 const_part = NULL_RTX;
1979
1980 /* Force the function label into memory. */
1981 temp = force_const_mem (mode, operand1);
1982 }
1983
1984
1985 /* Get the address of the memory location. PIC-ify it if
1986 necessary. */
1987 temp = XEXP (temp, 0);
1988 if (flag_pic)
1989 temp = legitimize_pic_address (temp, mode, scratch_reg);
1990
1991 /* Put the address of the memory location into our destination
1992 register. */
1993 operands[1] = temp;
1994 pa_emit_move_sequence (operands, mode, scratch_reg);
1995
1996 /* Now load from the memory location into our destination
1997 register. */
1998 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1999 pa_emit_move_sequence (operands, mode, scratch_reg);
2000
2001 /* And add back in the constant part. */
2002 if (const_part != NULL_RTX)
2003 expand_inc (operand0, const_part);
2004
2005 return 1;
2006 }
2007
2008 if (flag_pic)
2009 {
2010 rtx_insn *insn;
2011 rtx temp;
2012
2013 if (reload_in_progress || reload_completed)
2014 {
2015 temp = scratch_reg ? scratch_reg : operand0;
2016 /* TEMP will hold an address and maybe the actual
2017 data. We want it in WORD_MODE regardless of what mode it
2018 was originally given to us. */
2019 temp = force_mode (word_mode, temp);
2020 }
2021 else
2022 temp = gen_reg_rtx (Pmode);
2023
2024 /* Force (const (plus (symbol) (const_int))) to memory
2025 if the const_int will not fit in 14 bits. Although
2026 this requires a relocation, the instruction sequence
2027 needed to load the value is shorter. */
2028 if (GET_CODE (operand1) == CONST
2029 && GET_CODE (XEXP (operand1, 0)) == PLUS
2030 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2031 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2032 {
2033 rtx x, m = force_const_mem (mode, operand1);
2034
2035 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2036 x = replace_equiv_address (m, x);
2037 insn = emit_move_insn (operand0, x);
2038 }
2039 else
2040 {
2041 operands[1] = legitimize_pic_address (operand1, mode, temp);
2042 if (REG_P (operand0) && REG_P (operands[1]))
2043 copy_reg_pointer (operand0, operands[1]);
2044 insn = emit_move_insn (operand0, operands[1]);
2045 }
2046
2047 /* Put a REG_EQUAL note on this insn. */
2048 set_unique_reg_note (insn, REG_EQUAL, operand1);
2049 }
2050 /* On the HPPA, references to data space are supposed to use dp,
2051 register 27, but showing it in the RTL inhibits various cse
2052 and loop optimizations. */
2053 else
2054 {
2055 rtx temp, set;
2056
2057 if (reload_in_progress || reload_completed)
2058 {
2059 temp = scratch_reg ? scratch_reg : operand0;
2060 /* TEMP will hold an address and maybe the actual
2061 data. We want it in WORD_MODE regardless of what mode it
2062 was originally given to us. */
2063 temp = force_mode (word_mode, temp);
2064 }
2065 else
2066 temp = gen_reg_rtx (mode);
2067
2068 /* Loading a SYMBOL_REF into a register makes that register
2069 safe to be used as the base in an indexed address.
2070
2071 Don't mark hard registers though. That loses. */
2072 if (GET_CODE (operand0) == REG
2073 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2074 mark_reg_pointer (operand0, BITS_PER_UNIT);
2075 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2076 mark_reg_pointer (temp, BITS_PER_UNIT);
2077
2078 if (ishighonly)
2079 set = gen_rtx_SET (operand0, temp);
2080 else
2081 set = gen_rtx_SET (operand0,
2082 gen_rtx_LO_SUM (mode, temp, operand1));
2083
2084 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2085 emit_insn (set);
2086
2087 }
2088 return 1;
2089 }
2090 else if (tls_referenced_p (operand1))
2091 {
2092 rtx tmp = operand1;
2093 rtx addend = NULL;
2094
2095 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2096 {
2097 addend = XEXP (XEXP (tmp, 0), 1);
2098 tmp = XEXP (XEXP (tmp, 0), 0);
2099 }
2100
2101 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2102 tmp = legitimize_tls_address (tmp);
2103 if (addend)
2104 {
2105 tmp = gen_rtx_PLUS (mode, tmp, addend);
2106 tmp = force_operand (tmp, operands[0]);
2107 }
2108 operands[1] = tmp;
2109 }
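/* For example (a sketch): (const (plus (symbol_ref "v") (const_int 8)))
   with a thread-local "v" is split above into the legitimized TLS
   address of "v" plus 8, with the addition forced into operands[0]. */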
2110 else if (GET_CODE (operand1) != CONST_INT
2111 || !pa_cint_ok_for_move (INTVAL (operand1)))
2112 {
2113 rtx temp;
2114 rtx_insn *insn;
2115 rtx op1 = operand1;
2116 HOST_WIDE_INT value = 0;
2117 HOST_WIDE_INT insv = 0;
2118 int insert = 0;
2119
2120 if (GET_CODE (operand1) == CONST_INT)
2121 value = INTVAL (operand1);
2122
2123 if (TARGET_64BIT
2124 && GET_CODE (operand1) == CONST_INT
2125 && HOST_BITS_PER_WIDE_INT > 32
2126 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2127 {
2128 HOST_WIDE_INT nval;
2129
2130 /* Extract the low order 32 bits of the value and sign extend.
2131 If the new value is the same as the original value, we
2132 can use the original value as-is. If the new value is
2133 different, we use it and insert the most-significant 32-bits
2134 of the original value into the final result. */
2135 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2136 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2137 if (value != nval)
2138 {
2139 #if HOST_BITS_PER_WIDE_INT > 32
2140 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2141 #endif
2142 insert = 1;
2143 value = nval;
2144 operand1 = GEN_INT (nval);
2145 }
2146 }
2147
2148 if (reload_in_progress || reload_completed)
2149 temp = scratch_reg ? scratch_reg : operand0;
2150 else
2151 temp = gen_reg_rtx (mode);
2152
2153 /* We don't directly split DImode constants on 32-bit targets
2154 because PLUS uses an 11-bit immediate and the insn sequence
2155 generated is not as efficient as the one using HIGH/LO_SUM. */
2156 if (GET_CODE (operand1) == CONST_INT
2157 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2158 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2159 && !insert)
2160 {
2161 /* Directly break constant into high and low parts. This
2162 provides better optimization opportunities because various
2163 passes recognize constants split with PLUS but not LO_SUM.
2164 We use a 14-bit signed low part except when the addition
2165 of 0x4000 to the high part might change the sign of the
2166 high part. */
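/* For example (a sketch): value = 0x12345678 splits into
   high = 0x12344000 and low = 0x1678, and low < 0x2000 needs no
   adjustment. For value = 0x3000, low = 0x3000 >= 0x2000, so high
   becomes 0x4000 and the recomputed low = -0x1000 stays in the
   14-bit signed range. */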
2167 HOST_WIDE_INT low = value & 0x3fff;
2168 HOST_WIDE_INT high = value & ~ 0x3fff;
2169
2170 if (low >= 0x2000)
2171 {
2172 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2173 high += 0x2000;
2174 else
2175 high += 0x4000;
2176 }
2177
2178 low = value - high;
2179
2180 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2181 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2182 }
2183 else
2184 {
2185 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2186 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2187 }
2188
2189 insn = emit_move_insn (operands[0], operands[1]);
2190
2191 /* Now insert the most significant 32 bits of the value
2192 into the register. When we don't have a second register
2193 available, it could take up to nine instructions to load
2194 a 64-bit integer constant. Prior to reload, we force
2195 constants that would take more than three instructions
2196 to load to the constant pool. During and after reload,
2197 we have to handle all possible values. */
2198 if (insert)
2199 {
2200 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2201 register and the value to be inserted is outside the
2202 range that can be loaded with three depdi instructions. */
2203 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2204 {
2205 operand1 = GEN_INT (insv);
2206
2207 emit_insn (gen_rtx_SET (temp,
2208 gen_rtx_HIGH (mode, operand1)));
2209 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2210 if (mode == DImode)
2211 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2212 const0_rtx, temp));
2213 else
2214 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2215 const0_rtx, temp));
2216 }
2217 else
2218 {
2219 int len = 5, pos = 27;
2220
2221 /* Insert the bits using the depdi instruction. */
2222 while (pos >= 0)
2223 {
2224 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2225 HOST_WIDE_INT sign = v5 < 0;
2226
2227 /* Left extend the insertion. */
2228 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2229 while (pos > 0 && (insv & 1) == sign)
2230 {
2231 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2232 len += 1;
2233 pos -= 1;
2234 }
2235
2236 if (mode == DImode)
2237 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2238 GEN_INT (pos), GEN_INT (v5)));
2239 else
2240 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2241 GEN_INT (pos), GEN_INT (v5)));
2242
2243 len = pos > 0 && pos < 5 ? pos : 5;
2244 pos -= len;
2245 }
2246 }
2247 }
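/* For example (a sketch): insv = 5 gives v5 = ((5 & 31) ^ 16) - 16 = 5
   on the first pass; the shifted insv is then zero, so the inner loop
   widens the field until pos reaches 0 and a single insv of length 32
   at position 0 deposits the value into the upper 32 bits. */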
2248
2249 set_unique_reg_note (insn, REG_EQUAL, op1);
2250
2251 return 1;
2252 }
2253 }
2254 /* Now have insn-emit do whatever it normally does. */
2255 return 0;
2256 }
2257
2258 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2259 it will need a link/runtime reloc). */
2260
2261 int
2262 pa_reloc_needed (tree exp)
2263 {
2264 int reloc = 0;
2265
2266 switch (TREE_CODE (exp))
2267 {
2268 case ADDR_EXPR:
2269 return 1;
2270
2271 case POINTER_PLUS_EXPR:
2272 case PLUS_EXPR:
2273 case MINUS_EXPR:
2274 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2275 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2276 break;
2277
2278 CASE_CONVERT:
2279 case NON_LVALUE_EXPR:
2280 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2281 break;
2282
2283 case CONSTRUCTOR:
2284 {
2285 tree value;
2286 unsigned HOST_WIDE_INT ix;
2287
2288 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2289 if (value)
2290 reloc |= pa_reloc_needed (value);
2291 }
2292 break;
2293
2294 case ERROR_MARK:
2295 break;
2296
2297 default:
2298 break;
2299 }
2300 return reloc;
2301 }
2302
2303 \f
2304 /* Return the best assembler insn template
2305 for moving operands[1] into operands[0] as a fullword. */
2306 const char *
2307 pa_singlemove_string (rtx *operands)
2308 {
2309 HOST_WIDE_INT intval;
2310
2311 if (GET_CODE (operands[0]) == MEM)
2312 return "stw %r1,%0";
2313 if (GET_CODE (operands[1]) == MEM)
2314 return "ldw %1,%0";
2315 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2316 {
2317 long i;
2318 REAL_VALUE_TYPE d;
2319
2320 gcc_assert (GET_MODE (operands[1]) == SFmode);
2321
2322 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2323 bit pattern. */
2324 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2325 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2326
2327 operands[1] = GEN_INT (i);
2328 /* Fall through to CONST_INT case. */
2329 }
2330 if (GET_CODE (operands[1]) == CONST_INT)
2331 {
2332 intval = INTVAL (operands[1]);
2333
2334 if (VAL_14_BITS_P (intval))
2335 return "ldi %1,%0";
2336 else if ((intval & 0x7ff) == 0)
2337 return "ldil L'%1,%0";
2338 else if (pa_zdepi_cint_p (intval))
2339 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2340 else
2341 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2342 }
2343 return "copy %1,%0";
2344 }
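/* For example (a sketch): a CONST_INT of 5 fits in 14 bits and yields
   "ldi 5,%0"; 0x55800 has its low 11 bits clear and yields a single
   ldil; a value such as 0x12345678 matches neither special case and
   needs the two-instruction ldil/ldo sequence. */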
2345 \f
2346
2347 /* Compute position (in OP[1]) and width (in OP[2])
2348 useful for copying IMM to a register using the zdepi
2349 instructions. Store the immediate value to insert in OP[0]. */
2350 static void
2351 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2352 {
2353 int lsb, len;
2354
2355 /* Find the least significant set bit in IMM. */
2356 for (lsb = 0; lsb < 32; lsb++)
2357 {
2358 if ((imm & 1) != 0)
2359 break;
2360 imm >>= 1;
2361 }
2362
2363 /* Choose variants based on the *sign* of the 5-bit field. */
2364 if ((imm & 0x10) == 0)
2365 len = (lsb <= 28) ? 4 : 32 - lsb;
2366 else
2367 {
2368 /* Find the width of the bitstring in IMM. */
2369 for (len = 5; len < 32 - lsb; len++)
2370 {
2371 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2372 break;
2373 }
2374
2375 /* Sign extend IMM as a 5-bit value. */
2376 imm = (imm & 0xf) - 0x10;
2377 }
2378
2379 op[0] = imm;
2380 op[1] = 31 - lsb;
2381 op[2] = len;
2382 }
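/* For example: imm = 0x00f00000 gives lsb = 20 and a shifted value of
   0xf with bit 4 clear, so op = {0xf, 11, 4}. For imm = 0x001f0000,
   the shifted value 0x1f has bit 4 set; the run is 5 bits wide and
   op[0] sign extends to -1, so op = {-1, 15, 5}. */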
2383
2384 /* Compute position (in OP[1]) and width (in OP[2])
2385 useful for copying IMM to a register using the depdi,z
2386 instructions. Store the immediate value to insert in OP[0]. */
2387
2388 static void
2389 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2390 {
2391 int lsb, len, maxlen;
2392
2393 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2394
2395 /* Find the least significant set bit in IMM. */
2396 for (lsb = 0; lsb < maxlen; lsb++)
2397 {
2398 if ((imm & 1) != 0)
2399 break;
2400 imm >>= 1;
2401 }
2402
2403 /* Choose variants based on the *sign* of the 5-bit field. */
2404 if ((imm & 0x10) == 0)
2405 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2406 else
2407 {
2408 /* Find the width of the bitstring in IMM. */
2409 for (len = 5; len < maxlen - lsb; len++)
2410 {
2411 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2412 break;
2413 }
2414
2415 /* Extend length if host is narrow and IMM is negative. */
2416 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2417 len += 32;
2418
2419 /* Sign extend IMM as a 5-bit value. */
2420 imm = (imm & 0xf) - 0x10;
2421 }
2422
2423 op[0] = imm;
2424 op[1] = 63 - lsb;
2425 op[2] = len;
2426 }
2427
2428 /* Output assembler code to perform a doubleword move insn
2429 with operands OPERANDS. */
2430
2431 const char *
2432 pa_output_move_double (rtx *operands)
2433 {
2434 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2435 rtx latehalf[2];
2436 rtx addreg0 = 0, addreg1 = 0;
2437
2438 /* First classify both operands. */
2439
2440 if (REG_P (operands[0]))
2441 optype0 = REGOP;
2442 else if (offsettable_memref_p (operands[0]))
2443 optype0 = OFFSOP;
2444 else if (GET_CODE (operands[0]) == MEM)
2445 optype0 = MEMOP;
2446 else
2447 optype0 = RNDOP;
2448
2449 if (REG_P (operands[1]))
2450 optype1 = REGOP;
2451 else if (CONSTANT_P (operands[1]))
2452 optype1 = CNSTOP;
2453 else if (offsettable_memref_p (operands[1]))
2454 optype1 = OFFSOP;
2455 else if (GET_CODE (operands[1]) == MEM)
2456 optype1 = MEMOP;
2457 else
2458 optype1 = RNDOP;
2459
2460 /* Check for the cases that the operand constraints are not
2461 supposed to allow. */
2462 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2463
2464 /* Handle copies between general and floating registers. */
2465
2466 if (optype0 == REGOP && optype1 == REGOP
2467 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2468 {
2469 if (FP_REG_P (operands[0]))
2470 {
2471 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2472 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2473 return "{fldds|fldd} -16(%%sp),%0";
2474 }
2475 else
2476 {
2477 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2478 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2479 return "{ldws|ldw} -12(%%sp),%R0";
2480 }
2481 }
2482
2483 /* Handle auto decrementing and incrementing loads and stores
2484 specifically, since the structure of the function doesn't work
2485 for them without major modification. Do it better when we teach
2486 this port about the general inc/dec addressing of the PA.
2487 (This was written by tege. Chide him if it doesn't work.) */
2488
2489 if (optype0 == MEMOP)
2490 {
2491 /* We have to output the address syntax ourselves, since print_operand
2492 doesn't deal with the addresses we want to use. Fix this later. */
2493
2494 rtx addr = XEXP (operands[0], 0);
2495 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2496 {
2497 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2498
2499 operands[0] = XEXP (addr, 0);
2500 gcc_assert (GET_CODE (operands[1]) == REG
2501 && GET_CODE (operands[0]) == REG);
2502
2503 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2504
2505 /* No overlap between high target register and address
2506 register. (We do this in a non-obvious way to
2507 save a register file writeback) */
2508 if (GET_CODE (addr) == POST_INC)
2509 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2510 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2511 }
2512 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2513 {
2514 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2515
2516 operands[0] = XEXP (addr, 0);
2517 gcc_assert (GET_CODE (operands[1]) == REG
2518 && GET_CODE (operands[0]) == REG);
2519
2520 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2521 /* No overlap between high target register and address
2522 register. (We do this in a non-obvious way to save a
2523 register file writeback) */
2524 if (GET_CODE (addr) == PRE_INC)
2525 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2526 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2527 }
2528 }
2529 if (optype1 == MEMOP)
2530 {
2531 /* We have to output the address syntax ourselves, since print_operand
2532 doesn't deal with the addresses we want to use. Fix this later. */
2533
2534 rtx addr = XEXP (operands[1], 0);
2535 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2536 {
2537 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2538
2539 operands[1] = XEXP (addr, 0);
2540 gcc_assert (GET_CODE (operands[0]) == REG
2541 && GET_CODE (operands[1]) == REG);
2542
2543 if (!reg_overlap_mentioned_p (high_reg, addr))
2544 {
2545 /* No overlap between high target register and address
2546 register. (We do this in a non-obvious way to
2547 save a register file writeback) */
2548 if (GET_CODE (addr) == POST_INC)
2549 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2550 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2551 }
2552 else
2553 {
2554 /* This is an undefined situation. We should load into the
2555 address register *and* update that register. Probably
2556 we don't need to handle this at all. */
2557 if (GET_CODE (addr) == POST_INC)
2558 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2559 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2560 }
2561 }
2562 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2563 {
2564 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2565
2566 operands[1] = XEXP (addr, 0);
2567 gcc_assert (GET_CODE (operands[0]) == REG
2568 && GET_CODE (operands[1]) == REG);
2569
2570 if (!reg_overlap_mentioned_p (high_reg, addr))
2571 {
2572 /* No overlap between high target register and address
2573 register. (We do this in a non-obvious way to
2574 save a register file writeback) */
2575 if (GET_CODE (addr) == PRE_INC)
2576 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2577 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2578 }
2579 else
2580 {
2581 /* This is an undefined situation. We should load into the
2582 address register *and* update that register. Probably
2583 we don't need to handle this at all. */
2584 if (GET_CODE (addr) == PRE_INC)
2585 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2586 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2587 }
2588 }
2589 else if (GET_CODE (addr) == PLUS
2590 && GET_CODE (XEXP (addr, 0)) == MULT)
2591 {
2592 rtx xoperands[4];
2593
2594 /* Load address into left half of destination register. */
2595 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2596 xoperands[1] = XEXP (addr, 1);
2597 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2598 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2599 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2600 xoperands);
2601 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2602 }
2603 else if (GET_CODE (addr) == PLUS
2604 && REG_P (XEXP (addr, 0))
2605 && REG_P (XEXP (addr, 1)))
2606 {
2607 rtx xoperands[3];
2608
2609 /* Load address into left half of destination register. */
2610 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2611 xoperands[1] = XEXP (addr, 0);
2612 xoperands[2] = XEXP (addr, 1);
2613 output_asm_insn ("{addl|add,l} %1,%2,%0",
2614 xoperands);
2615 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2616 }
2617 }
2618
2619 /* If an operand is an unoffsettable memory ref, find a register
2620 we can increment temporarily to make it refer to the second word. */
2621
2622 if (optype0 == MEMOP)
2623 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2624
2625 if (optype1 == MEMOP)
2626 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2627
2628 /* Ok, we can do one word at a time.
2629 Normally we do the low-numbered word first.
2630
2631 In either case, set up in LATEHALF the operands to use
2632 for the high-numbered word and in some cases alter the
2633 operands in OPERANDS to be suitable for the low-numbered word. */
2634
2635 if (optype0 == REGOP)
2636 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2637 else if (optype0 == OFFSOP)
2638 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2639 else
2640 latehalf[0] = operands[0];
2641
2642 if (optype1 == REGOP)
2643 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2644 else if (optype1 == OFFSOP)
2645 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2646 else if (optype1 == CNSTOP)
2647 split_double (operands[1], &operands[1], &latehalf[1]);
2648 else
2649 latehalf[1] = operands[1];
2650
2651 /* If the first move would clobber the source of the second one,
2652 do them in the other order.
2653
2654 This can happen in two cases:
2655
2656 mem -> register where the first half of the destination register
2657 is the same register used in the memory's address. Reload
2658 can create such insns.
2659
2660 mem in this case will be either register indirect or register
2661 indirect plus a valid offset.
2662
2663 register -> register move where REGNO(dst) == REGNO(src + 1)
2664 someone (Tim/Tege?) claimed this can happen for parameter loads.
2665
2666 Handle mem -> register case first. */
2667 if (optype0 == REGOP
2668 && (optype1 == MEMOP || optype1 == OFFSOP)
2669 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2670 {
2671 /* Do the late half first. */
2672 if (addreg1)
2673 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2674 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2675
2676 /* Then clobber. */
2677 if (addreg1)
2678 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2679 return pa_singlemove_string (operands);
2680 }
2681
2682 /* Now handle register -> register case. */
2683 if (optype0 == REGOP && optype1 == REGOP
2684 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2685 {
2686 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2687 return pa_singlemove_string (operands);
2688 }
2689
2690 /* Normal case: do the two words, low-numbered first. */
2691
2692 output_asm_insn (pa_singlemove_string (operands), operands);
2693
2694 /* Make any unoffsettable addresses point at high-numbered word. */
2695 if (addreg0)
2696 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2697 if (addreg1)
2698 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2699
2700 /* Do that word. */
2701 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2702
2703 /* Undo the adds we just did. */
2704 if (addreg0)
2705 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2706 if (addreg1)
2707 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2708
2709 return "";
2710 }
2711 \f
2712 const char *
2713 pa_output_fp_move_double (rtx *operands)
2714 {
2715 if (FP_REG_P (operands[0]))
2716 {
2717 if (FP_REG_P (operands[1])
2718 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2719 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2720 else
2721 output_asm_insn ("fldd%F1 %1,%0", operands);
2722 }
2723 else if (FP_REG_P (operands[1]))
2724 {
2725 output_asm_insn ("fstd%F0 %1,%0", operands);
2726 }
2727 else
2728 {
2729 rtx xoperands[2];
2730
2731 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2732
2733 /* This is a pain. You have to be prepared to deal with an
2734 arbitrary address here including pre/post increment/decrement.
2735
2736 So avoid this in the MD. */
2737 gcc_assert (GET_CODE (operands[0]) == REG);
2738
2739 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2740 xoperands[0] = operands[0];
2741 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2742 }
2743 return "";
2744 }
2745 \f
2746 /* Return a REG that occurs in ADDR with coefficient 1.
2747 ADDR can be effectively incremented by incrementing REG. */
2748
2749 static rtx
2750 find_addr_reg (rtx addr)
2751 {
2752 while (GET_CODE (addr) == PLUS)
2753 {
2754 if (GET_CODE (XEXP (addr, 0)) == REG)
2755 addr = XEXP (addr, 0);
2756 else if (GET_CODE (XEXP (addr, 1)) == REG)
2757 addr = XEXP (addr, 1);
2758 else if (CONSTANT_P (XEXP (addr, 0)))
2759 addr = XEXP (addr, 1);
2760 else if (CONSTANT_P (XEXP (addr, 1)))
2761 addr = XEXP (addr, 0);
2762 else
2763 gcc_unreachable ();
2764 }
2765 gcc_assert (GET_CODE (addr) == REG);
2766 return addr;
2767 }
2768
2769 /* Emit code to perform a block move.
2770
2771 OPERANDS[0] is the destination pointer as a REG, clobbered.
2772 OPERANDS[1] is the source pointer as a REG, clobbered.
2773 OPERANDS[2] is a register for temporary storage.
2774 OPERANDS[3] is a register for temporary storage.
2775 OPERANDS[4] is the size as a CONST_INT
2776 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2777 OPERANDS[6] is another temporary register. */
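/* For example (a sketch): n_bytes = 23 with align = 4 presets the loop
   counter to 15; the word loop runs twice (counter 15 -> 7 -> -1) and
   copies 16 bytes, the residual code copies one more word, and the
   final stby,e with %4 = 3 stores the remaining 3 bytes. */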
2778
2779 const char *
2780 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2781 {
2782 int align = INTVAL (operands[5]);
2783 unsigned long n_bytes = INTVAL (operands[4]);
2784
2785 /* We can't move more than a word at a time because the PA
2786 has no integer move insns longer than a word. (Could use fp mem ops?) */
2787 if (align > (TARGET_64BIT ? 8 : 4))
2788 align = (TARGET_64BIT ? 8 : 4);
2789
2790 /* Note that we know each loop below will execute at least twice
2791 (else we would have open-coded the copy). */
2792 switch (align)
2793 {
2794 case 8:
2795 /* Pre-adjust the loop counter. */
2796 operands[4] = GEN_INT (n_bytes - 16);
2797 output_asm_insn ("ldi %4,%2", operands);
2798
2799 /* Copying loop. */
2800 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2801 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2802 output_asm_insn ("std,ma %3,8(%0)", operands);
2803 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2804 output_asm_insn ("std,ma %6,8(%0)", operands);
2805
2806 /* Handle the residual. There could be up to 15 bytes of
2807 residual to copy! */
2808 if (n_bytes % 16 != 0)
2809 {
2810 operands[4] = GEN_INT (n_bytes % 8);
2811 if (n_bytes % 16 >= 8)
2812 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2813 if (n_bytes % 8 != 0)
2814 output_asm_insn ("ldd 0(%1),%6", operands);
2815 if (n_bytes % 16 >= 8)
2816 output_asm_insn ("std,ma %3,8(%0)", operands);
2817 if (n_bytes % 8 != 0)
2818 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2819 }
2820 return "";
2821
2822 case 4:
2823 /* Pre-adjust the loop counter. */
2824 operands[4] = GEN_INT (n_bytes - 8);
2825 output_asm_insn ("ldi %4,%2", operands);
2826
2827 /* Copying loop. */
2828 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2829 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2830 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2831 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2832 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2833
2834 /* Handle the residual. There could be up to 7 bytes of
2835 residual to copy! */
2836 if (n_bytes % 8 != 0)
2837 {
2838 operands[4] = GEN_INT (n_bytes % 4);
2839 if (n_bytes % 8 >= 4)
2840 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2841 if (n_bytes % 4 != 0)
2842 output_asm_insn ("ldw 0(%1),%6", operands);
2843 if (n_bytes % 8 >= 4)
2844 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2845 if (n_bytes % 4 != 0)
2846 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2847 }
2848 return "";
2849
2850 case 2:
2851 /* Pre-adjust the loop counter. */
2852 operands[4] = GEN_INT (n_bytes - 4);
2853 output_asm_insn ("ldi %4,%2", operands);
2854
2855 /* Copying loop. */
2856 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2857 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2858 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2859 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2860 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2861
2862 /* Handle the residual. */
2863 if (n_bytes % 4 != 0)
2864 {
2865 if (n_bytes % 4 >= 2)
2866 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2867 if (n_bytes % 2 != 0)
2868 output_asm_insn ("ldb 0(%1),%6", operands);
2869 if (n_bytes % 4 >= 2)
2870 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2871 if (n_bytes % 2 != 0)
2872 output_asm_insn ("stb %6,0(%0)", operands);
2873 }
2874 return "";
2875
2876 case 1:
2877 /* Pre-adjust the loop counter. */
2878 operands[4] = GEN_INT (n_bytes - 2);
2879 output_asm_insn ("ldi %4,%2", operands);
2880
2881 /* Copying loop. */
2882 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2883 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2884 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2885 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2886 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2887
2888 /* Handle the residual. */
2889 if (n_bytes % 2 != 0)
2890 {
2891 output_asm_insn ("ldb 0(%1),%3", operands);
2892 output_asm_insn ("stb %3,0(%0)", operands);
2893 }
2894 return "";
2895
2896 default:
2897 gcc_unreachable ();
2898 }
2899 }
2900
2901 /* Count the number of insns necessary to handle this block move.
2902
2903 Basic structure is the same as pa_output_block_move, except that we
2904 count insns rather than emit them. */
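/* For example, the 23-byte word-aligned copy sketched above counts the
   6 basic loop insns plus 2 for the word residual and 2 for the byte
   residual: 10 insns, so a length of 40 bytes. */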
2905
2906 static int
2907 compute_movmem_length (rtx_insn *insn)
2908 {
2909 rtx pat = PATTERN (insn);
2910 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2911 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2912 unsigned int n_insns = 0;
2913
2914 /* We can't move more than a word at a time because the PA
2915 has no integer move insns longer than a word. (Could use fp mem ops?) */
2916 if (align > (TARGET_64BIT ? 8 : 4))
2917 align = (TARGET_64BIT ? 8 : 4);
2918
2919 /* The basic copying loop. */
2920 n_insns = 6;
2921
2922 /* Residuals. */
2923 if (n_bytes % (2 * align) != 0)
2924 {
2925 if ((n_bytes % (2 * align)) >= align)
2926 n_insns += 2;
2927
2928 if ((n_bytes % align) != 0)
2929 n_insns += 2;
2930 }
2931
2932 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2933 return n_insns * 4;
2934 }
2935
2936 /* Emit code to perform a block clear.
2937
2938 OPERANDS[0] is the destination pointer as a REG, clobbered.
2939 OPERANDS[1] is a register for temporary storage.
2940 OPERANDS[2] is the size as a CONST_INT
2941 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2942
2943 const char *
2944 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2945 {
2946 int align = INTVAL (operands[3]);
2947 unsigned long n_bytes = INTVAL (operands[2]);
2948
2949 /* We can't clear more than a word at a time because the PA
2950 has no integer move insns longer than a word. */
2951 if (align > (TARGET_64BIT ? 8 : 4))
2952 align = (TARGET_64BIT ? 8 : 4);
2953
2954 /* Note that we know each loop below will execute at least twice
2955 (else we would have open-coded the clear). */
2956 switch (align)
2957 {
2958 case 8:
2959 /* Pre-adjust the loop counter. */
2960 operands[2] = GEN_INT (n_bytes - 16);
2961 output_asm_insn ("ldi %2,%1", operands);
2962
2963 /* Loop. */
2964 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2965 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2966 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2967
2968 /* Handle the residual. There could be up to 15 bytes of
2969 residual to copy! */
2970 if (n_bytes % 16 != 0)
2971 {
2972 operands[2] = GEN_INT (n_bytes % 8);
2973 if (n_bytes % 16 >= 8)
2974 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2975 if (n_bytes % 8 != 0)
2976 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2977 }
2978 return "";
2979
2980 case 4:
2981 /* Pre-adjust the loop counter. */
2982 operands[2] = GEN_INT (n_bytes - 8);
2983 output_asm_insn ("ldi %2,%1", operands);
2984
2985 /* Loop. */
2986 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2987 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2988 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2989
2990 /* Handle the residual. There could be up to 7 bytes of
2991 residual to copy! */
2992 if (n_bytes % 8 != 0)
2993 {
2994 operands[2] = GEN_INT (n_bytes % 4);
2995 if (n_bytes % 8 >= 4)
2996 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2997 if (n_bytes % 4 != 0)
2998 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2999 }
3000 return "";
3001
3002 case 2:
3003 /* Pre-adjust the loop counter. */
3004 operands[2] = GEN_INT (n_bytes - 4);
3005 output_asm_insn ("ldi %2,%1", operands);
3006
3007 /* Loop. */
3008 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3009 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3010 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3011
3012 /* Handle the residual. */
3013 if (n_bytes % 4 != 0)
3014 {
3015 if (n_bytes % 4 >= 2)
3016 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3017 if (n_bytes % 2 != 0)
3018 output_asm_insn ("stb %%r0,0(%0)", operands);
3019 }
3020 return "";
3021
3022 case 1:
3023 /* Pre-adjust the loop counter. */
3024 operands[2] = GEN_INT (n_bytes - 2);
3025 output_asm_insn ("ldi %2,%1", operands);
3026
3027 /* Loop. */
3028 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3029 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3030 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3031
3032 /* Handle the residual. */
3033 if (n_bytes % 2 != 0)
3034 output_asm_insn ("stb %%r0,0(%0)", operands);
3035
3036 return "";
3037
3038 default:
3039 gcc_unreachable ();
3040 }
3041 }
3042
3043 /* Count the number of insns necessary to handle this block clear.
3044
3045 Basic structure is the same as pa_output_block_clear, except that we
3046 count insns rather than emit them. */
3047
3048 static int
3049 compute_clrmem_length (rtx_insn *insn)
3050 {
3051 rtx pat = PATTERN (insn);
3052 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3053 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3054 unsigned int n_insns = 0;
3055
3056 /* We can't clear more than a word at a time because the PA
3057 has no integer move insns longer than a word. */
3058 if (align > (TARGET_64BIT ? 8 : 4))
3059 align = (TARGET_64BIT ? 8 : 4);
3060
3061 /* The basic loop. */
3062 n_insns = 4;
3063
3064 /* Residuals. */
3065 if (n_bytes % (2 * align) != 0)
3066 {
3067 if ((n_bytes % (2 * align)) >= align)
3068 n_insns++;
3069
3070 if ((n_bytes % align) != 0)
3071 n_insns++;
3072 }
3073
3074 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3075 return n_insns * 4;
3076 }
3077 \f
3078
3079 const char *
3080 pa_output_and (rtx *operands)
3081 {
3082 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3083 {
3084 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3085 int ls0, ls1, ms0, p, len;
3086
3087 for (ls0 = 0; ls0 < 32; ls0++)
3088 if ((mask & (1 << ls0)) == 0)
3089 break;
3090
3091 for (ls1 = ls0; ls1 < 32; ls1++)
3092 if ((mask & (1 << ls1)) != 0)
3093 break;
3094
3095 for (ms0 = ls1; ms0 < 32; ms0++)
3096 if ((mask & (1 << ms0)) == 0)
3097 break;
3098
3099 gcc_assert (ms0 == 32);
3100
3101 if (ls1 == 32)
3102 {
3103 len = ls0;
3104
3105 gcc_assert (len);
3106
3107 operands[2] = GEN_INT (len);
3108 return "{extru|extrw,u} %1,31,%2,%0";
3109 }
3110 else
3111 {
3112 /* We could use this `depi' for the case above as well, but `depi'
3113 requires one more register file access than an `extru'. */
3114
3115 p = 31 - ls0;
3116 len = ls1 - ls0;
3117
3118 operands[2] = GEN_INT (p);
3119 operands[3] = GEN_INT (len);
3120 return "{depi|depwi} 0,%2,%3,%0";
3121 }
3122 }
3123 else
3124 return "and %1,%2,%0";
3125 }
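/* For example (a sketch): operands[2] = 0x7f gives ls0 = 7 with no
   later set bits, so we return the extru form extracting the low 7
   bits; operands[2] = 0xffffff0f (a 4-bit hole at bits 4-7) gives
   p = 27 and len = 4, so we return the depi form that clears the hole
   in place. */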
3126
3127 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3128 storing the result in operands[0]. */
3129 const char *
3130 pa_output_64bit_and (rtx *operands)
3131 {
3132 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3133 {
3134 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3135 int ls0, ls1, ms0, p, len;
3136
3137 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3138 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3139 break;
3140
3141 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3142 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3143 break;
3144
3145 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3146 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3147 break;
3148
3149 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3150
3151 if (ls1 == HOST_BITS_PER_WIDE_INT)
3152 {
3153 len = ls0;
3154
3155 gcc_assert (len);
3156
3157 operands[2] = GEN_INT (len);
3158 return "extrd,u %1,63,%2,%0";
3159 }
3160 else
3161 {
3162 /* We could use `depdi' for the case above as well, but `depdi'
3163 requires one more register file access than an `extrd,u'. */
3164
3165 p = 63 - ls0;
3166 len = ls1 - ls0;
3167
3168 operands[2] = GEN_INT (p);
3169 operands[3] = GEN_INT (len);
3170 return "depdi 0,%2,%3,%0";
3171 }
3172 }
3173 else
3174 return "and %1,%2,%0";
3175 }
3176
3177 const char *
3178 pa_output_ior (rtx *operands)
3179 {
3180 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3181 int bs0, bs1, p, len;
3182
3183 if (INTVAL (operands[2]) == 0)
3184 return "copy %1,%0";
3185
3186 for (bs0 = 0; bs0 < 32; bs0++)
3187 if ((mask & (1 << bs0)) != 0)
3188 break;
3189
3190 for (bs1 = bs0; bs1 < 32; bs1++)
3191 if ((mask & (1 << bs1)) == 0)
3192 break;
3193
3194 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3195
3196 p = 31 - bs0;
3197 len = bs1 - bs0;
3198
3199 operands[2] = GEN_INT (p);
3200 operands[3] = GEN_INT (len);
3201 return "{depi|depwi} -1,%2,%3,%0";
3202 }
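/* For example (a sketch): operands[2] = 0x70 (bits 4-6) gives bs0 = 4
   and bs1 = 7, so p = 27 and len = 3 and we return "depi -1,27,3,%0",
   setting the three bits in place. */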
3203
3204 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3205 storing the result in operands[0]. */
3206 const char *
3207 pa_output_64bit_ior (rtx *operands)
3208 {
3209 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3210 int bs0, bs1, p, len;
3211
3212 if (INTVAL (operands[2]) == 0)
3213 return "copy %1,%0";
3214
3215 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3216 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3217 break;
3218
3219 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3220 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3221 break;
3222
3223 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3224 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3225
3226 p = 63 - bs0;
3227 len = bs1 - bs0;
3228
3229 operands[2] = GEN_INT (p);
3230 operands[3] = GEN_INT (len);
3231 return "depdi -1,%2,%3,%0";
3232 }
3233 \f
3234 /* Target hook for assembling integer objects. This code handles
3235 aligned SI and DI integers specially since function references
3236 must be preceded by P%. */
3237
3238 static bool
3239 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3240 {
3241 if (size == UNITS_PER_WORD
3242 && aligned_p
3243 && function_label_operand (x, VOIDmode))
3244 {
3245 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3246
3247 /* We don't want an OPD when generating fast indirect calls. */
3248 if (!TARGET_FAST_INDIRECT_CALLS)
3249 fputs ("P%", asm_out_file);
3250
3251 output_addr_const (asm_out_file, x);
3252 fputc ('\n', asm_out_file);
3253 return true;
3254 }
3255 return default_assemble_integer (x, size, aligned_p);
3256 }
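/* For example (a sketch): a word-sized, aligned reference to function
   foo is emitted as ".word P%foo" on 32-bit targets (".dword" when
   size is 8), with the P% omitted under -mfast-indirect-calls. */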
3257 \f
3258 /* Output an ASCII string. */
3259 void
3260 pa_output_ascii (FILE *file, const char *p, int size)
3261 {
3262 int i;
3263 int chars_output;
3264 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3265
3266 /* The HP assembler can only take strings of 256 characters at one
3267 time. This is a limitation on input line length, *not* the
3268 length of the string. Sigh. Even worse, it seems that the
3269 restriction is in number of input characters (see \xnn &
3270 \whatever). So we have to do this very carefully. */
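/* Each input byte expands to at most four output characters (e.g. a
   byte of 0x01 becomes \x01), so flushing whenever chars_output + co
   would exceed 243 keeps each emitted line comfortably below the
   256-character limit. */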
3271
3272 fputs ("\t.STRING \"", file);
3273
3274 chars_output = 0;
3275 for (i = 0; i < size; i += 4)
3276 {
3277 int co = 0;
3278 int io = 0;
3279 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3280 {
3281 register unsigned int c = (unsigned char) p[i + io];
3282
3283 if (c == '\"' || c == '\\')
3284 partial_output[co++] = '\\';
3285 if (c >= ' ' && c < 0177)
3286 partial_output[co++] = c;
3287 else
3288 {
3289 unsigned int hexd;
3290 partial_output[co++] = '\\';
3291 partial_output[co++] = 'x';
3292 hexd = c / 16 - 0 + '0';
3293 if (hexd > '9')
3294 hexd -= '9' - 'a' + 1;
3295 partial_output[co++] = hexd;
3296 hexd = c % 16 - 0 + '0';
3297 if (hexd > '9')
3298 hexd -= '9' - 'a' + 1;
3299 partial_output[co++] = hexd;
3300 }
3301 }
3302 if (chars_output + co > 243)
3303 {
3304 fputs ("\"\n\t.STRING \"", file);
3305 chars_output = 0;
3306 }
3307 fwrite (partial_output, 1, (size_t) co, file);
3308 chars_output += co;
3309 co = 0;
3310 }
3311 fputs ("\"\n", file);
3312 }
3313
3314 /* Try to rewrite floating point comparisons & branches to avoid
3315 useless add,tr insns.
3316
3317 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3318 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3319 first attempt to remove useless add,tr insns. It is zero
3320 for the second pass as reorg sometimes leaves bogus REG_DEAD
3321 notes lying around.
3322
3323 When CHECK_NOTES is zero we can only eliminate add,tr insns
3324 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3325 instructions. */
3326 static void
3327 remove_useless_addtr_insns (int check_notes)
3328 {
3329 rtx_insn *insn;
3330 static int pass = 0;
3331
3332 /* This is fairly cheap, so always run it when optimizing. */
3333 if (optimize > 0)
3334 {
3335 int fcmp_count = 0;
3336 int fbranch_count = 0;
3337
3338 /* Walk all the insns in this function looking for fcmp & fbranch
3339 instructions. Keep track of how many of each we find. */
3340 for (insn = get_insns (); insn; insn = next_insn (insn))
3341 {
3342 rtx tmp;
3343
3344 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3345 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3346 continue;
3347
3348 tmp = PATTERN (insn);
3349
3350 /* It must be a set. */
3351 if (GET_CODE (tmp) != SET)
3352 continue;
3353
3354 /* If the destination is CCFP, then we've found an fcmp insn. */
3355 tmp = SET_DEST (tmp);
3356 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3357 {
3358 fcmp_count++;
3359 continue;
3360 }
3361
3362 tmp = PATTERN (insn);
3363 /* If this is an fbranch instruction, bump the fbranch counter. */
3364 if (GET_CODE (tmp) == SET
3365 && SET_DEST (tmp) == pc_rtx
3366 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3367 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3368 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3369 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3370 {
3371 fbranch_count++;
3372 continue;
3373 }
3374 }
3375
3376
3377 /* Find all floating point compare + branch insns. If possible,
3378 reverse the comparison & the branch to avoid add,tr insns. */
3379 for (insn = get_insns (); insn; insn = next_insn (insn))
3380 {
3381 rtx tmp;
3382 rtx_insn *next;
3383
3384 /* Ignore anything that isn't an INSN. */
3385 if (! NONJUMP_INSN_P (insn))
3386 continue;
3387
3388 tmp = PATTERN (insn);
3389
3390 /* It must be a set. */
3391 if (GET_CODE (tmp) != SET)
3392 continue;
3393
3394 /* The destination must be CCFP, which is register zero. */
3395 tmp = SET_DEST (tmp);
3396 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3397 continue;
3398
3399 /* INSN should be a set of CCFP.
3400
3401 See if the result of this insn is used in a reversed FP
3402 conditional branch. If so, reverse our condition and
3403 the branch. Doing so avoids useless add,tr insns. */
3404 next = next_insn (insn);
3405 while (next)
3406 {
3407 /* Jumps, calls and labels stop our search. */
3408 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3409 break;
3410
3411 /* As does another fcmp insn. */
3412 if (NONJUMP_INSN_P (next)
3413 && GET_CODE (PATTERN (next)) == SET
3414 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3415 && REGNO (SET_DEST (PATTERN (next))) == 0)
3416 break;
3417
3418 next = next_insn (next);
3419 }
3420
3421 /* Is NEXT a branch? */
3422 if (next && JUMP_P (next))
3423 {
3424 rtx pattern = PATTERN (next);
3425
3426 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3427 and CCFP dies, then reverse our conditional and the branch
3428 to avoid the add,tr. */
3429 if (GET_CODE (pattern) == SET
3430 && SET_DEST (pattern) == pc_rtx
3431 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3432 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3433 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3434 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3435 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3436 && (fcmp_count == fbranch_count
3437 || (check_notes
3438 && find_regno_note (next, REG_DEAD, 0))))
3439 {
3440 /* Reverse the branch. */
3441 tmp = XEXP (SET_SRC (pattern), 1);
3442 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3443 XEXP (SET_SRC (pattern), 2) = tmp;
3444 INSN_CODE (next) = -1;
3445
3446 /* Reverse our condition. */
3447 tmp = PATTERN (insn);
3448 PUT_CODE (XEXP (tmp, 1),
3449 (reverse_condition_maybe_unordered
3450 (GET_CODE (XEXP (tmp, 1)))));
3451 }
3452 }
3453 }
3454 }
3455
3456 pass = !pass;
3457
3458 }
3459 \f
3460 /* You may have trouble believing this, but this is the 32 bit HP-PA
3461 stack layout. Wow.
3462
3463 Offset Contents
3464
3465 Variable arguments (optional; any number may be allocated)
3466
3467 SP-(4*(N+9)) arg word N
3468 : :
3469 SP-56 arg word 5
3470 SP-52 arg word 4
3471
3472 Fixed arguments (must be allocated; may remain unused)
3473
3474 SP-48 arg word 3
3475 SP-44 arg word 2
3476 SP-40 arg word 1
3477 SP-36 arg word 0
3478
3479 Frame Marker
3480
3481 SP-32 External Data Pointer (DP)
3482 SP-28 External sr4
3483 SP-24 External/stub RP (RP')
3484 SP-20 Current RP
3485 SP-16 Static Link
3486 SP-12 Clean up
3487 SP-8 Calling Stub RP (RP'')
3488 SP-4 Previous SP
3489
3490 Top of Frame
3491
3492 SP-0 Stack Pointer (points to next available address)
3493
3494 */
3495
3496 /* This function saves registers as follows. Registers marked with ' are
3497 this function's registers (as opposed to the previous function's).
3498 If a frame_pointer isn't needed, r4 is saved as a general register;
3499 the space for the frame pointer is still allocated, though, to keep
3500 things simple.
3501
3502
3503 Top of Frame
3504
3505 SP (FP') Previous FP
3506 SP + 4 Alignment filler (sigh)
3507 SP + 8 Space for locals reserved here.
3508 .
3509 .
3510 .
3511 SP + n All call saved registers used.
3512 .
3513 .
3514 .
3515 SP + o All call saved fp registers used.
3516 .
3517 .
3518 .
3519 SP + p (SP') points to next available address.
3520
3521 */
3522
3523 /* Global variables set by pa_output_function_prologue(). */
3524 /* Size of frame. Need to know this to emit return insns from
3525 leaf procedures. */
3526 static HOST_WIDE_INT actual_fsize, local_fsize;
3527 static int save_fregs;
3528
3529 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3530 Handle case where DISP > 8k by using the add_high_const patterns.
3531
3532 Note in the DISP > 8k case, we will leave the high part of the address
3533 in %r1. There is code in pa_expand_{prologue,epilogue} that knows this. */
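/* For example (a sketch, 32-bit): store_reg (3, 40000, 30) finds that
   40000 does not fit in 14 bits, so it emits %r1 = %r30 + high(40000)
   and stores %r3 through (mem (lo_sum %r1 40000)). */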
3534
3535 static void
3536 store_reg (int reg, HOST_WIDE_INT disp, int base)
3537 {
3538 rtx dest, src, basereg;
3539 rtx_insn *insn;
3540
3541 src = gen_rtx_REG (word_mode, reg);
3542 basereg = gen_rtx_REG (Pmode, base);
3543 if (VAL_14_BITS_P (disp))
3544 {
3545 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3546 insn = emit_move_insn (dest, src);
3547 }
3548 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3549 {
3550 rtx delta = GEN_INT (disp);
3551 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3552
3553 emit_move_insn (tmpreg, delta);
3554 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3555 if (DO_FRAME_NOTES)
3556 {
3557 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3558 gen_rtx_SET (tmpreg,
3559 gen_rtx_PLUS (Pmode, basereg, delta)));
3560 RTX_FRAME_RELATED_P (insn) = 1;
3561 }
3562 dest = gen_rtx_MEM (word_mode, tmpreg);
3563 insn = emit_move_insn (dest, src);
3564 }
3565 else
3566 {
3567 rtx delta = GEN_INT (disp);
3568 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3569 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3570
3571 emit_move_insn (tmpreg, high);
3572 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3573 insn = emit_move_insn (dest, src);
3574 if (DO_FRAME_NOTES)
3575 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3576 gen_rtx_SET (gen_rtx_MEM (word_mode,
3577 gen_rtx_PLUS (word_mode,
3578 basereg,
3579 delta)),
3580 src));
3581 }
3582
3583 if (DO_FRAME_NOTES)
3584 RTX_FRAME_RELATED_P (insn) = 1;
3585 }
3586
3587 /* Emit RTL to store REG at the memory location specified by BASE and then
3588 add MOD to BASE. MOD must be <= 8k. */
3589
3590 static void
3591 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3592 {
3593 rtx basereg, srcreg, delta;
3594 rtx_insn *insn;
3595
3596 gcc_assert (VAL_14_BITS_P (mod));
3597
3598 basereg = gen_rtx_REG (Pmode, base);
3599 srcreg = gen_rtx_REG (word_mode, reg);
3600 delta = GEN_INT (mod);
3601
3602 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3603 if (DO_FRAME_NOTES)
3604 {
3605 RTX_FRAME_RELATED_P (insn) = 1;
3606
3607 /* RTX_FRAME_RELATED_P must be set on each frame related set
3608 in a parallel with more than one element. */
3609 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3610 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3611 }
3612 }
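/* The post_store pattern used above models PA's store-with-modify
   (stwm on PA 1.x, stw,ma on PA 2.0): one insn that stores through the
   base register and then adds MOD to it.  A rough C model of the
   semantics (illustrative only; the word type is a stand-in):

     **basep = src;                                store at *base
     *basep = (long *) ((char *) *basep + mod);    base += mod

   Doing both in a single insn is what keeps the stack pointer and the
   stored word consistent at every instruction boundary.  */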
3613
3614 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3615 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3616 whether to add a frame note or not.
3617
3618 In the DISP > 8k case, we leave the high part of the address in %r1.
3619 There is code in pa_expand_{prologue,epilogue} that knows about this. */
3620
3621 static void
3622 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3623 {
3624 rtx_insn *insn;
3625
3626 if (VAL_14_BITS_P (disp))
3627 {
3628 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3629 plus_constant (Pmode,
3630 gen_rtx_REG (Pmode, base), disp));
3631 }
3632 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3633 {
3634 rtx basereg = gen_rtx_REG (Pmode, base);
3635 rtx delta = GEN_INT (disp);
3636 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3637
3638 emit_move_insn (tmpreg, delta);
3639 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3640 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3641 if (DO_FRAME_NOTES)
3642 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3643 gen_rtx_SET (tmpreg,
3644 gen_rtx_PLUS (Pmode, basereg, delta)));
3645 }
3646 else
3647 {
3648 rtx basereg = gen_rtx_REG (Pmode, base);
3649 rtx delta = GEN_INT (disp);
3650 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3651
3652 emit_move_insn (tmpreg,
3653 gen_rtx_PLUS (Pmode, basereg,
3654 gen_rtx_HIGH (Pmode, delta)));
3655 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3656 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3657 }
3658
3659 if (DO_FRAME_NOTES && note)
3660 RTX_FRAME_RELATED_P (insn) = 1;
3661 }
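/* For the generic DISP > 8k case above, the two moves typically
   assemble to an addil/ldo pair (illustrative syntax; register numbers
   are stand-ins, and the L%/R% selectors vary with assembler dialect):

     addil L%8192,%r30       ; %r1 = %r30 + left (high) part of 8192
     ldo R%8192(%r1),%r3     ; %r3 = %r1 + right (low) part of 8192  */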
3662
3663 HOST_WIDE_INT
3664 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3665 {
3666 int freg_saved = 0;
3667 int i, j;
3668
3669 /* The code in pa_expand_prologue and pa_expand_epilogue must
3670 be consistent with the rounding and size calculation done here.
3671 Change them at the same time. */
3672
3673 /* We do our own stack alignment. First, round the size of the
3674 stack locals up to a word boundary. */
3675 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3676
3677 /* Space for previous frame pointer + filler. If any frame is
3678 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3679 waste some space here for the sake of HP compatibility. The
3680 first slot is only used when the frame pointer is needed. */
3681 if (size || frame_pointer_needed)
3682 size += STARTING_FRAME_OFFSET;
3683
3684 /* If the current function calls __builtin_eh_return, then we need
3685 to allocate stack space for registers that will hold data for
3686 the exception handler. */
3687 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3688 {
3689 unsigned int i;
3690
3691 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3692 continue;
3693 size += i * UNITS_PER_WORD;
3694 }
3695
3696 /* Account for space used by the callee general register saves. */
3697 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3698 if (df_regs_ever_live_p (i))
3699 size += UNITS_PER_WORD;
3700
3701 /* Account for space used by the callee floating point register saves. */
3702 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3703 if (df_regs_ever_live_p (i)
3704 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3705 {
3706 freg_saved = 1;
3707
3708 /* We always save both halves of the FP register, so always
3709 increment the frame size by 8 bytes. */
3710 size += 8;
3711 }
3712
3713 /* If any of the floating registers are saved, account for the
3714 alignment needed for the floating point register save block. */
3715 if (freg_saved)
3716 {
3717 size = (size + 7) & ~7;
3718 if (fregs_live)
3719 *fregs_live = 1;
3720 }
3721
3722 /* The various ABIs include space for the outgoing parameters in the
3723 size of the current function's stack frame. We don't need to align
3724 for the outgoing arguments as their alignment is set by the final
3725 rounding for the frame as a whole. */
3726 size += crtl->outgoing_args_size;
3727
3728 /* Allocate space for the fixed frame marker. This space must be
3729 allocated for any function that makes calls or allocates
3730 stack space. */
3731 if (!crtl->is_leaf || size)
3732 size += TARGET_64BIT ? 48 : 32;
3733
3734 /* Finally, round to the preferred stack boundary. */
3735 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3736 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3737 }
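/* A simplified, standalone model of the computation above, assuming the
   usual 32-bit HP-UX values: 4-byte words, a 32-byte frame marker, and
   a 64-byte PREFERRED_STACK_BOUNDARY.  Register-save space, the EH data
   slots and STARTING_FRAME_OFFSET are omitted for brevity.  */

static long
model_frame_size (long locals, long outgoing_args, int is_leaf)
{
  long size = (locals + 3) & ~3;   /* round locals up to a word */
  size += outgoing_args;           /* args are part of our frame */
  if (!is_leaf || size != 0)
    size += 32;                    /* fixed frame marker */
  return (size + 63) & ~63;        /* final 64-byte rounding */
}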
3738
3739 /* Generate the assembly code for function entry. FILE is a stdio
3740 stream to output the code to. SIZE is an int: how many units of
3741 temporary storage to allocate.
3742
3743 Refer to the array `regs_ever_live' to determine which registers to
3744 save; `regs_ever_live[I]' is nonzero if register number I is ever
3745 used in the function. This function is responsible for knowing
3746 which registers should not be saved even if used. */
3747
3748 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3749 of memory. If any fpu reg is used in the function, we allocate
3750 such a block here, at the bottom of the frame, just in case it's needed.
3751
3752 If this function is a leaf procedure, then we may choose not
3753 to do a "save" insn. The decision about whether or not
3754 to do this is made in regclass.c. */
3755
3756 static void
3757 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3758 {
3759 /* The function's label and associated .PROC must never be
3760 separated and must be output *after* any profiling declarations
3761 to avoid changing spaces/subspaces within a procedure. */
3762 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3763 fputs ("\t.PROC\n", file);
3764
3765 /* pa_expand_prologue does the dirty work now. We just need
3766 to output the assembler directives which denote the start
3767 of a function. */
3768 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3769 if (crtl->is_leaf)
3770 fputs (",NO_CALLS", file);
3771 else
3772 fputs (",CALLS", file);
3773 if (rp_saved)
3774 fputs (",SAVE_RP", file);
3775
3776 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3777 at the beginning of the frame and that it is used as the frame
3778 pointer for the frame. We do this because our current frame
3779 layout doesn't conform to that specified in the HP runtime
3780 documentation and we need a way to indicate to programs such as
3781 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3782 isn't used by HP compilers but is supported by the assembler.
3783 However, SAVE_SP is supposed to indicate that the previous stack
3784 pointer has been saved in the frame marker. */
3785 if (frame_pointer_needed)
3786 fputs (",SAVE_SP", file);
3787
3788 /* Pass on information about the number of callee register saves
3789 performed in the prologue.
3790
3791 The compiler is supposed to pass the highest register number
3792 saved, the assembler then has to adjust that number before
3793 entering it into the unwind descriptor (to account for any
3794 caller saved registers with lower register numbers than the
3795 first callee saved register). */
3796 if (gr_saved)
3797 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3798
3799 if (fr_saved)
3800 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3801
3802 fputs ("\n\t.ENTRY\n", file);
3803
3804 remove_useless_addtr_insns (0);
3805 }
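/* For a small non-leaf function that saves %r2 (RP), %r3 and %r4 and
   needs a frame pointer, the directives emitted above would look
   roughly like this (illustrative output; the FRAME value depends on
   the function):

     foo:
             .PROC
             .CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=4
             .ENTRY
*/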
3806
3807 void
3808 pa_expand_prologue (void)
3809 {
3810 int merge_sp_adjust_with_store = 0;
3811 HOST_WIDE_INT size = get_frame_size ();
3812 HOST_WIDE_INT offset;
3813 int i;
3814 rtx tmpreg;
3815 rtx_insn *insn;
3816
3817 gr_saved = 0;
3818 fr_saved = 0;
3819 save_fregs = 0;
3820
3821 /* Compute total size for frame pointer, filler, locals and rounding to
3822 the next word boundary. Similar code appears in pa_compute_frame_size
3823 and must be changed in tandem with this code. */
3824 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3825 if (local_fsize || frame_pointer_needed)
3826 local_fsize += STARTING_FRAME_OFFSET;
3827
3828 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3829 if (flag_stack_usage_info)
3830 current_function_static_stack_size = actual_fsize;
3831
3832 /* Compute a few things we will use often. */
3833 tmpreg = gen_rtx_REG (word_mode, 1);
3834
3835 /* Save RP first. The calling conventions manual states RP will
3836 always be stored into the caller's frame at sp - 20 or sp - 16
3837 depending on which ABI is in use. */
3838 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3839 {
3840 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3841 rp_saved = true;
3842 }
3843 else
3844 rp_saved = false;
3845
3846 /* Allocate the local frame and set up the frame pointer if needed. */
3847 if (actual_fsize != 0)
3848 {
3849 if (frame_pointer_needed)
3850 {
3851 /* Copy the old frame pointer temporarily into %r1. Set up the
3852 new stack pointer, then store away the saved old frame pointer
3853 into the stack at sp and at the same time update the stack
3854 pointer by actual_fsize bytes. Two versions, first
3855 handles small (<8k) frames. The second handles large (>=8k)
3856 frames. */
3857 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3858 if (DO_FRAME_NOTES)
3859 RTX_FRAME_RELATED_P (insn) = 1;
3860
3861 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3862 if (DO_FRAME_NOTES)
3863 RTX_FRAME_RELATED_P (insn) = 1;
3864
3865 if (VAL_14_BITS_P (actual_fsize))
3866 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3867 else
3868 {
3869 /* It is incorrect to store the saved frame pointer at *sp,
3870 then increment sp (writes beyond the current stack boundary).
3871
3872 So instead use stwm to store at *sp and post-increment the
3873 stack pointer as an atomic operation. Then increment sp to
3874 finish allocating the new frame. */
3875 HOST_WIDE_INT adjust1 = 8192 - 64;
3876 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3877
3878 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3879 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3880 adjust2, 1);
3881 }
3882
3883 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3884 we need to store the previous stack pointer (frame pointer)
3885 into the frame marker on targets that use the HP unwind
3886 library. This allows the HP unwind library to be used to
3887 unwind GCC frames. However, we are not fully compatible
3888 with the HP library because our frame layout differs from
3889 that specified in the HP runtime specification.
3890
3891 We don't want a frame note on this instruction as the frame
3892 marker moves during dynamic stack allocation.
3893
3894 This instruction also serves as a blockage to prevent
3895 register spills from being scheduled before the stack
3896 pointer is raised. This is necessary as we store
3897 registers using the frame pointer as a base register,
3898 and the frame pointer is set before sp is raised. */
3899 if (TARGET_HPUX_UNWIND_LIBRARY)
3900 {
3901 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3902 GEN_INT (TARGET_64BIT ? -8 : -4));
3903
3904 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3905 hard_frame_pointer_rtx);
3906 }
3907 else
3908 emit_insn (gen_blockage ());
3909 }
3910 /* No frame pointer needed. */
3911 else
3912 {
3913 /* In some cases we can perform the first callee register save
3914 and allocate the stack frame at the same time. If so, just
3915 make a note of it and defer allocating the frame until saving
3916 the callee registers. */
3917 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3918 merge_sp_adjust_with_store = 1;
3919 /* Cannot optimize. Adjust the stack frame by actual_fsize
3920 bytes. */
3921 else
3922 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3923 actual_fsize, 1);
3924 }
3925 }
3926
3927 /* Normal register save.
3928
3929 Do not save the frame pointer in the frame_pointer_needed case. It
3930 was done earlier. */
3931 if (frame_pointer_needed)
3932 {
3933 offset = local_fsize;
3934
3935 /* Saving the EH return data registers in the frame is the simplest
3936 way to get the frame unwind information emitted. We put them
3937 just before the general registers. */
3938 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3939 {
3940 unsigned int i, regno;
3941
3942 for (i = 0; ; ++i)
3943 {
3944 regno = EH_RETURN_DATA_REGNO (i);
3945 if (regno == INVALID_REGNUM)
3946 break;
3947
3948 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3949 offset += UNITS_PER_WORD;
3950 }
3951 }
3952
3953 for (i = 18; i >= 4; i--)
3954 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3955 {
3956 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3957 offset += UNITS_PER_WORD;
3958 gr_saved++;
3959 }
3960 /* Account for %r3 which is saved in a special place. */
3961 gr_saved++;
3962 }
3963 /* No frame pointer needed. */
3964 else
3965 {
3966 offset = local_fsize - actual_fsize;
3967
3968 /* Saving the EH return data registers in the frame is the simplest
3969 way to get the frame unwind information emitted. */
3970 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3971 {
3972 unsigned int i, regno;
3973
3974 for (i = 0; ; ++i)
3975 {
3976 regno = EH_RETURN_DATA_REGNO (i);
3977 if (regno == INVALID_REGNUM)
3978 break;
3979
3980 /* If merge_sp_adjust_with_store is nonzero, then we can
3981 optimize the first save. */
3982 if (merge_sp_adjust_with_store)
3983 {
3984 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3985 merge_sp_adjust_with_store = 0;
3986 }
3987 else
3988 store_reg (regno, offset, STACK_POINTER_REGNUM);
3989 offset += UNITS_PER_WORD;
3990 }
3991 }
3992
3993 for (i = 18; i >= 3; i--)
3994 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3995 {
3996 /* If merge_sp_adjust_with_store is nonzero, then we can
3997 optimize the first GR save. */
3998 if (merge_sp_adjust_with_store)
3999 {
4000 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4001 merge_sp_adjust_with_store = 0;
4002 }
4003 else
4004 store_reg (i, offset, STACK_POINTER_REGNUM);
4005 offset += UNITS_PER_WORD;
4006 gr_saved++;
4007 }
4008
4009 /* If we wanted to merge the SP adjustment with a GR save, but we never
4010 did any GR saves, then just emit the adjustment here. */
4011 if (merge_sp_adjust_with_store)
4012 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4013 actual_fsize, 1);
4014 }
4015
4016 /* The hppa calling conventions say that %r19, the pic offset
4017 register, is saved at sp - 32 (in this function's frame)
4018 when generating PIC code. FIXME: What is the correct thing
4019 to do for functions which make no calls and allocate no
4020 frame? Do we need to allocate a frame, or can we just omit
4021 the save? For now we'll just omit the save.
4022
4023 We don't want a note on this insn as the frame marker can
4024 move if there is a dynamic stack allocation. */
4025 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4026 {
4027 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4028
4029 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4030
4031 }
4032
4033 /* Align pointer properly (doubleword boundary). */
4034 offset = (offset + 7) & ~7;
4035
4036 /* Floating point register store. */
4037 if (save_fregs)
4038 {
4039 rtx base;
4040
4041 /* First get the frame or stack pointer to the start of the FP register
4042 save area. */
4043 if (frame_pointer_needed)
4044 {
4045 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4046 base = hard_frame_pointer_rtx;
4047 }
4048 else
4049 {
4050 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4051 base = stack_pointer_rtx;
4052 }
4053
4054 /* Now actually save the FP registers. */
4055 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4056 {
4057 if (df_regs_ever_live_p (i)
4058 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4059 {
4060 rtx addr, reg;
4061 rtx_insn *insn;
4062 addr = gen_rtx_MEM (DFmode,
4063 gen_rtx_POST_INC (word_mode, tmpreg));
4064 reg = gen_rtx_REG (DFmode, i);
4065 insn = emit_move_insn (addr, reg);
4066 if (DO_FRAME_NOTES)
4067 {
4068 RTX_FRAME_RELATED_P (insn) = 1;
4069 if (TARGET_64BIT)
4070 {
4071 rtx mem = gen_rtx_MEM (DFmode,
4072 plus_constant (Pmode, base,
4073 offset));
4074 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4075 gen_rtx_SET (mem, reg));
4076 }
4077 else
4078 {
4079 rtx meml = gen_rtx_MEM (SFmode,
4080 plus_constant (Pmode, base,
4081 offset));
4082 rtx memr = gen_rtx_MEM (SFmode,
4083 plus_constant (Pmode, base,
4084 offset + 4));
4085 rtx regl = gen_rtx_REG (SFmode, i);
4086 rtx regr = gen_rtx_REG (SFmode, i + 1);
4087 rtx setl = gen_rtx_SET (meml, regl);
4088 rtx setr = gen_rtx_SET (memr, regr);
4089 rtvec vec;
4090
4091 RTX_FRAME_RELATED_P (setl) = 1;
4092 RTX_FRAME_RELATED_P (setr) = 1;
4093 vec = gen_rtvec (2, setl, setr);
4094 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4095 gen_rtx_SEQUENCE (VOIDmode, vec));
4096 }
4097 }
4098 offset += GET_MODE_SIZE (DFmode);
4099 fr_saved++;
4100 }
4101 }
4102 }
4103 }
4104
4105 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4106 Handle case where DISP > 8k by using the add_high_const patterns. */
4107
4108 static void
4109 load_reg (int reg, HOST_WIDE_INT disp, int base)
4110 {
4111 rtx dest = gen_rtx_REG (word_mode, reg);
4112 rtx basereg = gen_rtx_REG (Pmode, base);
4113 rtx src;
4114
4115 if (VAL_14_BITS_P (disp))
4116 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4117 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4118 {
4119 rtx delta = GEN_INT (disp);
4120 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4121
4122 emit_move_insn (tmpreg, delta);
4123 if (TARGET_DISABLE_INDEXING)
4124 {
4125 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4126 src = gen_rtx_MEM (word_mode, tmpreg);
4127 }
4128 else
4129 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4130 }
4131 else
4132 {
4133 rtx delta = GEN_INT (disp);
4134 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4135 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4136
4137 emit_move_insn (tmpreg, high);
4138 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4139 }
4140
4141 emit_move_insn (dest, src);
4142 }
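/* The two shapes generated above for the 64-bit large-displacement case
   correspond roughly to these assembler forms (illustrative; register
   numbers are stand-ins):

     indexed (default):      ldd %r1(%r30),%r4
     indexing disabled:      add %r1,%r30,%r1
                             ldd 0(%r1),%r4  */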
4143
4144 /* Update the total code bytes output to the text section. */
4145
4146 static void
4147 update_total_code_bytes (unsigned int nbytes)
4148 {
4149 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4150 && !IN_NAMED_SECTION_P (cfun->decl))
4151 {
4152 unsigned int old_total = total_code_bytes;
4153
4154 total_code_bytes += nbytes;
4155
4156 /* Be prepared to handle overflows. */
4157 if (old_total > total_code_bytes)
4158 total_code_bytes = UINT_MAX;
4159 }
4160 }
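/* The overflow check above relies on unsigned wraparound: if the
   addition wrapped, the new running total compares less than the old
   one.  A standalone illustration:

     unsigned int old_total = 0xfffffff0u;
     unsigned int total = old_total + 0x20;   /+ wraps to 0x10 +/
     if (old_total > total)
       total = UINT_MAX;                      /+ clamp, as above +/

   (Nested comment delimiters written as /+ +/ here.)  */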
4161
4162 /* This function generates the assembly code for function exit.
4163 Args are as for output_function_prologue ().
4164
4165 The function epilogue should not depend on the current stack
4166 pointer! It should use the frame pointer only. This is mandatory
4167 because of alloca; we also take advantage of it to omit stack
4168 adjustments before returning. */
4169
4170 static void
4171 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4172 {
4173 rtx_insn *insn = get_last_insn ();
4174 bool extra_nop;
4175
4176 /* pa_expand_epilogue does the dirty work now. We just need
4177 to output the assembler directives which denote the end
4178 of a function.
4179
4180 To make debuggers happy, emit a nop if the epilogue was completely
4181 eliminated due to a volatile call as the last insn in the
4182 current function. That way the return address (in %r2) will
4183 always point to a valid instruction in the current function. */
4184
4185 /* Get the last real insn. */
4186 if (NOTE_P (insn))
4187 insn = prev_real_insn (insn);
4188
4189 /* If it is a sequence, then look inside. */
4190 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4191 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4192
4193 /* If insn is a CALL_INSN, then it must be a call to a volatile
4194 function (otherwise there would be epilogue insns). */
4195 if (insn && CALL_P (insn))
4196 {
4197 fputs ("\tnop\n", file);
4198 extra_nop = true;
4199 }
4200 else
4201 extra_nop = false;
4202
4203 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4204
4205 if (TARGET_SOM && TARGET_GAS)
4206 {
4207 /* We are done with this subspace except possibly for some additional
4208 debug information. Forget that we are in this subspace to ensure
4209 that the next function is output in its own subspace. */
4210 in_section = NULL;
4211 cfun->machine->in_nsubspa = 2;
4212 }
4213
4214 /* Thunks do their own insn accounting. */
4215 if (cfun->is_thunk)
4216 return;
4217
4218 if (INSN_ADDRESSES_SET_P ())
4219 {
4220 last_address = extra_nop ? 4 : 0;
4221 insn = get_last_nonnote_insn ();
4222 if (insn)
4223 {
4224 last_address += INSN_ADDRESSES (INSN_UID (insn));
4225 if (INSN_P (insn))
4226 last_address += insn_default_length (insn);
4227 }
4228 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4229 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4230 }
4231 else
4232 last_address = UINT_MAX;
4233
4234 /* Finally, update the total number of code bytes output so far. */
4235 update_total_code_bytes (last_address);
4236 }
4237
4238 void
4239 pa_expand_epilogue (void)
4240 {
4241 rtx tmpreg;
4242 HOST_WIDE_INT offset;
4243 HOST_WIDE_INT ret_off = 0;
4244 int i;
4245 int merge_sp_adjust_with_load = 0;
4246
4247 /* We will use this often. */
4248 tmpreg = gen_rtx_REG (word_mode, 1);
4249
4250 /* Try to restore RP early to avoid load/use interlocks when
4251 RP gets used in the return (bv) instruction. This appears to still
4252 be necessary even when we schedule the prologue and epilogue. */
4253 if (rp_saved)
4254 {
4255 ret_off = TARGET_64BIT ? -16 : -20;
4256 if (frame_pointer_needed)
4257 {
4258 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4259 ret_off = 0;
4260 }
4261 else
4262 {
4263 /* No frame pointer, and stack is smaller than 8k. */
4264 if (VAL_14_BITS_P (ret_off - actual_fsize))
4265 {
4266 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4267 ret_off = 0;
4268 }
4269 }
4270 }
4271
4272 /* General register restores. */
4273 if (frame_pointer_needed)
4274 {
4275 offset = local_fsize;
4276
4277 /* If the current function calls __builtin_eh_return, then we need
4278 to restore the saved EH data registers. */
4279 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4280 {
4281 unsigned int i, regno;
4282
4283 for (i = 0; ; ++i)
4284 {
4285 regno = EH_RETURN_DATA_REGNO (i);
4286 if (regno == INVALID_REGNUM)
4287 break;
4288
4289 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4290 offset += UNITS_PER_WORD;
4291 }
4292 }
4293
4294 for (i = 18; i >= 4; i--)
4295 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4296 {
4297 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4298 offset += UNITS_PER_WORD;
4299 }
4300 }
4301 else
4302 {
4303 offset = local_fsize - actual_fsize;
4304
4305 /* If the current function calls __builtin_eh_return, then we need
4306 to restore the saved EH data registers. */
4307 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4308 {
4309 unsigned int i, regno;
4310
4311 for (i = 0; ; ++i)
4312 {
4313 regno = EH_RETURN_DATA_REGNO (i);
4314 if (regno == INVALID_REGNUM)
4315 break;
4316
4317 /* Only for the first load.
4318 merge_sp_adjust_with_load holds the register load
4319 with which we will merge the sp adjustment. */
4320 if (merge_sp_adjust_with_load == 0
4321 && local_fsize == 0
4322 && VAL_14_BITS_P (-actual_fsize))
4323 merge_sp_adjust_with_load = regno;
4324 else
4325 load_reg (regno, offset, STACK_POINTER_REGNUM);
4326 offset += UNITS_PER_WORD;
4327 }
4328 }
4329
4330 for (i = 18; i >= 3; i--)
4331 {
4332 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4333 {
4334 /* Only for the first load.
4335 merge_sp_adjust_with_load holds the register load
4336 with which we will merge the sp adjustment. */
4337 if (merge_sp_adjust_with_load == 0
4338 && local_fsize == 0
4339 && VAL_14_BITS_P (-actual_fsize))
4340 merge_sp_adjust_with_load = i;
4341 else
4342 load_reg (i, offset, STACK_POINTER_REGNUM);
4343 offset += UNITS_PER_WORD;
4344 }
4345 }
4346 }
4347
4348 /* Align pointer properly (doubleword boundary). */
4349 offset = (offset + 7) & ~7;
4350
4351 /* FP register restores. */
4352 if (save_fregs)
4353 {
4354 /* Adjust the register to index off of. */
4355 if (frame_pointer_needed)
4356 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4357 else
4358 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4359
4360 /* Actually do the restores now. */
4361 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4362 if (df_regs_ever_live_p (i)
4363 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4364 {
4365 rtx src = gen_rtx_MEM (DFmode,
4366 gen_rtx_POST_INC (word_mode, tmpreg));
4367 rtx dest = gen_rtx_REG (DFmode, i);
4368 emit_move_insn (dest, src);
4369 }
4370 }
4371
4372 /* Emit a blockage insn here to keep these insns from being moved to
4373 an earlier spot in the epilogue, or into the main instruction stream.
4374
4375 This is necessary as we must not cut the stack back before all the
4376 restores are finished. */
4377 emit_insn (gen_blockage ());
4378
4379 /* Reset stack pointer (and possibly frame pointer). The stack
4380 pointer is initially set to fp + 64 to avoid a race condition. */
4381 if (frame_pointer_needed)
4382 {
4383 rtx delta = GEN_INT (-64);
4384
4385 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4386 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4387 stack_pointer_rtx, delta));
4388 }
4389 /* If we were deferring a callee register restore, do it now. */
4390 else if (merge_sp_adjust_with_load)
4391 {
4392 rtx delta = GEN_INT (-actual_fsize);
4393 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4394
4395 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4396 }
4397 else if (actual_fsize != 0)
4398 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4399 - actual_fsize, 0);
4400
4401 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4402 frame greater than 8k), do so now. */
4403 if (ret_off != 0)
4404 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4405
4406 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4407 {
4408 rtx sa = EH_RETURN_STACKADJ_RTX;
4409
4410 emit_insn (gen_blockage ());
4411 emit_insn (TARGET_64BIT
4412 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4413 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4414 }
4415 }
4416
4417 bool
4418 pa_can_use_return_insn (void)
4419 {
4420 if (!reload_completed)
4421 return false;
4422
4423 if (frame_pointer_needed)
4424 return false;
4425
4426 if (df_regs_ever_live_p (2))
4427 return false;
4428
4429 if (crtl->profile)
4430 return false;
4431
4432 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4433 }
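/* As a concrete example, a leaf function that touches only caller-saved
   registers, never stores %r2, is not profiled, and allocates no locals
   satisfies all four checks, so it can return with the bare return
   pattern (a bv %r0(%r2)) and skip the epilogue entirely.  */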
4434
4435 rtx
4436 hppa_pic_save_rtx (void)
4437 {
4438 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4439 }
4440
4441 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4442 #define NO_DEFERRED_PROFILE_COUNTERS 0
4443 #endif
4444
4445
4446 /* Vector of funcdef numbers. */
4447 static vec<int> funcdef_nos;
4448
4449 /* Output deferred profile counters. */
4450 static void
4451 output_deferred_profile_counters (void)
4452 {
4453 unsigned int i;
4454 int align, n;
4455
4456 if (funcdef_nos.is_empty ())
4457 return;
4458
4459 switch_to_section (data_section);
4460 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4461 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4462
4463 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4464 {
4465 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4466 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4467 }
4468
4469 funcdef_nos.release ();
4470 }
4471
4472 void
4473 hppa_profile_hook (int label_no)
4474 {
4475 /* We use SImode for the address of the function in both 32 and
4476 64-bit code to avoid having to provide DImode versions of the
4477 lcla2 and load_offset_label_address insn patterns. */
4478 rtx reg = gen_reg_rtx (SImode);
4479 rtx_code_label *label_rtx = gen_label_rtx ();
4480 rtx begin_label_rtx;
4481 rtx_insn *call_insn;
4482 char begin_label_name[16];
4483
4484 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4485 label_no);
4486 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4487
4488 if (TARGET_64BIT)
4489 emit_move_insn (arg_pointer_rtx,
4490 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4491 GEN_INT (64)));
4492
4493 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4494
4495 /* The address of the function is loaded into %r25 with an instruction-
4496 relative sequence that avoids the use of relocations. The sequence
4497 is split so that the load_offset_label_address instruction can
4498 occupy the delay slot of the call to _mcount. */
4499 if (TARGET_PA_20)
4500 emit_insn (gen_lcla2 (reg, label_rtx));
4501 else
4502 emit_insn (gen_lcla1 (reg, label_rtx));
4503
4504 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4505 reg, begin_label_rtx, label_rtx));
4506
4507 #if !NO_DEFERRED_PROFILE_COUNTERS
4508 {
4509 rtx count_label_rtx, addr, r24;
4510 char count_label_name[16];
4511
4512 funcdef_nos.safe_push (label_no);
4513 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4514 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4515
4516 addr = force_reg (Pmode, count_label_rtx);
4517 r24 = gen_rtx_REG (Pmode, 24);
4518 emit_move_insn (r24, addr);
4519
4520 call_insn =
4521 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4522 gen_rtx_SYMBOL_REF (Pmode,
4523 "_mcount")),
4524 GEN_INT (TARGET_64BIT ? 24 : 12)));
4525
4526 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4527 }
4528 #else
4529
4530 call_insn =
4531 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4532 gen_rtx_SYMBOL_REF (Pmode,
4533 "_mcount")),
4534 GEN_INT (TARGET_64BIT ? 16 : 8)));
4535
4536 #endif
4537
4538 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4539 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4540
4541 /* Indicate the _mcount call cannot throw, nor will it execute a
4542 non-local goto. */
4543 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4544 }
4545
4546 /* Fetch the return address for the frame COUNT steps up from
4547 the current frame, after the prologue. FRAMEADDR is the
4548 frame pointer of the COUNT frame.
4549
4550 We want to ignore any export stub remnants here. To handle this,
4551 we examine the code at the return address, and if it is an export
4552 stub, we return a memory rtx for the stub return address stored
4553 at frame-24.
4554
4555 The value returned is used in two different ways:
4556
4557 1. To find a function's caller.
4558
4559 2. To change the return address for a function.
4560
4561 This function handles most instances of case 1; however, it will
4562 fail if there are two levels of stubs to execute on the return
4563 path. The only way I believe that can happen is if the return value
4564 needs a parameter relocation, which never happens for C code.
4565
4566 This function handles most instances of case 2; however, it will
4567 fail if we did not originally have stub code on the return path
4568 but will need stub code on the new return path. This can happen if
4569 the caller & callee are both in the main program, but the new
4570 return location is in a shared library. */
4571
4572 rtx
4573 pa_return_addr_rtx (int count, rtx frameaddr)
4574 {
4575 rtx label;
4576 rtx rp;
4577 rtx saved_rp;
4578 rtx ins;
4579
4580 /* The instruction stream at the return address of a PA1.X export stub is:
4581
4582 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4583 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4584 0x00011820 | stub+16: mtsp r1,sr0
4585 0xe0400002 | stub+20: be,n 0(sr0,rp)
4586
4587 0xe0400002 must be specified as -532676606 so that it won't be
4588 rejected as an invalid immediate operand on 64-bit hosts.
4589
4590 The instruction stream at the return address of a PA2.0 export stub is:
4591
4592 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4593 0xe840d002 | stub+12: bve,n (rp)
4594 */
4595
4596 HOST_WIDE_INT insns[4];
4597 int i, len;
4598
4599 if (count != 0)
4600 return NULL_RTX;
4601
4602 rp = get_hard_reg_initial_val (Pmode, 2);
4603
4604 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4605 return rp;
4606
4607 /* If there is no export stub then just use the value saved from
4608 the return pointer register. */
4609
4610 saved_rp = gen_reg_rtx (Pmode);
4611 emit_move_insn (saved_rp, rp);
4612
4613 /* Get pointer to the instruction stream. We have to mask out the
4614 privilege level from the two low order bits of the return address
4615 pointer here so that ins will point to the start of the first
4616 instruction that would have been executed if we returned. */
4617 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4618 label = gen_label_rtx ();
4619
4620 if (TARGET_PA_20)
4621 {
4622 insns[0] = 0x4bc23fd1;
4623 insns[1] = -398405630;
4624 len = 2;
4625 }
4626 else
4627 {
4628 insns[0] = 0x4bc23fd1;
4629 insns[1] = 0x004010a1;
4630 insns[2] = 0x00011820;
4631 insns[3] = -532676606;
4632 len = 4;
4633 }
4634
4635 /* Check the instruction stream at the normal return address for the
4636 export stub. If it is an export stub, then our return address is
4637 really in -24[frameaddr]. */
4638
4639 for (i = 0; i < len; i++)
4640 {
4641 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4642 rtx op1 = GEN_INT (insns[i]);
4643 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4644 }
4645
4646 /* Here we know that our return address points to an export
4647 stub. We don't want to return the address of the export stub,
4648 but rather the return address of the export stub. That return
4649 address is stored at -24[frameaddr]. */
4650
4651 emit_move_insn (saved_rp,
4652 gen_rtx_MEM (Pmode,
4653 memory_address (Pmode,
4654 plus_constant (Pmode, frameaddr,
4655 -24))));
4656
4657 emit_label (label);
4658
4659 return saved_rp;
4660 }
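/* A standalone check that the signed constants used above really are
   the stub opcodes reinterpreted as 32-bit two's-complement values
   (compile and run separately; nothing here is part of the port).  */

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  assert ((int32_t) 0xe840d002u == -398405630);   /* bve,n (rp) */
  assert ((int32_t) 0xe0400002u == -532676606);   /* be,n 0(sr0,rp) */
  return 0;
}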
4661
4662 void
4663 pa_emit_bcond_fp (rtx operands[])
4664 {
4665 enum rtx_code code = GET_CODE (operands[0]);
4666 rtx operand0 = operands[1];
4667 rtx operand1 = operands[2];
4668 rtx label = operands[3];
4669
4670 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4671 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4672
4673 emit_jump_insn (gen_rtx_SET (pc_rtx,
4674 gen_rtx_IF_THEN_ELSE (VOIDmode,
4675 gen_rtx_fmt_ee (NE,
4676 VOIDmode,
4677 gen_rtx_REG (CCFPmode, 0),
4678 const0_rtx),
4679 gen_rtx_LABEL_REF (VOIDmode, label),
4680 pc_rtx)));
4681
4682 }
4683
4684 /* Adjust the cost of a scheduling dependency. Return the new cost of
4685 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4686
4687 static int
4688 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4689 {
4690 enum attr_type attr_type;
4691
4692 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4693 true dependencies as they are described with bypasses now. */
4694 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4695 return cost;
4696
4697 if (! recog_memoized (insn))
4698 return 0;
4699
4700 attr_type = get_attr_type (insn);
4701
4702 switch (REG_NOTE_KIND (link))
4703 {
4704 case REG_DEP_ANTI:
4705 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4706 cycles later. */
4707
4708 if (attr_type == TYPE_FPLOAD)
4709 {
4710 rtx pat = PATTERN (insn);
4711 rtx dep_pat = PATTERN (dep_insn);
4712 if (GET_CODE (pat) == PARALLEL)
4713 {
4714 /* This happens for the fldXs,mb patterns. */
4715 pat = XVECEXP (pat, 0, 0);
4716 }
4717 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4718 /* If this happens, we have to extend this to schedule
4719 optimally. Return 0 for now. */
4720 return 0;
4721
4722 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4723 {
4724 if (! recog_memoized (dep_insn))
4725 return 0;
4726 switch (get_attr_type (dep_insn))
4727 {
4728 case TYPE_FPALU:
4729 case TYPE_FPMULSGL:
4730 case TYPE_FPMULDBL:
4731 case TYPE_FPDIVSGL:
4732 case TYPE_FPDIVDBL:
4733 case TYPE_FPSQRTSGL:
4734 case TYPE_FPSQRTDBL:
4735 /* A fpload can't be issued until one cycle before a
4736 preceding arithmetic operation has finished if
4737 the target of the fpload is any of the sources
4738 (or destination) of the arithmetic operation. */
4739 return insn_default_latency (dep_insn) - 1;
4740
4741 default:
4742 return 0;
4743 }
4744 }
4745 }
4746 else if (attr_type == TYPE_FPALU)
4747 {
4748 rtx pat = PATTERN (insn);
4749 rtx dep_pat = PATTERN (dep_insn);
4750 if (GET_CODE (pat) == PARALLEL)
4751 {
4752 /* This happens for the fldXs,mb patterns. */
4753 pat = XVECEXP (pat, 0, 0);
4754 }
4755 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4756 /* If this happens, we have to extend this to schedule
4757 optimally. Return 0 for now. */
4758 return 0;
4759
4760 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4761 {
4762 if (! recog_memoized (dep_insn))
4763 return 0;
4764 switch (get_attr_type (dep_insn))
4765 {
4766 case TYPE_FPDIVSGL:
4767 case TYPE_FPDIVDBL:
4768 case TYPE_FPSQRTSGL:
4769 case TYPE_FPSQRTDBL:
4770 /* An ALU flop can't be issued until two cycles before a
4771 preceding divide or sqrt operation has finished if
4772 the target of the ALU flop is any of the sources
4773 (or destination) of the divide or sqrt operation. */
4774 return insn_default_latency (dep_insn) - 2;
4775
4776 default:
4777 return 0;
4778 }
4779 }
4780 }
4781
4782 /* For other anti dependencies, the cost is 0. */
4783 return 0;
4784
4785 case REG_DEP_OUTPUT:
4786 /* Output dependency; DEP_INSN writes a register that INSN writes some
4787 cycles later. */
4788 if (attr_type == TYPE_FPLOAD)
4789 {
4790 rtx pat = PATTERN (insn);
4791 rtx dep_pat = PATTERN (dep_insn);
4792 if (GET_CODE (pat) == PARALLEL)
4793 {
4794 /* This happens for the fldXs,mb patterns. */
4795 pat = XVECEXP (pat, 0, 0);
4796 }
4797 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4798 /* If this happens, we have to extend this to schedule
4799 optimally. Return 0 for now. */
4800 return 0;
4801
4802 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4803 {
4804 if (! recog_memoized (dep_insn))
4805 return 0;
4806 switch (get_attr_type (dep_insn))
4807 {
4808 case TYPE_FPALU:
4809 case TYPE_FPMULSGL:
4810 case TYPE_FPMULDBL:
4811 case TYPE_FPDIVSGL:
4812 case TYPE_FPDIVDBL:
4813 case TYPE_FPSQRTSGL:
4814 case TYPE_FPSQRTDBL:
4815 /* A fpload can't be issued until one cycle before a
4816 preceding arithmetic operation has finished if
4817 the target of the fpload is the destination of the
4818 arithmetic operation.
4819
4820 Exception: For PA7100LC, PA7200 and PA7300, the cost
4821 is 3 cycles, unless they bundle together. We also
4822 pay the penalty if the second insn is a fpload. */
4823 return insn_default_latency (dep_insn) - 1;
4824
4825 default:
4826 return 0;
4827 }
4828 }
4829 }
4830 else if (attr_type == TYPE_FPALU)
4831 {
4832 rtx pat = PATTERN (insn);
4833 rtx dep_pat = PATTERN (dep_insn);
4834 if (GET_CODE (pat) == PARALLEL)
4835 {
4836 /* This happens for the fldXs,mb patterns. */
4837 pat = XVECEXP (pat, 0, 0);
4838 }
4839 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4840 /* If this happens, we have to extend this to schedule
4841 optimally. Return 0 for now. */
4842 return 0;
4843
4844 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4845 {
4846 if (! recog_memoized (dep_insn))
4847 return 0;
4848 switch (get_attr_type (dep_insn))
4849 {
4850 case TYPE_FPDIVSGL:
4851 case TYPE_FPDIVDBL:
4852 case TYPE_FPSQRTSGL:
4853 case TYPE_FPSQRTDBL:
4854 /* An ALU flop can't be issued until two cycles before a
4855 preceding divide or sqrt operation has finished if
4856 the target of the ALU flop is also the target of
4857 the divide or sqrt operation. */
4858 return insn_default_latency (dep_insn) - 2;
4859
4860 default:
4861 return 0;
4862 }
4863 }
4864 }
4865
4866 /* For other output dependencies, the cost is 0. */
4867 return 0;
4868
4869 default:
4870 gcc_unreachable ();
4871 }
4872 }
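/* Worked example of the adjustment above: if DEP_INSN is an fpmul with
   a default latency of 3 cycles and INSN is an fpload whose target
   feeds that multiply, the anti-dependence cost becomes 3 - 1 = 2,
   letting the load issue one cycle earlier than a true dependence would
   allow.  (Illustrative numbers; the real latencies come from the
   scheduling description in pa.md.)  */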
4873
4874 /* Adjust scheduling priorities. We use this to try and keep addil
4875 and the next use of %r1 close together. */
4876 static int
4877 pa_adjust_priority (rtx_insn *insn, int priority)
4878 {
4879 rtx set = single_set (insn);
4880 rtx src, dest;
4881 if (set)
4882 {
4883 src = SET_SRC (set);
4884 dest = SET_DEST (set);
4885 if (GET_CODE (src) == LO_SUM
4886 && symbolic_operand (XEXP (src, 1), VOIDmode)
4887 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4888 priority >>= 3;
4889
4890 else if (GET_CODE (src) == MEM
4891 && GET_CODE (XEXP (src, 0)) == LO_SUM
4892 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4893 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4894 priority >>= 1;
4895
4896 else if (GET_CODE (dest) == MEM
4897 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4898 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4899 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4900 priority >>= 3;
4901 }
4902 return priority;
4903 }
4904
4905 /* The 700 can only issue a single insn at a time.
4906 The 7XXX processors can issue two insns at a time.
4907 The 8000 can issue 4 insns at a time. */
4908 static int
4909 pa_issue_rate (void)
4910 {
4911 switch (pa_cpu)
4912 {
4913 case PROCESSOR_700: return 1;
4914 case PROCESSOR_7100: return 2;
4915 case PROCESSOR_7100LC: return 2;
4916 case PROCESSOR_7200: return 2;
4917 case PROCESSOR_7300: return 2;
4918 case PROCESSOR_8000: return 4;
4919
4920 default:
4921 gcc_unreachable ();
4922 }
4923 }
4924
4925
4926
4927 /* Return any length plus adjustment needed by INSN which already has
4928 its length computed as LENGTH. Return LENGTH if no adjustment is
4929 necessary.
4930
4931 Also compute the length of an inline block move here as it is too
4932 complicated to express as a length attribute in pa.md. */
4933 int
4934 pa_adjust_insn_length (rtx_insn *insn, int length)
4935 {
4936 rtx pat = PATTERN (insn);
4937
4938 /* If length is negative or undefined, provide initial length. */
4939 if ((unsigned int) length >= INT_MAX)
4940 {
4941 if (GET_CODE (pat) == SEQUENCE)
4942 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4943
4944 switch (get_attr_type (insn))
4945 {
4946 case TYPE_MILLI:
4947 length = pa_attr_length_millicode_call (insn);
4948 break;
4949 case TYPE_CALL:
4950 length = pa_attr_length_call (insn, 0);
4951 break;
4952 case TYPE_SIBCALL:
4953 length = pa_attr_length_call (insn, 1);
4954 break;
4955 case TYPE_DYNCALL:
4956 length = pa_attr_length_indirect_call (insn);
4957 break;
4958 case TYPE_SH_FUNC_ADRS:
4959 length = pa_attr_length_millicode_call (insn) + 20;
4960 break;
4961 default:
4962 gcc_unreachable ();
4963 }
4964 }
4965
4966 /* Block move pattern. */
4967 if (NONJUMP_INSN_P (insn)
4968 && GET_CODE (pat) == PARALLEL
4969 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4970 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4971 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4972 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4973 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4974 length += compute_movmem_length (insn) - 4;
4975 /* Block clear pattern. */
4976 else if (NONJUMP_INSN_P (insn)
4977 && GET_CODE (pat) == PARALLEL
4978 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4979 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4980 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4981 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4982 length += compute_clrmem_length (insn) - 4;
4983 /* Conditional branch with an unfilled delay slot. */
4984 else if (JUMP_P (insn) && ! simplejump_p (insn))
4985 {
4986 /* Adjust a short backwards conditional with an unfilled delay slot. */
4987 if (GET_CODE (pat) == SET
4988 && length == 4
4989 && JUMP_LABEL (insn) != NULL_RTX
4990 && ! forward_branch_p (insn))
4991 length += 4;
4992 else if (GET_CODE (pat) == PARALLEL
4993 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4994 && length == 4)
4995 length += 4;
4996 /* Adjust dbra insn with short backwards conditional branch with
4997 unfilled delay slot -- only for case where counter is in a
4998 general register. */
4999 else if (GET_CODE (pat) == PARALLEL
5000 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5001 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5002 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5003 && length == 4
5004 && ! forward_branch_p (insn))
5005 length += 4;
5006 }
5007 return length;
5008 }
5009
5010 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5011
5012 static bool
5013 pa_print_operand_punct_valid_p (unsigned char code)
5014 {
5015 if (code == '@'
5016 || code == '#'
5017 || code == '*'
5018 || code == '^')
5019 return true;
5020
5021 return false;
5022 }
5023
5024 /* Print operand X (an rtx) in assembler syntax to file FILE.
5025 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5026 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5027
5028 void
5029 pa_print_operand (FILE *file, rtx x, int code)
5030 {
5031 switch (code)
5032 {
5033 case '#':
5034 /* Output a 'nop' if there's nothing for the delay slot. */
5035 if (dbr_sequence_length () == 0)
5036 fputs ("\n\tnop", file);
5037 return;
5038 case '*':
5039 /* Output a nullification completer if there's nothing for the
5040 delay slot or nullification is requested. */
5041 if (dbr_sequence_length () == 0
5042 || (final_sequence
5043 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5044 fputs (",n", file);
5045 return;
5046 case 'R':
5047 /* Print out the second register name of a register pair.
5048 I.e., R (6) => 7. */
5049 fputs (reg_names[REGNO (x) + 1], file);
5050 return;
5051 case 'r':
5052 /* A register or zero. */
5053 if (x == const0_rtx
5054 || (x == CONST0_RTX (DFmode))
5055 || (x == CONST0_RTX (SFmode)))
5056 {
5057 fputs ("%r0", file);
5058 return;
5059 }
5060 else
5061 break;
5062 case 'f':
5063 /* A register or zero (floating point). */
5064 if (x == const0_rtx
5065 || (x == CONST0_RTX (DFmode))
5066 || (x == CONST0_RTX (SFmode)))
5067 {
5068 fputs ("%fr0", file);
5069 return;
5070 }
5071 else
5072 break;
5073 case 'A':
5074 {
5075 rtx xoperands[2];
5076
5077 xoperands[0] = XEXP (XEXP (x, 0), 0);
5078 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5079 pa_output_global_address (file, xoperands[1], 0);
5080 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5081 return;
5082 }
5083
5084 case 'C': /* Plain (C)ondition */
5085 case 'X':
5086 switch (GET_CODE (x))
5087 {
5088 case EQ:
5089 fputs ("=", file); break;
5090 case NE:
5091 fputs ("<>", file); break;
5092 case GT:
5093 fputs (">", file); break;
5094 case GE:
5095 fputs (">=", file); break;
5096 case GEU:
5097 fputs (">>=", file); break;
5098 case GTU:
5099 fputs (">>", file); break;
5100 case LT:
5101 fputs ("<", file); break;
5102 case LE:
5103 fputs ("<=", file); break;
5104 case LEU:
5105 fputs ("<<=", file); break;
5106 case LTU:
5107 fputs ("<<", file); break;
5108 default:
5109 gcc_unreachable ();
5110 }
5111 return;
5112 case 'N': /* Condition, (N)egated */
5113 switch (GET_CODE (x))
5114 {
5115 case EQ:
5116 fputs ("<>", file); break;
5117 case NE:
5118 fputs ("=", file); break;
5119 case GT:
5120 fputs ("<=", file); break;
5121 case GE:
5122 fputs ("<", file); break;
5123 case GEU:
5124 fputs ("<<", file); break;
5125 case GTU:
5126 fputs ("<<=", file); break;
5127 case LT:
5128 fputs (">=", file); break;
5129 case LE:
5130 fputs (">", file); break;
5131 case LEU:
5132 fputs (">>", file); break;
5133 case LTU:
5134 fputs (">>=", file); break;
5135 default:
5136 gcc_unreachable ();
5137 }
5138 return;
5139 /* For floating point comparisons. Note that the output
5140 predicates are the complement of the desired condition. The
5141 conditions for GT, GE, LT, LE and LTGT cause an invalid
5142 operation exception if the result is unordered and this
5143 exception is enabled in the floating-point status register. */
5144 case 'Y':
5145 switch (GET_CODE (x))
5146 {
5147 case EQ:
5148 fputs ("!=", file); break;
5149 case NE:
5150 fputs ("=", file); break;
5151 case GT:
5152 fputs ("!>", file); break;
5153 case GE:
5154 fputs ("!>=", file); break;
5155 case LT:
5156 fputs ("!<", file); break;
5157 case LE:
5158 fputs ("!<=", file); break;
5159 case LTGT:
5160 fputs ("!<>", file); break;
5161 case UNLE:
5162 fputs ("!?<=", file); break;
5163 case UNLT:
5164 fputs ("!?<", file); break;
5165 case UNGE:
5166 fputs ("!?>=", file); break;
5167 case UNGT:
5168 fputs ("!?>", file); break;
5169 case UNEQ:
5170 fputs ("!?=", file); break;
5171 case UNORDERED:
5172 fputs ("!?", file); break;
5173 case ORDERED:
5174 fputs ("?", file); break;
5175 default:
5176 gcc_unreachable ();
5177 }
5178 return;
5179 case 'S': /* Condition, operands are (S)wapped. */
5180 switch (GET_CODE (x))
5181 {
5182 case EQ:
5183 fputs ("=", file); break;
5184 case NE:
5185 fputs ("<>", file); break;
5186 case GT:
5187 fputs ("<", file); break;
5188 case GE:
5189 fputs ("<=", file); break;
5190 case GEU:
5191 fputs ("<<=", file); break;
5192 case GTU:
5193 fputs ("<<", file); break;
5194 case LT:
5195 fputs (">", file); break;
5196 case LE:
5197 fputs (">=", file); break;
5198 case LEU:
5199 fputs (">>=", file); break;
5200 case LTU:
5201 fputs (">>", file); break;
5202 default:
5203 gcc_unreachable ();
5204 }
5205 return;
5206 case 'B': /* Condition, (B)oth swapped and negate. */
5207 switch (GET_CODE (x))
5208 {
5209 case EQ:
5210 fputs ("<>", file); break;
5211 case NE:
5212 fputs ("=", file); break;
5213 case GT:
5214 fputs (">=", file); break;
5215 case GE:
5216 fputs (">", file); break;
5217 case GEU:
5218 fputs (">>", file); break;
5219 case GTU:
5220 fputs (">>=", file); break;
5221 case LT:
5222 fputs ("<=", file); break;
5223 case LE:
5224 fputs ("<", file); break;
5225 case LEU:
5226 fputs ("<<", file); break;
5227 case LTU:
5228 fputs ("<<=", file); break;
5229 default:
5230 gcc_unreachable ();
5231 }
5232 return;
5233 case 'k':
5234 gcc_assert (GET_CODE (x) == CONST_INT);
5235 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5236 return;
5237 case 'Q':
5238 gcc_assert (GET_CODE (x) == CONST_INT);
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5240 return;
5241 case 'L':
5242 gcc_assert (GET_CODE (x) == CONST_INT);
5243 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5244 return;
5245 case 'O':
5246 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5247 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5248 return;
5249 case 'p':
5250 gcc_assert (GET_CODE (x) == CONST_INT);
5251 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5252 return;
5253 case 'P':
5254 gcc_assert (GET_CODE (x) == CONST_INT);
5255 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5256 return;
5257 case 'I':
5258 if (GET_CODE (x) == CONST_INT)
5259 fputs ("i", file);
5260 return;
5261 case 'M':
5262 case 'F':
5263 switch (GET_CODE (XEXP (x, 0)))
5264 {
5265 case PRE_DEC:
5266 case PRE_INC:
5267 if (ASSEMBLER_DIALECT == 0)
5268 fputs ("s,mb", file);
5269 else
5270 fputs (",mb", file);
5271 break;
5272 case POST_DEC:
5273 case POST_INC:
5274 if (ASSEMBLER_DIALECT == 0)
5275 fputs ("s,ma", file);
5276 else
5277 fputs (",ma", file);
5278 break;
5279 case PLUS:
5280 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5281 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5282 {
5283 if (ASSEMBLER_DIALECT == 0)
5284 fputs ("x", file);
5285 }
5286 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5287 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5288 {
5289 if (ASSEMBLER_DIALECT == 0)
5290 fputs ("x,s", file);
5291 else
5292 fputs (",s", file);
5293 }
5294 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5295 fputs ("s", file);
5296 break;
5297 default:
5298 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5299 fputs ("s", file);
5300 break;
5301 }
5302 return;
5303 case 'G':
5304 pa_output_global_address (file, x, 0);
5305 return;
5306 case 'H':
5307 pa_output_global_address (file, x, 1);
5308 return;
5309 case 0: /* Don't do anything special */
5310 break;
5311 case 'Z':
5312 {
5313 unsigned op[3];
5314 compute_zdepwi_operands (INTVAL (x), op);
5315 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5316 return;
5317 }
5318 case 'z':
5319 {
5320 unsigned op[3];
5321 compute_zdepdi_operands (INTVAL (x), op);
5322 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5323 return;
5324 }
5325 case 'c':
5326 /* We can get here from a .vtable_inherit due to our
5327 CONSTANT_ADDRESS_P rejecting perfectly good constant
5328 addresses. */
5329 break;
5330 default:
5331 gcc_unreachable ();
5332 }
5333 if (GET_CODE (x) == REG)
5334 {
5335 fputs (reg_names [REGNO (x)], file);
5336 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5337 {
5338 fputs ("R", file);
5339 return;
5340 }
5341 if (FP_REG_P (x)
5342 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5343 && (REGNO (x) & 1) == 0)
5344 fputs ("L", file);
5345 }
5346 else if (GET_CODE (x) == MEM)
5347 {
5348 int size = GET_MODE_SIZE (GET_MODE (x));
5349 rtx base = NULL_RTX;
5350 switch (GET_CODE (XEXP (x, 0)))
5351 {
5352 case PRE_DEC:
5353 case POST_DEC:
5354 base = XEXP (XEXP (x, 0), 0);
5355 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5356 break;
5357 case PRE_INC:
5358 case POST_INC:
5359 base = XEXP (XEXP (x, 0), 0);
5360 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5361 break;
5362 case PLUS:
5363 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5364 fprintf (file, "%s(%s)",
5365 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5366 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5367 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5368 fprintf (file, "%s(%s)",
5369 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5370 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5371 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5372 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5373 {
5374 /* Because the REG_POINTER flag can get lost during reload,
5375 pa_legitimate_address_p canonicalizes the order of the
5376 index and base registers in the combined move patterns. */
5377 rtx base = XEXP (XEXP (x, 0), 1);
5378 rtx index = XEXP (XEXP (x, 0), 0);
5379
5380 fprintf (file, "%s(%s)",
5381 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5382 }
5383 else
5384 output_address (XEXP (x, 0));
5385 break;
5386 default:
5387 output_address (XEXP (x, 0));
5388 break;
5389 }
5390 }
5391 else
5392 output_addr_const (file, x);
5393 }
5394
5395 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5396
5397 void
5398 pa_output_global_address (FILE *file, rtx x, int round_constant)
5399 {
5400
5401 /* Imagine (high (const (plus ...))). */
5402 if (GET_CODE (x) == HIGH)
5403 x = XEXP (x, 0);
5404
5405 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5406 output_addr_const (file, x);
5407 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5408 {
5409 output_addr_const (file, x);
5410 fputs ("-$global$", file);
5411 }
5412 else if (GET_CODE (x) == CONST)
5413 {
5414 const char *sep = "";
5415 int offset = 0; /* assembler wants -$global$ at end */
5416 rtx base = NULL_RTX;
5417
5418 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5419 {
5420 case SYMBOL_REF:
5421 base = XEXP (XEXP (x, 0), 0);
5422 output_addr_const (file, base);
5423 break;
5424 case CONST_INT:
5425 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5426 break;
5427 default:
5428 gcc_unreachable ();
5429 }
5430
5431 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5432 {
5433 case SYMBOL_REF:
5434 base = XEXP (XEXP (x, 0), 1);
5435 output_addr_const (file, base);
5436 break;
5437 case CONST_INT:
5438 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5439 break;
5440 default:
5441 gcc_unreachable ();
5442 }
5443
5444 /* How bogus. The compiler is apparently responsible for
5445 rounding the constant if it uses an LR field selector.
5446
5447 The linker and/or assembler seem a better place since
5448 they have to do this kind of thing already.
5449
5450 If we fail to do this, HP's optimizing linker may eliminate
5451 an addil, but not update the ldw/stw/ldo instruction that
5452 uses the result of the addil. */
5453 if (round_constant)
5454 offset = ((offset + 0x1000) & ~0x1fff);
5455
5456 switch (GET_CODE (XEXP (x, 0)))
5457 {
5458 case PLUS:
5459 if (offset < 0)
5460 {
5461 offset = -offset;
5462 sep = "-";
5463 }
5464 else
5465 sep = "+";
5466 break;
5467
5468 case MINUS:
5469 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5470 sep = "-";
5471 break;
5472
5473 default:
5474 gcc_unreachable ();
5475 }
5476
5477 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5478 fputs ("-$global$", file);
5479 if (offset)
5480 fprintf (file, "%s%d", sep, offset);
5481 }
5482 else
5483 output_addr_const (file, x);
5484 }
5485
5486 /* Output boilerplate text to appear at the beginning of the file.
5487 There are several possible versions. */
5488 #define aputs(x) fputs(x, asm_out_file)
5489 static inline void
5490 pa_file_start_level (void)
5491 {
5492 if (TARGET_64BIT)
5493 aputs ("\t.LEVEL 2.0w\n");
5494 else if (TARGET_PA_20)
5495 aputs ("\t.LEVEL 2.0\n");
5496 else if (TARGET_PA_11)
5497 aputs ("\t.LEVEL 1.1\n");
5498 else
5499 aputs ("\t.LEVEL 1.0\n");
5500 }
5501
5502 static inline void
5503 pa_file_start_space (int sortspace)
5504 {
5505 aputs ("\t.SPACE $PRIVATE$");
5506 if (sortspace)
5507 aputs (",SORT=16");
5508 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5509 if (flag_tm)
5510 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5511 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5512 "\n\t.SPACE $TEXT$");
5513 if (sortspace)
5514 aputs (",SORT=8");
5515 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5516 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5517 }
5518
5519 static inline void
5520 pa_file_start_file (int want_version)
5521 {
5522 if (write_symbols != NO_DEBUG)
5523 {
5524 output_file_directive (asm_out_file, main_input_filename);
5525 if (want_version)
5526 aputs ("\t.version\t\"01.01\"\n");
5527 }
5528 }
5529
5530 static inline void
5531 pa_file_start_mcount (const char *aswhat)
5532 {
5533 if (profile_flag)
5534 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5535 }
5536
5537 static void
5538 pa_elf_file_start (void)
5539 {
5540 pa_file_start_level ();
5541 pa_file_start_mcount ("ENTRY");
5542 pa_file_start_file (0);
5543 }
5544
5545 static void
5546 pa_som_file_start (void)
5547 {
5548 pa_file_start_level ();
5549 pa_file_start_space (0);
5550 aputs ("\t.IMPORT $global$,DATA\n"
5551 "\t.IMPORT $$dyncall,MILLICODE\n");
5552 pa_file_start_mcount ("CODE");
5553 pa_file_start_file (0);
5554 }
5555
5556 static void
5557 pa_linux_file_start (void)
5558 {
5559 pa_file_start_file (1);
5560 pa_file_start_level ();
5561 pa_file_start_mcount ("CODE");
5562 }
5563
5564 static void
5565 pa_hpux64_gas_file_start (void)
5566 {
5567 pa_file_start_level ();
5568 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5569 if (profile_flag)
5570 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5571 #endif
5572 pa_file_start_file (1);
5573 }
5574
5575 static void
5576 pa_hpux64_hpas_file_start (void)
5577 {
5578 pa_file_start_level ();
5579 pa_file_start_space (1);
5580 pa_file_start_mcount ("CODE");
5581 pa_file_start_file (0);
5582 }
5583 #undef aputs
5584
5585 /* Search the deferred plabel list for SYMBOL and return its internal
5586 label. If an entry for SYMBOL is not found, a new entry is created. */
5587
5588 rtx
5589 pa_get_deferred_plabel (rtx symbol)
5590 {
5591 const char *fname = XSTR (symbol, 0);
5592 size_t i;
5593
5594 /* See if we have already put this function on the list of deferred
5595 plabels. This list is generally small, so a linear search is not
5596 too ugly. If it proves too slow, replace it with something faster. */
5597 for (i = 0; i < n_deferred_plabels; i++)
5598 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5599 break;
5600
5601 /* If the deferred plabel list is empty, or this entry was not found
5602 on the list, create a new entry on the list. */
5603 if (deferred_plabels == NULL || i == n_deferred_plabels)
5604 {
5605 tree id;
5606
5607 if (deferred_plabels == 0)
5608 deferred_plabels = ggc_alloc<deferred_plabel> ();
5609 else
5610 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5611 deferred_plabels,
5612 n_deferred_plabels + 1);
5613
5614 i = n_deferred_plabels++;
5615 deferred_plabels[i].internal_label = gen_label_rtx ();
5616 deferred_plabels[i].symbol = symbol;
5617
5618 /* Gross. We have just implicitly taken the address of this
5619 function. Mark it in the same manner as assemble_name. */
5620 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5621 if (id)
5622 mark_referenced (id);
5623 }
5624
5625 return deferred_plabels[i].internal_label;
5626 }
5627
5628 static void
5629 output_deferred_plabels (void)
5630 {
5631 size_t i;
5632
5633 /* If we have some deferred plabels, then we need to switch into the
5634 data or readonly data section, and align it to a word boundary (4
5635 bytes, or 8 bytes for the 64-bit runtime) before outputting the
deferred plabels. */
5636 if (n_deferred_plabels)
5637 {
5638 switch_to_section (flag_pic ? data_section : readonly_data_section);
5639 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5640 }
5641
5642 /* Now output the deferred plabels. */
5643 for (i = 0; i < n_deferred_plabels; i++)
5644 {
5645 targetm.asm_out.internal_label (asm_out_file, "L",
5646 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5647 assemble_integer (deferred_plabels[i].symbol,
5648 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5649 }
5650 }
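/* Each entry emitted above amounts to an aligned, word-sized pointer
   bound to its internal label, e.g. (illustrative only; the exact
   label and directive spellings depend on the target assembler):

	L$0042:
		.word	some_function

   with a doubleword-sized entry in the 64-bit case.  */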
5651
5652 /* Initialize optabs to point to emulation routines. */
5653
5654 static void
5655 pa_init_libfuncs (void)
5656 {
5657 if (HPUX_LONG_DOUBLE_LIBRARY)
5658 {
5659 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5660 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5661 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5662 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5663 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5664 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5665 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5666 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5667 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5668
5669 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5670 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5671 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5672 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5673 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5674 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5675 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5676
5677 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5678 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5679 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5680 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5681
5682 set_conv_libfunc (sfix_optab, SImode, TFmode,
5683 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5684 : "_U_Qfcnvfxt_quad_to_sgl");
5685 set_conv_libfunc (sfix_optab, DImode, TFmode,
5686 "_U_Qfcnvfxt_quad_to_dbl");
5687 set_conv_libfunc (ufix_optab, SImode, TFmode,
5688 "_U_Qfcnvfxt_quad_to_usgl");
5689 set_conv_libfunc (ufix_optab, DImode, TFmode,
5690 "_U_Qfcnvfxt_quad_to_udbl");
5691
5692 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5693 "_U_Qfcnvxf_sgl_to_quad");
5694 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5695 "_U_Qfcnvxf_dbl_to_quad");
5696 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5697 "_U_Qfcnvxf_usgl_to_quad");
5698 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5699 "_U_Qfcnvxf_udbl_to_quad");
5700 }
5701
5702 if (TARGET_SYNC_LIBCALL)
5703 init_sync_libfuncs (UNITS_PER_WORD);
5704 }
5705
5706 /* HP's millicode routines mean something special to the assembler.
5707 Keep track of which ones we have used. */
5708
5709 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5710 static void import_milli (enum millicodes);
5711 static char imported[(int) end1000];
5712 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5713 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5714 #define MILLI_START 10
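/* MILLI_START is the offset of the "...." placeholder in import_string:
   ".IMPORT $$" is ten characters, so the strncpy in import_milli below
   overwrites the four dots with a routine name, e.g. producing
   ".IMPORT $$mulI,MILLICODE".  */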
5715
5716 static void
5717 import_milli (enum millicodes code)
5718 {
5719 char str[sizeof (import_string)];
5720
5721 if (!imported[(int) code])
5722 {
5723 imported[(int) code] = 1;
5724 strcpy (str, import_string);
5725 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5726 output_asm_insn (str, 0);
5727 }
5728 }
5729
5730 /* The register constraints have put the operands and return value in
5731 the proper registers. */
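/* For reference (an assumption based on HP's millicode convention and
   the registers used by pa_emit_hpdiv_const below): the arguments live
   in %r26 and %r25, the result in %r29, and the millicode return link
   in %r31 (%r2 in the 64-bit runtime).  */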
5732
5733 const char *
5734 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5735 {
5736 import_milli (mulI);
5737 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5738 }
5739
5740 /* Emit the rtl for doing a division by a constant. */
5741
5742 /* Do magic division millicodes exist for this value? */
5743 const int pa_magic_milli[] = {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
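/* The nonzero entries above mark the divisors 3, 5, 6, 7, 9, 10, 12, 14
   and 15, i.e. the small values with dedicated $$divI_<n>/$$divU_<n>
   millicode routines; powers of two are left zero, presumably because
   they are handled by shifts elsewhere.  */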
5744
5745 /* We'll use an array to keep track of the magic millicodes and
5746 whether or not we've used them already. [n][0] is signed, [n][1] is
5747 unsigned. */
5748
5749 static int div_milli[16][2];
5750
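/* Emit RTL for a division by the constant in OPERANDS[2] using a magic
   millicode routine, when one exists.  The dividend is moved into %r26
   and the quotient is read back from %r29; the PARALLEL records the
   registers the millicode call clobbers.  Returns nonzero when the
   special-case sequence was emitted (a summary of the code below, not
   of separate HP documentation).  */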
5751 int
5752 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5753 {
5754 if (GET_CODE (operands[2]) == CONST_INT
5755 && INTVAL (operands[2]) > 0
5756 && INTVAL (operands[2]) < 16
5757 && pa_magic_milli[INTVAL (operands[2])])
5758 {
5759 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5760
5761 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5762 emit
5763 (gen_rtx_PARALLEL
5764 (VOIDmode,
5765 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5766 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5767 SImode,
5768 gen_rtx_REG (SImode, 26),
5769 operands[2])),
5770 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5771 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5772 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5773 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5774 gen_rtx_CLOBBER (VOIDmode, ret))));
5775 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5776 return 1;
5777 }
5778 return 0;
5779 }
5780
5781 const char *
5782 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5783 {
5784 int divisor;
5785
5786 /* If the divisor is a constant, try to use one of the special
5787 opcodes. */
5788 if (GET_CODE (operands[0]) == CONST_INT)
5789 {
5790 static char buf[100];
5791 divisor = INTVAL (operands[0]);
5792 if (!div_milli[divisor][unsignedp])
5793 {
5794 div_milli[divisor][unsignedp] = 1;
5795 if (unsignedp)
5796 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5797 else
5798 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5799 }
5800 if (unsignedp)
5801 {
5802 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5803 INTVAL (operands[0]));
5804 return pa_output_millicode_call (insn,
5805 gen_rtx_SYMBOL_REF (SImode, buf));
5806 }
5807 else
5808 {
5809 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5810 INTVAL (operands[0]));
5811 return pa_output_millicode_call (insn,
5812 gen_rtx_SYMBOL_REF (SImode, buf));
5813 }
5814 }
5815 /* Divisor isn't a special constant. */
5816 else
5817 {
5818 if (unsignedp)
5819 {
5820 import_milli (divU);
5821 return pa_output_millicode_call (insn,
5822 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5823 }
5824 else
5825 {
5826 import_milli (divI);
5827 return pa_output_millicode_call (insn,
5828 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5829 }
5830 }
5831 }
5832
5833 /* Output a $$rem millicode to do mod. */
5834
5835 const char *
5836 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5837 {
5838 if (unsignedp)
5839 {
5840 import_milli (remU);
5841 return pa_output_millicode_call (insn,
5842 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5843 }
5844 else
5845 {
5846 import_milli (remI);
5847 return pa_output_millicode_call (insn,
5848 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5849 }
5850 }
5851
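/* Output the .CALL argument-relocation descriptor for CALL_INSN.
   Illustratively, a call passing two integer arguments in %r26 and
   %r25 would be annotated

	.CALL ARGW0=GR,ARGW1=GR

   while a double-precision FP argument occupies an FR/FU word pair
   (a sketch inferred from the code below).  */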
5852 void
5853 pa_output_arg_descriptor (rtx_insn *call_insn)
5854 {
5855 const char *arg_regs[4];
5856 machine_mode arg_mode;
5857 rtx link;
5858 int i, output_flag = 0;
5859 int regno;
5860
5861 /* We neither need nor want argument location descriptors for the
5862 64-bit runtime environment or the ELF32 environment. */
5863 if (TARGET_64BIT || TARGET_ELF32)
5864 return;
5865
5866 for (i = 0; i < 4; i++)
5867 arg_regs[i] = 0;
5868
5869 /* Specify explicitly that no argument relocations should take place
5870 if using the portable runtime calling conventions. */
5871 if (TARGET_PORTABLE_RUNTIME)
5872 {
5873 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5874 asm_out_file);
5875 return;
5876 }
5877
5878 gcc_assert (CALL_P (call_insn));
5879 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5880 link; link = XEXP (link, 1))
5881 {
5882 rtx use = XEXP (link, 0);
5883
5884 if (! (GET_CODE (use) == USE
5885 && GET_CODE (XEXP (use, 0)) == REG
5886 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5887 continue;
5888
5889 arg_mode = GET_MODE (XEXP (use, 0));
5890 regno = REGNO (XEXP (use, 0));
5891 if (regno >= 23 && regno <= 26)
5892 {
5893 arg_regs[26 - regno] = "GR";
5894 if (arg_mode == DImode)
5895 arg_regs[25 - regno] = "GR";
5896 }
5897 else if (regno >= 32 && regno <= 39)
5898 {
5899 if (arg_mode == SFmode)
5900 arg_regs[(regno - 32) / 2] = "FR";
5901 else
5902 {
5903 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5904 arg_regs[(regno - 34) / 2] = "FR";
5905 arg_regs[(regno - 34) / 2 + 1] = "FU";
5906 #else
5907 arg_regs[(regno - 34) / 2] = "FU";
5908 arg_regs[(regno - 34) / 2 + 1] = "FR";
5909 #endif
5910 }
5911 }
5912 }
5913 fputs ("\t.CALL ", asm_out_file);
5914 for (i = 0; i < 4; i++)
5915 {
5916 if (arg_regs[i])
5917 {
5918 if (output_flag++)
5919 fputc (',', asm_out_file);
5920 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5921 }
5922 }
5923 fputc ('\n', asm_out_file);
5924 }
5925 \f
5926 /* Inform reload about cases where moving X with a mode MODE to or from
5927 a register in RCLASS requires an extra scratch or immediate register.
5928 Return the class needed for the immediate register. */
5929
5930 static reg_class_t
5931 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5932 machine_mode mode, secondary_reload_info *sri)
5933 {
5934 int regno;
5935 enum reg_class rclass = (enum reg_class) rclass_i;
5936
5937 /* Handle the easy stuff first. */
5938 if (rclass == R1_REGS)
5939 return NO_REGS;
5940
5941 if (REG_P (x))
5942 {
5943 regno = REGNO (x);
5944 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5945 return NO_REGS;
5946 }
5947 else
5948 regno = -1;
5949
5950 /* If we have something like (mem (mem (...))), we can safely assume
5951 inner MEM will end up in a general register after reloading, so there's
5952 no need for a secondary reload. */
5953 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5954 return NO_REGS;
5955
5956 /* Trying to load a constant into a FP register during PIC code
5957 generation requires %r1 as a scratch register. For float modes,
5958 the only legitimate constant is CONST0_RTX. However, there are
5959 a few patterns that accept constant double operands. */
5960 if (flag_pic
5961 && FP_REG_CLASS_P (rclass)
5962 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5963 {
5964 switch (mode)
5965 {
5966 case SImode:
5967 sri->icode = CODE_FOR_reload_insi_r1;
5968 break;
5969
5970 case DImode:
5971 sri->icode = CODE_FOR_reload_indi_r1;
5972 break;
5973
5974 case SFmode:
5975 sri->icode = CODE_FOR_reload_insf_r1;
5976 break;
5977
5978 case DFmode:
5979 sri->icode = CODE_FOR_reload_indf_r1;
5980 break;
5981
5982 default:
5983 gcc_unreachable ();
5984 }
5985 return NO_REGS;
5986 }
5987
5988 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5989 register when we're generating PIC code or when the operand isn't
5990 readonly. */
5991 if (pa_symbolic_expression_p (x))
5992 {
5993 if (GET_CODE (x) == HIGH)
5994 x = XEXP (x, 0);
5995
5996 if (flag_pic || !read_only_operand (x, VOIDmode))
5997 {
5998 switch (mode)
5999 {
6000 case SImode:
6001 sri->icode = CODE_FOR_reload_insi_r1;
6002 break;
6003
6004 case DImode:
6005 sri->icode = CODE_FOR_reload_indi_r1;
6006 break;
6007
6008 default:
6009 gcc_unreachable ();
6010 }
6011 return NO_REGS;
6012 }
6013 }
6014
6015 /* Profiling showed the PA port spends about 1.3% of its compilation
6016 time in true_regnum from calls inside pa_secondary_reload_class. */
6017 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6018 regno = true_regnum (x);
6019
6020 /* Handle reloads for floating point loads and stores. */
6021 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6022 && FP_REG_CLASS_P (rclass))
6023 {
6024 if (MEM_P (x))
6025 {
6026 x = XEXP (x, 0);
6027
6028 /* We don't need a secondary reload for indexed memory addresses.
6029
6030 When INT14_OK_STRICT is true, it might appear that we could
6031 directly allow register indirect memory addresses. However,
6032 this doesn't work because we don't support SUBREGs in
6033 floating-point register copies and reload doesn't tell us
6034 when it's going to use a SUBREG. */
6035 if (IS_INDEX_ADDR_P (x))
6036 return NO_REGS;
6037 }
6038
6039 /* Request a secondary reload with a general scratch register
6040 for everything else. ??? Could symbolic operands be handled
6041 directly when generating non-pic PA 2.0 code? */
6042 sri->icode = (in_p
6043 ? direct_optab_handler (reload_in_optab, mode)
6044 : direct_optab_handler (reload_out_optab, mode));
6045 return NO_REGS;
6046 }
6047
6048 /* A SAR<->FP register copy requires an intermediate general register
6049 and secondary memory. We need a secondary reload with a general
6050 scratch register for spills. */
6051 if (rclass == SHIFT_REGS)
6052 {
6053 /* Handle spill. */
6054 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6055 {
6056 sri->icode = (in_p
6057 ? direct_optab_handler (reload_in_optab, mode)
6058 : direct_optab_handler (reload_out_optab, mode));
6059 return NO_REGS;
6060 }
6061
6062 /* Handle FP copy. */
6063 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6064 return GENERAL_REGS;
6065 }
6066
6067 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6068 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6069 && FP_REG_CLASS_P (rclass))
6070 return GENERAL_REGS;
6071
6072 return NO_REGS;
6073 }
6074
6075 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6076 is only marked as live on entry by df-scan when it is a fixed
6077 register. It isn't a fixed register in the 64-bit runtime,
6078 so we need to mark it here. */
6079
6080 static void
6081 pa_extra_live_on_entry (bitmap regs)
6082 {
6083 if (TARGET_64BIT)
6084 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6085 }
6086
6087 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6088 to prevent it from being deleted. */
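/* The constants below select the return-pointer slot in the frame
   marker: -20(%r30) in the 32-bit runtime and -16(%r30) in the 64-bit
   runtime, the same slots pa_output_lbranch borrows when no frame is
   in use (a reading of the code, not separate ABI text).  */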
6089
6090 rtx
6091 pa_eh_return_handler_rtx (void)
6092 {
6093 rtx tmp;
6094
6095 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6096 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6097 tmp = gen_rtx_MEM (word_mode, tmp);
6098 tmp->volatil = 1;
6099 return tmp;
6100 }
6101
6102 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6103 by invisible reference. As a GCC extension, we also pass anything
6104 with a zero or variable size by reference.
6105
6106 The 64-bit runtime does not describe passing any types by invisible
6107 reference. The internals of GCC can't currently handle passing
6108 empty structures, and zero or variable length arrays when they are
6109 not passed entirely on the stack or by reference. Thus, as a GCC
6110 extension, we pass these types by reference. The HP compiler doesn't
6111 support these types, so hopefully there shouldn't be any compatibility
6112 issues. This may have to be revisited when HP releases a C99 compiler
6113 or updates the ABI. */
6114
6115 static bool
6116 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6117 machine_mode mode, const_tree type,
6118 bool named ATTRIBUTE_UNUSED)
6119 {
6120 HOST_WIDE_INT size;
6121
6122 if (type)
6123 size = int_size_in_bytes (type);
6124 else
6125 size = GET_MODE_SIZE (mode);
6126
6127 if (TARGET_64BIT)
6128 return size <= 0;
6129 else
6130 return size <= 0 || size > 8;
6131 }
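/* Concretely: under the 32-bit runtime a 12-byte struct (size > 8) is
   passed by invisible reference while an 8-byte struct is passed by
   value; under the 64-bit runtime only zero- or variable-sized objects
   (size <= 0) take the reference path.  */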
6132
6133 enum direction
6134 pa_function_arg_padding (machine_mode mode, const_tree type)
6135 {
6136 if (mode == BLKmode
6137 || (TARGET_64BIT
6138 && type
6139 && (AGGREGATE_TYPE_P (type)
6140 || TREE_CODE (type) == COMPLEX_TYPE
6141 || TREE_CODE (type) == VECTOR_TYPE)))
6142 {
6143 /* Return none if justification is not required. */
6144 if (type
6145 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6146 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6147 return none;
6148
6149 /* The directions set here are ignored when a BLKmode argument larger
6150 than a word is placed in a register. Different code is used for
6151 the stack and registers. This makes it difficult to have a
6152 consistent data representation for both the stack and registers.
6153 For both runtimes, the justification and padding for arguments on
6154 the stack and in registers should be identical. */
6155 if (TARGET_64BIT)
6156 /* The 64-bit runtime specifies left justification for aggregates. */
6157 return upward;
6158 else
6159 /* The 32-bit runtime architecture specifies right justification.
6160 When the argument is passed on the stack, the argument is padded
6161 with garbage on the left. The HP compiler pads with zeros. */
6162 return downward;
6163 }
6164
6165 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6166 return downward;
6167 else
6168 return none;
6169 }
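/* For example, a 3-byte BLKmode aggregate is right-justified (downward)
   under the 32-bit runtime, so its bytes occupy the low-order end of
   the argument word with the pad on the left, whereas the 64-bit
   runtime left-justifies it (upward) and pads on the right.  */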
6170
6171 \f
6172 /* Do what is necessary for `va_start'. We look at the current function
6173 to determine if stdargs or varargs is used and fill in an initial
6174 va_list. A pointer to this constructor is returned. */
6175
6176 static rtx
6177 hppa_builtin_saveregs (void)
6178 {
6179 rtx offset, dest;
6180 tree fntype = TREE_TYPE (current_function_decl);
6181 int argadj = ((!stdarg_p (fntype))
6182 ? UNITS_PER_WORD : 0);
6183
6184 if (argadj)
6185 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6186 else
6187 offset = crtl->args.arg_offset_rtx;
6188
6189 if (TARGET_64BIT)
6190 {
6191 int i, off;
6192
6193 /* Adjust for varargs/stdarg differences. */
6194 if (argadj)
6195 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6196 else
6197 offset = crtl->args.arg_offset_rtx;
6198
6199 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6200 from the incoming arg pointer and growing to larger addresses. */
6201 for (i = 26, off = -64; i >= 19; i--, off += 8)
6202 emit_move_insn (gen_rtx_MEM (word_mode,
6203 plus_constant (Pmode,
6204 arg_pointer_rtx, off)),
6205 gen_rtx_REG (word_mode, i));
6206
6207 /* The incoming args pointer points just beyond the flushback area;
6208 normally this is not a serious concern. However, when we are doing
6209 varargs/stdargs we want to make the arg pointer point to the start
6210 of the incoming argument area. */
6211 emit_move_insn (virtual_incoming_args_rtx,
6212 plus_constant (Pmode, arg_pointer_rtx, -64));
6213
6214 /* Now return a pointer to the first anonymous argument. */
6215 return copy_to_reg (expand_binop (Pmode, add_optab,
6216 virtual_incoming_args_rtx,
6217 offset, 0, 0, OPTAB_LIB_WIDEN));
6218 }
6219
6220 /* Store general registers on the stack. */
6221 dest = gen_rtx_MEM (BLKmode,
6222 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6223 -16));
6224 set_mem_alias_set (dest, get_varargs_alias_set ());
6225 set_mem_align (dest, BITS_PER_WORD);
6226 move_block_from_reg (23, dest, 4);
6227
6228 /* move_block_from_reg will emit code to store the argument registers
6229 individually as scalar stores.
6230
6231 However, other insns may later load from the same addresses for
6232 a structure load (passing a struct to a varargs routine).
6233
6234 The alias code assumes that such aliasing can never happen, so we
6235 have to keep memory referencing insns from moving up beyond the
6236 last argument register store. So we emit a blockage insn here. */
6237 emit_insn (gen_blockage ());
6238
6239 return copy_to_reg (expand_binop (Pmode, add_optab,
6240 crtl->args.internal_arg_pointer,
6241 offset, 0, 0, OPTAB_LIB_WIDEN));
6242 }
6243
6244 static void
6245 hppa_va_start (tree valist, rtx nextarg)
6246 {
6247 nextarg = expand_builtin_saveregs ();
6248 std_expand_builtin_va_start (valist, nextarg);
6249 }
6250
6251 static tree
6252 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6253 gimple_seq *post_p)
6254 {
6255 if (TARGET_64BIT)
6256 {
6257 /* Args grow upward. We can use the generic routines. */
6258 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6259 }
6260 else /* !TARGET_64BIT */
6261 {
6262 tree ptr = build_pointer_type (type);
6263 tree valist_type;
6264 tree t, u;
6265 unsigned int size, ofs;
6266 bool indirect;
6267
6268 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6269 if (indirect)
6270 {
6271 type = ptr;
6272 ptr = build_pointer_type (type);
6273 }
6274 size = int_size_in_bytes (type);
6275 valist_type = TREE_TYPE (valist);
6276
6277 /* Args grow down. Not handled by generic routines. */
6278
6279 u = fold_convert (sizetype, size_in_bytes (type));
6280 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6281 t = fold_build_pointer_plus (valist, u);
6282
6283 /* Align to 4 or 8 byte boundary depending on argument size. */
6284
6285 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6286 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6287 t = fold_convert (valist_type, t);
6288
6289 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6290
6291 ofs = (8 - size) % 4;
6292 if (ofs != 0)
6293 t = fold_build_pointer_plus_hwi (t, ofs);
6294
6295 t = fold_convert (ptr, t);
6296 t = build_va_arg_indirect_ref (t);
6297
6298 if (indirect)
6299 t = build_va_arg_indirect_ref (t);
6300
6301 return t;
6302 }
6303 }
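/* A worked instance of the 32-bit arithmetic above: for an int
   (size 4), t = (valist - 4) & -4 and ofs = (8 - 4) % 4 == 0, so the
   argument is fetched from the word just below the old valist; for a
   double (size 8), t = (valist - 8) & -8 with ofs == 0, an
   8-byte-aligned slot.  An object smaller than a word gets
   ofs = (8 - size) % 4 added back so the pointer lands on its
   right-justified bytes within the slot.  */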
6304
6305 /* True if MODE is valid for the target. By "valid", we mean able to
6306 be manipulated in non-trivial ways. In particular, this means all
6307 the arithmetic is supported.
6308
6309 Currently, TImode is not valid as the HP 64-bit runtime documentation
6310 doesn't document the alignment and calling conventions for this type.
6311 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6312 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6313
6314 static bool
6315 pa_scalar_mode_supported_p (machine_mode mode)
6316 {
6317 int precision = GET_MODE_PRECISION (mode);
6318
6319 switch (GET_MODE_CLASS (mode))
6320 {
6321 case MODE_PARTIAL_INT:
6322 case MODE_INT:
6323 if (precision == CHAR_TYPE_SIZE)
6324 return true;
6325 if (precision == SHORT_TYPE_SIZE)
6326 return true;
6327 if (precision == INT_TYPE_SIZE)
6328 return true;
6329 if (precision == LONG_TYPE_SIZE)
6330 return true;
6331 if (precision == LONG_LONG_TYPE_SIZE)
6332 return true;
6333 return false;
6334
6335 case MODE_FLOAT:
6336 if (precision == FLOAT_TYPE_SIZE)
6337 return true;
6338 if (precision == DOUBLE_TYPE_SIZE)
6339 return true;
6340 if (precision == LONG_DOUBLE_TYPE_SIZE)
6341 return true;
6342 return false;
6343
6344 case MODE_DECIMAL_FLOAT:
6345 return false;
6346
6347 default:
6348 gcc_unreachable ();
6349 }
6350 }
6351
6352 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6353 it branches into the delay slot. Otherwise, return FALSE. */
6354
6355 static bool
6356 branch_to_delay_slot_p (rtx_insn *insn)
6357 {
6358 rtx_insn *jump_insn;
6359
6360 if (dbr_sequence_length ())
6361 return FALSE;
6362
6363 jump_insn = next_active_insn (JUMP_LABEL (insn));
6364 while (insn)
6365 {
6366 insn = next_active_insn (insn);
6367 if (jump_insn == insn)
6368 return TRUE;
6369
6370 /* We can't rely on the length of asms. So, we return FALSE when
6371 the branch is followed by an asm. */
6372 if (!insn
6373 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6374 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6375 || get_attr_length (insn) > 0)
6376 break;
6377 }
6378
6379 return FALSE;
6380 }
6381
6382 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6383
6384 This occurs when INSN has an unfilled delay slot and is followed
6385 by an asm. Disaster can occur if the asm is empty and the jump
6386 branches into the delay slot. So, we add a nop in the delay slot
6387 when this occurs. */
6388
6389 static bool
6390 branch_needs_nop_p (rtx_insn *insn)
6391 {
6392 rtx_insn *jump_insn;
6393
6394 if (dbr_sequence_length ())
6395 return FALSE;
6396
6397 jump_insn = next_active_insn (JUMP_LABEL (insn));
6398 while (insn)
6399 {
6400 insn = next_active_insn (insn);
6401 if (!insn || jump_insn == insn)
6402 return TRUE;
6403
6404 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6405 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6406 && get_attr_length (insn) > 0)
6407 break;
6408 }
6409
6410 return FALSE;
6411 }
6412
6413 /* Return TRUE if INSN, a forward jump insn, can use nullification
6414 to skip the following instruction. This avoids an extra cycle due
6415 to a mis-predicted branch when we fall through. */
6416
6417 static bool
6418 use_skip_p (rtx_insn *insn)
6419 {
6420 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL (insn));
6421
6422 while (insn)
6423 {
6424 insn = next_active_insn (insn);
6425
6426 /* We can't rely on the length of asms, so we can't skip asms. */
6427 if (!insn
6428 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6429 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6430 break;
6431 if (get_attr_length (insn) == 4
6432 && jump_insn == next_active_insn (insn))
6433 return TRUE;
6434 if (get_attr_length (insn) > 0)
6435 break;
6436 }
6437
6438 return FALSE;
6439 }
6440
6441 /* This routine handles all the normal conditional branch sequences we
6442 might need to generate. It handles compare immediate vs compare
6443 register, nullification of delay slots, varying length branches,
6444 negated branches, and all combinations of the above. It returns the
6445 output appropriate to emit the branch corresponding to all given
6446 parameters. */
6447
6448 const char *
6449 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6450 {
6451 static char buf[100];
6452 bool useskip;
6453 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6454 int length = get_attr_length (insn);
6455 int xdelay;
6456
6457 /* A conditional branch to the following instruction (e.g. the delay slot)
6458 is asking for a disaster. This can happen when not optimizing and
6459 when jump optimization fails.
6460
6461 While it is usually safe to emit nothing, this can fail if the
6462 preceding instruction is a nullified branch with an empty delay
6463 slot and the same branch target as this branch. We could check
6464 for this but jump optimization should eliminate nop jumps. It
6465 is always safe to emit a nop. */
6466 if (branch_to_delay_slot_p (insn))
6467 return "nop";
6468
6469 /* The doubleword form of the cmpib instruction doesn't have the LEU
6470 and GTU conditions while the cmpb instruction does. Since we accept
6471 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6472 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6473 operands[2] = gen_rtx_REG (DImode, 0);
6474 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6475 operands[1] = gen_rtx_REG (DImode, 0);
6476
6477 /* If this is a long branch with its delay slot unfilled, set `nullify'
6478 as it can nullify the delay slot and save a nop. */
6479 if (length == 8 && dbr_sequence_length () == 0)
6480 nullify = 1;
6481
6482 /* If this is a short forward conditional branch which did not get
6483 its delay slot filled, the delay slot can still be nullified. */
6484 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6485 nullify = forward_branch_p (insn);
6486
6487 /* A forward branch over a single nullified insn can be done with a
6488 comclr instruction. This avoids a single cycle penalty due to a
6489 mis-predicted branch if we fall through (branch not taken). */
6490 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6491
6492 switch (length)
6493 {
6494 /* All short conditional branches except backwards with an unfilled
6495 delay slot. */
6496 case 4:
6497 if (useskip)
6498 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6499 else
6500 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6501 if (GET_MODE (operands[1]) == DImode)
6502 strcat (buf, "*");
6503 if (negated)
6504 strcat (buf, "%B3");
6505 else
6506 strcat (buf, "%S3");
6507 if (useskip)
6508 strcat (buf, " %2,%r1,%%r0");
6509 else if (nullify)
6510 {
6511 if (branch_needs_nop_p (insn))
6512 strcat (buf, ",n %2,%r1,%0%#");
6513 else
6514 strcat (buf, ",n %2,%r1,%0");
6515 }
6516 else
6517 strcat (buf, " %2,%r1,%0");
6518 break;
6519
6520 /* All long conditionals. Note a short backward branch with an
6521 unfilled delay slot is treated just like a long backward branch
6522 with an unfilled delay slot. */
6523 case 8:
6524 /* Handle weird backwards branch with a filled delay slot
6525 which is nullified. */
6526 if (dbr_sequence_length () != 0
6527 && ! forward_branch_p (insn)
6528 && nullify)
6529 {
6530 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6531 if (GET_MODE (operands[1]) == DImode)
6532 strcat (buf, "*");
6533 if (negated)
6534 strcat (buf, "%S3");
6535 else
6536 strcat (buf, "%B3");
6537 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6538 }
6539 /* Handle short backwards branch with an unfilled delay slot.
6540 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6541 taken and untaken branches. */
6542 else if (dbr_sequence_length () == 0
6543 && ! forward_branch_p (insn)
6544 && INSN_ADDRESSES_SET_P ()
6545 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6546 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6547 {
6548 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6549 if (GET_MODE (operands[1]) == DImode)
6550 strcat (buf, "*");
6551 if (negated)
6552 strcat (buf, "%B3 %2,%r1,%0%#");
6553 else
6554 strcat (buf, "%S3 %2,%r1,%0%#");
6555 }
6556 else
6557 {
6558 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6559 if (GET_MODE (operands[1]) == DImode)
6560 strcat (buf, "*");
6561 if (negated)
6562 strcat (buf, "%S3");
6563 else
6564 strcat (buf, "%B3");
6565 if (nullify)
6566 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6567 else
6568 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6569 }
6570 break;
6571
6572 default:
6573 /* The reversed conditional branch must branch over one additional
6574 instruction if the delay slot is filled and needs to be extracted
6575 by pa_output_lbranch. If the delay slot is empty or this is a
6576 nullified forward branch, the instruction after the reversed
6577 condition branch must be nullified. */
6578 if (dbr_sequence_length () == 0
6579 || (nullify && forward_branch_p (insn)))
6580 {
6581 nullify = 1;
6582 xdelay = 0;
6583 operands[4] = GEN_INT (length);
6584 }
6585 else
6586 {
6587 xdelay = 1;
6588 operands[4] = GEN_INT (length + 4);
6589 }
6590
6591 /* Create a reversed conditional branch which branches around
6592 the following insns. */
6593 if (GET_MODE (operands[1]) != DImode)
6594 {
6595 if (nullify)
6596 {
6597 if (negated)
6598 strcpy (buf,
6599 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6600 else
6601 strcpy (buf,
6602 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6603 }
6604 else
6605 {
6606 if (negated)
6607 strcpy (buf,
6608 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6609 else
6610 strcpy (buf,
6611 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6612 }
6613 }
6614 else
6615 {
6616 if (nullify)
6617 {
6618 if (negated)
6619 strcpy (buf,
6620 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6621 else
6622 strcpy (buf,
6623 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6624 }
6625 else
6626 {
6627 if (negated)
6628 strcpy (buf,
6629 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6630 else
6631 strcpy (buf,
6632 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6633 }
6634 }
6635
6636 output_asm_insn (buf, operands);
6637 return pa_output_lbranch (operands[0], insn, xdelay);
6638 }
6639 return buf;
6640 }
6641
6642 /* This routine handles output of long unconditional branches that
6643 exceed the maximum range of a simple branch instruction. Since
6644 we don't have a register available for the branch, we save register
6645 %r1 in the frame marker, load the branch destination DEST into %r1,
6646 execute the branch, and restore %r1 in the delay slot of the branch.
6647
6648 Since long branches may have an insn in the delay slot and the
6649 delay slot is used to restore %r1, we in general need to extract
6650 this insn and execute it before the branch. However, to facilitate
6651 use of this function by conditional branches, we also provide an
6652 option to not extract the delay insn so that it will be emitted
6653 after the long branch. So, if there is an insn in the delay slot,
6654 it is extracted if XDELAY is nonzero.
6655
6656 The lengths of the various long-branch sequences are 20, 16 and 24
6657 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
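/* For instance, the 16-byte non-PIC sequence emitted below is roughly
   (illustrative only; the save slot is chosen as described in the
   code that follows):

	stw %r1,-20(%r30)	; save %r1 in the frame marker
	ldil L'target,%r1
	be R'target(%sr4,%r1)	; interspace branch through %r1
	ldw -20(%r30),%r1	; delay slot: restore %r1  */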
6658
6659 const char *
6660 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6661 {
6662 rtx xoperands[2];
6663
6664 xoperands[0] = dest;
6665
6666 /* First, free up the delay slot. */
6667 if (xdelay && dbr_sequence_length () != 0)
6668 {
6669 /* We can't handle a jump in the delay slot. */
6670 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6671
6672 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6673 optimize, 0, NULL);
6674
6675 /* Now delete the delay insn. */
6676 SET_INSN_DELETED (NEXT_INSN (insn));
6677 }
6678
6679 /* Output an insn to save %r1. The runtime documentation doesn't
6680 specify whether the "Clean Up" slot in the caller's frame can
6681 be clobbered by the callee. It isn't copied by HP's builtin
6682 alloca, so this suggests that it can be clobbered if necessary.
6683 The "Static Link" location is copied by HP builtin alloca, so
6684 we avoid using it. Using the cleanup slot might be a problem
6685 if we have to interoperate with languages that pass cleanup
6686 information. However, it should be possible to handle these
6687 situations with GCC's asm feature.
6688
6689 The "Current RP" slot is reserved for the called procedure, so
6690 we try to use it when we don't have a frame of our own. It's
6691 rather unlikely that we won't have a frame when we need to emit
6692 a very long branch.
6693
6694 Really the way to go long term is a register scavenger; go to
6695 the target of the jump and find a register which we can use
6696 as a scratch to hold the value in %r1. Then, we wouldn't have
6697 to free up the delay slot or clobber a slot that may be needed
6698 for other purposes. */
6699 if (TARGET_64BIT)
6700 {
6701 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6702 /* Use the return pointer slot in the frame marker. */
6703 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6704 else
6705 /* Use the slot at -40 in the frame marker since HP builtin
6706 alloca doesn't copy it. */
6707 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6708 }
6709 else
6710 {
6711 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6712 /* Use the return pointer slot in the frame marker. */
6713 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6714 else
6715 /* Use the "Clean Up" slot in the frame marker. In GCC,
6716 the only other use of this location is for copying a
6717 floating point double argument from a floating-point
6718 register to two general registers. The copy is done
6719 as an "atomic" operation when outputting a call, so it
6720 won't interfere with our using the location here. */
6721 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6722 }
6723
6724 if (TARGET_PORTABLE_RUNTIME)
6725 {
6726 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6727 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6728 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6729 }
6730 else if (flag_pic)
6731 {
6732 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6733 if (TARGET_SOM || !TARGET_GAS)
6734 {
6735 xoperands[1] = gen_label_rtx ();
6736 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6737 targetm.asm_out.internal_label (asm_out_file, "L",
6738 CODE_LABEL_NUMBER (xoperands[1]));
6739 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6740 }
6741 else
6742 {
6743 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6744 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6745 }
6746 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6747 }
6748 else
6749 /* Now output a very long branch to the original target. */
6750 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6751
6752 /* Now restore the value of %r1 in the delay slot. */
6753 if (TARGET_64BIT)
6754 {
6755 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6756 return "ldd -16(%%r30),%%r1";
6757 else
6758 return "ldd -40(%%r30),%%r1";
6759 }
6760 else
6761 {
6762 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6763 return "ldw -20(%%r30),%%r1";
6764 else
6765 return "ldw -12(%%r30),%%r1";
6766 }
6767 }
6768
6769 /* This routine handles all the branch-on-bit conditional branch sequences we
6770 might need to generate. It handles nullification of delay slots,
6771 varying length branches, negated branches and all combinations of the
6772 above. It returns the appropriate output template to emit the branch. */
6773
6774 const char *
6775 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6776 {
6777 static char buf[100];
6778 bool useskip;
6779 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6780 int length = get_attr_length (insn);
6781 int xdelay;
6782
6783 /* A conditional branch to the following instruction (e.g. the delay slot) is
6784 asking for a disaster. I do not think this can happen as this pattern
6785 is only used when optimizing; jump optimization should eliminate the
6786 jump. But be prepared just in case. */
6787
6788 if (branch_to_delay_slot_p (insn))
6789 return "nop";
6790
6791 /* If this is a long branch with its delay slot unfilled, set `nullify'
6792 as it can nullify the delay slot and save a nop. */
6793 if (length == 8 && dbr_sequence_length () == 0)
6794 nullify = 1;
6795
6796 /* If this is a short forward conditional branch which did not get
6797 its delay slot filled, the delay slot can still be nullified. */
6798 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6799 nullify = forward_branch_p (insn);
6800
6801 /* A forward branch over a single nullified insn can be done with an
6802 extrs instruction. This avoids a single cycle penalty due to a
6803 mis-predicted branch if we fall through (branch not taken). */
6804 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6805
6806 switch (length)
6807 {
6808
6809 /* All short conditional branches except backwards with an unfilled
6810 delay slot. */
6811 case 4:
6812 if (useskip)
6813 strcpy (buf, "{extrs,|extrw,s,}");
6814 else
6815 strcpy (buf, "bb,");
6816 if (useskip && GET_MODE (operands[0]) == DImode)
6817 strcpy (buf, "extrd,s,*");
6818 else if (GET_MODE (operands[0]) == DImode)
6819 strcpy (buf, "bb,*");
6820 if ((which == 0 && negated)
6821 || (which == 1 && ! negated))
6822 strcat (buf, ">=");
6823 else
6824 strcat (buf, "<");
6825 if (useskip)
6826 strcat (buf, " %0,%1,1,%%r0");
6827 else if (nullify && negated)
6828 {
6829 if (branch_needs_nop_p (insn))
6830 strcat (buf, ",n %0,%1,%3%#");
6831 else
6832 strcat (buf, ",n %0,%1,%3");
6833 }
6834 else if (nullify && ! negated)
6835 {
6836 if (branch_needs_nop_p (insn))
6837 strcat (buf, ",n %0,%1,%2%#");
6838 else
6839 strcat (buf, ",n %0,%1,%2");
6840 }
6841 else if (! nullify && negated)
6842 strcat (buf, " %0,%1,%3");
6843 else if (! nullify && ! negated)
6844 strcat (buf, " %0,%1,%2");
6845 break;
6846
6847 /* All long conditionals. Note a short backward branch with an
6848 unfilled delay slot is treated just like a long backward branch
6849 with an unfilled delay slot. */
6850 case 8:
6851 /* Handle weird backwards branch with a filled delay slot
6852 which is nullified. */
6853 if (dbr_sequence_length () != 0
6854 && ! forward_branch_p (insn)
6855 && nullify)
6856 {
6857 strcpy (buf, "bb,");
6858 if (GET_MODE (operands[0]) == DImode)
6859 strcat (buf, "*");
6860 if ((which == 0 && negated)
6861 || (which == 1 && ! negated))
6862 strcat (buf, "<");
6863 else
6864 strcat (buf, ">=");
6865 if (negated)
6866 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6867 else
6868 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6869 }
6870 /* Handle short backwards branch with an unfilled delay slot.
6871 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6872 taken and untaken branches. */
6873 else if (dbr_sequence_length () == 0
6874 && ! forward_branch_p (insn)
6875 && INSN_ADDRESSES_SET_P ()
6876 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6877 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6878 {
6879 strcpy (buf, "bb,");
6880 if (GET_MODE (operands[0]) == DImode)
6881 strcat (buf, "*");
6882 if ((which == 0 && negated)
6883 || (which == 1 && ! negated))
6884 strcat (buf, ">=");
6885 else
6886 strcat (buf, "<");
6887 if (negated)
6888 strcat (buf, " %0,%1,%3%#");
6889 else
6890 strcat (buf, " %0,%1,%2%#");
6891 }
6892 else
6893 {
6894 if (GET_MODE (operands[0]) == DImode)
6895 strcpy (buf, "extrd,s,*");
6896 else
6897 strcpy (buf, "{extrs,|extrw,s,}");
6898 if ((which == 0 && negated)
6899 || (which == 1 && ! negated))
6900 strcat (buf, "<");
6901 else
6902 strcat (buf, ">=");
6903 if (nullify && negated)
6904 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6905 else if (nullify && ! negated)
6906 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6907 else if (negated)
6908 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6909 else
6910 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6911 }
6912 break;
6913
6914 default:
6915 /* The reversed conditional branch must branch over one additional
6916 instruction if the delay slot is filled and needs to be extracted
6917 by pa_output_lbranch. If the delay slot is empty or this is a
6918 nullified forward branch, the instruction after the reversed
6919 condition branch must be nullified. */
6920 if (dbr_sequence_length () == 0
6921 || (nullify && forward_branch_p (insn)))
6922 {
6923 nullify = 1;
6924 xdelay = 0;
6925 operands[4] = GEN_INT (length);
6926 }
6927 else
6928 {
6929 xdelay = 1;
6930 operands[4] = GEN_INT (length + 4);
6931 }
6932
6933 if (GET_MODE (operands[0]) == DImode)
6934 strcpy (buf, "bb,*");
6935 else
6936 strcpy (buf, "bb,");
6937 if ((which == 0 && negated)
6938 || (which == 1 && !negated))
6939 strcat (buf, "<");
6940 else
6941 strcat (buf, ">=");
6942 if (nullify)
6943 strcat (buf, ",n %0,%1,.+%4");
6944 else
6945 strcat (buf, " %0,%1,.+%4");
6946 output_asm_insn (buf, operands);
6947 return pa_output_lbranch (negated ? operands[3] : operands[2],
6948 insn, xdelay);
6949 }
6950 return buf;
6951 }
6952
6953 /* This routine handles all the branch-on-variable-bit conditional branch
6954 sequences we might need to generate. It handles nullification of delay
6955 slots, varying length branches, negated branches and all combinations
6956 of the above. It returns the appropriate output template to emit the
6957 branch. */
6958
6959 const char *
6960 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
6961 int which)
6962 {
6963 static char buf[100];
6964 bool useskip;
6965 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6966 int length = get_attr_length (insn);
6967 int xdelay;
6968
6969 /* A conditional branch to the following instruction (e.g. the delay slot) is
6970 asking for a disaster. I do not think this can happen as this pattern
6971 is only used when optimizing; jump optimization should eliminate the
6972 jump. But be prepared just in case. */
6973
6974 if (branch_to_delay_slot_p (insn))
6975 return "nop";
6976
6977 /* If this is a long branch with its delay slot unfilled, set `nullify'
6978 as it can nullify the delay slot and save a nop. */
6979 if (length == 8 && dbr_sequence_length () == 0)
6980 nullify = 1;
6981
6982 /* If this is a short forward conditional branch which did not get
6983 its delay slot filled, the delay slot can still be nullified. */
6984 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6985 nullify = forward_branch_p (insn);
6986
6987 /* A forward branch over a single nullified insn can be done with an
6988 extrs instruction. This avoids a single cycle penalty due to a
6989 mis-predicted branch if we fall through (branch not taken). */
6990 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6991
6992 switch (length)
6993 {
6994
6995 /* All short conditional branches except backwards with an unfilled
6996 delay slot. */
6997 case 4:
6998 if (useskip)
6999 strcpy (buf, "{vextrs,|extrw,s,}");
7000 else
7001 strcpy (buf, "{bvb,|bb,}");
7002 if (useskip && GET_MODE (operands[0]) == DImode)
7003 strcpy (buf, "extrd,s,*");
7004 else if (GET_MODE (operands[0]) == DImode)
7005 strcpy (buf, "bb,*");
7006 if ((which == 0 && negated)
7007 || (which == 1 && ! negated))
7008 strcat (buf, ">=");
7009 else
7010 strcat (buf, "<");
7011 if (useskip)
7012 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7013 else if (nullify && negated)
7014 {
7015 if (branch_needs_nop_p (insn))
7016 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7017 else
7018 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7019 }
7020 else if (nullify && ! negated)
7021 {
7022 if (branch_needs_nop_p (insn))
7023 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7024 else
7025 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7026 }
7027 else if (! nullify && negated)
7028 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7029 else if (! nullify && ! negated)
7030 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7031 break;
7032
7033 /* All long conditionals. Note a short backward branch with an
7034 unfilled delay slot is treated just like a long backward branch
7035 with an unfilled delay slot. */
7036 case 8:
7037 /* Handle weird backwards branch with a filled delay slot
7038 which is nullified. */
7039 if (dbr_sequence_length () != 0
7040 && ! forward_branch_p (insn)
7041 && nullify)
7042 {
7043 strcpy (buf, "{bvb,|bb,}");
7044 if (GET_MODE (operands[0]) == DImode)
7045 strcat (buf, "*");
7046 if ((which == 0 && negated)
7047 || (which == 1 && ! negated))
7048 strcat (buf, "<");
7049 else
7050 strcat (buf, ">=");
7051 if (negated)
7052 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7053 else
7054 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7055 }
7056 /* Handle short backwards branch with an unfilled delay slot.
7057 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7058 taken and untaken branches. */
7059 else if (dbr_sequence_length () == 0
7060 && ! forward_branch_p (insn)
7061 && INSN_ADDRESSES_SET_P ()
7062 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7063 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7064 {
7065 strcpy (buf, "{bvb,|bb,}");
7066 if (GET_MODE (operands[0]) == DImode)
7067 strcat (buf, "*");
7068 if ((which == 0 && negated)
7069 || (which == 1 && ! negated))
7070 strcat (buf, ">=");
7071 else
7072 strcat (buf, "<");
7073 if (negated)
7074 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7075 else
7076 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7077 }
7078 else
7079 {
7080 strcpy (buf, "{vextrs,|extrw,s,}");
7081 if (GET_MODE (operands[0]) == DImode)
7082 strcpy (buf, "extrd,s,*");
7083 if ((which == 0 && negated)
7084 || (which == 1 && ! negated))
7085 strcat (buf, "<");
7086 else
7087 strcat (buf, ">=");
7088 if (nullify && negated)
7089 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7090 else if (nullify && ! negated)
7091 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7092 else if (negated)
7093 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7094 else
7095 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7096 }
7097 break;
7098
7099 default:
7100 /* The reversed conditional branch must branch over one additional
7101 instruction if the delay slot is filled and needs to be extracted
7102 by pa_output_lbranch. If the delay slot is empty or this is a
7103 nullified forward branch, the instruction after the reversed
7104 condition branch must be nullified. */
7105 if (dbr_sequence_length () == 0
7106 || (nullify && forward_branch_p (insn)))
7107 {
7108 nullify = 1;
7109 xdelay = 0;
7110 operands[4] = GEN_INT (length);
7111 }
7112 else
7113 {
7114 xdelay = 1;
7115 operands[4] = GEN_INT (length + 4);
7116 }
7117
7118 if (GET_MODE (operands[0]) == DImode)
7119 strcpy (buf, "bb,*");
7120 else
7121 strcpy (buf, "{bvb,|bb,}");
7122 if ((which == 0 && negated)
7123 || (which == 1 && !negated))
7124 strcat (buf, "<");
7125 else
7126 strcat (buf, ">=");
7127 if (nullify)
7128 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7129 else
7130 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7131 output_asm_insn (buf, operands);
7132 return pa_output_lbranch (negated ? operands[3] : operands[2],
7133 insn, xdelay);
7134 }
7135 return buf;
7136 }
7137
7138 /* Return the output template for emitting a dbra type insn.
7139
7140 Note it may perform some output operations on its own before
7141 returning the final output string. */
7142 const char *
7143 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7144 {
7145 int length = get_attr_length (insn);
7146
7147 /* A conditional branch to the following instruction (e.g. the delay slot) is
7148 asking for a disaster. Be prepared! */
7149
7150 if (branch_to_delay_slot_p (insn))
7151 {
7152 if (which_alternative == 0)
7153 return "ldo %1(%0),%0";
7154 else if (which_alternative == 1)
7155 {
7156 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7157 output_asm_insn ("ldw -16(%%r30),%4", operands);
7158 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7159 return "{fldws|fldw} -16(%%r30),%0";
7160 }
7161 else
7162 {
7163 output_asm_insn ("ldw %0,%4", operands);
7164 return "ldo %1(%4),%4\n\tstw %4,%0";
7165 }
7166 }
7167
7168 if (which_alternative == 0)
7169 {
7170 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7171 int xdelay;
7172
7173 /* If this is a long branch with its delay slot unfilled, set `nullify'
7174 as it can nullify the delay slot and save a nop. */
7175 if (length == 8 && dbr_sequence_length () == 0)
7176 nullify = 1;
7177
7178 /* If this is a short forward conditional branch which did not get
7179 its delay slot filled, the delay slot can still be nullified. */
7180 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7181 nullify = forward_branch_p (insn);
7182
7183 switch (length)
7184 {
7185 case 4:
7186 if (nullify)
7187 {
7188 if (branch_needs_nop_p (insn))
7189 return "addib,%C2,n %1,%0,%3%#";
7190 else
7191 return "addib,%C2,n %1,%0,%3";
7192 }
7193 else
7194 return "addib,%C2 %1,%0,%3";
7195
7196 case 8:
7197 /* Handle weird backwards branch with a filled delay slot
7198 which is nullified. */
7199 if (dbr_sequence_length () != 0
7200 && ! forward_branch_p (insn)
7201 && nullify)
7202 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7203 /* Handle short backwards branch with an unfilled delay slot.
7204 Using an addb;nop rather than addi;bl saves 1 cycle for both
7205 taken and untaken branches. */
7206 else if (dbr_sequence_length () == 0
7207 && ! forward_branch_p (insn)
7208 && INSN_ADDRESSES_SET_P ()
7209 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7210 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7211 return "addib,%C2 %1,%0,%3%#";
7212
7213 /* Handle normal cases. */
7214 if (nullify)
7215 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7216 else
7217 return "addi,%N2 %1,%0,%0\n\tb %3";
7218
7219 default:
7220 /* The reversed conditional branch must branch over one additional
7221 instruction if the delay slot is filled and needs to be extracted
7222 by pa_output_lbranch. If the delay slot is empty or this is a
7223 nullified forward branch, the instruction after the reversed
7224 condition branch must be nullified. */
7225 if (dbr_sequence_length () == 0
7226 || (nullify && forward_branch_p (insn)))
7227 {
7228 nullify = 1;
7229 xdelay = 0;
7230 operands[4] = GEN_INT (length);
7231 }
7232 else
7233 {
7234 xdelay = 1;
7235 operands[4] = GEN_INT (length + 4);
7236 }
7237
7238 if (nullify)
7239 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7240 else
7241 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7242
7243 return pa_output_lbranch (operands[3], insn, xdelay);
7244 }
7245
7246 }
7247 /* Deal with gross reload from FP register case. */
7248 else if (which_alternative == 1)
7249 {
7250 /* Move loop counter from FP register to MEM then into a GR,
7251 increment the GR, store the GR into MEM, and finally reload
7252 the FP register from MEM from within the branch's delay slot. */
7253 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7254 operands);
7255 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7256 if (length == 24)
7257 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7258 else if (length == 28)
7259 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7260 else
7261 {
7262 operands[5] = GEN_INT (length - 16);
7263 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7264 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7265 return pa_output_lbranch (operands[3], insn, 0);
7266 }
7267 }
7268 /* Deal with gross reload from memory case. */
7269 else
7270 {
7271 /* Reload loop counter from memory, the store back to memory
7272 happens in the branch's delay slot. */
7273 output_asm_insn ("ldw %0,%4", operands);
7274 if (length == 12)
7275 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7276 else if (length == 16)
7277 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7278 else
7279 {
7280 operands[5] = GEN_INT (length - 4);
7281 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7282 return pa_output_lbranch (operands[3], insn, 0);
7283 }
7284 }
7285 }
7286
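/* Editorial sketch (not from the original source; operands are
   hypothetical): in the common alternative-0, length-4 case above, a
   decrement-and-branch loop with its counter in %r26, an increment of
   -1, a ">=" condition and a nearby label L$loop comes out as

       addib,>= -1,%r26,L$loop

   so the add, the test of the result and the branch all issue as a
   single insn.  */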
7287 /* Return the output template for emitting a movb type insn.
7288
7289 Note it may perform some output operations on its own before
7290 returning the final output string. */
7291 const char *
7292 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7293 int reverse_comparison)
7294 {
7295 int length = get_attr_length (insn);
7296
7297 /* A conditional branch to the following instruction (i.e., the delay slot) is
7298 asking for a disaster. Be prepared! */
7299
7300 if (branch_to_delay_slot_p (insn))
7301 {
7302 if (which_alternative == 0)
7303 return "copy %1,%0";
7304 else if (which_alternative == 1)
7305 {
7306 output_asm_insn ("stw %1,-16(%%r30)", operands);
7307 return "{fldws|fldw} -16(%%r30),%0";
7308 }
7309 else if (which_alternative == 2)
7310 return "stw %1,%0";
7311 else
7312 return "mtsar %r1";
7313 }
7314
7315 /* Support the second variant. */
7316 if (reverse_comparison)
7317 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7318
7319 if (which_alternative == 0)
7320 {
7321 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7322 int xdelay;
7323
7324 /* If this is a long branch with its delay slot unfilled, set `nullify'
7325 as it can nullify the delay slot and save a nop. */
7326 if (length == 8 && dbr_sequence_length () == 0)
7327 nullify = 1;
7328
7329 /* If this is a short forward conditional branch which did not get
7330 its delay slot filled, the delay slot can still be nullified. */
7331 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7332 nullify = forward_branch_p (insn);
7333
7334 switch (length)
7335 {
7336 case 4:
7337 if (nullify)
7338 {
7339 if (branch_needs_nop_p (insn))
7340 return "movb,%C2,n %1,%0,%3%#";
7341 else
7342 return "movb,%C2,n %1,%0,%3";
7343 }
7344 else
7345 return "movb,%C2 %1,%0,%3";
7346
7347 case 8:
7348 /* Handle weird backwards branch with a filled delay slot
7349 which is nullified. */
7350 if (dbr_sequence_length () != 0
7351 && ! forward_branch_p (insn)
7352 && nullify)
7353 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7354
7355 /* Handle short backwards branch with an unfilled delay slot.
7356 Using a movb;nop rather than or;bl saves 1 cycle for both
7357 taken and untaken branches. */
7358 else if (dbr_sequence_length () == 0
7359 && ! forward_branch_p (insn)
7360 && INSN_ADDRESSES_SET_P ()
7361 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7362 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7363 return "movb,%C2 %1,%0,%3%#";
7364 /* Handle normal cases. */
7365 if (nullify)
7366 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7367 else
7368 return "or,%N2 %1,%%r0,%0\n\tb %3";
7369
7370 default:
7371 /* The reversed conditional branch must branch over one additional
7372 instruction if the delay slot is filled and needs to be extracted
7373 by pa_output_lbranch. If the delay slot is empty or this is a
7374 nullified forward branch, the instruction after the reversed
7375 conditional branch must be nullified. */
7376 if (dbr_sequence_length () == 0
7377 || (nullify && forward_branch_p (insn)))
7378 {
7379 nullify = 1;
7380 xdelay = 0;
7381 operands[4] = GEN_INT (length);
7382 }
7383 else
7384 {
7385 xdelay = 1;
7386 operands[4] = GEN_INT (length + 4);
7387 }
7388
7389 if (nullify)
7390 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7391 else
7392 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7393
7394 return pa_output_lbranch (operands[3], insn, xdelay);
7395 }
7396 }
7397 /* Deal with gross reload for FP destination register case. */
7398 else if (which_alternative == 1)
7399 {
7400 /* Move source register to MEM, perform the branch test, then
7401 finally load the FP register from MEM from within the branch's
7402 delay slot. */
7403 output_asm_insn ("stw %1,-16(%%r30)", operands);
7404 if (length == 12)
7405 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7406 else if (length == 16)
7407 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7408 else
7409 {
7410 operands[4] = GEN_INT (length - 4);
7411 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7412 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7413 return pa_output_lbranch (operands[3], insn, 0);
7414 }
7415 }
7416 /* Deal with gross reload from memory case. */
7417 else if (which_alternative == 2)
7418 {
7419 /* Reload loop counter from memory, the store back to memory
7420 happens in the branch's delay slot. */
7421 if (length == 8)
7422 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7423 else if (length == 12)
7424 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7425 else
7426 {
7427 operands[4] = GEN_INT (length);
7428 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7429 operands);
7430 return pa_output_lbranch (operands[3], insn, 0);
7431 }
7432 }
7433 /* Handle SAR as a destination. */
7434 else
7435 {
7436 if (length == 8)
7437 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7438 else if (length == 12)
7439 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7440 else
7441 {
7442 operands[4] = GEN_INT (length);
7443 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7444 operands);
7445 return pa_output_lbranch (operands[3], insn, 0);
7446 }
7447 }
7448 }
7449
7450 /* Copy any FP arguments in INSN into integer registers. */
7451 static void
7452 copy_fp_args (rtx_insn *insn)
7453 {
7454 rtx link;
7455 rtx xoperands[2];
7456
7457 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7458 {
7459 int arg_mode, regno;
7460 rtx use = XEXP (link, 0);
7461
7462 if (! (GET_CODE (use) == USE
7463 && GET_CODE (XEXP (use, 0)) == REG
7464 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7465 continue;
7466
7467 arg_mode = GET_MODE (XEXP (use, 0));
7468 regno = REGNO (XEXP (use, 0));
7469
7470 /* Is it a floating point register? */
7471 if (regno >= 32 && regno <= 39)
7472 {
7473 /* Copy the FP register into an integer register via memory. */
7474 if (arg_mode == SFmode)
7475 {
7476 xoperands[0] = XEXP (use, 0);
7477 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7478 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7479 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7480 }
7481 else
7482 {
7483 xoperands[0] = XEXP (use, 0);
7484 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7485 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7486 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7487 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7488 }
7489 }
7490 }
7491 }
7492
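/* A minimal sketch of the copy emitted above (editorial note; the FP
   register name is hypothetical).  For an SFmode argument in the FP
   register with GCC regno 32, xoperands[1] is %r26, so the value
   bounces through the stack:

       fstws %frN,-16(%sr0,%r30)
       ldw   -16(%sr0,%r30),%r26

   The DFmode case stores once and reloads with two ldw insns, which is
   why length_fp_args below counts 8 bytes for SFmode and 12 for
   DFmode.  */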
7493 /* Compute length of the FP argument copy sequence for INSN. */
7494 static int
7495 length_fp_args (rtx_insn *insn)
7496 {
7497 int length = 0;
7498 rtx link;
7499
7500 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7501 {
7502 int arg_mode, regno;
7503 rtx use = XEXP (link, 0);
7504
7505 if (! (GET_CODE (use) == USE
7506 && GET_CODE (XEXP (use, 0)) == REG
7507 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7508 continue;
7509
7510 arg_mode = GET_MODE (XEXP (use, 0));
7511 regno = REGNO (XEXP (use, 0));
7512
7513 /* Is it a floating point register? */
7514 if (regno >= 32 && regno <= 39)
7515 {
7516 if (arg_mode == SFmode)
7517 length += 8;
7518 else
7519 length += 12;
7520 }
7521 }
7522
7523 return length;
7524 }
7525
7526 /* Return the attribute length for the millicode call instruction INSN.
7527 The length must match the code generated by pa_output_millicode_call.
7528 We include the delay slot in the returned length as it is better to
7529 overestimate the length than to underestimate it. */
7530
7531 int
7532 pa_attr_length_millicode_call (rtx_insn *insn)
7533 {
7534 unsigned long distance = -1;
7535 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7536
7537 if (INSN_ADDRESSES_SET_P ())
7538 {
7539 distance = (total + insn_current_reference_address (insn));
7540 if (distance < total)
7541 distance = -1;
7542 }
7543
7544 if (TARGET_64BIT)
7545 {
7546 if (!TARGET_LONG_CALLS && distance < 7600000)
7547 return 8;
7548
7549 return 20;
7550 }
7551 else if (TARGET_PORTABLE_RUNTIME)
7552 return 24;
7553 else
7554 {
7555 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7556 return 8;
7557
7558 if (!flag_pic)
7559 return 12;
7560
7561 return 24;
7562 }
7563 }
7564
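/* Editorial note on how the constants above line up with the sequences
   in pa_output_millicode_call (each figure includes the delay slot):
   8 covers the short pc-relative {bl|b,l} plus a nop, 12 covers the
   non-PIC ldil/ble sequence plus a nop, 24 covers the portable runtime
   and 32-bit PIC sequences, and 20 covers the 64-bit
   b,l/addil/ldo/bve,l sequence.  */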
7565 /* INSN is a function call.
7566
7567 CALL_DEST is the routine we are calling. */
7568
7569 const char *
7570 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7571 {
7572 int attr_length = get_attr_length (insn);
7573 int seq_length = dbr_sequence_length ();
7574 rtx xoperands[3];
7575
7576 xoperands[0] = call_dest;
7577 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7578
7579 /* Handle the common case where we are sure that the branch will
7580 reach the beginning of the $CODE$ subspace. The within reach
7581 form of the $$sh_func_adrs call has a length of 28. Because it
7582 has an attribute type of sh_func_adrs, it never has a nonzero
7583 sequence length (i.e., the delay slot is never filled). */
7584 if (!TARGET_LONG_CALLS
7585 && (attr_length == 8
7586 || (attr_length == 28
7587 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7588 {
7589 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7590 }
7591 else
7592 {
7593 if (TARGET_64BIT)
7594 {
7595 /* It might seem that one insn could be saved by accessing
7596 the millicode function using the linkage table. However,
7597 this doesn't work in shared libraries and other dynamically
7598 loaded objects. Using a pc-relative sequence also avoids
7599 problems related to the implicit use of the gp register. */
7600 output_asm_insn ("b,l .+8,%%r1", xoperands);
7601
7602 if (TARGET_GAS)
7603 {
7604 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7605 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7606 }
7607 else
7608 {
7609 xoperands[1] = gen_label_rtx ();
7610 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7611 targetm.asm_out.internal_label (asm_out_file, "L",
7612 CODE_LABEL_NUMBER (xoperands[1]));
7613 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7614 }
7615
7616 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7617 }
7618 else if (TARGET_PORTABLE_RUNTIME)
7619 {
7620 /* Pure portable runtime doesn't allow be/ble; we also don't
7621 have PIC support in the assembler/linker, so this sequence
7622 is needed. */
7623
7624 /* Get the address of our target into %r1. */
7625 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7626 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7627
7628 /* Get our return address into %r31. */
7629 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7630 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7631
7632 /* Jump to our target address in %r1. */
7633 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7634 }
7635 else if (!flag_pic)
7636 {
7637 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7638 if (TARGET_PA_20)
7639 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7640 else
7641 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7642 }
7643 else
7644 {
7645 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7646 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7647
7648 if (TARGET_SOM || !TARGET_GAS)
7649 {
7650 /* The HP assembler can generate relocations for the
7651 difference of two symbols. GAS can do this for a
7652 millicode symbol but not an arbitrary external
7653 symbol when generating SOM output. */
7654 xoperands[1] = gen_label_rtx ();
7655 targetm.asm_out.internal_label (asm_out_file, "L",
7656 CODE_LABEL_NUMBER (xoperands[1]));
7657 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7658 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7659 }
7660 else
7661 {
7662 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7663 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7664 xoperands);
7665 }
7666
7667 /* Jump to our target address in %r1. */
7668 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7669 }
7670 }
7671
7672 if (seq_length == 0)
7673 output_asm_insn ("nop", xoperands);
7674
7675 return "";
7676 }
7677
7678 /* Return the attribute length of the call instruction INSN. The SIBCALL
7679 flag indicates whether INSN is a regular call or a sibling call. The
7680 length returned must be longer than the code actually generated by
7681 pa_output_call. Since branch shortening is done before delay branch
7682 sequencing, there is no way to determine whether or not the delay
7683 slot will be filled during branch shortening. Even when the delay
7684 slot is filled, we may have to add a nop if the delay slot contains
7685 a branch that can't reach its target. Thus, we always have to include
7686 the delay slot in the length estimate. This used to be done in
7687 pa_adjust_insn_length but we do it here now as some sequences always
7688 fill the delay slot and we can save four bytes in the estimate for
7689 these sequences. */
7690
7691 int
7692 pa_attr_length_call (rtx_insn *insn, int sibcall)
7693 {
7694 int local_call;
7695 rtx call, call_dest;
7696 tree call_decl;
7697 int length = 0;
7698 rtx pat = PATTERN (insn);
7699 unsigned long distance = -1;
7700
7701 gcc_assert (CALL_P (insn));
7702
7703 if (INSN_ADDRESSES_SET_P ())
7704 {
7705 unsigned long total;
7706
7707 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7708 distance = (total + insn_current_reference_address (insn));
7709 if (distance < total)
7710 distance = -1;
7711 }
7712
7713 gcc_assert (GET_CODE (pat) == PARALLEL);
7714
7715 /* Get the call rtx. */
7716 call = XVECEXP (pat, 0, 0);
7717 if (GET_CODE (call) == SET)
7718 call = SET_SRC (call);
7719
7720 gcc_assert (GET_CODE (call) == CALL);
7721
7722 /* Determine if this is a local call. */
7723 call_dest = XEXP (XEXP (call, 0), 0);
7724 call_decl = SYMBOL_REF_DECL (call_dest);
7725 local_call = call_decl && targetm.binds_local_p (call_decl);
7726
7727 /* pc-relative branch. */
7728 if (!TARGET_LONG_CALLS
7729 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7730 || distance < MAX_PCREL17F_OFFSET))
7731 length += 8;
7732
7733 /* 64-bit plabel sequence. */
7734 else if (TARGET_64BIT && !local_call)
7735 length += sibcall ? 28 : 24;
7736
7737 /* non-pic long absolute branch sequence. */
7738 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7739 length += 12;
7740
7741 /* long pc-relative branch sequence. */
7742 else if (TARGET_LONG_PIC_SDIFF_CALL
7743 || (TARGET_GAS && !TARGET_SOM
7744 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7745 {
7746 length += 20;
7747
7748 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7749 length += 8;
7750 }
7751
7752 /* 32-bit plabel sequence. */
7753 else
7754 {
7755 length += 32;
7756
7757 if (TARGET_SOM)
7758 length += length_fp_args (insn);
7759
7760 if (flag_pic)
7761 length += 4;
7762
7763 if (!TARGET_PA_20)
7764 {
7765 if (!sibcall)
7766 length += 8;
7767
7768 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7769 length += 8;
7770 }
7771 }
7772
7773 return length;
7774 }
7775
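/* Worked example (editorial note): for a 32-bit PIC, non-PA2.0,
   non-SOM plabel call that is not a sibcall, the code above
   accumulates 32 (base sequence) + 4 (flag_pic) + 8 (!sibcall)
   + 8 (space-register fixup) = 52 bytes.  Per the block comment, this
   deliberately over-estimates and always includes the delay slot.  */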
7776 /* INSN is a function call.
7777
7778 CALL_DEST is the routine we are calling. */
7779
7780 const char *
7781 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7782 {
7783 int seq_length = dbr_sequence_length ();
7784 tree call_decl = SYMBOL_REF_DECL (call_dest);
7785 int local_call = call_decl && targetm.binds_local_p (call_decl);
7786 rtx xoperands[2];
7787
7788 xoperands[0] = call_dest;
7789
7790 /* Handle the common case where we're sure that the branch will reach
7791 the beginning of the "$CODE$" subspace. This is the beginning of
7792 the current function if we are in a named section. */
7793 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7794 {
7795 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7796 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7797 }
7798 else
7799 {
7800 if (TARGET_64BIT && !local_call)
7801 {
7802 /* ??? As far as I can tell, the HP linker doesn't support the
7803 long pc-relative sequence described in the 64-bit runtime
7804 architecture. So, we use a slightly longer indirect call. */
7805 xoperands[0] = pa_get_deferred_plabel (call_dest);
7806 xoperands[1] = gen_label_rtx ();
7807
7808 /* If this isn't a sibcall, we put the load of %r27 into the
7809 delay slot. We can't do this in a sibcall as we don't
7810 have a second call-clobbered scratch register available.
7811 We don't need to do anything when generating fast indirect
7812 calls. */
7813 if (seq_length != 0 && !sibcall)
7814 {
7815 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7816 optimize, 0, NULL);
7817
7818 /* Now delete the delay insn. */
7819 SET_INSN_DELETED (NEXT_INSN (insn));
7820 seq_length = 0;
7821 }
7822
7823 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7824 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7825 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7826
7827 if (sibcall)
7828 {
7829 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7830 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7831 output_asm_insn ("bve (%%r1)", xoperands);
7832 }
7833 else
7834 {
7835 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7836 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7837 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7838 seq_length = 1;
7839 }
7840 }
7841 else
7842 {
7843 int indirect_call = 0;
7844
7845 /* Emit a long call. There are several different sequences
7846 of increasing length and complexity. In most cases,
7847 they don't allow an instruction in the delay slot. */
7848 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7849 && !TARGET_LONG_PIC_SDIFF_CALL
7850 && !(TARGET_GAS && !TARGET_SOM
7851 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7852 && !TARGET_64BIT)
7853 indirect_call = 1;
7854
7855 if (seq_length != 0
7856 && !sibcall
7857 && (!TARGET_PA_20
7858 || indirect_call
7859 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7860 {
7861 /* A non-jump insn in the delay slot. By definition we can
7862 emit this insn before the call (and in fact before argument
7863 relocation). */
7864 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7865 NULL);
7866
7867 /* Now delete the delay insn. */
7868 SET_INSN_DELETED (NEXT_INSN (insn));
7869 seq_length = 0;
7870 }
7871
7872 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7873 {
7874 /* This is the best sequence for making long calls in
7875 non-pic code. Unfortunately, GNU ld doesn't provide
7876 the stub needed for external calls, and GAS's support
7877 for this with the SOM linker is buggy. It is safe
7878 to use this for local calls. */
7879 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7880 if (sibcall)
7881 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7882 else
7883 {
7884 if (TARGET_PA_20)
7885 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7886 xoperands);
7887 else
7888 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7889
7890 output_asm_insn ("copy %%r31,%%r2", xoperands);
7891 seq_length = 1;
7892 }
7893 }
7894 else
7895 {
7896 if (TARGET_LONG_PIC_SDIFF_CALL)
7897 {
7898 /* The HP assembler and linker can handle relocations
7899 for the difference of two symbols. The HP assembler
7900 recognizes the sequence as a pc-relative call and
7901 the linker provides stubs when needed. */
7902 xoperands[1] = gen_label_rtx ();
7903 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7904 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7905 targetm.asm_out.internal_label (asm_out_file, "L",
7906 CODE_LABEL_NUMBER (xoperands[1]));
7907 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7908 }
7909 else if (TARGET_GAS && !TARGET_SOM
7910 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7911 {
7912 /* GAS currently can't generate the relocations that
7913 are needed for the SOM linker under HP-UX using this
7914 sequence. The GNU linker doesn't generate the stubs
7915 that are needed for external calls on TARGET_ELF32
7916 with this sequence. For now, we have to use a
7917 longer plabel sequence when using GAS. */
7918 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7919 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7920 xoperands);
7921 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7922 xoperands);
7923 }
7924 else
7925 {
7926 /* Emit a long plabel-based call sequence. This is
7927 essentially an inline implementation of $$dyncall.
7928 We don't actually try to call $$dyncall as this is
7929 as difficult as calling the function itself. */
7930 xoperands[0] = pa_get_deferred_plabel (call_dest);
7931 xoperands[1] = gen_label_rtx ();
7932
7933 /* Since the call is indirect, FP arguments in registers
7934 need to be copied to the general registers. Then, the
7935 argument relocation stub will copy them back. */
7936 if (TARGET_SOM)
7937 copy_fp_args (insn);
7938
7939 if (flag_pic)
7940 {
7941 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7942 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7943 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7944 }
7945 else
7946 {
7947 output_asm_insn ("addil LR'%0-$global$,%%r27",
7948 xoperands);
7949 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7950 xoperands);
7951 }
7952
7953 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7954 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7955 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7956 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7957
7958 if (!sibcall && !TARGET_PA_20)
7959 {
7960 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7961 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7962 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7963 else
7964 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7965 }
7966 }
7967
7968 if (TARGET_PA_20)
7969 {
7970 if (sibcall)
7971 output_asm_insn ("bve (%%r1)", xoperands);
7972 else
7973 {
7974 if (indirect_call)
7975 {
7976 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7977 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7978 seq_length = 1;
7979 }
7980 else
7981 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7982 }
7983 }
7984 else
7985 {
7986 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7987 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7988 xoperands);
7989
7990 if (sibcall)
7991 {
7992 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7993 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7994 else
7995 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7996 }
7997 else
7998 {
7999 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8000 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8001 else
8002 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8003
8004 if (indirect_call)
8005 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8006 else
8007 output_asm_insn ("copy %%r31,%%r2", xoperands);
8008 seq_length = 1;
8009 }
8010 }
8011 }
8012 }
8013 }
8014
8015 if (seq_length == 0)
8016 output_asm_insn ("nop", xoperands);
8017
8018 return "";
8019 }
8020
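/* Editorial sketch: in the short-reach case at the top of the
   function, a call to a hypothetical function foo with an unfilled
   delay slot is simply

       bl  foo,%r2
       nop

   while a sibcall links through %r0, discarding the return address.
   Everything else above is long-call fallback machinery.  */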
8021 /* Return the attribute length of the indirect call instruction INSN.
8022 The length must match the code generated by pa_output_indirect_call.
8023 The returned length includes the delay slot. Currently, the delay
8024 slot of an indirect call sequence is not exposed and it is used by
8025 the sequence itself. */
8026
8027 int
8028 pa_attr_length_indirect_call (rtx_insn *insn)
8029 {
8030 unsigned long distance = -1;
8031 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8032
8033 if (INSN_ADDRESSES_SET_P ())
8034 {
8035 distance = (total + insn_current_reference_address (insn));
8036 if (distance < total)
8037 distance = -1;
8038 }
8039
8040 if (TARGET_64BIT)
8041 return 12;
8042
8043 if (TARGET_FAST_INDIRECT_CALLS
8044 || (!TARGET_LONG_CALLS
8045 && !TARGET_PORTABLE_RUNTIME
8046 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8047 || distance < MAX_PCREL17F_OFFSET)))
8048 return 8;
8049
8050 if (flag_pic)
8051 return 20;
8052
8053 if (TARGET_PORTABLE_RUNTIME)
8054 return 16;
8055
8056 /* Out of reach, can use ble. */
8057 return 12;
8058 }
8059
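/* Editorial note: in the 32-bit case, the values above map one-to-one
   onto the sequences in pa_output_indirect_call below -- 8 for the
   short bl/b,l $$dyncall forms, 12 for the ldil/ble absolute form, 16
   for the portable runtime form, and 20 for the long PIC form.  This
   is what "the length uniquely identifies the remaining cases" means
   in that function.  */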
8060 const char *
8061 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8062 {
8063 rtx xoperands[1];
8064
8065 if (TARGET_64BIT)
8066 {
8067 xoperands[0] = call_dest;
8068 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8069 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8070 return "";
8071 }
8072
8073 /* First the special case for kernels, level 0 systems, etc. */
8074 if (TARGET_FAST_INDIRECT_CALLS)
8075 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8076
8077 /* Now the normal case -- we can reach $$dyncall directly or
8078 we're sure that we can get there via a long-branch stub.
8079
8080 No need to check target flags as the length uniquely identifies
8081 the remaining cases. */
8082 if (pa_attr_length_indirect_call (insn) == 8)
8083 {
8084 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8085 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8086 variant of the B,L instruction can't be used on the SOM target. */
8087 if (TARGET_PA_20 && !TARGET_SOM)
8088 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8089 else
8090 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8091 }
8092
8093 /* Long millicode call, but we are not generating PIC or portable runtime
8094 code. */
8095 if (pa_attr_length_indirect_call (insn) == 12)
8096 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8097
8098 /* Long millicode call for portable runtime. */
8099 if (pa_attr_length_indirect_call (insn) == 16)
8100 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8101
8102 /* We need a long PIC call to $$dyncall. */
8103 xoperands[0] = NULL_RTX;
8104 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8105 if (TARGET_SOM || !TARGET_GAS)
8106 {
8107 xoperands[0] = gen_label_rtx ();
8108 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8109 targetm.asm_out.internal_label (asm_out_file, "L",
8110 CODE_LABEL_NUMBER (xoperands[0]));
8111 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8112 }
8113 else
8114 {
8115 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8116 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8117 xoperands);
8118 }
8119 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8120 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8121 return "";
8122 }
8123
8124 /* In HPUX 8.0's shared library scheme, special relocations are needed
8125 for function labels if they might be passed to a function
8126 in a shared library (because shared libraries don't live in code
8127 space), and special magic is needed to construct their address. */
8128
8129 void
8130 pa_encode_label (rtx sym)
8131 {
8132 const char *str = XSTR (sym, 0);
8133 int len = strlen (str) + 1;
8134 char *newstr, *p;
8135
8136 p = newstr = XALLOCAVEC (char, len + 1);
8137 *p++ = '@';
8138 strcpy (p, str);
8139
8140 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8141 }
8142
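/* For example (editorial note), a function symbol named "foo" becomes
   "@foo" after pa_encode_label; pa_strip_name_encoding below undoes
   the encoding when the plain name is needed.  */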
8143 static void
8144 pa_encode_section_info (tree decl, rtx rtl, int first)
8145 {
8146 int old_referenced = 0;
8147
8148 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8149 old_referenced
8150 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8151
8152 default_encode_section_info (decl, rtl, first);
8153
8154 if (first && TEXT_SPACE_P (decl))
8155 {
8156 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8157 if (TREE_CODE (decl) == FUNCTION_DECL)
8158 pa_encode_label (XEXP (rtl, 0));
8159 }
8160 else if (old_referenced)
8161 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8162 }
8163
8164 /* This is sort of the inverse of pa_encode_section_info. */
8165
8166 static const char *
8167 pa_strip_name_encoding (const char *str)
8168 {
8169 str += (*str == '@');
8170 str += (*str == '*');
8171 return str;
8172 }
8173
8174 /* Returns 1 if OP is a function label involved in a simple addition
8175 with a constant. Used to keep certain patterns from matching
8176 during instruction combination. */
8177 int
8178 pa_is_function_label_plus_const (rtx op)
8179 {
8180 /* Strip off any CONST. */
8181 if (GET_CODE (op) == CONST)
8182 op = XEXP (op, 0);
8183
8184 return (GET_CODE (op) == PLUS
8185 && function_label_operand (XEXP (op, 0), VOIDmode)
8186 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8187 }
8188
8189 /* Output assembly code for a thunk to FUNCTION. */
8190
8191 static void
8192 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8193 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8194 tree function)
8195 {
8196 static unsigned int current_thunk_number;
8197 int val_14 = VAL_14_BITS_P (delta);
8198 unsigned int old_last_address = last_address, nbytes = 0;
8199 char label[16];
8200 rtx xoperands[4];
8201
8202 xoperands[0] = XEXP (DECL_RTL (function), 0);
8203 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8204 xoperands[2] = GEN_INT (delta);
8205
8206 final_start_function (emit_barrier (), file, 1);
8207
8208 /* Output the thunk. We know that the function is in the same
8209 translation unit (i.e., the same space) as the thunk, and that
8210 thunks are output after their method. Thus, we don't need an
8211 external branch to reach the function. With SOM and GAS,
8212 functions and thunks are effectively in different sections.
8213 Thus, we can always use an IA-relative branch and the linker
8214 will add a long branch stub if necessary.
8215
8216 However, we have to be careful when generating PIC code on the
8217 SOM port to ensure that the sequence does not transfer to an
8218 import stub for the target function as this could clobber the
8219 return value saved at SP-24. This would also apply to the
8220 32-bit linux port if the multi-space model is implemented. */
8221 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8222 && !(flag_pic && TREE_PUBLIC (function))
8223 && (TARGET_GAS || last_address < 262132))
8224 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8225 && ((targetm_common.have_named_sections
8226 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8227 /* The GNU 64-bit linker has rather poor stub management.
8228 So, we use a long branch from thunks that aren't in
8229 the same section as the target function. */
8230 && ((!TARGET_64BIT
8231 && (DECL_SECTION_NAME (thunk_fndecl)
8232 != DECL_SECTION_NAME (function)))
8233 || ((DECL_SECTION_NAME (thunk_fndecl)
8234 == DECL_SECTION_NAME (function))
8235 && last_address < 262132)))
8236 /* In this case, we need to be able to reach the start of
8237 the stub table even though the function is likely closer
8238 and can be jumped to directly. */
8239 || (targetm_common.have_named_sections
8240 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8241 && DECL_SECTION_NAME (function) == NULL
8242 && total_code_bytes < MAX_PCREL17F_OFFSET)
8243 /* Likewise. */
8244 || (!targetm_common.have_named_sections
8245 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8246 {
8247 if (!val_14)
8248 output_asm_insn ("addil L'%2,%%r26", xoperands);
8249
8250 output_asm_insn ("b %0", xoperands);
8251
8252 if (val_14)
8253 {
8254 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8255 nbytes += 8;
8256 }
8257 else
8258 {
8259 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8260 nbytes += 12;
8261 }
8262 }
8263 else if (TARGET_64BIT)
8264 {
8265 /* We only have one call-clobbered scratch register, so we can't
8266 make use of the delay slot if delta doesn't fit in 14 bits. */
8267 if (!val_14)
8268 {
8269 output_asm_insn ("addil L'%2,%%r26", xoperands);
8270 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8271 }
8272
8273 output_asm_insn ("b,l .+8,%%r1", xoperands);
8274
8275 if (TARGET_GAS)
8276 {
8277 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8278 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8279 }
8280 else
8281 {
8282 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8283 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8284 }
8285
8286 if (val_14)
8287 {
8288 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8289 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8290 nbytes += 20;
8291 }
8292 else
8293 {
8294 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8295 nbytes += 24;
8296 }
8297 }
8298 else if (TARGET_PORTABLE_RUNTIME)
8299 {
8300 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8301 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8302
8303 if (!val_14)
8304 output_asm_insn ("addil L'%2,%%r26", xoperands);
8305
8306 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8307
8308 if (val_14)
8309 {
8310 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8311 nbytes += 16;
8312 }
8313 else
8314 {
8315 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8316 nbytes += 20;
8317 }
8318 }
8319 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8320 {
8321 /* The function is accessible from outside this module. The only
8322 way to avoid an import stub between the thunk and function is to
8323 call the function directly with an indirect sequence similar to
8324 that used by $$dyncall. This is possible because $$dyncall acts
8325 as the import stub in an indirect call. */
8326 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8327 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8328 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8329 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8330 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8331 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8332 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8333 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8334 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8335
8336 if (!val_14)
8337 {
8338 output_asm_insn ("addil L'%2,%%r26", xoperands);
8339 nbytes += 4;
8340 }
8341
8342 if (TARGET_PA_20)
8343 {
8344 output_asm_insn ("bve (%%r22)", xoperands);
8345 nbytes += 36;
8346 }
8347 else if (TARGET_NO_SPACE_REGS)
8348 {
8349 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8350 nbytes += 36;
8351 }
8352 else
8353 {
8354 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8355 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8356 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8357 nbytes += 44;
8358 }
8359
8360 if (val_14)
8361 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8362 else
8363 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8364 }
8365 else if (flag_pic)
8366 {
8367 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8368
8369 if (TARGET_SOM || !TARGET_GAS)
8370 {
8371 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8372 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8373 }
8374 else
8375 {
8376 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8377 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8378 }
8379
8380 if (!val_14)
8381 output_asm_insn ("addil L'%2,%%r26", xoperands);
8382
8383 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8384
8385 if (val_14)
8386 {
8387 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8388 nbytes += 20;
8389 }
8390 else
8391 {
8392 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8393 nbytes += 24;
8394 }
8395 }
8396 else
8397 {
8398 if (!val_14)
8399 output_asm_insn ("addil L'%2,%%r26", xoperands);
8400
8401 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8402 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8403
8404 if (val_14)
8405 {
8406 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8407 nbytes += 12;
8408 }
8409 else
8410 {
8411 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8412 nbytes += 16;
8413 }
8414 }
8415
8416 final_end_function ();
8417
8418 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8419 {
8420 switch_to_section (data_section);
8421 output_asm_insn (".align 4", xoperands);
8422 ASM_OUTPUT_LABEL (file, label);
8423 output_asm_insn (".word P'%0", xoperands);
8424 }
8425
8426 current_thunk_number++;
8427 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8428 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8429 last_address += nbytes;
8430 if (old_last_address > last_address)
8431 last_address = UINT_MAX;
8432 update_total_code_bytes (nbytes);
8433 }
8434
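/* Editorial sketch of the simplest thunk emitted above: when the
   target is reachable with a direct branch and DELTA fits in 14 bits,
   the whole thunk for a hypothetical target foo is just

       b   foo
       ldo delta(%r26),%r26

   with the this-pointer adjustment riding in the branch's delay slot;
   this is the 8-byte case counted in nbytes.  */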
8435 /* Only direct calls to static functions are allowed to be sibling (tail)
8436 call optimized.
8437
8438 This restriction is necessary because some linker generated stubs will
8439 store return pointers into rp' in some cases which might clobber a
8440 live value already in rp'.
8441
8442 In a sibcall the current function and the target function share stack
8443 space. Thus if the path to the current function and the path to the
8444 target function save a value in rp', they save the value into the
8445 same stack slot, which has undesirable consequences.
8446
8447 Because of the deferred binding nature of shared libraries any function
8448 with external scope could be in a different load module and thus require
8449 rp' to be saved when calling that function. So sibcall optimizations
8450 can only be safe for static functions.
8451
8452 Note that GCC never needs return value relocations, so we don't have to
8453 worry about static calls with return value relocations (which require
8454 saving rp').
8455
8456 It is safe to perform a sibcall optimization when the target function
8457 will never return. */
8458 static bool
8459 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8460 {
8461 if (TARGET_PORTABLE_RUNTIME)
8462 return false;
8463
8464 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8465 single subspace mode and the call is not indirect. As far as I know,
8466 there is no operating system support for the multiple subspace mode.
8467 It might be possible to support indirect calls if we didn't use
8468 $$dyncall (see the indirect sequence generated in pa_output_call). */
8469 if (TARGET_ELF32)
8470 return (decl != NULL_TREE);
8471
8472 /* Sibcalls are not ok because the arg pointer register is not a fixed
8473 register. This prevents the sibcall optimization from occurring. In
8474 addition, there are problems with stub placement using GNU ld. This
8475 is because a normal sibcall branch uses a 17-bit relocation while
8476 a regular call branch uses a 22-bit relocation. As a result, more
8477 care needs to be taken in the placement of long-branch stubs. */
8478 if (TARGET_64BIT)
8479 return false;
8480
8481 /* Sibcalls are only ok within a translation unit. */
8482 return (decl && !TREE_PUBLIC (decl));
8483 }
8484
8485 /* ??? Addition is not commutative on the PA due to the weird implicit
8486 space register selection rules for memory addresses. Therefore, we
8487 don't consider a + b == b + a, as this might be inside a MEM. */
8488 static bool
8489 pa_commutative_p (const_rtx x, int outer_code)
8490 {
8491 return (COMMUTATIVE_P (x)
8492 && (TARGET_NO_SPACE_REGS
8493 || (outer_code != UNKNOWN && outer_code != MEM)
8494 || GET_CODE (x) != PLUS));
8495 }
8496
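/* Editorial example: the space register for a memory access is
   selected from the base operand of the address, so inside a MEM the
   addresses (plus %r25 %r26) and (plus %r26 %r25) may refer to
   different spaces.  PLUS is therefore only reported as commutative
   outside a MEM or when space registers are disabled.  */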
8497 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8498 use in fmpyadd instructions. */
8499 int
8500 pa_fmpyaddoperands (rtx *operands)
8501 {
8502 machine_mode mode = GET_MODE (operands[0]);
8503
8504 /* Must be a floating point mode. */
8505 if (mode != SFmode && mode != DFmode)
8506 return 0;
8507
8508 /* All modes must be the same. */
8509 if (! (mode == GET_MODE (operands[1])
8510 && mode == GET_MODE (operands[2])
8511 && mode == GET_MODE (operands[3])
8512 && mode == GET_MODE (operands[4])
8513 && mode == GET_MODE (operands[5])))
8514 return 0;
8515
8516 /* All operands must be registers. */
8517 if (! (GET_CODE (operands[1]) == REG
8518 && GET_CODE (operands[2]) == REG
8519 && GET_CODE (operands[3]) == REG
8520 && GET_CODE (operands[4]) == REG
8521 && GET_CODE (operands[5]) == REG))
8522 return 0;
8523
8524 /* Only 2 real operands to the addition. One of the input operands must
8525 be the same as the output operand. */
8526 if (! rtx_equal_p (operands[3], operands[4])
8527 && ! rtx_equal_p (operands[3], operands[5]))
8528 return 0;
8529
8530 /* Inout operand of add cannot conflict with any operands from multiply. */
8531 if (rtx_equal_p (operands[3], operands[0])
8532 || rtx_equal_p (operands[3], operands[1])
8533 || rtx_equal_p (operands[3], operands[2]))
8534 return 0;
8535
8536 /* multiply cannot feed into addition operands. */
8537 if (rtx_equal_p (operands[4], operands[0])
8538 || rtx_equal_p (operands[5], operands[0]))
8539 return 0;
8540
8541 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8542 if (mode == SFmode
8543 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8544 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8545 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8546 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8547 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8548 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8549 return 0;
8550
8551 /* Passed. Operands are suitable for fmpyadd. */
8552 return 1;
8553 }
8554
8555 #if !defined(USE_COLLECT2)
8556 static void
8557 pa_asm_out_constructor (rtx symbol, int priority)
8558 {
8559 if (!function_label_operand (symbol, VOIDmode))
8560 pa_encode_label (symbol);
8561
8562 #ifdef CTORS_SECTION_ASM_OP
8563 default_ctor_section_asm_out_constructor (symbol, priority);
8564 #else
8565 # ifdef TARGET_ASM_NAMED_SECTION
8566 default_named_section_asm_out_constructor (symbol, priority);
8567 # else
8568 default_stabs_asm_out_constructor (symbol, priority);
8569 # endif
8570 #endif
8571 }
8572
8573 static void
8574 pa_asm_out_destructor (rtx symbol, int priority)
8575 {
8576 if (!function_label_operand (symbol, VOIDmode))
8577 pa_encode_label (symbol);
8578
8579 #ifdef DTORS_SECTION_ASM_OP
8580 default_dtor_section_asm_out_destructor (symbol, priority);
8581 #else
8582 # ifdef TARGET_ASM_NAMED_SECTION
8583 default_named_section_asm_out_destructor (symbol, priority);
8584 # else
8585 default_stabs_asm_out_destructor (symbol, priority);
8586 # endif
8587 #endif
8588 }
8589 #endif
8590
8591 /* This function places uninitialized global data in the bss section.
8592 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8593 function on the SOM port to prevent uninitialized global data from
8594 being placed in the data section. */
8595
8596 void
8597 pa_asm_output_aligned_bss (FILE *stream,
8598 const char *name,
8599 unsigned HOST_WIDE_INT size,
8600 unsigned int align)
8601 {
8602 switch_to_section (bss_section);
8603 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8604
8605 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8606 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8607 #endif
8608
8609 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8610 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8611 #endif
8612
8613 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8614 ASM_OUTPUT_LABEL (stream, name);
8615 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8616 }
8617
8618 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8619 that doesn't allow the alignment of global common storage to be directly
8620 specified. The SOM linker aligns common storage based on the rounded
8621 value of the NUM_BYTES parameter in the .comm directive. It's not
8622 possible to use the .align directive as it doesn't affect the alignment
8623 of the label associated with a .comm directive. */
8624
8625 void
8626 pa_asm_output_aligned_common (FILE *stream,
8627 const char *name,
8628 unsigned HOST_WIDE_INT size,
8629 unsigned int align)
8630 {
8631 unsigned int max_common_align;
8632
8633 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8634 if (align > max_common_align)
8635 {
8636 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8637 "for global common data. Using %u",
8638 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8639 align = max_common_align;
8640 }
8641
8642 switch_to_section (bss_section);
8643
8644 assemble_name (stream, name);
8645 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
8646 MAX (size, align / BITS_PER_UNIT));
8647 }
8648
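/* Worked example (editorial note; the symbol is hypothetical): for a
   6-byte symbol "buf" with a requested alignment of 64 bits, the code
   above emits

       buf	.comm 8

   rounding the size up to MAX (size, align / BITS_PER_UNIT) so that
   the SOM linker, which aligns common storage on the rounded size,
   honors the requested alignment.  */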
8649 /* We can't use .comm for local common storage as the SOM linker effectively
8650 treats the symbol as universal and uses the same storage for local symbols
8651 with the same name in different object files. The .block directive
8652 reserves an uninitialized block of storage. However, it's not common
8653 storage. Fortunately, GCC never requests common storage with the same
8654 name in any given translation unit. */
8655
8656 void
8657 pa_asm_output_aligned_local (FILE *stream,
8658 const char *name,
8659 unsigned HOST_WIDE_INT size,
8660 unsigned int align)
8661 {
8662 switch_to_section (bss_section);
8663 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8664
8665 #ifdef LOCAL_ASM_OP
8666 fprintf (stream, "%s", LOCAL_ASM_OP);
8667 assemble_name (stream, name);
8668 fprintf (stream, "\n");
8669 #endif
8670
8671 ASM_OUTPUT_LABEL (stream, name);
8672 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8673 }
8674
8675 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8676 use in fmpysub instructions. */
8677 int
8678 pa_fmpysuboperands (rtx *operands)
8679 {
8680 machine_mode mode = GET_MODE (operands[0]);
8681
8682 /* Must be a floating point mode. */
8683 if (mode != SFmode && mode != DFmode)
8684 return 0;
8685
8686 /* All modes must be the same. */
8687 if (! (mode == GET_MODE (operands[1])
8688 && mode == GET_MODE (operands[2])
8689 && mode == GET_MODE (operands[3])
8690 && mode == GET_MODE (operands[4])
8691 && mode == GET_MODE (operands[5])))
8692 return 0;
8693
8694 /* All operands must be registers. */
8695 if (! (GET_CODE (operands[1]) == REG
8696 && GET_CODE (operands[2]) == REG
8697 && GET_CODE (operands[3]) == REG
8698 && GET_CODE (operands[4]) == REG
8699 && GET_CODE (operands[5]) == REG))
8700 return 0;
8701
8702 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8703 operation, so operands[4] must be the same as operands[3]. */
8704 if (! rtx_equal_p (operands[3], operands[4]))
8705 return 0;
8706
8707 /* multiply cannot feed into subtraction. */
8708 if (rtx_equal_p (operands[5], operands[0]))
8709 return 0;
8710
8711 /* Inout operand of sub cannot conflict with any operands from multiply. */
8712 if (rtx_equal_p (operands[3], operands[0])
8713 || rtx_equal_p (operands[3], operands[1])
8714 || rtx_equal_p (operands[3], operands[2]))
8715 return 0;
8716
8717 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8718 if (mode == SFmode
8719 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8720 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8721 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8722 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8723 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8724 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8725 return 0;
8726
8727 /* Passed. Operands are suitable for fmpysub. */
8728 return 1;
8729 }
8730
8731 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8732 constants for shadd instructions. */
8733 int
8734 pa_shadd_constant_p (int val)
8735 {
8736 if (val == 2 || val == 4 || val == 8)
8737 return 1;
8738 else
8739 return 0;
8740 }
8741
8742 /* Return TRUE if INSN branches forward. */
8743
8744 static bool
8745 forward_branch_p (rtx_insn *insn)
8746 {
8747 rtx lab = JUMP_LABEL (insn);
8748
8749 /* The INSN must have a jump label. */
8750 gcc_assert (lab != NULL_RTX);
8751
8752 if (INSN_ADDRESSES_SET_P ())
8753 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8754
8755 while (insn)
8756 {
8757 if (insn == lab)
8758 return true;
8759 else
8760 insn = NEXT_INSN (insn);
8761 }
8762
8763 return false;
8764 }
8765
8766 /* Output an unconditional move and branch insn. */
8767
8768 const char *
8769 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8770 {
8771 int length = get_attr_length (insn);
8772
8773 /* These are the cases in which we win. */
8774 if (length == 4)
8775 return "mov%I1b,tr %1,%0,%2";
8776
8777 /* None of the following cases win, but they don't lose either. */
8778 if (length == 8)
8779 {
8780 if (dbr_sequence_length () == 0)
8781 {
8782 /* Nothing in the delay slot, fake it by putting the combined
8783 insn (the copy or add) in the delay slot of a bl. */
8784 if (GET_CODE (operands[1]) == CONST_INT)
8785 return "b %2\n\tldi %1,%0";
8786 else
8787 return "b %2\n\tcopy %1,%0";
8788 }
8789 else
8790 {
8791 /* Something in the delay slot, but we've got a long branch. */
8792 if (GET_CODE (operands[1]) == CONST_INT)
8793 return "ldi %1,%0\n\tb %2";
8794 else
8795 return "copy %1,%0\n\tb %2";
8796 }
8797 }
8798
8799 if (GET_CODE (operands[1]) == CONST_INT)
8800 output_asm_insn ("ldi %1,%0", operands);
8801 else
8802 output_asm_insn ("copy %1,%0", operands);
8803 return pa_output_lbranch (operands[2], insn, 1);
8804 }
8805
8806 /* Output an unconditional add and branch insn. */
8807
8808 const char *
8809 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8810 {
8811 int length = get_attr_length (insn);
8812
8813 /* To make life easy we want operand0 to be the shared input/output
8814 operand and operand1 to be the readonly operand. */
8815 if (operands[0] == operands[1])
8816 operands[1] = operands[2];
8817
8818 /* These are the cases in which we win. */
8819 if (length == 4)
8820 return "add%I1b,tr %1,%0,%3";
8821
8822 /* None of the following cases win, but they don't lose either. */
8823 if (length == 8)
8824 {
8825 if (dbr_sequence_length () == 0)
8826 /* Nothing in the delay slot, fake it by putting the combined
8827 insn (the copy or add) in the delay slot of a bl. */
8828 return "b %3\n\tadd%I1 %1,%0,%0";
8829 else
8830 /* Something in the delay slot, but we've got a long branch. */
8831 return "add%I1 %1,%0,%0\n\tb %3";
8832 }
8833
8834 output_asm_insn ("add%I1 %1,%0,%0", operands);
8835 return pa_output_lbranch (operands[3], insn, 1);
8836 }
8837
8838 /* We use this hook to perform a PA specific optimization which is difficult
8839 to do in earlier passes. */
8840
8841 static void
8842 pa_reorg (void)
8843 {
8844 remove_useless_addtr_insns (1);
8845
8846 if (pa_cpu < PROCESSOR_8000)
8847 pa_combine_instructions ();
8848 }
8849
8850 /* The PA has a number of odd instructions which can perform multiple
8851 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8852 it may be profitable to combine two instructions into one instruction
8853 with two outputs. It's not profitable on PA2.0 machines because the
8854 two outputs would take two slots in the reorder buffers.
8855
8856 This routine finds instructions which can be combined and combines
8857 them. We only support some of the potential combinations, and we
8858 only try common ways to find suitable instructions.
8859
8860 * addb can add two registers or a register and a small integer
8861 and jump to a nearby (+-8k) location. Normally the jump to the
8862 nearby location is conditional on the result of the add, but by
8863 using the "true" condition we can make the jump unconditional.
8864 Thus addb can perform two independent operations in one insn.
8865
8866 * movb is similar to addb in that it can perform a reg->reg
8867 or small immediate->reg copy and jump to a nearby (+-8k) location.
8868
8869 * fmpyadd and fmpysub can perform a FP multiply and either an
8870 FP add or FP sub if the operands of the multiply and add/sub are
8871 independent (there are other minor restrictions). Note both
8872 the fmpy and fadd/fsub can in theory move to better spots according
8873 to data dependencies, but for now we require the fmpy stay at a
8874 fixed location.
8875
8876 * Many of the memory operations can perform pre & post updates
8877 of index registers. GCC's pre/post increment/decrement addressing
8878 is far too simple to take advantage of all the possibilities. This
8879 pass may not be suitable since those insns may not be independent.
8880
8881 * comclr can compare two ints or an int and a register, nullify
8882 the following instruction and zero some other register. This
8883 is more difficult to use as it's harder to find an insn which
8884 will generate a comclr than finding something like an unconditional
8885 branch. (conditional moves & long branches create comclr insns).
8886
8887 * Most arithmetic operations can conditionally skip the next
8888 instruction. They can be viewed as "perform this operation
8889 and conditionally jump to this nearby location" (where nearby
8890 is an insn away). These are difficult to use due to the
8891 branch length restrictions. */
8892
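/* Editorial sketch (hypothetical registers and label) of the addb
   case described above: the pair

       add %r4,%r3,%r3
       b   L$dest

   combines into the single insn

       addb,tr %r4,%r3,L$dest

   where the ",tr" (true) completer makes the branch unconditional;
   pa_output_parallel_addb contains the template that prints it.  */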
8893 static void
8894 pa_combine_instructions (void)
8895 {
8896 rtx_insn *anchor;
8897
8898 /* This can get expensive since the basic algorithm is on the
8899 order of O(n^2) (or worse). Only do it for -O2 or higher
8900 levels of optimization. */
8901 if (optimize < 2)
8902 return;
8903
8904 /* Walk down the list of insns looking for "anchor" insns which
8905 may be combined with "floating" insns. As the name implies,
8906 "anchor" instructions don't move, while "floating" insns may
8907 move around. */
8908 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8909 rtx_insn *new_rtx = make_insn_raw (par);
8910
8911 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8912 {
8913 enum attr_pa_combine_type anchor_attr;
8914 enum attr_pa_combine_type floater_attr;
8915
8916 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8917 Also ignore any special USE insns. */
8918 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
8919 || GET_CODE (PATTERN (anchor)) == USE
8920 || GET_CODE (PATTERN (anchor)) == CLOBBER)
8921 continue;
8922
8923 anchor_attr = get_attr_pa_combine_type (anchor);
8924 /* See if anchor is an insn suitable for combination. */
8925 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8926 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8927 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8928 && ! forward_branch_p (anchor)))
8929 {
8930 rtx_insn *floater;
8931
8932 for (floater = PREV_INSN (anchor);
8933 floater;
8934 floater = PREV_INSN (floater))
8935 {
8936 if (NOTE_P (floater)
8937 || (NONJUMP_INSN_P (floater)
8938 && (GET_CODE (PATTERN (floater)) == USE
8939 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8940 continue;
8941
8942 /* Anything except a regular INSN will stop our search. */
8943 if (! NONJUMP_INSN_P (floater))
8944 {
8945 floater = NULL;
8946 break;
8947 }
8948
8949 /* See if FLOATER is suitable for combination with the
8950 anchor. */
8951 floater_attr = get_attr_pa_combine_type (floater);
8952 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8953 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8954 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8955 && floater_attr == PA_COMBINE_TYPE_FMPY))
8956 {
8957 /* If ANCHOR and FLOATER can be combined, then we're
8958 done with this pass. */
8959 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8960 SET_DEST (PATTERN (floater)),
8961 XEXP (SET_SRC (PATTERN (floater)), 0),
8962 XEXP (SET_SRC (PATTERN (floater)), 1)))
8963 break;
8964 }
8965
8966 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8967 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8968 {
8969 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8970 {
8971 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8972 SET_DEST (PATTERN (floater)),
8973 XEXP (SET_SRC (PATTERN (floater)), 0),
8974 XEXP (SET_SRC (PATTERN (floater)), 1)))
8975 break;
8976 }
8977 else
8978 {
8979 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8980 SET_DEST (PATTERN (floater)),
8981 SET_SRC (PATTERN (floater)),
8982 SET_SRC (PATTERN (floater))))
8983 break;
8984 }
8985 }
8986 }
8987
8988 /* If we didn't find anything on the backwards scan, try forwards. */
8989 if (!floater
8990 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8991 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8992 {
8993 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8994 {
8995 if (NOTE_P (floater)
8996 || (NONJUMP_INSN_P (floater)
8997 && (GET_CODE (PATTERN (floater)) == USE
8998 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9000 continue;
9001
9002 /* Anything except a regular INSN will stop our search. */
9003 if (! NONJUMP_INSN_P (floater))
9004 {
9005 floater = NULL;
9006 break;
9007 }
9008
9009 /* See if FLOATER is suitable for combination with the
9010 anchor. */
9011 floater_attr = get_attr_pa_combine_type (floater);
9012 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9013 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9014 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9015 && floater_attr == PA_COMBINE_TYPE_FMPY))
9016 {
9017 /* If ANCHOR and FLOATER can be combined, then we're
9018 done with this pass. */
9019 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9020 SET_DEST (PATTERN (floater)),
9021 XEXP (SET_SRC (PATTERN (floater)),
9022 0),
9023 XEXP (SET_SRC (PATTERN (floater)),
9024 1)))
9025 break;
9026 }
9027 }
9028 }
9029
9030 /* FLOATER will be non-NULL if we found a suitable floating
9031 insn for combination with ANCHOR. */
9032 if (floater
9033 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9034 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9035 {
9036 /* Emit the new instruction and delete the old anchor. */
9037 emit_insn_before (gen_rtx_PARALLEL
9038 (VOIDmode,
9039 gen_rtvec (2, PATTERN (anchor),
9040 PATTERN (floater))),
9041 anchor);
9042
9043 SET_INSN_DELETED (anchor);
9044
9045 /* Emit a special USE insn for FLOATER, then delete
9046 the floating insn. */
9047 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9048 delete_insn (floater);
9049
9050 continue;
9051 }
9052 else if (floater
9053 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9054 {
9055 rtx temp;
9056 /* Emit the new_jump instruction and delete the old anchor. */
9057 temp
9058 = emit_jump_insn_before (gen_rtx_PARALLEL
9059 (VOIDmode,
9060 gen_rtvec (2, PATTERN (anchor),
9061 PATTERN (floater))),
9062 anchor);
9063
9064 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9065 SET_INSN_DELETED (anchor);
9066
9067 /* Emit a special USE insn for FLOATER, then delete
9068 the floating insn. */
9069 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9070 delete_insn (floater);
9071 continue;
9072 }
9073 }
9074 }
9075 }
9076
9077 static int
9078 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9079 int reversed, rtx dest,
9080 rtx src1, rtx src2)
9081 {
9082 int insn_code_number;
9083 rtx_insn *start, *end;
9084
9085 /* Create a PARALLEL with the patterns of ANCHOR and
9086 FLOATER, try to recognize it, then test constraints
9087 for the resulting pattern.
9088
9089 If the pattern doesn't match or the constraints
9090 aren't met keep searching for a suitable floater
9091 insn. */
9092 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9093 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9094 INSN_CODE (new_rtx) = -1;
9095 insn_code_number = recog_memoized (new_rtx);
9096 basic_block bb = BLOCK_FOR_INSN (anchor);
9097 if (insn_code_number < 0
9098 || (extract_insn (new_rtx),
9099 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9100 return 0;
9101
9102 if (reversed)
9103 {
9104 start = anchor;
9105 end = floater;
9106 }
9107 else
9108 {
9109 start = floater;
9110 end = anchor;
9111 }
9112
9113 /* There are up to three operands to consider: one
9114 output and two inputs.
9115
9116 The output must not be used between FLOATER & ANCHOR
9117 exclusive. The inputs must not be set between
9118 FLOATER and ANCHOR exclusive. */
9119
9120 if (reg_used_between_p (dest, start, end))
9121 return 0;
9122
9123 if (reg_set_between_p (src1, start, end))
9124 return 0;
9125
9126 if (reg_set_between_p (src2, start, end))
9127 return 0;
9128
9129 /* If we get here, then everything is good. */
9130 return 1;
9131 }
9132
9133 /* Return nonzero if references for INSN are delayed.
9134
9135 Millicode insns are actually function calls with some special
9136 constraints on arguments and register usage.
9137
9138 Millicode calls always expect their arguments in the integer argument
9139 registers, and always return their result in %r29 (ret1). They
9140 are expected to clobber their arguments, %r1, %r29, and the return
9141 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9142
9143 This function tells reorg that the references to arguments and
9144 millicode calls do not appear to happen until after the millicode call.
9145 This allows reorg to put insns which set the argument registers into the
9146 delay slot of the millicode call -- thus they act more like traditional
9147 CALL_INSNs.
9148
9149 Note we cannot consider side effects of the insn to be delayed because
9150 the branch and link insn will clobber the return pointer. If we happened
9151 to use the return pointer in the delay slot of the call, then we lose.
9152
9153 get_attr_type will try to recognize the given insn, so make sure to
9154 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9155 in particular. */
9156 int
9157 pa_insn_refs_are_delayed (rtx_insn *insn)
9158 {
9159 return ((NONJUMP_INSN_P (insn)
9160 && GET_CODE (PATTERN (insn)) != SEQUENCE
9161 && GET_CODE (PATTERN (insn)) != USE
9162 && GET_CODE (PATTERN (insn)) != CLOBBER
9163 && get_attr_type (insn) == TYPE_MILLI));
9164 }
9165
9166 /* Promote the return value, but not the arguments. */
9167
9168 static machine_mode
9169 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9170 machine_mode mode,
9171 int *punsignedp ATTRIBUTE_UNUSED,
9172 const_tree fntype ATTRIBUTE_UNUSED,
9173 int for_return)
9174 {
9175 if (for_return == 0)
9176 return mode;
9177 return promote_mode (type, mode, punsignedp);
9178 }
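/* A small example of the asymmetry (assuming the port's usual
   PROMOTE_MODE behavior): for a function returning "short", the
   promote_mode call above widens the return value to word_mode,
   while a "short" argument keeps its own mode.  */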
9179
9180 /* On the HP-PA the value is found in register(s) 28(-29), unless
9181 the mode is SF or DF. Then the value is returned in fr4 (32).
9182
9183 This must perform the same promotions as PROMOTE_MODE, else promoting
9184 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9185
9186 Small structures must be returned in a PARALLEL on PA64 in order
9187 to match the HP Compiler ABI. */
9188
9189 static rtx
9190 pa_function_value (const_tree valtype,
9191 const_tree func ATTRIBUTE_UNUSED,
9192 bool outgoing ATTRIBUTE_UNUSED)
9193 {
9194 machine_mode valmode;
9195
9196 if (AGGREGATE_TYPE_P (valtype)
9197 || TREE_CODE (valtype) == COMPLEX_TYPE
9198 || TREE_CODE (valtype) == VECTOR_TYPE)
9199 {
9200 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9201
9202 /* Handle aggregates that fit exactly in a word or double word. */
9203 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9204 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9205
9206 if (TARGET_64BIT)
9207 {
9208 /* Aggregates with a size less than or equal to 128 bits are
9209 returned in GR 28(-29). They are left justified. The pad
9210 bits are undefined. Larger aggregates are returned in
9211 memory. */
9212 rtx loc[2];
9213 int i, offset = 0;
9214 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9215
9216 for (i = 0; i < ub; i++)
9217 {
9218 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9219 gen_rtx_REG (DImode, 28 + i),
9220 GEN_INT (offset));
9221 offset += 8;
9222 }
9223
9224 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9225 }
9226 else if (valsize > UNITS_PER_WORD)
9227 {
9228 /* Aggregates 5 to 8 bytes in size are returned in general
9229 registers r28-r29 in the same manner as other non
9230 floating-point objects. The data is right-justified and
9231 zero-extended to 64 bits. This is opposite to the normal
9232 justification used on big endian targets and requires
9233 special treatment. */
9234 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9235 gen_rtx_REG (DImode, 28), const0_rtx);
9236 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9237 }
9238 }
9239
9240 if ((INTEGRAL_TYPE_P (valtype)
9241 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9242 || POINTER_TYPE_P (valtype))
9243 valmode = word_mode;
9244 else
9245 valmode = TYPE_MODE (valtype);
9246
9247 if (TREE_CODE (valtype) == REAL_TYPE
9248 && !AGGREGATE_TYPE_P (valtype)
9249 && TYPE_MODE (valtype) != TFmode
9250 && !TARGET_SOFT_FLOAT)
9251 return gen_rtx_REG (valmode, 32);
9252
9253 return gen_rtx_REG (valmode, 28);
9254 }
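/* Illustrative cases (a sketch of what the code above produces):

     int           -> (reg 28) in word_mode
     double        -> (reg:DF 32), i.e. fr4, unless soft float
     6-byte struct -> parallel [(reg:DI 28)] on 32-bit targets,
                      right justified per the comment above

   On PA64, a 12-byte struct comes back left justified as
   parallel [(reg:DI 28) at offset 0, (reg:DI 29) at offset 8].  */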
9255
9256 /* Implement the TARGET_LIBCALL_VALUE hook. */
9257
9258 static rtx
9259 pa_libcall_value (machine_mode mode,
9260 const_rtx fun ATTRIBUTE_UNUSED)
9261 {
9262 if (! TARGET_SOFT_FLOAT
9263 && (mode == SFmode || mode == DFmode))
9264 return gen_rtx_REG (mode, 32);
9265 else
9266 return gen_rtx_REG (mode, 28);
9267 }
9268
9269 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9270
9271 static bool
9272 pa_function_value_regno_p (const unsigned int regno)
9273 {
9274 if (regno == 28
9275 || (! TARGET_SOFT_FLOAT && regno == 32))
9276 return true;
9277
9278 return false;
9279 }
9280
9281 /* Update the data in CUM to advance over an argument
9282 of mode MODE and data type TYPE.
9283 (TYPE is null for libcalls where that information may not be available.) */
9284
9285 static void
9286 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9287 const_tree type, bool named ATTRIBUTE_UNUSED)
9288 {
9289 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9290 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9291
9292 cum->nargs_prototype--;
9293 cum->words += (arg_size
9294 + ((cum->words & 01)
9295 && type != NULL_TREE
9296 && arg_size > 1));
9297 }
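/* Worked example for the 32-bit runtime: in f (int, double), the
   int advances cum->words from 0 to 1.  The double has arg_size 2
   and finds cum->words odd, so the expression above adds one pad
   word as well: cum->words becomes 1 + 2 + 1 = 4, keeping the
   double-word argument slot aligned.  */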
9298
9299 /* Return the location of a parameter that is passed in a register or NULL
9300 if the parameter has any component that is passed in memory.
9301
9302 This is new code and will be pushed into the net sources after
9303 further testing.
9304
9305 ??? We might want to restructure this so that it looks more like other
9306 ports. */
9307 static rtx
9308 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9309 const_tree type, bool named ATTRIBUTE_UNUSED)
9310 {
9311 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9312 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9313 int alignment = 0;
9314 int arg_size;
9315 int fpr_reg_base;
9316 int gpr_reg_base;
9317 rtx retval;
9318
9319 if (mode == VOIDmode)
9320 return NULL_RTX;
9321
9322 arg_size = FUNCTION_ARG_SIZE (mode, type);
9323
9324 /* If this arg would be passed partially or totally on the stack, then
9325 this routine should return zero. pa_arg_partial_bytes will
9326 handle arguments which are split between regs and stack slots if
9327 the ABI mandates split arguments. */
9328 if (!TARGET_64BIT)
9329 {
9330 /* The 32-bit ABI does not split arguments. */
9331 if (cum->words + arg_size > max_arg_words)
9332 return NULL_RTX;
9333 }
9334 else
9335 {
9336 if (arg_size > 1)
9337 alignment = cum->words & 1;
9338 if (cum->words + alignment >= max_arg_words)
9339 return NULL_RTX;
9340 }
9341
9342 /* The 32bit ABIs and the 64bit ABIs are rather different,
9343 particularly in their handling of FP registers. We might
9344 be able to cleverly share code between them, but I'm not
9345 going to bother in the hope that splitting them up results
9346 in code that is more easily understood. */
9347
9348 if (TARGET_64BIT)
9349 {
9350 /* Advance the base registers to their current locations.
9351
9352 Remember, gprs grow towards smaller register numbers while
9353 fprs grow to higher register numbers. Also remember that
9354 although FP regs are 32-bit addressable, we pretend that
9355 the registers are 64 bits wide. */
9356 gpr_reg_base = 26 - cum->words;
9357 fpr_reg_base = 32 + cum->words;
9358
9359 /* Arguments wider than one word and small aggregates need special
9360 treatment. */
9361 if (arg_size > 1
9362 || mode == BLKmode
9363 || (type && (AGGREGATE_TYPE_P (type)
9364 || TREE_CODE (type) == COMPLEX_TYPE
9365 || TREE_CODE (type) == VECTOR_TYPE)))
9366 {
9367 /* Double-extended precision (80-bit), quad-precision (128-bit)
9368 and aggregates including complex numbers are aligned on
9369 128-bit boundaries. The first eight 64-bit argument slots
9370 are associated one-to-one, with general registers r26
9371 through r19, and also with floating-point registers fr4
9372 through fr11. Arguments larger than one word are always
9373 passed in general registers.
9374
9375 Using a PARALLEL with a word mode register results in left
9376 justified data on a big-endian target. */
9377
9378 rtx loc[8];
9379 int i, offset = 0, ub = arg_size;
9380
9381 /* Align the base register. */
9382 gpr_reg_base -= alignment;
9383
9384 ub = MIN (ub, max_arg_words - cum->words - alignment);
9385 for (i = 0; i < ub; i++)
9386 {
9387 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9388 gen_rtx_REG (DImode, gpr_reg_base),
9389 GEN_INT (offset));
9390 gpr_reg_base -= 1;
9391 offset += 8;
9392 }
9393
9394 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9395 }
9396 }
9397 else
9398 {
9399 /* If the argument is larger than a word, then we know precisely
9400 which registers we must use. */
9401 if (arg_size > 1)
9402 {
9403 if (cum->words)
9404 {
9405 gpr_reg_base = 23;
9406 fpr_reg_base = 38;
9407 }
9408 else
9409 {
9410 gpr_reg_base = 25;
9411 fpr_reg_base = 34;
9412 }
9413
9414 /* Structures 5 to 8 bytes in size are passed in the general
9415 registers in the same manner as other non floating-point
9416 objects. The data is right-justified and zero-extended
9417 to 64 bits. This is opposite to the normal justification
9418 used on big endian targets and requires special treatment.
9419 We now define BLOCK_REG_PADDING to pad these objects.
9420 Aggregates, complex and vector types are passed in the same
9421 manner as structures. */
9422 if (mode == BLKmode
9423 || (type && (AGGREGATE_TYPE_P (type)
9424 || TREE_CODE (type) == COMPLEX_TYPE
9425 || TREE_CODE (type) == VECTOR_TYPE)))
9426 {
9427 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9428 gen_rtx_REG (DImode, gpr_reg_base),
9429 const0_rtx);
9430 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9431 }
9432 }
9433 else
9434 {
9435 /* We have a single word (32 bits). A simple computation
9436 will get us the register #s we need. */
9437 gpr_reg_base = 26 - cum->words;
9438 fpr_reg_base = 32 + 2 * cum->words;
9439 }
9440 }
9441
9442 /* Determine if the argument needs to be passed in both general and
9443 floating point registers. */
9444 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9445 /* If we are doing soft-float with portable runtime, then there
9446 is no need to worry about FP regs. */
9447 && !TARGET_SOFT_FLOAT
9448 /* The parameter must be some kind of scalar float, else we just
9449 pass it in integer registers. */
9450 && GET_MODE_CLASS (mode) == MODE_FLOAT
9451 /* The target function must not have a prototype. */
9452 && cum->nargs_prototype <= 0
9453 /* libcalls do not need to pass items in both FP and general
9454 registers. */
9455 && type != NULL_TREE
9456 /* All this hair applies to "outgoing" args only. This includes
9457 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9458 && !cum->incoming)
9459 /* Also pass outgoing floating arguments in both registers in indirect
9460 calls with the 32-bit ABI and the HP assembler since there is no
9461 way to specify the argument locations in static functions. */
9462 || (!TARGET_64BIT
9463 && !TARGET_GAS
9464 && !cum->incoming
9465 && cum->indirect
9466 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9467 {
9468 retval
9469 = gen_rtx_PARALLEL
9470 (mode,
9471 gen_rtvec (2,
9472 gen_rtx_EXPR_LIST (VOIDmode,
9473 gen_rtx_REG (mode, fpr_reg_base),
9474 const0_rtx),
9475 gen_rtx_EXPR_LIST (VOIDmode,
9476 gen_rtx_REG (mode, gpr_reg_base),
9477 const0_rtx)));
9478 }
9479 else
9480 {
9481 /* See if we should pass this parameter in a general register. */
9482 if (TARGET_SOFT_FLOAT
9483 /* Indirect calls in the normal 32bit ABI require all arguments
9484 to be passed in general registers. */
9485 || (!TARGET_PORTABLE_RUNTIME
9486 && !TARGET_64BIT
9487 && !TARGET_ELF32
9488 && cum->indirect)
9489 /* If the parameter is not a scalar floating-point parameter,
9490 then it belongs in GPRs. */
9491 || GET_MODE_CLASS (mode) != MODE_FLOAT
9492 /* Structure with single SFmode field belongs in GPR. */
9493 || (type && AGGREGATE_TYPE_P (type)))
9494 retval = gen_rtx_REG (mode, gpr_reg_base);
9495 else
9496 retval = gen_rtx_REG (mode, fpr_reg_base);
9497 }
9498 return retval;
9499 }
9500
9501 /* Arguments larger than one word are double word aligned. */
9502
9503 static unsigned int
9504 pa_function_arg_boundary (machine_mode mode, const_tree type)
9505 {
9506 bool singleword = (type
9507 ? (integer_zerop (TYPE_SIZE (type))
9508 || !TREE_CONSTANT (TYPE_SIZE (type))
9509 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9510 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9511
9512 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9513 }
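/* For instance, on the 32-bit runtime an int occupies a single word
   and gets PARM_BOUNDARY alignment, while a double (8 bytes, larger
   than UNITS_PER_WORD) gets MAX_PARM_BOUNDARY, i.e. a double-word
   aligned argument slot.  */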
9514
9515 /* If this arg would be passed totally in registers or totally on the stack,
9516 then this routine should return zero. */
9517
9518 static int
9519 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9520 tree type, bool named ATTRIBUTE_UNUSED)
9521 {
9522 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9523 unsigned int max_arg_words = 8;
9524 unsigned int offset = 0;
9525
9526 if (!TARGET_64BIT)
9527 return 0;
9528
9529 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9530 offset = 1;
9531
9532 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9533 /* Arg fits fully into registers. */
9534 return 0;
9535 else if (cum->words + offset >= max_arg_words)
9536 /* Arg fully on the stack. */
9537 return 0;
9538 else
9539 /* Arg is split. */
9540 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9541 }
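/* Worked example (64-bit only; the 32-bit ABI never splits): with
   cum->words == 7 and a two-word argument needing no pad word, the
   argument neither fits entirely in the eight argument words nor
   starts on the stack, so (8 - 7 - 0) * UNITS_PER_WORD == 8 bytes
   are passed in the last register and the rest goes on the stack.  */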
9542
9543
9544 /* A get_unnamed_section callback for switching to the text section.
9545
9546 This function is only used with SOM. Because we don't support
9547 named subspaces, we can only create a new subspace or switch back
9548 to the default text subspace. */
9549
9550 static void
9551 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9552 {
9553 gcc_assert (TARGET_SOM);
9554 if (TARGET_GAS)
9555 {
9556 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9557 {
9558 /* We only want to emit a .nsubspa directive once at the
9559 start of the function. */
9560 cfun->machine->in_nsubspa = 1;
9561
9562 /* Create a new subspace for the text. This provides
9563 better stub placement and one-only functions. */
9564 if (cfun->decl
9565 && DECL_ONE_ONLY (cfun->decl)
9566 && !DECL_WEAK (cfun->decl))
9567 {
9568 output_section_asm_op ("\t.SPACE $TEXT$\n"
9569 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9570 "ACCESS=44,SORT=24,COMDAT");
9571 return;
9572 }
9573 }
9574 else
9575 {
9576 /* There isn't a current function or the body of the current
9577 function has been completed. So, we are changing to the
9578 text section to output debugging information. Thus, we
9579 need to forget that we are in the text section so that
9580 varasm.c will call us when text_section is selected again. */
9581 gcc_assert (!cfun || !cfun->machine
9582 || cfun->machine->in_nsubspa == 2);
9583 in_section = NULL;
9584 }
9585 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9586 return;
9587 }
9588 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9589 }
9590
9591 /* A get_unnamed_section callback for switching to comdat data
9592 sections. This function is only used with SOM. */
9593
9594 static void
9595 som_output_comdat_data_section_asm_op (const void *data)
9596 {
9597 in_section = NULL;
9598 output_section_asm_op (data);
9599 }
9600
9601 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9602
9603 static void
9604 pa_som_asm_init_sections (void)
9605 {
9606 text_section
9607 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9608
9609 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9610 is not being generated. */
9611 som_readonly_data_section
9612 = get_unnamed_section (0, output_section_asm_op,
9613 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9614
9615 /* When secondary definitions are not supported, SOM makes readonly
9616 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9617 the comdat flag. */
9618 som_one_only_readonly_data_section
9619 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9620 "\t.SPACE $TEXT$\n"
9621 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9622 "ACCESS=0x2c,SORT=16,COMDAT");
9623
9625 /* When secondary definitions are not supported, SOM makes data one-only
9626 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9627 som_one_only_data_section
9628 = get_unnamed_section (SECTION_WRITE,
9629 som_output_comdat_data_section_asm_op,
9630 "\t.SPACE $PRIVATE$\n"
9631 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9632 "ACCESS=31,SORT=24,COMDAT");
9633
9634 if (flag_tm)
9635 som_tm_clone_table_section
9636 = get_unnamed_section (0, output_section_asm_op,
9637 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9638
9639 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9640 which reference data within the $TEXT$ space (for example constant
9641 strings in the $LIT$ subspace).
9642
9643 The assemblers (GAS and HP as) both have problems with handling
9644 the difference of two symbols which is the other correct way to
9645 reference constant data during PIC code generation.
9646
9647 So, there's no way to reference constant data which is in the
9648 $TEXT$ space during PIC generation. Instead place all constant
9649 data into the $PRIVATE$ subspace (this reduces sharing, but it
9650 works correctly). */
9651 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9652
9653 /* We must not have a reference to an external symbol defined in a
9654 shared library in a readonly section, else the SOM linker will
9655 complain.
9656
9657 So, we force exception information into the data section. */
9658 exception_section = data_section;
9659 }
9660
9661 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9662
9663 static section *
9664 pa_som_tm_clone_table_section (void)
9665 {
9666 return som_tm_clone_table_section;
9667 }
9668
9669 /* On hpux10, the linker will give an error if we have a reference
9670 in the read-only data section to a symbol defined in a shared
9671 library. Therefore, expressions that might require a reloc can
9672 not be placed in the read-only data section. */
9673
9674 static section *
9675 pa_select_section (tree exp, int reloc,
9676 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9677 {
9678 if (TREE_CODE (exp) == VAR_DECL
9679 && TREE_READONLY (exp)
9680 && !TREE_THIS_VOLATILE (exp)
9681 && DECL_INITIAL (exp)
9682 && (DECL_INITIAL (exp) == error_mark_node
9683 || TREE_CONSTANT (DECL_INITIAL (exp)))
9684 && !reloc)
9685 {
9686 if (TARGET_SOM
9687 && DECL_ONE_ONLY (exp)
9688 && !DECL_WEAK (exp))
9689 return som_one_only_readonly_data_section;
9690 else
9691 return readonly_data_section;
9692 }
9693 else if (CONSTANT_CLASS_P (exp) && !reloc)
9694 return readonly_data_section;
9695 else if (TARGET_SOM
9696 && TREE_CODE (exp) == VAR_DECL
9697 && DECL_ONE_ONLY (exp)
9698 && !DECL_WEAK (exp))
9699 return som_one_only_data_section;
9700 else
9701 return data_section;
9702 }
9703
9704 /* Implement pa_reloc_rw_mask. */
9705
9706 static int
9707 pa_reloc_rw_mask (void)
9708 {
9709 /* We force (const (plus (symbol) (const_int))) to memory when the
9710 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9711 handle this construct in read-only memory and we want to avoid
9712 this for ELF. So, we always force an RTX needing relocation to
9713 the data section. */
9714 return 3;
9715 }
9716
9717 static void
9718 pa_globalize_label (FILE *stream, const char *name)
9719 {
9720 /* We only handle DATA objects here; functions are globalized in
9721 ASM_DECLARE_FUNCTION_NAME. */
9722 if (! FUNCTION_NAME_P (name))
9723 {
9724 fputs ("\t.EXPORT ", stream);
9725 assemble_name (stream, name);
9726 fputs (",DATA\n", stream);
9727 }
9728 }
9729
9730 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9731
9732 static rtx
9733 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9734 int incoming ATTRIBUTE_UNUSED)
9735 {
9736 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9737 }
9738
9739 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9740
9741 bool
9742 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9743 {
9744 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9745 PA64 ABI says that objects larger than 128 bits are returned in memory.
9746 Note, int_size_in_bytes can return -1 if the size of the object is
9747 variable or larger than the maximum value that can be expressed as
9748 a HOST_WIDE_INT. It can also return zero for an empty type. The
9749 simplest way to handle variable and empty types is to pass them in
9750 memory. This avoids problems in defining the boundaries of argument
9751 slots, allocating registers, etc. */
9752 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9753 || int_size_in_bytes (type) <= 0);
9754 }
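/* For example, a 12-byte struct is returned in memory on the 32-bit
   runtime (12 > 8) but in registers on PA64 (12 <= 16), while an
   empty struct (size 0) is forced into memory by the
   int_size_in_bytes (type) <= 0 test.  */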
9755
9756 /* Structure to hold declaration and name of external symbols that are
9757 emitted by GCC. We generate a vector of these symbols and output them
9758 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9759 This avoids putting out names that are never really used. */
9760
9761 typedef struct GTY(()) extern_symbol
9762 {
9763 tree decl;
9764 const char *name;
9765 } extern_symbol;
9766
9767 /* Define gc'd vector type for extern_symbol. */
9768
9769 /* Vector of extern_symbol entries. */
9770 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9771
9772 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9773 /* Mark DECL (name NAME) as an external reference (assembler output
9774 file FILE). This saves the names to output at the end of the file
9775 if actually referenced. */
9776
9777 void
9778 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9779 {
9780 gcc_assert (file == asm_out_file);
9781 extern_symbol p = {decl, name};
9782 vec_safe_push (extern_symbols, p);
9783 }
9784
9785 /* Output text required at the end of an assembler file.
9786 This includes deferred plabels and .import directives for
9787 all external symbols that were actually referenced. */
9788
9789 static void
9790 pa_hpux_file_end (void)
9791 {
9792 unsigned int i;
9793 extern_symbol *p;
9794
9795 if (!NO_DEFERRED_PROFILE_COUNTERS)
9796 output_deferred_profile_counters ();
9797
9798 output_deferred_plabels ();
9799
9800 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9801 {
9802 tree decl = p->decl;
9803
9804 if (!TREE_ASM_WRITTEN (decl)
9805 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9806 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9807 }
9808
9809 vec_free (extern_symbols);
9810 }
9811 #endif
9812
9813 /* Return true if a change from mode FROM to mode TO for a register
9814 in register class RCLASS is invalid. */
9815
9816 bool
9817 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9818 enum reg_class rclass)
9819 {
9820 if (from == to)
9821 return false;
9822
9823 /* Reject changes to/from complex and vector modes. */
9824 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9825 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9826 return true;
9827
9828 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9829 return false;
9830
9831 /* There is no way to load QImode or HImode values directly from
9832 memory. SImode loads to the FP registers are not zero extended.
9833 On the 64-bit target, this conflicts with the definition of
9834 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9835 with different sizes in the floating-point registers. */
9836 if (MAYBE_FP_REG_CLASS_P (rclass))
9837 return true;
9838
9839 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9840 in specific sets of registers. Thus, we cannot allow changing
9841 to a larger mode when it's larger than a word. */
9842 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9843 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9844 return true;
9845
9846 return false;
9847 }
9848
9849 /* Returns TRUE if it is a good idea to tie two pseudo registers
9850 when one has mode MODE1 and one has mode MODE2.
9851 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9852 for any hard reg, then this must be FALSE for correct output.
9853
9854 We should return FALSE for QImode and HImode because these modes
9855 are not ok in the floating-point registers. However, this prevents
9856 tying these modes to SImode and DImode in the general registers.
9857 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9858 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9859 in the floating-point registers. */
9860
9861 bool
9862 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
9863 {
9864 /* Don't tie modes in different classes. */
9865 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9866 return false;
9867
9868 return true;
9869 }
9870
9871 \f
9872 /* Length in units of the trampoline instruction code. */
9873
9874 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
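/* These sizes follow from pa_asm_trampoline_template below: the
   64-bit template executes six 4-byte instructions (24 bytes), and
   the 32-bit figures cover the instruction words plus the template's
   .word padding (an informal reading of the template, not a
   separately documented constant).  */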
9875
9876
9877 /* Output assembler code for a block containing the constant parts
9878 of a trampoline, leaving space for the variable parts.
9879
9880 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9881 and then branches to the specified routine.
9882
9883 This code template is copied from the text segment to a stack
9884 location, patched by pa_trampoline_init to contain valid values,
9885 and then entered as a subroutine.
9886
9887 It is best to keep this as small as possible to avoid having to
9888 flush multiple lines in the cache. */
9889
9890 static void
9891 pa_asm_trampoline_template (FILE *f)
9892 {
9893 if (!TARGET_64BIT)
9894 {
9895 fputs ("\tldw 36(%r22),%r21\n", f);
9896 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9897 if (ASSEMBLER_DIALECT == 0)
9898 fputs ("\tdepi 0,31,2,%r21\n", f);
9899 else
9900 fputs ("\tdepwi 0,31,2,%r21\n", f);
9901 fputs ("\tldw 4(%r21),%r19\n", f);
9902 fputs ("\tldw 0(%r21),%r21\n", f);
9903 if (TARGET_PA_20)
9904 {
9905 fputs ("\tbve (%r21)\n", f);
9906 fputs ("\tldw 40(%r22),%r29\n", f);
9907 fputs ("\t.word 0\n", f);
9908 fputs ("\t.word 0\n", f);
9909 }
9910 else
9911 {
9912 fputs ("\tldsid (%r21),%r1\n", f);
9913 fputs ("\tmtsp %r1,%sr0\n", f);
9914 fputs ("\tbe 0(%sr0,%r21)\n", f);
9915 fputs ("\tldw 40(%r22),%r29\n", f);
9916 }
9917 fputs ("\t.word 0\n", f);
9918 fputs ("\t.word 0\n", f);
9919 fputs ("\t.word 0\n", f);
9920 fputs ("\t.word 0\n", f);
9921 }
9922 else
9923 {
9924 fputs ("\t.dword 0\n", f);
9925 fputs ("\t.dword 0\n", f);
9926 fputs ("\t.dword 0\n", f);
9927 fputs ("\t.dword 0\n", f);
9928 fputs ("\tmfia %r31\n", f);
9929 fputs ("\tldd 24(%r31),%r1\n", f);
9930 fputs ("\tldd 24(%r1),%r27\n", f);
9931 fputs ("\tldd 16(%r1),%r1\n", f);
9932 fputs ("\tbve (%r1)\n", f);
9933 fputs ("\tldd 32(%r31),%r31\n", f);
9934 fputs ("\t.dword 0 ; fptr\n", f);
9935 fputs ("\t.dword 0 ; static link\n", f);
9936 }
9937 }
9938
9939 /* Emit RTL insns to initialize the variable parts of a trampoline.
9940 FNADDR is an RTX for the address of the function's pure code.
9941 CXT is an RTX for the static chain value for the function.
9942
9943 Move the function address to the trampoline template at offset 36.
9944 Move the static chain value to trampoline template at offset 40.
9945 Move the trampoline address to trampoline template at offset 44.
9946 Move r19 to trampoline template at offset 48. The latter two
9947 words create a plabel for the indirect call to the trampoline.
9948
9949 A similar sequence is used for the 64-bit port but the plabel is
9950 at the beginning of the trampoline.
9951
9952 Finally, the cache entries for the trampoline code are flushed.
9953 This is necessary to ensure that the trampoline instruction sequence
9954 is written to memory prior to any attempts at prefetching the code
9955 sequence. */
9956
9957 static void
9958 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
9959 {
9960 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9961 rtx start_addr = gen_reg_rtx (Pmode);
9962 rtx end_addr = gen_reg_rtx (Pmode);
9963 rtx line_length = gen_reg_rtx (Pmode);
9964 rtx r_tramp, tmp;
9965
9966 emit_block_move (m_tramp, assemble_trampoline_template (),
9967 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
9968 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
9969
9970 if (!TARGET_64BIT)
9971 {
9972 tmp = adjust_address (m_tramp, Pmode, 36);
9973 emit_move_insn (tmp, fnaddr);
9974 tmp = adjust_address (m_tramp, Pmode, 40);
9975 emit_move_insn (tmp, chain_value);
9976
9977 /* Create a fat pointer for the trampoline. */
9978 tmp = adjust_address (m_tramp, Pmode, 44);
9979 emit_move_insn (tmp, r_tramp);
9980 tmp = adjust_address (m_tramp, Pmode, 48);
9981 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
9982
9983 /* fdc and fic only use registers for the address to flush;
9984 they do not accept integer displacements. We align the
9985 start and end addresses to the beginning of their respective
9986 cache lines to minimize the number of lines flushed. */
9987 emit_insn (gen_andsi3 (start_addr, r_tramp,
9988 GEN_INT (-MIN_CACHELINE_SIZE)));
9989 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
9990 TRAMPOLINE_CODE_SIZE-1));
9991 emit_insn (gen_andsi3 (end_addr, tmp,
9992 GEN_INT (-MIN_CACHELINE_SIZE)));
9993 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
9994 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
9995 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
9996 gen_reg_rtx (Pmode),
9997 gen_reg_rtx (Pmode)));
9998 }
9999 else
10000 {
10001 tmp = adjust_address (m_tramp, Pmode, 56);
10002 emit_move_insn (tmp, fnaddr);
10003 tmp = adjust_address (m_tramp, Pmode, 64);
10004 emit_move_insn (tmp, chain_value);
10005
10006 /* Create a fat pointer for the trampoline. */
10007 tmp = adjust_address (m_tramp, Pmode, 16);
10008 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10009 r_tramp, 32)));
10010 tmp = adjust_address (m_tramp, Pmode, 24);
10011 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10012
10013 /* fdc and fic only use registers for the address to flush;
10014 they do not accept integer displacements. We align the
10015 start and end addresses to the beginning of their respective
10016 cache lines to minimize the number of lines flushed. */
10017 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10018 emit_insn (gen_anddi3 (start_addr, tmp,
10019 GEN_INT (-MIN_CACHELINE_SIZE)));
10020 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10021 TRAMPOLINE_CODE_SIZE - 1));
10022 emit_insn (gen_anddi3 (end_addr, tmp,
10023 GEN_INT (-MIN_CACHELINE_SIZE)));
10024 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10025 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10026 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10027 gen_reg_rtx (Pmode),
10028 gen_reg_rtx (Pmode)));
10029 }
10030
10031 #ifdef HAVE_ENABLE_EXECUTE_STACK
10032 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10033 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10034 #endif
10035 }
10036
10037 /* Perform any machine-specific adjustment in the address of the trampoline.
10038 ADDR contains the address that was passed to pa_trampoline_init.
10039 Adjust the trampoline address to point to the plabel at offset 44.
The extra 2 in the constant below (44 + 2 = 46) sets bit 30 of the
address, the bit that plabel-aware call sequences test (compare the
bb,>=,n %r21,30 instruction in the template above) to recognize the
pointer as a plabel. */
10040
10041 static rtx
10042 pa_trampoline_adjust_address (rtx addr)
10043 {
10044 if (!TARGET_64BIT)
10045 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10046 return addr;
10047 }
10048
10049 static rtx
10050 pa_delegitimize_address (rtx orig_x)
10051 {
10052 rtx x = delegitimize_mem_from_attrs (orig_x);
10053
10054 if (GET_CODE (x) == LO_SUM
10055 && GET_CODE (XEXP (x, 1)) == UNSPEC
10056 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10057 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10058 return x;
10059 }
10060 \f
10061 static rtx
10062 pa_internal_arg_pointer (void)
10063 {
10064 /* The argument pointer and the hard frame pointer are the same in
10065 the 32-bit runtime, so we don't need a copy. */
10066 if (TARGET_64BIT)
10067 return copy_to_reg (virtual_incoming_args_rtx);
10068 else
10069 return virtual_incoming_args_rtx;
10070 }
10071
10072 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10073 Frame pointer elimination is automatically handled. */
10074
10075 static bool
10076 pa_can_eliminate (const int from, const int to)
10077 {
10078 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10079 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10080 return false;
10081
10082 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10083 ? ! frame_pointer_needed
10084 : true);
10085 }
10086
10087 /* Define the offset between two registers, FROM to be eliminated and its
10088 replacement TO, at the start of a routine. */
10089 HOST_WIDE_INT
10090 pa_initial_elimination_offset (int from, int to)
10091 {
10092 HOST_WIDE_INT offset;
10093
10094 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10095 && to == STACK_POINTER_REGNUM)
10096 offset = -pa_compute_frame_size (get_frame_size (), 0);
10097 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10098 offset = 0;
10099 else
10100 gcc_unreachable ();
10101
10102 return offset;
10103 }
10104
10105 static void
10106 pa_conditional_register_usage (void)
10107 {
10108 int i;
10109
10110 if (!TARGET_64BIT && !TARGET_PA_11)
10111 {
10112 for (i = 56; i <= FP_REG_LAST; i++)
10113 fixed_regs[i] = call_used_regs[i] = 1;
10114 for (i = 33; i < 56; i += 2)
10115 fixed_regs[i] = call_used_regs[i] = 1;
10116 }
10117 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10118 {
10119 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10120 fixed_regs[i] = call_used_regs[i] = 1;
10121 }
10122 if (flag_pic)
10123 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10124 }
10125
10126 /* Target hook for c_mode_for_suffix. */
10127
10128 static machine_mode
10129 pa_c_mode_for_suffix (char suffix)
10130 {
10131 if (HPUX_LONG_DOUBLE_LIBRARY)
10132 {
10133 if (suffix == 'q')
10134 return TFmode;
10135 }
10136
10137 return VOIDmode;
10138 }
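/* With this hook, a literal such as 1.5q in C source is given TFmode
   (the 128-bit long double format) when the HP-UX long double
   library is in use; otherwise VOIDmode is returned and the 'q'
   suffix is rejected as usual.  */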
10139
10140 /* Target hook for function_section. */
10141
10142 static section *
10143 pa_function_section (tree decl, enum node_frequency freq,
10144 bool startup, bool exit)
10145 {
10146 /* Put functions in text section if target doesn't have named sections. */
10147 if (!targetm_common.have_named_sections)
10148 return text_section;
10149
10150 /* Force nested functions into the same section as the containing
10151 function. */
10152 if (decl
10153 && DECL_SECTION_NAME (decl) == NULL
10154 && DECL_CONTEXT (decl) != NULL_TREE
10155 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10156 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10157 return function_section (DECL_CONTEXT (decl));
10158
10159 /* Otherwise, use the default function section. */
10160 return default_function_section (decl, freq, startup, exit);
10161 }
10162
10163 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10164
10165 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10166 that need more than three instructions to load prior to reload. This
10167 limit is somewhat arbitrary. It takes three instructions to load a
10168 CONST_INT from memory but two are memory accesses. It may be better
10169 to increase the allowed range for CONST_INTS. We may also be able
10170 to handle CONST_DOUBLES. */
10171
10172 static bool
10173 pa_legitimate_constant_p (machine_mode mode, rtx x)
10174 {
10175 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10176 return false;
10177
10178 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10179 return false;
10180
10181 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10182 legitimate constants. The other variants can't be handled by
10183 the move patterns after reload starts. */
10184 if (tls_referenced_p (x))
10185 return false;
10186
10187 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10188 return false;
10189
10190 if (TARGET_64BIT
10191 && HOST_BITS_PER_WIDE_INT > 32
10192 && GET_CODE (x) == CONST_INT
10193 && !reload_in_progress
10194 && !reload_completed
10195 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10196 && !pa_cint_ok_for_move (INTVAL (x)))
10197 return false;
10198
10199 if (function_label_operand (x, mode))
10200 return false;
10201
10202 return true;
10203 }
10204
10205 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10206
10207 static unsigned int
10208 pa_section_type_flags (tree decl, const char *name, int reloc)
10209 {
10210 unsigned int flags;
10211
10212 flags = default_section_type_flags (decl, name, reloc);
10213
10214 /* Function labels are placed in the constant pool. This can
10215 cause a section conflict if decls are put in ".data.rel.ro"
10216 or ".data.rel.ro.local" using the __attribute__ construct. */
10217 if (strcmp (name, ".data.rel.ro") == 0
10218 || strcmp (name, ".data.rel.ro.local") == 0)
10219 flags |= SECTION_WRITE | SECTION_RELRO;
10220
10221 return flags;
10222 }
10223
10224 /* pa_legitimate_address_p recognizes an RTL expression that is a
10225 valid memory address for an instruction. The MODE argument is the
10226 machine mode for the MEM expression that wants to use this address.
10227
10228 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10229 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10230 available with floating point loads and stores, and integer loads.
10231 We get better code by allowing indexed addresses in the initial
10232 RTL generation.
10233
10234 The acceptance of indexed addresses as legitimate implies that we
10235 must provide patterns for doing indexed integer stores, or the move
10236 expanders must force the address of an indexed store to a register.
10237 We have adopted the latter approach.
10238
10239 Another function of pa_legitimate_address_p is to ensure that
10240 the base register is a valid pointer for indexed instructions.
10241 On targets that have non-equivalent space registers, we have to
10242 know at the time of assembler output which register in a REG+REG
10243 pair is the base register. The REG_POINTER flag is sometimes lost
10244 in reload and the following passes, so it can't be relied on during
10245 code generation. Thus, we either have to canonicalize the order
10246 of the registers in REG+REG indexed addresses, or treat REG+REG
10247 addresses separately and provide patterns for both permutations.
10248
10249 The latter approach requires several hundred additional lines of
10250 code in pa.md. The downside to canonicalizing is that a PLUS
10251 in the wrong order can't combine to form a scaled indexed
10252 memory operand. As we won't need to canonicalize the operands if
10253 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10254
10255 We initially break out scaled indexed addresses in canonical order
10256 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10257 scaled indexed addresses during RTL generation. However, fold_rtx
10258 has its own opinion on how the operands of a PLUS should be ordered.
10259 If one of the operands is equivalent to a constant, it will make
10260 that operand the second operand. As the base register is likely to
10261 be equivalent to a SYMBOL_REF, we have made it the second operand.
10262
10263 pa_legitimate_address_p accepts REG+REG as legitimate when the
10264 operands are in the order INDEX+BASE on targets with non-equivalent
10265 space registers, and in any order on targets with equivalent space
10266 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10267
10268 We treat a SYMBOL_REF as legitimate if it is part of the current
10269 function's constant-pool, because such addresses can actually be
10270 output as REG+SMALLINT. */
10271
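/* In RTL terms, the legitimate forms discussed above look like:

     (plus (reg base) (const_int 40))                REG+SMALLINT
     (plus (reg index) (reg base))                   REG+REG
     (plus (mult (reg index) (const_int 4)) (reg))   REG+(REG*SCALE)

   where the scale constant must equal the mode size, as the MULT
   case below checks.  */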
10272 static bool
10273 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10274 {
10275 if ((REG_P (x)
10276 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10277 : REG_OK_FOR_BASE_P (x)))
10278 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10279 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10280 && REG_P (XEXP (x, 0))
10281 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10282 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10283 return true;
10284
10285 if (GET_CODE (x) == PLUS)
10286 {
10287 rtx base, index;
10288
10289 /* For REG+REG, the base register should be in XEXP (x, 1),
10290 so check it first. */
10291 if (REG_P (XEXP (x, 1))
10292 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10293 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10294 base = XEXP (x, 1), index = XEXP (x, 0);
10295 else if (REG_P (XEXP (x, 0))
10296 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10297 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10298 base = XEXP (x, 0), index = XEXP (x, 1);
10299 else
10300 return false;
10301
10302 if (GET_CODE (index) == CONST_INT)
10303 {
10304 if (INT_5_BITS (index))
10305 return true;
10306
10307 /* When INT14_OK_STRICT is false, a secondary reload is needed
10308 to adjust the displacement of SImode and DImode floating point
10309 instructions but this may fail when the register also needs
10310 reloading. So, we return false when STRICT is true. We
10311 also reject long displacements for float mode addresses since
10312 the majority of accesses will use floating point instructions
10313 that don't support 14-bit offsets. */
10314 if (!INT14_OK_STRICT
10315 && (strict || !(reload_in_progress || reload_completed))
10316 && mode != QImode
10317 && mode != HImode)
10318 return false;
10319
10320 return base14_operand (index, mode);
10321 }
10322
10323 if (!TARGET_DISABLE_INDEXING
10324 /* Only accept the "canonical" INDEX+BASE operand order
10325 on targets with non-equivalent space registers. */
10326 && (TARGET_NO_SPACE_REGS
10327 ? REG_P (index)
10328 : (base == XEXP (x, 1) && REG_P (index)
10329 && (reload_completed
10330 || (reload_in_progress && HARD_REGISTER_P (base))
10331 || REG_POINTER (base))
10332 && (reload_completed
10333 || (reload_in_progress && HARD_REGISTER_P (index))
10334 || !REG_POINTER (index))))
10335 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10336 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10337 : REG_OK_FOR_INDEX_P (index))
10338 && borx_reg_operand (base, Pmode)
10339 && borx_reg_operand (index, Pmode))
10340 return true;
10341
10342 if (!TARGET_DISABLE_INDEXING
10343 && GET_CODE (index) == MULT
10344 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10345 && REG_P (XEXP (index, 0))
10346 && GET_MODE (XEXP (index, 0)) == Pmode
10347 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10348 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10349 && GET_CODE (XEXP (index, 1)) == CONST_INT
10350 && INTVAL (XEXP (index, 1))
10351 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10352 && borx_reg_operand (base, Pmode))
10353 return true;
10354
10355 return false;
10356 }
10357
10358 if (GET_CODE (x) == LO_SUM)
10359 {
10360 rtx y = XEXP (x, 0);
10361
10362 if (GET_CODE (y) == SUBREG)
10363 y = SUBREG_REG (y);
10364
10365 if (REG_P (y)
10366 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10367 : REG_OK_FOR_BASE_P (y)))
10368 {
10369 /* Needed for -fPIC */
10370 if (mode == Pmode
10371 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10372 return true;
10373
10374 if (!INT14_OK_STRICT
10375 && (strict || !(reload_in_progress || reload_completed))
10376 && mode != QImode
10377 && mode != HImode)
10378 return false;
10379
10380 if (CONSTANT_P (XEXP (x, 1)))
10381 return true;
10382 }
10383 return false;
10384 }
10385
10386 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10387 return true;
10388
10389 return false;
10390 }
10391
10392 /* Look for machine dependent ways to make the invalid address AD a
10393 valid address.
10394
10395 For the PA, transform:
10396
10397 memory(X + <large int>)
10398
10399 into:
10400
10401 if (<large int> & mask) >= (mask + 1) / 2
10402 Y = (<large int> & ~mask) + mask + 1 Round up.
10403 else
10404 Y = (<large int> & ~mask) Round down.
10405 Z = X + Y
10406 memory (Z + (<large int> - Y));
10407
10408 This makes reload inheritance and reload_cse work better since Z
10409 can be reused.
10410
10411 There may be more opportunities to improve code with this hook. */
10412
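/* Worked example (float-mode case, where the mask is 0x1f): for
   memory (X + 61), 61 & 0x1f is 29, which is >= 16, so we round up
   to Y = 64 and form memory ((X + 64) + -3); the residual -3 fits
   the 5-bit displacement of the FP loads and stores.  For
   memory (X + 291), 291 & 0x1f is 3, so we round down to Y = 288
   with residual 3.  The reload register holding X + Y can then be
   inherited by nearby references with similar offsets.  */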
10413 rtx
10414 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10415 int opnum, int type,
10416 int ind_levels ATTRIBUTE_UNUSED)
10417 {
10418 long offset, newoffset, mask;
10419 rtx new_rtx, temp = NULL_RTX;
10420
10421 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10422 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10423
10424 if (optimize && GET_CODE (ad) == PLUS)
10425 temp = simplify_binary_operation (PLUS, Pmode,
10426 XEXP (ad, 0), XEXP (ad, 1));
10427
10428 new_rtx = temp ? temp : ad;
10429
10430 if (optimize
10431 && GET_CODE (new_rtx) == PLUS
10432 && GET_CODE (XEXP (new_rtx, 0)) == REG
10433 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10434 {
10435 offset = INTVAL (XEXP ((new_rtx), 1));
10436
10437 /* Choose rounding direction. Round up if we are >= halfway. */
10438 if ((offset & mask) >= ((mask + 1) / 2))
10439 newoffset = (offset & ~mask) + mask + 1;
10440 else
10441 newoffset = offset & ~mask;
10442
10443 /* Ensure that long displacements are aligned. */
10444 if (mask == 0x3fff
10445 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10446 || (TARGET_64BIT && (mode) == DImode)))
10447 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10448
10449 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10450 {
10451 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10452 GEN_INT (newoffset));
10453 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10454 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10455 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10456 opnum, (enum reload_type) type);
10457 return ad;
10458 }
10459 }
10460
10461 return NULL_RTX;
10462 }
10463
10464 /* Output address vector. */
10465
10466 void
10467 pa_output_addr_vec (rtx lab, rtx body)
10468 {
10469 int idx, vlen = XVECLEN (body, 0);
10470
10471 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10472 if (TARGET_GAS)
10473 fputs ("\t.begin_brtab\n", asm_out_file);
10474 for (idx = 0; idx < vlen; idx++)
10475 {
10476 ASM_OUTPUT_ADDR_VEC_ELT
10477 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10478 }
10479 if (TARGET_GAS)
10480 fputs ("\t.end_brtab\n", asm_out_file);
10481 }
10482
10483 /* Output address difference vector. */
10484
10485 void
10486 pa_output_addr_diff_vec (rtx lab, rtx body)
10487 {
10488 rtx base = XEXP (XEXP (body, 0), 0);
10489 int idx, vlen = XVECLEN (body, 1);
10490
10491 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10492 if (TARGET_GAS)
10493 fputs ("\t.begin_brtab\n", asm_out_file);
10494 for (idx = 0; idx < vlen; idx++)
10495 {
10496 ASM_OUTPUT_ADDR_DIFF_ELT
10497 (asm_out_file,
10498 body,
10499 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10500 CODE_LABEL_NUMBER (base));
10501 }
10502 if (TARGET_GAS)
10503 fputs ("\t.end_brtab\n", asm_out_file);
10504 }
10505
10506 #include "gt-pa.h"