/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  VEC(cl_deferred_option,heap) *vec
    = (VEC(cl_deferred_option,heap) *) pa_deferred_options;

  FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
    {
      switch (opt->opt_index)
        {
        case OPT_mfixed_range_:
          fix_range (opt->arg);
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
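
/* Editorial worked example (illustrative, not from the original source):
   for ival = 0x12345800, the low 11 bits are zero and bit 31 is clear,
   so ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff) == 0 and ldil can load
   the value in one insn.  ival = 0x12345801 fails (nonzero low bits), as
   does 0x80000000 on a 64-bit host (sign changes on 32->64 extension).  */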

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we can't afford the time a loop over the
     candidate deposits would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
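
/* Editorial worked example (illustrative, not from the original source):
   for x = 0x00ff0000, lsb_mask = 0x00010000 and
   t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1)
     = (0x000ff000 + 0x00010000) & ~0xffff = 0x00100000,
   a power of two, so the function returns true: zdepi can deposit the
   5-bit immediate 0b11111, sign-extended to a field length of 8, at bit
   position 16 to produce the value.  */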

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
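
/* Editorial examples (illustrative, not from the original source):
   mask = 0x7 (0...01...1) is accepted, since ~mask plus its lowest set
   bit wraps to zero; on a 64-bit host mask = 0xffffffffffffff00
   (1...10...0) is likewise accepted.  mask = 0xfff0 (0..01..10..0) is
   rejected because the complement-plus-carry is not a power of two.  */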

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
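
/* Editorial example (illustrative, not from the original source):
   mask = 0x00ff0000, a single contiguous field of ones that depi can
   set, is accepted; mask = 0x00ff00ff is rejected because the ones are
   not contiguous.  */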
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= (mask + 1) / 2
          Y = (<large int> & ~mask) + mask + 1      Round up.
        else
          Y = (<large int> & ~mask)                 Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
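
/* Editorial worked example (illustrative, not from the original source):
   for a MODE_INT access to memory (X + 0x12345), mask = 0x3fff and
   0x12345 & 0x3fff = 0x2345 >= 0x2000, so we round up:
   Y = (0x12345 & ~0x3fff) + 0x4000 = 0x14000.  We compute Z = X + 0x14000
   and emit memory (Z - 0x1cbb), whose displacement fits in 14 bits;
   other references near X + 0x14000 can then share the same Z via CSE.  */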

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
1609 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1610 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1611 SUBREG_BYTE (operand1));
1612 operand1 = alter_subreg (&temp);
1613 }
1614
1615 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1616 && ((tem = find_replacement (&XEXP (operand0, 0)))
1617 != XEXP (operand0, 0)))
1618 operand0 = replace_equiv_address (operand0, tem);
1619
1620 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1621 && ((tem = find_replacement (&XEXP (operand1, 0)))
1622 != XEXP (operand1, 0)))
1623 operand1 = replace_equiv_address (operand1, tem);
1624
1625 /* Handle secondary reloads for loads/stores of FP registers from
1626 REG+D addresses where D does not fit in 5 or 14 bits, including
1627 (subreg (mem (addr))) cases. */
1628 if (scratch_reg
1629 && fp_reg_operand (operand0, mode)
1630 && ((GET_CODE (operand1) == MEM
1631 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1632 XEXP (operand1, 0)))
1633 || ((GET_CODE (operand1) == SUBREG
1634 && GET_CODE (XEXP (operand1, 0)) == MEM
1635 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1636 ? SFmode : DFmode),
1637 XEXP (XEXP (operand1, 0), 0))))))
1638 {
1639 if (GET_CODE (operand1) == SUBREG)
1640 operand1 = XEXP (operand1, 0);
1641
1642 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1643 it in WORD_MODE regardless of what mode it was originally given
1644 to us. */
1645 scratch_reg = force_mode (word_mode, scratch_reg);
1646
1647 /* D might not fit in 14 bits either; for such cases load D into
1648 scratch reg. */
1649 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1650 {
1651 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1652 emit_move_insn (scratch_reg,
1653 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1654 Pmode,
1655 XEXP (XEXP (operand1, 0), 0),
1656 scratch_reg));
1657 }
1658 else
1659 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1660 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1661 replace_equiv_address (operand1, scratch_reg)));
1662 return 1;
1663 }
1664 else if (scratch_reg
1665 && fp_reg_operand (operand1, mode)
1666 && ((GET_CODE (operand0) == MEM
1667 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1668 ? SFmode : DFmode),
1669 XEXP (operand0, 0)))
1670 || ((GET_CODE (operand0) == SUBREG)
1671 && GET_CODE (XEXP (operand0, 0)) == MEM
1672 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1673 ? SFmode : DFmode),
1674 XEXP (XEXP (operand0, 0), 0)))))
1675 {
1676 if (GET_CODE (operand0) == SUBREG)
1677 operand0 = XEXP (operand0, 0);
1678
1679 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1680 it in WORD_MODE regardless of what mode it was originally given
1681 to us. */
1682 scratch_reg = force_mode (word_mode, scratch_reg);
1683
1684 /* D might not fit in 14 bits either; for such cases load D into
1685 scratch reg. */
1686 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1687 {
1688 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1689 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1690 0)),
1691 Pmode,
1692 XEXP (XEXP (operand0, 0),
1693 0),
1694 scratch_reg));
1695 }
1696 else
1697 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1698 emit_insn (gen_rtx_SET (VOIDmode,
1699 replace_equiv_address (operand0, scratch_reg),
1700 operand1));
1701 return 1;
1702 }
1703 /* Handle secondary reloads for loads of FP registers from constant
1704 expressions by forcing the constant into memory.
1705
1706 Use scratch_reg to hold the address of the memory location.
1707
1708 The proper fix is to change TARGET_PREFERRED_RELOAD_CLASS to return
1709 NO_REGS when presented with a const_int and a register class
1710 containing only FP registers. Doing so unfortunately creates
1711 more problems than it solves. Fix this for 2.5. */
1712 else if (scratch_reg
1713 && CONSTANT_P (operand1)
1714 && fp_reg_operand (operand0, mode))
1715 {
1716 rtx const_mem, xoperands[2];
1717
1718 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1719 it in WORD_MODE regardless of what mode it was originally given
1720 to us. */
1721 scratch_reg = force_mode (word_mode, scratch_reg);
1722
1723 /* Force the constant into memory and put the address of the
1724 memory location into scratch_reg. */
1725 const_mem = force_const_mem (mode, operand1);
1726 xoperands[0] = scratch_reg;
1727 xoperands[1] = XEXP (const_mem, 0);
1728 pa_emit_move_sequence (xoperands, Pmode, 0);
1729
1730 /* Now load the destination register. */
1731 emit_insn (gen_rtx_SET (mode, operand0,
1732 replace_equiv_address (const_mem, scratch_reg)));
1733 return 1;
1734 }
1735 /* Handle secondary reloads for SAR. These occur when trying to load
1736 the SAR from memory or a constant. */
1737 else if (scratch_reg
1738 && GET_CODE (operand0) == REG
1739 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1740 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1741 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1742 {
1743 /* D might not fit in 14 bits either; for such cases load D into
1744 scratch reg. */
1745 if (GET_CODE (operand1) == MEM
1746 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1747 {
1748 /* We are reloading the address into the scratch register, so we
1749 want to make sure the scratch register is a full register. */
1750 scratch_reg = force_mode (word_mode, scratch_reg);
1751
1752 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1753 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1754 0)),
1755 Pmode,
1756 XEXP (XEXP (operand1, 0),
1757 0),
1758 scratch_reg));
1759
1760 /* Now we are going to load the scratch register from memory,
1761 we want to load it in the same width as the original MEM,
1762 which must be the same as the width of the ultimate destination,
1763 OPERAND0. */
1764 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1765
1766 emit_move_insn (scratch_reg,
1767 replace_equiv_address (operand1, scratch_reg));
1768 }
1769 else
1770 {
1771 /* We want to load the scratch register using the same mode as
1772 the ultimate destination. */
1773 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1774
1775 emit_move_insn (scratch_reg, operand1);
1776 }
1777
1778 /* And emit the insn to set the ultimate destination. We know that
1779 the scratch register has the same mode as the destination at this
1780 point. */
1781 emit_move_insn (operand0, scratch_reg);
1782 return 1;
1783 }
1784 /* Handle the most common case: storing into a register. */
1785 else if (register_operand (operand0, mode))
1786 {
1787 /* Legitimize TLS symbol references. This happens for references
1788 that aren't legitimate constants. */
1789 if (PA_SYMBOL_REF_TLS_P (operand1))
1790 operand1 = legitimize_tls_address (operand1);
1791
1792 if (register_operand (operand1, mode)
1793 || (GET_CODE (operand1) == CONST_INT
1794 && pa_cint_ok_for_move (INTVAL (operand1)))
1795 || (operand1 == CONST0_RTX (mode))
1796 || (GET_CODE (operand1) == HIGH
1797 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1798 /* Only `general_operands' can come here, so MEM is ok. */
1799 || GET_CODE (operand1) == MEM)
1800 {
1801 /* Various sets are created during RTL generation which don't
1802 have the REG_POINTER flag correctly set. After the CSE pass,
1803 instruction recognition can fail if we don't consistently
1804 set this flag when performing register copies. This should
1805 also improve the opportunities for creating insns that use
1806 unscaled indexing. */
1807 if (REG_P (operand0) && REG_P (operand1))
1808 {
1809 if (REG_POINTER (operand1)
1810 && !REG_POINTER (operand0)
1811 && !HARD_REGISTER_P (operand0))
1812 copy_reg_pointer (operand0, operand1);
1813 }
1814
1815 /* When MEMs are broken out, the REG_POINTER flag doesn't
1816 get set. In some cases, we can set the REG_POINTER flag
1817 from the declaration for the MEM. */
1818 if (REG_P (operand0)
1819 && GET_CODE (operand1) == MEM
1820 && !REG_POINTER (operand0))
1821 {
1822 tree decl = MEM_EXPR (operand1);
1823
1824 /* Set the register pointer flag and register alignment
1825 if the declaration for this memory reference is a
1826 pointer type. */
1827 if (decl)
1828 {
1829 tree type;
1830
1831 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1832 tree operand 1. */
1833 if (TREE_CODE (decl) == COMPONENT_REF)
1834 decl = TREE_OPERAND (decl, 1);
1835
1836 type = TREE_TYPE (decl);
1837 type = strip_array_types (type);
1838
1839 if (POINTER_TYPE_P (type))
1840 {
1841 int align;
1842
1843 type = TREE_TYPE (type);
1844 /* Using TYPE_ALIGN_OK is rather conservative as
1845 only the Ada frontend actually sets it. */
1846 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1847 : BITS_PER_UNIT);
1848 mark_reg_pointer (operand0, align);
1849 }
1850 }
1851 }
1852
1853 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1854 return 1;
1855 }
1856 }
1857 else if (GET_CODE (operand0) == MEM)
1858 {
1859 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1860 && !(reload_in_progress || reload_completed))
1861 {
1862 rtx temp = gen_reg_rtx (DFmode);
1863
1864 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1865 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1866 return 1;
1867 }
1868 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1869 {
1870 /* Run this case quickly. */
1871 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1872 return 1;
1873 }
1874 if (! (reload_in_progress || reload_completed))
1875 {
1876 operands[0] = validize_mem (operand0);
1877 operands[1] = operand1 = force_reg (mode, operand1);
1878 }
1879 }
1880
1881 /* Simplify the source if we need to.
1882 Note we do have to handle function labels here, even though we do
1883 not consider them legitimate constants. Loop optimizations can
1884 call the emit_move_xxx routines with one as a source. */
1885 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1886 || function_label_operand (operand1, VOIDmode)
1887 || (GET_CODE (operand1) == HIGH
1888 && symbolic_operand (XEXP (operand1, 0), mode)))
1889 {
1890 int ishighonly = 0;
1891
1892 if (GET_CODE (operand1) == HIGH)
1893 {
1894 ishighonly = 1;
1895 operand1 = XEXP (operand1, 0);
1896 }
1897 if (symbolic_operand (operand1, mode))
1898 {
1899 /* Argh. The assembler and linker can't handle arithmetic
1900 involving plabels.
1901
1902 So we force the plabel into memory, load operand0 from
1903 the memory location, then add in the constant part. */
1904 if ((GET_CODE (operand1) == CONST
1905 && GET_CODE (XEXP (operand1, 0)) == PLUS
1906 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1907 VOIDmode))
1908 || function_label_operand (operand1, VOIDmode))
1909 {
1910 rtx temp, const_part;
1911
1912 /* Figure out what (if any) scratch register to use. */
1913 if (reload_in_progress || reload_completed)
1914 {
1915 scratch_reg = scratch_reg ? scratch_reg : operand0;
1916 /* SCRATCH_REG will hold an address and maybe the actual
1917 data. We want it in WORD_MODE regardless of what mode it
1918 was originally given to us. */
1919 scratch_reg = force_mode (word_mode, scratch_reg);
1920 }
1921 else if (flag_pic)
1922 scratch_reg = gen_reg_rtx (Pmode);
1923
1924 if (GET_CODE (operand1) == CONST)
1925 {
1926 /* Save away the constant part of the expression. */
1927 const_part = XEXP (XEXP (operand1, 0), 1);
1928 gcc_assert (GET_CODE (const_part) == CONST_INT);
1929
1930 /* Force the function label into memory. */
1931 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1932 }
1933 else
1934 {
1935 /* No constant part. */
1936 const_part = NULL_RTX;
1937
1938 /* Force the function label into memory. */
1939 temp = force_const_mem (mode, operand1);
1940 }
1941
1942
1943 /* Get the address of the memory location. PIC-ify it if
1944 necessary. */
1945 temp = XEXP (temp, 0);
1946 if (flag_pic)
1947 temp = legitimize_pic_address (temp, mode, scratch_reg);
1948
1949 /* Put the address of the memory location into our destination
1950 register. */
1951 operands[1] = temp;
1952 pa_emit_move_sequence (operands, mode, scratch_reg);
1953
1954 /* Now load from the memory location into our destination
1955 register. */
1956 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1957 pa_emit_move_sequence (operands, mode, scratch_reg);
1958
1959 /* And add back in the constant part. */
1960 if (const_part != NULL_RTX)
1961 expand_inc (operand0, const_part);
1962
1963 return 1;
1964 }
1965
1966 if (flag_pic)
1967 {
1968 rtx temp;
1969
1970 if (reload_in_progress || reload_completed)
1971 {
1972 temp = scratch_reg ? scratch_reg : operand0;
1973 /* TEMP will hold an address and maybe the actual
1974 data. We want it in WORD_MODE regardless of what mode it
1975 was originally given to us. */
1976 temp = force_mode (word_mode, temp);
1977 }
1978 else
1979 temp = gen_reg_rtx (Pmode);
1980
1981 /* (const (plus (symbol) (const_int))) must be forced to
1982 memory during/after reload if the const_int will not fit
1983 in 14 bits. */
1984 if (GET_CODE (operand1) == CONST
1985 && GET_CODE (XEXP (operand1, 0)) == PLUS
1986 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1987 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1988 && (reload_completed || reload_in_progress)
1989 && flag_pic)
1990 {
1991 rtx const_mem = force_const_mem (mode, operand1);
1992 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
1993 mode, temp);
1994 operands[1] = replace_equiv_address (const_mem, operands[1]);
1995 pa_emit_move_sequence (operands, mode, temp);
1996 }
1997 else
1998 {
1999 operands[1] = legitimize_pic_address (operand1, mode, temp);
2000 if (REG_P (operand0) && REG_P (operands[1]))
2001 copy_reg_pointer (operand0, operands[1]);
2002 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2003 }
2004 }
2005 /* On the HPPA, references to data space are supposed to use dp,
2006 register 27, but showing it in the RTL inhibits various cse
2007 and loop optimizations. */
2008 else
2009 {
2010 rtx temp, set;
2011
2012 if (reload_in_progress || reload_completed)
2013 {
2014 temp = scratch_reg ? scratch_reg : operand0;
2015 /* TEMP will hold an address and maybe the actual
2016 data. We want it in WORD_MODE regardless of what mode it
2017 was originally given to us. */
2018 temp = force_mode (word_mode, temp);
2019 }
2020 else
2021 temp = gen_reg_rtx (mode);
2022
2023 /* Loading a SYMBOL_REF into a register makes that register
2024 safe to be used as the base in an indexed address.
2025
2026 Don't mark hard registers though. That loses. */
2027 if (GET_CODE (operand0) == REG
2028 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2029 mark_reg_pointer (operand0, BITS_PER_UNIT);
2030 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2031 mark_reg_pointer (temp, BITS_PER_UNIT);
2032
2033 if (ishighonly)
2034 set = gen_rtx_SET (mode, operand0, temp);
2035 else
2036 set = gen_rtx_SET (VOIDmode,
2037 operand0,
2038 gen_rtx_LO_SUM (mode, temp, operand1));
2039
2040 emit_insn (gen_rtx_SET (VOIDmode,
2041 temp,
2042 gen_rtx_HIGH (mode, operand1)));
2043 emit_insn (set);
2044
2045 }
2046 return 1;
2047 }
2048 else if (pa_tls_referenced_p (operand1))
2049 {
2050 rtx tmp = operand1;
2051 rtx addend = NULL;
2052
2053 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2054 {
2055 addend = XEXP (XEXP (tmp, 0), 1);
2056 tmp = XEXP (XEXP (tmp, 0), 0);
2057 }
2058
2059 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2060 tmp = legitimize_tls_address (tmp);
2061 if (addend)
2062 {
2063 tmp = gen_rtx_PLUS (mode, tmp, addend);
2064 tmp = force_operand (tmp, operands[0]);
2065 }
2066 operands[1] = tmp;
2067 }
2068 else if (GET_CODE (operand1) != CONST_INT
2069 || !pa_cint_ok_for_move (INTVAL (operand1)))
2070 {
2071 rtx insn, temp;
2072 rtx op1 = operand1;
2073 HOST_WIDE_INT value = 0;
2074 HOST_WIDE_INT insv = 0;
2075 int insert = 0;
2076
2077 if (GET_CODE (operand1) == CONST_INT)
2078 value = INTVAL (operand1);
2079
2080 if (TARGET_64BIT
2081 && GET_CODE (operand1) == CONST_INT
2082 && HOST_BITS_PER_WIDE_INT > 32
2083 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2084 {
2085 HOST_WIDE_INT nval;
2086
2087 /* Extract the low order 32 bits of the value and sign extend.
2088 If the new value is the same as the original value, we can
2089 use the original value as-is. If the new value is
2090 different, we use it and insert the most significant 32 bits
2091 of the original value into the final result. */
2092 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2093 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2094 if (value != nval)
2095 {
2096 #if HOST_BITS_PER_WIDE_INT > 32
2097 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2098 #endif
2099 insert = 1;
2100 value = nval;
2101 operand1 = GEN_INT (nval);
2102 }
2103 }
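  /* For example, if VALUE is 0x123456789, the sign-extended low-order
     32 bits are 0x23456789, which differs from VALUE, so INSV is set
     to 0x1 (the upper 32 bits) and the low part is loaded first.  For
     VALUE == 0xffffffff80000000, sign extension reproduces VALUE
     exactly and no insertion is needed.  */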
2104
2105 if (reload_in_progress || reload_completed)
2106 temp = scratch_reg ? scratch_reg : operand0;
2107 else
2108 temp = gen_reg_rtx (mode);
2109
2110 /* We don't directly split DImode constants on 32-bit targets
2111 because PLUS uses an 11-bit immediate and the insn sequence
2112 generated is not as efficient as the one using HIGH/LO_SUM. */
2113 if (GET_CODE (operand1) == CONST_INT
2114 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2115 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2116 && !insert)
2117 {
2118 /* Directly break constant into high and low parts. This
2119 provides better optimization opportunities because various
2120 passes recognize constants split with PLUS but not LO_SUM.
2121 We use a 14-bit signed low part except when the addition
2122 of 0x4000 to the high part might change the sign of the
2123 high part. */
2124 HOST_WIDE_INT low = value & 0x3fff;
2125 HOST_WIDE_INT high = value & ~ 0x3fff;
2126
2127 if (low >= 0x2000)
2128 {
2129 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2130 high += 0x2000;
2131 else
2132 high += 0x4000;
2133 }
2134
2135 low = value - high;
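      /* For example, VALUE == 0x12345678 splits into HIGH == 0x12344000
	 and LOW == 0x1678, while VALUE == 0x12343fff has an initial low
	 part of 0x3fff, so HIGH is bumped to 0x12344000 and LOW becomes
	 -1, which still fits in the signed 14-bit field.  */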
2136
2137 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2138 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2139 }
2140 else
2141 {
2142 emit_insn (gen_rtx_SET (VOIDmode, temp,
2143 gen_rtx_HIGH (mode, operand1)));
2144 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2145 }
2146
2147 insn = emit_move_insn (operands[0], operands[1]);
2148
2149 /* Now insert the most significant 32 bits of the value
2150 into the register. When we don't have a second register
2151 available, it could take up to nine instructions to load
2152 a 64-bit integer constant. Prior to reload, we force
2153 constants that would take more than three instructions
2154 to load to the constant pool. During and after reload,
2155 we have to handle all possible values. */
2156 if (insert)
2157 {
2158 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2159 register and the value to be inserted is outside the
2160 range that can be loaded with three depdi instructions. */
2161 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2162 {
2163 operand1 = GEN_INT (insv);
2164
2165 emit_insn (gen_rtx_SET (VOIDmode, temp,
2166 gen_rtx_HIGH (mode, operand1)));
2167 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2168 emit_insn (gen_insv (operand0, GEN_INT (32),
2169 const0_rtx, temp));
2170 }
2171 else
2172 {
2173 int len = 5, pos = 27;
2174
2175 /* Insert the bits using the depdi instruction. */
2176 while (pos >= 0)
2177 {
2178 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2179 HOST_WIDE_INT sign = v5 < 0;
2180
2181 /* Left extend the insertion. */
2182 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2183 while (pos > 0 && (insv & 1) == sign)
2184 {
2185 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2186 len += 1;
2187 pos -= 1;
2188 }
2189
2190 emit_insn (gen_insv (operand0, GEN_INT (len),
2191 GEN_INT (pos), GEN_INT (v5)));
2192
2193 len = pos > 0 && pos < 5 ? pos : 5;
2194 pos -= len;
2195 }
2196 }
2197 }
2198
2199 set_unique_reg_note (insn, REG_EQUAL, op1);
2200
2201 return 1;
2202 }
2203 }
2204 /* Now have insn-emit do whatever it normally does. */
2205 return 0;
2206 }
2207
2208 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2209 it will need a link/runtime reloc). */
2210
2211 int
2212 pa_reloc_needed (tree exp)
2213 {
2214 int reloc = 0;
2215
2216 switch (TREE_CODE (exp))
2217 {
2218 case ADDR_EXPR:
2219 return 1;
2220
2221 case POINTER_PLUS_EXPR:
2222 case PLUS_EXPR:
2223 case MINUS_EXPR:
2224 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2225 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2226 break;
2227
2228 CASE_CONVERT:
2229 case NON_LVALUE_EXPR:
2230 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2231 break;
2232
2233 case CONSTRUCTOR:
2234 {
2235 tree value;
2236 unsigned HOST_WIDE_INT ix;
2237
2238 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2239 if (value)
2240 reloc |= pa_reloc_needed (value);
2241 }
2242 break;
2243
2244 case ERROR_MARK:
2245 break;
2246
2247 default:
2248 break;
2249 }
2250 return reloc;
2251 }
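/* For example, an initializer such as `&foo' (an ADDR_EXPR) needs a
   reloc, as does `&foo + 4', while a plain arithmetic constant like
   `2 + 3' does not.  */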
2252
2253 \f
2254 /* Return the best assembler insn template
2255 for moving operands[1] into operands[0] as a fullword. */
2256 const char *
2257 pa_singlemove_string (rtx *operands)
2258 {
2259 HOST_WIDE_INT intval;
2260
2261 if (GET_CODE (operands[0]) == MEM)
2262 return "stw %r1,%0";
2263 if (GET_CODE (operands[1]) == MEM)
2264 return "ldw %1,%0";
2265 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2266 {
2267 long i;
2268 REAL_VALUE_TYPE d;
2269
2270 gcc_assert (GET_MODE (operands[1]) == SFmode);
2271
2272 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2273 bit pattern. */
2274 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2275 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2276
2277 operands[1] = GEN_INT (i);
2278 /* Fall through to CONST_INT case. */
2279 }
2280 if (GET_CODE (operands[1]) == CONST_INT)
2281 {
2282 intval = INTVAL (operands[1]);
2283
2284 if (VAL_14_BITS_P (intval))
2285 return "ldi %1,%0";
2286 else if ((intval & 0x7ff) == 0)
2287 return "ldil L'%1,%0";
2288 else if (pa_zdepi_cint_p (intval))
2289 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2290 else
2291 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2292 }
2293 return "copy %1,%0";
2294 }
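/* For example, the constant 4660 (0x1234) fits in 14 bits and loads with
   a single `ldi'; 0x12340000 has its low 11 bits clear and loads with
   `ldil' alone; a constant such as 0x12345 matches none of the special
   cases and needs the two-insn `ldil'/`ldo' sequence.  */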
2295 \f
2296
2297 /* Compute position (in OP[1]) and width (in OP[2])
2298 useful for copying IMM to a register using the zdepi
2299 instructions. Store the immediate value to insert in OP[0]. */
2300 static void
2301 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2302 {
2303 int lsb, len;
2304
2305 /* Find the least significant set bit in IMM. */
2306 for (lsb = 0; lsb < 32; lsb++)
2307 {
2308 if ((imm & 1) != 0)
2309 break;
2310 imm >>= 1;
2311 }
2312
2313 /* Choose variants based on *sign* of the 5-bit field. */
2314 if ((imm & 0x10) == 0)
2315 len = (lsb <= 28) ? 4 : 32 - lsb;
2316 else
2317 {
2318 /* Find the width of the bitstring in IMM. */
2319 for (len = 5; len < 32 - lsb; len++)
2320 {
2321 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2322 break;
2323 }
2324
2325 /* Sign extend IMM as a 5-bit value. */
2326 imm = (imm & 0xf) - 0x10;
2327 }
2328
2329 op[0] = imm;
2330 op[1] = 31 - lsb;
2331 op[2] = len;
2332 }
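/* For example, IMM == 0xf0 has its least significant set bit at
   position 4, giving OP[0] == 0xf, OP[1] == 27 and OP[2] == 4, i.e.
   roughly `zdepi 15,27,4' to rebuild the value.  */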
2333
2334 /* Compute position (in OP[1]) and width (in OP[2])
2335 useful for copying IMM to a register using the depdi,z
2336 instructions. Store the immediate value to insert in OP[0]. */
2337
2338 static void
2339 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2340 {
2341 int lsb, len, maxlen;
2342
2343 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2344
2345 /* Find the least significant set bit in IMM. */
2346 for (lsb = 0; lsb < maxlen; lsb++)
2347 {
2348 if ((imm & 1) != 0)
2349 break;
2350 imm >>= 1;
2351 }
2352
2353 /* Choose variants based on *sign* of the 5-bit field. */
2354 if ((imm & 0x10) == 0)
2355 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2356 else
2357 {
2358 /* Find the width of the bitstring in IMM. */
2359 for (len = 5; len < maxlen - lsb; len++)
2360 {
2361 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2362 break;
2363 }
2364
2365 /* Extend length if host is narrow and IMM is negative. */
2366 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2367 len += 32;
2368
2369 /* Sign extend IMM as a 5-bit value. */
2370 imm = (imm & 0xf) - 0x10;
2371 }
2372
2373 op[0] = imm;
2374 op[1] = 63 - lsb;
2375 op[2] = len;
2376 }
2377
2378 /* Output assembler code to perform a doubleword move insn
2379 with operands OPERANDS. */
2380
2381 const char *
2382 pa_output_move_double (rtx *operands)
2383 {
2384 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2385 rtx latehalf[2];
2386 rtx addreg0 = 0, addreg1 = 0;
2387
2388 /* First classify both operands. */
2389
2390 if (REG_P (operands[0]))
2391 optype0 = REGOP;
2392 else if (offsettable_memref_p (operands[0]))
2393 optype0 = OFFSOP;
2394 else if (GET_CODE (operands[0]) == MEM)
2395 optype0 = MEMOP;
2396 else
2397 optype0 = RNDOP;
2398
2399 if (REG_P (operands[1]))
2400 optype1 = REGOP;
2401 else if (CONSTANT_P (operands[1]))
2402 optype1 = CNSTOP;
2403 else if (offsettable_memref_p (operands[1]))
2404 optype1 = OFFSOP;
2405 else if (GET_CODE (operands[1]) == MEM)
2406 optype1 = MEMOP;
2407 else
2408 optype1 = RNDOP;
2409
2410 /* Check for the cases that the operand constraints are not
2411 supposed to allow. */
2412 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2413
2414 /* Handle copies between general and floating registers. */
2415
2416 if (optype0 == REGOP && optype1 == REGOP
2417 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2418 {
2419 if (FP_REG_P (operands[0]))
2420 {
2421 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2422 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2423 return "{fldds|fldd} -16(%%sp),%0";
2424 }
2425 else
2426 {
2427 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2428 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2429 return "{ldws|ldw} -12(%%sp),%R0";
2430 }
2431 }
2432
2433 /* Handle auto decrementing and incrementing loads and stores
2434 specifically, since the structure of the function doesn't work
2435 for them without major modification. Do it better when we teach
2436 this port about the general inc/dec addressing of PA.
2437 (This was written by tege. Chide him if it doesn't work.) */
2438
2439 if (optype0 == MEMOP)
2440 {
2441 /* We have to output the address syntax ourselves, since print_operand
2442 doesn't deal with the addresses we want to use. Fix this later. */
2443
2444 rtx addr = XEXP (operands[0], 0);
2445 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2446 {
2447 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2448
2449 operands[0] = XEXP (addr, 0);
2450 gcc_assert (GET_CODE (operands[1]) == REG
2451 && GET_CODE (operands[0]) == REG);
2452
2453 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2454
2455 /* No overlap between high target register and address
2456 register. (We do this in a non-obvious way to
2457 save a register file writeback) */
2458 if (GET_CODE (addr) == POST_INC)
2459 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2460 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2461 }
2462 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2463 {
2464 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2465
2466 operands[0] = XEXP (addr, 0);
2467 gcc_assert (GET_CODE (operands[1]) == REG
2468 && GET_CODE (operands[0]) == REG);
2469
2470 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2471 /* No overlap between high target register and address
2472 register. (We do this in a non-obvious way to save a
2473 register file writeback) */
2474 if (GET_CODE (addr) == PRE_INC)
2475 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2476 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2477 }
2478 }
2479 if (optype1 == MEMOP)
2480 {
2481 /* We have to output the address syntax ourselves, since print_operand
2482 doesn't deal with the addresses we want to use. Fix this later. */
2483
2484 rtx addr = XEXP (operands[1], 0);
2485 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2486 {
2487 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2488
2489 operands[1] = XEXP (addr, 0);
2490 gcc_assert (GET_CODE (operands[0]) == REG
2491 && GET_CODE (operands[1]) == REG);
2492
2493 if (!reg_overlap_mentioned_p (high_reg, addr))
2494 {
2495 /* No overlap between high target register and address
2496 register. (We do this in a non-obvious way to
2497 save a register file writeback) */
2498 if (GET_CODE (addr) == POST_INC)
2499 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2500 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2501 }
2502 else
2503 {
2504 /* This is an undefined situation. We should load into the
2505 address register *and* update that register. Probably
2506 we don't need to handle this at all. */
2507 if (GET_CODE (addr) == POST_INC)
2508 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2509 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2510 }
2511 }
2512 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2513 {
2514 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2515
2516 operands[1] = XEXP (addr, 0);
2517 gcc_assert (GET_CODE (operands[0]) == REG
2518 && GET_CODE (operands[1]) == REG);
2519
2520 if (!reg_overlap_mentioned_p (high_reg, addr))
2521 {
2522 /* No overlap between high target register and address
2523 register. (We do this in a non-obvious way to
2524 save a register file writeback) */
2525 if (GET_CODE (addr) == PRE_INC)
2526 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2527 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2528 }
2529 else
2530 {
2531 /* This is an undefined situation. We should load into the
2532 address register *and* update that register. Probably
2533 we don't need to handle this at all. */
2534 if (GET_CODE (addr) == PRE_INC)
2535 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2536 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2537 }
2538 }
2539 else if (GET_CODE (addr) == PLUS
2540 && GET_CODE (XEXP (addr, 0)) == MULT)
2541 {
2542 rtx xoperands[4];
2543 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2544
2545 if (!reg_overlap_mentioned_p (high_reg, addr))
2546 {
2547 xoperands[0] = high_reg;
2548 xoperands[1] = XEXP (addr, 1);
2549 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2550 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2551 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2552 xoperands);
2553 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2554 }
2555 else
2556 {
2557 xoperands[0] = high_reg;
2558 xoperands[1] = XEXP (addr, 1);
2559 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2560 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2561 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2562 xoperands);
2563 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2564 }
2565 }
2566 }
2567
2568 /* If an operand is an unoffsettable memory ref, find a register
2569 we can increment temporarily to make it refer to the second word. */
2570
2571 if (optype0 == MEMOP)
2572 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2573
2574 if (optype1 == MEMOP)
2575 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2576
2577 /* Ok, we can do one word at a time.
2578 Normally we do the low-numbered word first.
2579
2580 In either case, set up in LATEHALF the operands to use
2581 for the high-numbered word and in some cases alter the
2582 operands in OPERANDS to be suitable for the low-numbered word. */
2583
2584 if (optype0 == REGOP)
2585 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2586 else if (optype0 == OFFSOP)
2587 latehalf[0] = adjust_address (operands[0], SImode, 4);
2588 else
2589 latehalf[0] = operands[0];
2590
2591 if (optype1 == REGOP)
2592 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2593 else if (optype1 == OFFSOP)
2594 latehalf[1] = adjust_address (operands[1], SImode, 4);
2595 else if (optype1 == CNSTOP)
2596 split_double (operands[1], &operands[1], &latehalf[1]);
2597 else
2598 latehalf[1] = operands[1];
2599
2600 /* If the first move would clobber the source of the second one,
2601 do them in the other order.
2602
2603 This can happen in two cases:
2604
2605 mem -> register where the first half of the destination register
2606 is the same register used in the memory's address. Reload
2607 can create such insns.
2608
2609 mem in this case will be either register indirect or register
2610 indirect plus a valid offset.
2611
2612 register -> register move where REGNO(dst) == REGNO(src + 1)
2613 someone (Tim/Tege?) claimed this can happen for parameter loads.
2614
2615 Handle mem -> register case first. */
2616 if (optype0 == REGOP
2617 && (optype1 == MEMOP || optype1 == OFFSOP)
2618 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2619 operands[1], 0))
2620 {
2621 /* Do the late half first. */
2622 if (addreg1)
2623 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2624 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2625
2626 /* Then clobber. */
2627 if (addreg1)
2628 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2629 return pa_singlemove_string (operands);
2630 }
2631
2632 /* Now handle register -> register case. */
2633 if (optype0 == REGOP && optype1 == REGOP
2634 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2635 {
2636 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2637 return pa_singlemove_string (operands);
2638 }
2639
2640 /* Normal case: do the two words, low-numbered first. */
2641
2642 output_asm_insn (pa_singlemove_string (operands), operands);
2643
2644 /* Make any unoffsettable addresses point at high-numbered word. */
2645 if (addreg0)
2646 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2647 if (addreg1)
2648 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2649
2650 /* Do that word. */
2651 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2652
2653 /* Undo the adds we just did. */
2654 if (addreg0)
2655 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2656 if (addreg1)
2657 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2658
2659 return "";
2660 }
2661 \f
2662 const char *
2663 pa_output_fp_move_double (rtx *operands)
2664 {
2665 if (FP_REG_P (operands[0]))
2666 {
2667 if (FP_REG_P (operands[1])
2668 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2669 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2670 else
2671 output_asm_insn ("fldd%F1 %1,%0", operands);
2672 }
2673 else if (FP_REG_P (operands[1]))
2674 {
2675 output_asm_insn ("fstd%F0 %1,%0", operands);
2676 }
2677 else
2678 {
2679 rtx xoperands[2];
2680
2681 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2682
2683 /* This is a pain. You have to be prepared to deal with an
2684 arbitrary address here including pre/post increment/decrement.
2685
2686 So avoid this in the MD. */
2687 gcc_assert (GET_CODE (operands[0]) == REG);
2688
2689 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2690 xoperands[0] = operands[0];
2691 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2692 }
2693 return "";
2694 }
2695 \f
2696 /* Return a REG that occurs in ADDR with coefficient 1.
2697 ADDR can be effectively incremented by incrementing REG. */
2698
2699 static rtx
2700 find_addr_reg (rtx addr)
2701 {
2702 while (GET_CODE (addr) == PLUS)
2703 {
2704 if (GET_CODE (XEXP (addr, 0)) == REG)
2705 addr = XEXP (addr, 0);
2706 else if (GET_CODE (XEXP (addr, 1)) == REG)
2707 addr = XEXP (addr, 1);
2708 else if (CONSTANT_P (XEXP (addr, 0)))
2709 addr = XEXP (addr, 1);
2710 else if (CONSTANT_P (XEXP (addr, 1)))
2711 addr = XEXP (addr, 0);
2712 else
2713 gcc_unreachable ();
2714 }
2715 gcc_assert (GET_CODE (addr) == REG);
2716 return addr;
2717 }
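/* For example, given ADDR == (plus (reg 26) (const_int 8)), the loop
   selects (reg 26); incrementing that register by 4 effectively moves
   the address to the following word.  */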
2718
2719 /* Emit code to perform a block move.
2720
2721 OPERANDS[0] is the destination pointer as a REG, clobbered.
2722 OPERANDS[1] is the source pointer as a REG, clobbered.
2723 OPERANDS[2] is a register for temporary storage.
2724 OPERANDS[3] is a register for temporary storage.
2725 OPERANDS[4] is the size as a CONST_INT
2726 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2727 OPERANDS[6] is another temporary register. */
2728
2729 const char *
2730 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2731 {
2732 int align = INTVAL (operands[5]);
2733 unsigned long n_bytes = INTVAL (operands[4]);
2734
2735 /* We can't move more than a word at a time because the PA
2736 has no integer move insns longer than a word. (Could use fp mem ops?) */
2737 if (align > (TARGET_64BIT ? 8 : 4))
2738 align = (TARGET_64BIT ? 8 : 4);
2739
2740 /* Note that we know each loop below will execute at least twice
2741 (else we would have open-coded the copy). */
2742 switch (align)
2743 {
2744 case 8:
2745 /* Pre-adjust the loop counter. */
2746 operands[4] = GEN_INT (n_bytes - 16);
2747 output_asm_insn ("ldi %4,%2", operands);
2748
2749 /* Copying loop. */
2750 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2751 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2752 output_asm_insn ("std,ma %3,8(%0)", operands);
2753 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2754 output_asm_insn ("std,ma %6,8(%0)", operands);
2755
2756 /* Handle the residual. There could be up to 15 bytes of
2757 residual to copy! */
2758 if (n_bytes % 16 != 0)
2759 {
2760 operands[4] = GEN_INT (n_bytes % 8);
2761 if (n_bytes % 16 >= 8)
2762 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2763 if (n_bytes % 8 != 0)
2764 output_asm_insn ("ldd 0(%1),%6", operands);
2765 if (n_bytes % 16 >= 8)
2766 output_asm_insn ("std,ma %3,8(%0)", operands);
2767 if (n_bytes % 8 != 0)
2768 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2769 }
2770 return "";
2771
2772 case 4:
2773 /* Pre-adjust the loop counter. */
2774 operands[4] = GEN_INT (n_bytes - 8);
2775 output_asm_insn ("ldi %4,%2", operands);
2776
2777 /* Copying loop. */
2778 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2779 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2780 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2781 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2782 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2783
2784 /* Handle the residual. There could be up to 7 bytes of
2785 residual to copy! */
2786 if (n_bytes % 8 != 0)
2787 {
2788 operands[4] = GEN_INT (n_bytes % 4);
2789 if (n_bytes % 8 >= 4)
2790 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2791 if (n_bytes % 4 != 0)
2792 output_asm_insn ("ldw 0(%1),%6", operands);
2793 if (n_bytes % 8 >= 4)
2794 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2795 if (n_bytes % 4 != 0)
2796 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2797 }
2798 return "";
2799
2800 case 2:
2801 /* Pre-adjust the loop counter. */
2802 operands[4] = GEN_INT (n_bytes - 4);
2803 output_asm_insn ("ldi %4,%2", operands);
2804
2805 /* Copying loop. */
2806 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2807 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2808 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2809 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2810 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2811
2812 /* Handle the residual. */
2813 if (n_bytes % 4 != 0)
2814 {
2815 if (n_bytes % 4 >= 2)
2816 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2817 if (n_bytes % 2 != 0)
2818 output_asm_insn ("ldb 0(%1),%6", operands);
2819 if (n_bytes % 4 >= 2)
2820 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2821 if (n_bytes % 2 != 0)
2822 output_asm_insn ("stb %6,0(%0)", operands);
2823 }
2824 return "";
2825
2826 case 1:
2827 /* Pre-adjust the loop counter. */
2828 operands[4] = GEN_INT (n_bytes - 2);
2829 output_asm_insn ("ldi %4,%2", operands);
2830
2831 /* Copying loop. */
2832 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2833 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2834 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2835 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2836 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2837
2838 /* Handle the residual. */
2839 if (n_bytes % 2 != 0)
2840 {
2841 output_asm_insn ("ldb 0(%1),%3", operands);
2842 output_asm_insn ("stb %3,0(%0)", operands);
2843 }
2844 return "";
2845
2846 default:
2847 gcc_unreachable ();
2848 }
2849 }
2850
2851 /* Count the number of insns necessary to handle this block move.
2852
2853 Basic structure is the same as pa_output_block_move, except that we
2854 count insns rather than emit them. */
2855
2856 static int
2857 compute_movmem_length (rtx insn)
2858 {
2859 rtx pat = PATTERN (insn);
2860 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2861 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2862 unsigned int n_insns = 0;
2863
2864 /* We can't move more than a word at a time because the PA
2865 has no integer move insns longer than a word. (Could use fp mem ops?) */
2866 if (align > (TARGET_64BIT ? 8 : 4))
2867 align = (TARGET_64BIT ? 8 : 4);
2868
2869 /* The basic copying loop. */
2870 n_insns = 6;
2871
2872 /* Residuals. */
2873 if (n_bytes % (2 * align) != 0)
2874 {
2875 if ((n_bytes % (2 * align)) >= align)
2876 n_insns += 2;
2877
2878 if ((n_bytes % align) != 0)
2879 n_insns += 2;
2880 }
2881
2882 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2883 return n_insns * 4;
2884 }
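/* For example, a 10-byte move with 4-byte alignment needs the 6-insn
   copying loop plus 2 insns for the 2 residual bytes, so the length
   returned is 8 * 4 == 32 bytes.  */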
2885
2886 /* Emit code to perform a block clear.
2887
2888 OPERANDS[0] is the destination pointer as a REG, clobbered.
2889 OPERANDS[1] is a register for temporary storage.
2890 OPERANDS[2] is the size as a CONST_INT
2891 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2892
2893 const char *
2894 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2895 {
2896 int align = INTVAL (operands[3]);
2897 unsigned long n_bytes = INTVAL (operands[2]);
2898
2899 /* We can't clear more than a word at a time because the PA
2900 has no integer move insns longer than a word. */
2901 if (align > (TARGET_64BIT ? 8 : 4))
2902 align = (TARGET_64BIT ? 8 : 4);
2903
2904 /* Note that we know each loop below will execute at least twice
2905 (else we would have open-coded the copy). */
2906 switch (align)
2907 {
2908 case 8:
2909 /* Pre-adjust the loop counter. */
2910 operands[2] = GEN_INT (n_bytes - 16);
2911 output_asm_insn ("ldi %2,%1", operands);
2912
2913 /* Loop. */
2914 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2915 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2916 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2917
2918 /* Handle the residual. There could be up to 15 bytes of
2919 residual to clear! */
2920 if (n_bytes % 16 != 0)
2921 {
2922 operands[2] = GEN_INT (n_bytes % 8);
2923 if (n_bytes % 16 >= 8)
2924 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2925 if (n_bytes % 8 != 0)
2926 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2927 }
2928 return "";
2929
2930 case 4:
2931 /* Pre-adjust the loop counter. */
2932 operands[2] = GEN_INT (n_bytes - 8);
2933 output_asm_insn ("ldi %2,%1", operands);
2934
2935 /* Loop. */
2936 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2937 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2938 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2939
2940 /* Handle the residual. There could be up to 7 bytes of
2941 residual to clear! */
2942 if (n_bytes % 8 != 0)
2943 {
2944 operands[2] = GEN_INT (n_bytes % 4);
2945 if (n_bytes % 8 >= 4)
2946 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2947 if (n_bytes % 4 != 0)
2948 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2949 }
2950 return "";
2951
2952 case 2:
2953 /* Pre-adjust the loop counter. */
2954 operands[2] = GEN_INT (n_bytes - 4);
2955 output_asm_insn ("ldi %2,%1", operands);
2956
2957 /* Loop. */
2958 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2959 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2960 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2961
2962 /* Handle the residual. */
2963 if (n_bytes % 4 != 0)
2964 {
2965 if (n_bytes % 4 >= 2)
2966 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2967 if (n_bytes % 2 != 0)
2968 output_asm_insn ("stb %%r0,0(%0)", operands);
2969 }
2970 return "";
2971
2972 case 1:
2973 /* Pre-adjust the loop counter. */
2974 operands[2] = GEN_INT (n_bytes - 2);
2975 output_asm_insn ("ldi %2,%1", operands);
2976
2977 /* Loop. */
2978 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2979 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2980 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2981
2982 /* Handle the residual. */
2983 if (n_bytes % 2 != 0)
2984 output_asm_insn ("stb %%r0,0(%0)", operands);
2985
2986 return "";
2987
2988 default:
2989 gcc_unreachable ();
2990 }
2991 }
2992
2993 /* Count the number of insns necessary to handle this block clear.
2994
2995 Basic structure is the same as pa_output_block_clear, except that we
2996 count insns rather than emit them. */
2997
2998 static int
2999 compute_clrmem_length (rtx insn)
3000 {
3001 rtx pat = PATTERN (insn);
3002 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3003 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3004 unsigned int n_insns = 0;
3005
3006 /* We can't clear more than a word at a time because the PA
3007 has no longer integer move insns. */
3008 if (align > (TARGET_64BIT ? 8 : 4))
3009 align = (TARGET_64BIT ? 8 : 4);
3010
3011 /* The basic loop. */
3012 n_insns = 4;
3013
3014 /* Residuals. */
3015 if (n_bytes % (2 * align) != 0)
3016 {
3017 if ((n_bytes % (2 * align)) >= align)
3018 n_insns++;
3019
3020 if ((n_bytes % align) != 0)
3021 n_insns++;
3022 }
3023
3024 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3025 return n_insns * 4;
3026 }
3027 \f
3028
3029 const char *
3030 pa_output_and (rtx *operands)
3031 {
3032 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3033 {
3034 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3035 int ls0, ls1, ms0, p, len;
3036
3037 for (ls0 = 0; ls0 < 32; ls0++)
3038 if ((mask & (1 << ls0)) == 0)
3039 break;
3040
3041 for (ls1 = ls0; ls1 < 32; ls1++)
3042 if ((mask & (1 << ls1)) != 0)
3043 break;
3044
3045 for (ms0 = ls1; ms0 < 32; ms0++)
3046 if ((mask & (1 << ms0)) == 0)
3047 break;
3048
3049 gcc_assert (ms0 == 32);
3050
3051 if (ls1 == 32)
3052 {
3053 len = ls0;
3054
3055 gcc_assert (len);
3056
3057 operands[2] = GEN_INT (len);
3058 return "{extru|extrw,u} %1,31,%2,%0";
3059 }
3060 else
3061 {
3062 /* We could use this `depi' for the case above as well, but `depi'
3063 requires one more register file access than an `extru'. */
3064
3065 p = 31 - ls0;
3066 len = ls1 - ls0;
3067
3068 operands[2] = GEN_INT (p);
3069 operands[3] = GEN_INT (len);
3070 return "{depi|depwi} 0,%2,%3,%0";
3071 }
3072 }
3073 else
3074 return "and %1,%2,%0";
3075 }
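/* For example, a mask of 0x0000ffff extracts the low halfword with
   `extru %1,31,16,%0', while 0xffff00ff clears an interior field with
   `depi 0,23,8,%0'.  */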
3076
3077 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3078 storing the result in operands[0]. */
3079 const char *
3080 pa_output_64bit_and (rtx *operands)
3081 {
3082 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3083 {
3084 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3085 int ls0, ls1, ms0, p, len;
3086
3087 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3088 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3089 break;
3090
3091 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3092 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3093 break;
3094
3095 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3096 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3097 break;
3098
3099 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3100
3101 if (ls1 == HOST_BITS_PER_WIDE_INT)
3102 {
3103 len = ls0;
3104
3105 gcc_assert (len);
3106
3107 operands[2] = GEN_INT (len);
3108 return "extrd,u %1,63,%2,%0";
3109 }
3110 else
3111 {
3112 /* We could use `depdi' for the case above as well, but `depdi'
3113 requires one more register file access than an `extrd,u'. */
3114
3115 p = 63 - ls0;
3116 len = ls1 - ls0;
3117
3118 operands[2] = GEN_INT (p);
3119 operands[3] = GEN_INT (len);
3120 return "depdi 0,%2,%3,%0";
3121 }
3122 }
3123 else
3124 return "and %1,%2,%0";
3125 }
3126
3127 const char *
3128 pa_output_ior (rtx *operands)
3129 {
3130 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3131 int bs0, bs1, p, len;
3132
3133 if (INTVAL (operands[2]) == 0)
3134 return "copy %1,%0";
3135
3136 for (bs0 = 0; bs0 < 32; bs0++)
3137 if ((mask & (1 << bs0)) != 0)
3138 break;
3139
3140 for (bs1 = bs0; bs1 < 32; bs1++)
3141 if ((mask & (1 << bs1)) == 0)
3142 break;
3143
3144 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3145
3146 p = 31 - bs0;
3147 len = bs1 - bs0;
3148
3149 operands[2] = GEN_INT (p);
3150 operands[3] = GEN_INT (len);
3151 return "{depi|depwi} -1,%2,%3,%0";
3152 }
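/* For example, a mask of 0x00ff0000 has its bitstring starting at bit 16
   with length 8, so p == 15, len == 8 and the insn emitted is
   `depi -1,15,8,%0'.  */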
3153
3154 /* Return a string to perform a bitwise inclusive-or of operands[1] with
3155 operands[2], storing the result in operands[0]. */
3156 const char *
3157 pa_output_64bit_ior (rtx *operands)
3158 {
3159 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3160 int bs0, bs1, p, len;
3161
3162 if (INTVAL (operands[2]) == 0)
3163 return "copy %1,%0";
3164
3165 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3166 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3167 break;
3168
3169 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3170 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3171 break;
3172
3173 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3174 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3175
3176 p = 63 - bs0;
3177 len = bs1 - bs0;
3178
3179 operands[2] = GEN_INT (p);
3180 operands[3] = GEN_INT (len);
3181 return "depdi -1,%2,%3,%0";
3182 }
3183 \f
3184 /* Target hook for assembling integer objects. This code handles
3185 aligned SI and DI integers specially since function references
3186 must be preceded by P%. */
3187
3188 static bool
3189 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3190 {
3191 if (size == UNITS_PER_WORD
3192 && aligned_p
3193 && function_label_operand (x, VOIDmode))
3194 {
3195 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3196 output_addr_const (asm_out_file, x);
3197 fputc ('\n', asm_out_file);
3198 return true;
3199 }
3200 return default_assemble_integer (x, size, aligned_p);
3201 }
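/* For example, an aligned word-size reference to function foo comes out
   as `.word P%foo' (or `.dword P%foo' on 64-bit targets), marking it as
   a plabel (function pointer) reference for the linker.  */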
3202 \f
3203 /* Output an ascii string. */
3204 void
3205 pa_output_ascii (FILE *file, const char *p, int size)
3206 {
3207 int i;
3208 int chars_output;
3209 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3210
3211 /* The HP assembler can only take strings of 256 characters at one
3212 time. This is a limitation on input line length, *not* the
3213 length of the string. Sigh. Even worse, it seems that the
3214 restriction is in number of input characters (see \xnn &
3215 \whatever). So we have to do this very carefully. */
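     /* For example, the bytes 'H', 'i', '"', 0x01 come out as
	.STRING "Hi\"\x01"
	and a fresh .STRING directive is started whenever the current
	input line approaches the limit.  */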
3216
3217 fputs ("\t.STRING \"", file);
3218
3219 chars_output = 0;
3220 for (i = 0; i < size; i += 4)
3221 {
3222 int co = 0;
3223 int io = 0;
3224 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3225 {
3226 register unsigned int c = (unsigned char) p[i + io];
3227
3228 if (c == '\"' || c == '\\')
3229 partial_output[co++] = '\\';
3230 if (c >= ' ' && c < 0177)
3231 partial_output[co++] = c;
3232 else
3233 {
3234 unsigned int hexd;
3235 partial_output[co++] = '\\';
3236 partial_output[co++] = 'x';
3237 hexd = c / 16 + '0';
3238 if (hexd > '9')
3239 hexd -= '9' - 'a' + 1;
3240 partial_output[co++] = hexd;
3241 hexd = c % 16 + '0';
3242 if (hexd > '9')
3243 hexd -= '9' - 'a' + 1;
3244 partial_output[co++] = hexd;
3245 }
3246 }
3247 if (chars_output + co > 243)
3248 {
3249 fputs ("\"\n\t.STRING \"", file);
3250 chars_output = 0;
3251 }
3252 fwrite (partial_output, 1, (size_t) co, file);
3253 chars_output += co;
3254 co = 0;
3255 }
3256 fputs ("\"\n", file);
3257 }
3258
3259 /* Try to rewrite floating point comparisons & branches to avoid
3260 useless add,tr insns.
3261
3262 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3263 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3264 first attempt to remove useless add,tr insns. It is zero
3265 for the second pass as reorg sometimes leaves bogus REG_DEAD
3266 notes lying around.
3267
3268 When CHECK_NOTES is zero we can only eliminate add,tr insns
3269 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3270 instructions. */
3271 static void
3272 remove_useless_addtr_insns (int check_notes)
3273 {
3274 rtx insn;
3275 static int pass = 0;
3276
3277 /* This is fairly cheap, so always run it when optimizing. */
3278 if (optimize > 0)
3279 {
3280 int fcmp_count = 0;
3281 int fbranch_count = 0;
3282
3283 /* Walk all the insns in this function looking for fcmp & fbranch
3284 instructions. Keep track of how many of each we find. */
3285 for (insn = get_insns (); insn; insn = next_insn (insn))
3286 {
3287 rtx tmp;
3288
3289 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3290 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3291 continue;
3292
3293 tmp = PATTERN (insn);
3294
3295 /* It must be a set. */
3296 if (GET_CODE (tmp) != SET)
3297 continue;
3298
3299 /* If the destination is CCFP, then we've found an fcmp insn. */
3300 tmp = SET_DEST (tmp);
3301 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3302 {
3303 fcmp_count++;
3304 continue;
3305 }
3306
3307 tmp = PATTERN (insn);
3308 /* If this is an fbranch instruction, bump the fbranch counter. */
3309 if (GET_CODE (tmp) == SET
3310 && SET_DEST (tmp) == pc_rtx
3311 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3312 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3313 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3314 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3315 {
3316 fbranch_count++;
3317 continue;
3318 }
3319 }
3320
3321
3322 /* Find all floating point compare + branch insns. If possible,
3323 reverse the comparison & the branch to avoid add,tr insns. */
3324 for (insn = get_insns (); insn; insn = next_insn (insn))
3325 {
3326 rtx tmp, next;
3327
3328 /* Ignore anything that isn't an INSN. */
3329 if (GET_CODE (insn) != INSN)
3330 continue;
3331
3332 tmp = PATTERN (insn);
3333
3334 /* It must be a set. */
3335 if (GET_CODE (tmp) != SET)
3336 continue;
3337
3338 /* The destination must be CCFP, which is register zero. */
3339 tmp = SET_DEST (tmp);
3340 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3341 continue;
3342
3343 /* INSN should be a set of CCFP.
3344
3345 See if the result of this insn is used in a reversed FP
3346 conditional branch. If so, reverse our condition and
3347 the branch. Doing so avoids useless add,tr insns. */
3348 next = next_insn (insn);
3349 while (next)
3350 {
3351 /* Jumps, calls and labels stop our search. */
3352 if (GET_CODE (next) == JUMP_INSN
3353 || GET_CODE (next) == CALL_INSN
3354 || GET_CODE (next) == CODE_LABEL)
3355 break;
3356
3357 /* As does another fcmp insn. */
3358 if (GET_CODE (next) == INSN
3359 && GET_CODE (PATTERN (next)) == SET
3360 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3361 && REGNO (SET_DEST (PATTERN (next))) == 0)
3362 break;
3363
3364 next = next_insn (next);
3365 }
3366
3367 /* Is NEXT a branch? */
3368 if (next
3369 && GET_CODE (next) == JUMP_INSN)
3370 {
3371 rtx pattern = PATTERN (next);
3372
3373 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3374 and CCFP dies, then reverse our conditional and the branch
3375 to avoid the add,tr. */
3376 if (GET_CODE (pattern) == SET
3377 && SET_DEST (pattern) == pc_rtx
3378 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3379 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3380 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3381 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3382 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3383 && (fcmp_count == fbranch_count
3384 || (check_notes
3385 && find_regno_note (next, REG_DEAD, 0))))
3386 {
3387 /* Reverse the branch. */
3388 tmp = XEXP (SET_SRC (pattern), 1);
3389 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3390 XEXP (SET_SRC (pattern), 2) = tmp;
3391 INSN_CODE (next) = -1;
3392
3393 /* Reverse our condition. */
3394 tmp = PATTERN (insn);
3395 PUT_CODE (XEXP (tmp, 1),
3396 (reverse_condition_maybe_unordered
3397 (GET_CODE (XEXP (tmp, 1)))));
3398 }
3399 }
3400 }
3401 }
3402
3403 pass = !pass;
3404
3405 }
3406 \f
3407 /* You may have trouble believing this, but this is the 32 bit HP-PA
3408 stack layout. Wow.
3409
3410 Offset Contents
3411
3412 Variable arguments (optional; any number may be allocated)
3413
3414 SP-(4*(N+9)) arg word N
3415 : :
3416 SP-56 arg word 5
3417 SP-52 arg word 4
3418
3419 Fixed arguments (must be allocated; may remain unused)
3420
3421 SP-48 arg word 3
3422 SP-44 arg word 2
3423 SP-40 arg word 1
3424 SP-36 arg word 0
3425
3426 Frame Marker
3427
3428 SP-32 External Data Pointer (DP)
3429 SP-28 External sr4
3430 SP-24 External/stub RP (RP')
3431 SP-20 Current RP
3432 SP-16 Static Link
3433 SP-12 Clean up
3434 SP-8 Calling Stub RP (RP'')
3435 SP-4 Previous SP
3436
3437 Top of Frame
3438
3439 SP-0 Stack Pointer (points to next available address)
3440
3441 */
3442
3443 /* This function saves registers as follows. Registers marked with ' are
3444 this function's registers (as opposed to the previous function's).
3445 If a frame_pointer isn't needed, r4 is saved as a general register;
3446 the space for the frame pointer is still allocated, though, to keep
3447 things simple.
3448
3449
3450 Top of Frame
3451
3452 SP (FP') Previous FP
3453 SP + 4 Alignment filler (sigh)
3454 SP + 8 Space for locals reserved here.
3455 .
3456 .
3457 .
3458 SP + n All call saved registers used.
3459 .
3460 .
3461 .
3462 SP + o All call saved fp registers used.
3463 .
3464 .
3465 .
3466 SP + p (SP') points to next available address.
3467
3468 */
3469
3470 /* Global variables set by output_function_prologue(). */
3471 /* Size of frame. Need to know this to emit return insns from
3472 leaf procedures. */
3473 static HOST_WIDE_INT actual_fsize, local_fsize;
3474 static int save_fregs;
3475
3476 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3477 Handle case where DISP > 8k by using the add_high_const patterns.
3478
3479 Note in DISP > 8k case, we will leave the high part of the address
3480 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
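/* For example, with DISP == 0x5000, which does not fit in 14 bits, the
   sequence emitted is roughly `addil L'0x5000,%base' followed by
   `stw %reg,R'0x5000(%r1)', leaving the high part of the address in %r1
   as noted above.  */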
3481
3482 static void
3483 store_reg (int reg, HOST_WIDE_INT disp, int base)
3484 {
3485 rtx insn, dest, src, basereg;
3486
3487 src = gen_rtx_REG (word_mode, reg);
3488 basereg = gen_rtx_REG (Pmode, base);
3489 if (VAL_14_BITS_P (disp))
3490 {
3491 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3492 insn = emit_move_insn (dest, src);
3493 }
3494 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3495 {
3496 rtx delta = GEN_INT (disp);
3497 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3498
3499 emit_move_insn (tmpreg, delta);
3500 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3501 if (DO_FRAME_NOTES)
3502 {
3503 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3504 gen_rtx_SET (VOIDmode, tmpreg,
3505 gen_rtx_PLUS (Pmode, basereg, delta)));
3506 RTX_FRAME_RELATED_P (insn) = 1;
3507 }
3508 dest = gen_rtx_MEM (word_mode, tmpreg);
3509 insn = emit_move_insn (dest, src);
3510 }
3511 else
3512 {
3513 rtx delta = GEN_INT (disp);
3514 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3515 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3516
3517 emit_move_insn (tmpreg, high);
3518 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3519 insn = emit_move_insn (dest, src);
3520 if (DO_FRAME_NOTES)
3521 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3522 gen_rtx_SET (VOIDmode,
3523 gen_rtx_MEM (word_mode,
3524 gen_rtx_PLUS (word_mode,
3525 basereg,
3526 delta)),
3527 src));
3528 }
3529
3530 if (DO_FRAME_NOTES)
3531 RTX_FRAME_RELATED_P (insn) = 1;
3532 }
3533
3534 /* Emit RTL to store REG at the memory location specified by BASE and then
3535 add MOD to BASE. MOD must be <= 8k. */
3536
3537 static void
3538 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3539 {
3540 rtx insn, basereg, srcreg, delta;
3541
3542 gcc_assert (VAL_14_BITS_P (mod));
3543
3544 basereg = gen_rtx_REG (Pmode, base);
3545 srcreg = gen_rtx_REG (word_mode, reg);
3546 delta = GEN_INT (mod);
3547
3548 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3549 if (DO_FRAME_NOTES)
3550 {
3551 RTX_FRAME_RELATED_P (insn) = 1;
3552
3553 /* RTX_FRAME_RELATED_P must be set on each frame related set
3554 in a parallel with more than one element. */
3555 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3556 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3557 }
3558 }
3559
3560 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3561 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3562 whether to add a frame note or not.
3563
3564 In the DISP > 8k case, we leave the high part of the address in %r1.
3565 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3566
3567 static void
3568 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3569 {
3570 rtx insn;
3571
3572 if (VAL_14_BITS_P (disp))
3573 {
3574 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3575 plus_constant (Pmode,
3576 gen_rtx_REG (Pmode, base), disp));
3577 }
3578 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3579 {
3580 rtx basereg = gen_rtx_REG (Pmode, base);
3581 rtx delta = GEN_INT (disp);
3582 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3583
3584 emit_move_insn (tmpreg, delta);
3585 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3586 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3587 if (DO_FRAME_NOTES)
3588 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3589 gen_rtx_SET (VOIDmode, tmpreg,
3590 gen_rtx_PLUS (Pmode, basereg, delta)));
3591 }
3592 else
3593 {
3594 rtx basereg = gen_rtx_REG (Pmode, base);
3595 rtx delta = GEN_INT (disp);
3596 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3597
3598 emit_move_insn (tmpreg,
3599 gen_rtx_PLUS (Pmode, basereg,
3600 gen_rtx_HIGH (Pmode, delta)));
3601 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3602 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3603 }
3604
3605 if (DO_FRAME_NOTES && note)
3606 RTX_FRAME_RELATED_P (insn) = 1;
3607 }
3608
3609 HOST_WIDE_INT
3610 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3611 {
3612 int freg_saved = 0;
3613 int i, j;
3614
3615 /* The code in pa_expand_prologue and pa_expand_epilogue must
3616 be consistent with the rounding and size calculation done here.
3617 Change them at the same time. */
3618
3619 /* We do our own stack alignment. First, round the size of the
3620 stack locals up to a word boundary. */
3621 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
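  /* Worked example (illustrative numbers): with UNITS_PER_WORD == 4,
     size == 13 becomes (13 + 3) & ~3 == 16; a size that is already a
     multiple of the word size is unchanged.  */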
3622
3623 /* Space for previous frame pointer + filler. If any frame is
3624 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3625 waste some space here for the sake of HP compatibility. The
3626 first slot is only used when the frame pointer is needed. */
3627 if (size || frame_pointer_needed)
3628 size += STARTING_FRAME_OFFSET;
3629
3630 /* If the current function calls __builtin_eh_return, then we need
3631 to allocate stack space for registers that will hold data for
3632 the exception handler. */
3633 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3634 {
3635 unsigned int i;
3636
3637 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3638 continue;
3639 size += i * UNITS_PER_WORD;
3640 }
3641
3642 /* Account for space used by the callee general register saves. */
3643 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3644 if (df_regs_ever_live_p (i))
3645 size += UNITS_PER_WORD;
3646
3647 /* Account for space used by the callee floating point register saves. */
3648 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3649 if (df_regs_ever_live_p (i)
3650 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3651 {
3652 freg_saved = 1;
3653
3654 /* We always save both halves of the FP register, so always
3655 increment the frame size by 8 bytes. */
3656 size += 8;
3657 }
3658
3659 /* If any of the floating registers are saved, account for the
3660 alignment needed for the floating point register save block. */
3661 if (freg_saved)
3662 {
3663 size = (size + 7) & ~7;
3664 if (fregs_live)
3665 *fregs_live = 1;
3666 }
3667
3668 /* The various ABIs include space for the outgoing parameters in the
3669 size of the current function's stack frame. We don't need to align
3670 for the outgoing arguments as their alignment is set by the final
3671 rounding for the frame as a whole. */
3672 size += crtl->outgoing_args_size;
3673
3674 /* Allocate space for the fixed frame marker. This space must be
3675 allocated for any function that makes calls or allocates
3676 stack space. */
3677 if (!crtl->is_leaf || size)
3678 size += TARGET_64BIT ? 48 : 32;
3679
3680 /* Finally, round to the preferred stack boundary. */
3681 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3682 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3683 }
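
/* End-to-end example with made-up numbers for a non-leaf 32-bit function,
   assuming UNITS_PER_WORD == 4, STARTING_FRAME_OFFSET == 8 and a 64-byte
   preferred stack boundary: 20 bytes of locals stay 20 after word rounding;
   +8 for the frame pointer slot and filler gives 28; two callee GR saves
   give 36; 16 bytes of outgoing args give 52; +32 for the fixed frame
   marker gives 84; and the final rounding yields a 128-byte frame.  */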
3684
3685 /* Generate the assembly code for function entry. FILE is a stdio
3686 stream to output the code to. SIZE is an int: how many units of
3687 temporary storage to allocate.
3688
3689 Refer to the array `regs_ever_live' to determine which registers to
3690 save; `regs_ever_live[I]' is nonzero if register number I is ever
3691 used in the function. This function is responsible for knowing
3692 which registers should not be saved even if used. */
3693
3694 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3695 of memory. If any fpu reg is used in the function, we allocate
3696 such a block here, at the bottom of the frame, just in case it's needed.
3697
3698 If this function is a leaf procedure, then we may choose not
3699 to do a "save" insn. The decision about whether or not
3700 to do this is made in regclass.c. */
3701
3702 static void
3703 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3704 {
3705 /* The function's label and associated .PROC must never be
3706 separated and must be output *after* any profiling declarations
3707 to avoid changing spaces/subspaces within a procedure. */
3708 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3709 fputs ("\t.PROC\n", file);
3710
3711 /* pa_expand_prologue does the dirty work now. We just need
3712 to output the assembler directives which denote the start
3713 of a function. */
3714 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3715 if (crtl->is_leaf)
3716 fputs (",NO_CALLS", file);
3717 else
3718 fputs (",CALLS", file);
3719 if (rp_saved)
3720 fputs (",SAVE_RP", file);
3721
3722 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3723 at the beginning of the frame and that it is used as the frame
3724 pointer for the frame. We do this because our current frame
3725 layout doesn't conform to that specified in the HP runtime
3726 documentation and we need a way to indicate to programs such as
3727 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3728 isn't used by HP compilers but is supported by the assembler.
3729 However, SAVE_SP is supposed to indicate that the previous stack
3730 pointer has been saved in the frame marker. */
3731 if (frame_pointer_needed)
3732 fputs (",SAVE_SP", file);
3733
3734 /* Pass on information about the number of callee register saves
3735 performed in the prologue.
3736
3737 The compiler is supposed to pass the highest register number
3738    saved; the assembler then has to adjust that number before
3739 entering it into the unwind descriptor (to account for any
3740 caller saved registers with lower register numbers than the
3741 first callee saved register). */
3742 if (gr_saved)
3743 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3744
3745 if (fr_saved)
3746 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3747
3748 fputs ("\n\t.ENTRY\n", file);
3749
3750 remove_useless_addtr_insns (0);
3751 }
3752
3753 void
3754 pa_expand_prologue (void)
3755 {
3756 int merge_sp_adjust_with_store = 0;
3757 HOST_WIDE_INT size = get_frame_size ();
3758 HOST_WIDE_INT offset;
3759 int i;
3760 rtx insn, tmpreg;
3761
3762 gr_saved = 0;
3763 fr_saved = 0;
3764 save_fregs = 0;
3765
3766 /* Compute total size for frame pointer, filler, locals and rounding to
3767 the next word boundary. Similar code appears in pa_compute_frame_size
3768 and must be changed in tandem with this code. */
3769 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3770 if (local_fsize || frame_pointer_needed)
3771 local_fsize += STARTING_FRAME_OFFSET;
3772
3773 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3774 if (flag_stack_usage_info)
3775 current_function_static_stack_size = actual_fsize;
3776
3777 /* Compute a few things we will use often. */
3778 tmpreg = gen_rtx_REG (word_mode, 1);
3779
3780 /* Save RP first. The calling conventions manual states RP will
3781 always be stored into the caller's frame at sp - 20 or sp - 16
3782 depending on which ABI is in use. */
3783 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3784 {
3785 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3786 rp_saved = true;
3787 }
3788 else
3789 rp_saved = false;
3790
3791 /* Allocate the local frame and set up the frame pointer if needed. */
3792 if (actual_fsize != 0)
3793 {
3794 if (frame_pointer_needed)
3795 {
3796 /* Copy the old frame pointer temporarily into %r1. Set up the
3797 new stack pointer, then store away the saved old frame pointer
3798 into the stack at sp and at the same time update the stack
3799             pointer by actual_fsize bytes.  Two versions: the first
3800             handles small (<8k) frames, the second handles large (>=8k)
3801             frames.  */
3802 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3803 if (DO_FRAME_NOTES)
3804 RTX_FRAME_RELATED_P (insn) = 1;
3805
3806 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3807 if (DO_FRAME_NOTES)
3808 RTX_FRAME_RELATED_P (insn) = 1;
3809
3810 if (VAL_14_BITS_P (actual_fsize))
3811 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3812 else
3813 {
3814 /* It is incorrect to store the saved frame pointer at *sp,
3815 then increment sp (writes beyond the current stack boundary).
3816
3817 So instead use stwm to store at *sp and post-increment the
3818 stack pointer as an atomic operation. Then increment sp to
3819 finish allocating the new frame. */
3820 HOST_WIDE_INT adjust1 = 8192 - 64;
3821 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3822
3823 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3824 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3825 adjust2, 1);
3826 }
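
          /* Worked example (illustrative): for actual_fsize == 20000,
             adjust1 == 8128 and adjust2 == 11872 -- the post-modify store
             saves %r1 while allocating the first 8128 bytes, and the
             set_reg_plus_d call adds the remaining 11872.  */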
3827
3828 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3829 we need to store the previous stack pointer (frame pointer)
3830 into the frame marker on targets that use the HP unwind
3831 library. This allows the HP unwind library to be used to
3832 unwind GCC frames. However, we are not fully compatible
3833 with the HP library because our frame layout differs from
3834 that specified in the HP runtime specification.
3835
3836 We don't want a frame note on this instruction as the frame
3837 marker moves during dynamic stack allocation.
3838
3839 This instruction also serves as a blockage to prevent
3840 register spills from being scheduled before the stack
3841 pointer is raised. This is necessary as we store
3842 registers using the frame pointer as a base register,
3843 and the frame pointer is set before sp is raised. */
3844 if (TARGET_HPUX_UNWIND_LIBRARY)
3845 {
3846 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3847 GEN_INT (TARGET_64BIT ? -8 : -4));
3848
3849 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3850 hard_frame_pointer_rtx);
3851 }
3852 else
3853 emit_insn (gen_blockage ());
3854 }
3855      /* No frame pointer needed.  */
3856 else
3857 {
3858          /* In some cases we can perform the first callee register save
3859             and allocate the stack frame at the same time.  If so, just
3860 make a note of it and defer allocating the frame until saving
3861 the callee registers. */
3862 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3863 merge_sp_adjust_with_store = 1;
3864          /* Cannot optimize.  Adjust the stack frame by actual_fsize
3865 bytes. */
3866 else
3867 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3868 actual_fsize, 1);
3869 }
3870 }
3871
3872 /* Normal register save.
3873
3874 Do not save the frame pointer in the frame_pointer_needed case. It
3875 was done earlier. */
3876 if (frame_pointer_needed)
3877 {
3878 offset = local_fsize;
3879
3880 /* Saving the EH return data registers in the frame is the simplest
3881 way to get the frame unwind information emitted. We put them
3882 just before the general registers. */
3883 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3884 {
3885 unsigned int i, regno;
3886
3887 for (i = 0; ; ++i)
3888 {
3889 regno = EH_RETURN_DATA_REGNO (i);
3890 if (regno == INVALID_REGNUM)
3891 break;
3892
3893 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3894 offset += UNITS_PER_WORD;
3895 }
3896 }
3897
3898 for (i = 18; i >= 4; i--)
3899 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3900 {
3901 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3902 offset += UNITS_PER_WORD;
3903 gr_saved++;
3904 }
3905 /* Account for %r3 which is saved in a special place. */
3906 gr_saved++;
3907 }
3908 /* No frame pointer needed. */
3909 else
3910 {
3911 offset = local_fsize - actual_fsize;
3912
3913 /* Saving the EH return data registers in the frame is the simplest
3914 way to get the frame unwind information emitted. */
3915 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3916 {
3917 unsigned int i, regno;
3918
3919 for (i = 0; ; ++i)
3920 {
3921 regno = EH_RETURN_DATA_REGNO (i);
3922 if (regno == INVALID_REGNUM)
3923 break;
3924
3925 /* If merge_sp_adjust_with_store is nonzero, then we can
3926 optimize the first save. */
3927 if (merge_sp_adjust_with_store)
3928 {
3929 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3930 merge_sp_adjust_with_store = 0;
3931 }
3932 else
3933 store_reg (regno, offset, STACK_POINTER_REGNUM);
3934 offset += UNITS_PER_WORD;
3935 }
3936 }
3937
3938 for (i = 18; i >= 3; i--)
3939 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3940 {
3941 /* If merge_sp_adjust_with_store is nonzero, then we can
3942 optimize the first GR save. */
3943 if (merge_sp_adjust_with_store)
3944 {
3945 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3946 merge_sp_adjust_with_store = 0;
3947 }
3948 else
3949 store_reg (i, offset, STACK_POINTER_REGNUM);
3950 offset += UNITS_PER_WORD;
3951 gr_saved++;
3952 }
3953
3954 /* If we wanted to merge the SP adjustment with a GR save, but we never
3955 did any GR saves, then just emit the adjustment here. */
3956 if (merge_sp_adjust_with_store)
3957 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3958 actual_fsize, 1);
3959 }
3960
3961 /* The hppa calling conventions say that %r19, the pic offset
3962 register, is saved at sp - 32 (in this function's frame)
3963 when generating PIC code. FIXME: What is the correct thing
3964 to do for functions which make no calls and allocate no
3965 frame? Do we need to allocate a frame, or can we just omit
3966 the save? For now we'll just omit the save.
3967
3968 We don't want a note on this insn as the frame marker can
3969 move if there is a dynamic stack allocation. */
3970 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3971 {
3972 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3973
3974 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3976     }
3977
3978 /* Align pointer properly (doubleword boundary). */
3979 offset = (offset + 7) & ~7;
3980
3981 /* Floating point register store. */
3982 if (save_fregs)
3983 {
3984 rtx base;
3985
3986 /* First get the frame or stack pointer to the start of the FP register
3987 save area. */
3988 if (frame_pointer_needed)
3989 {
3990 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
3991 base = hard_frame_pointer_rtx;
3992 }
3993 else
3994 {
3995 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3996 base = stack_pointer_rtx;
3997 }
3998
3999 /* Now actually save the FP registers. */
4000 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4001 {
4002 if (df_regs_ever_live_p (i)
4003 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4004 {
4005 rtx addr, insn, reg;
4006 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4007 reg = gen_rtx_REG (DFmode, i);
4008 insn = emit_move_insn (addr, reg);
4009 if (DO_FRAME_NOTES)
4010 {
4011 RTX_FRAME_RELATED_P (insn) = 1;
4012 if (TARGET_64BIT)
4013 {
4014 rtx mem = gen_rtx_MEM (DFmode,
4015 plus_constant (Pmode, base,
4016 offset));
4017 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4018 gen_rtx_SET (VOIDmode, mem, reg));
4019 }
4020 else
4021 {
4022 rtx meml = gen_rtx_MEM (SFmode,
4023 plus_constant (Pmode, base,
4024 offset));
4025 rtx memr = gen_rtx_MEM (SFmode,
4026 plus_constant (Pmode, base,
4027 offset + 4));
4028 rtx regl = gen_rtx_REG (SFmode, i);
4029 rtx regr = gen_rtx_REG (SFmode, i + 1);
4030 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4031 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4032 rtvec vec;
4033
4034 RTX_FRAME_RELATED_P (setl) = 1;
4035 RTX_FRAME_RELATED_P (setr) = 1;
4036 vec = gen_rtvec (2, setl, setr);
4037 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4038 gen_rtx_SEQUENCE (VOIDmode, vec));
4039 }
4040 }
4041 offset += GET_MODE_SIZE (DFmode);
4042 fr_saved++;
4043 }
4044 }
4045 }
4046 }
4047
4048 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4049    Handle the case where DISP > 8k by using the add_high_const patterns.  */
4050
4051 static void
4052 load_reg (int reg, HOST_WIDE_INT disp, int base)
4053 {
4054 rtx dest = gen_rtx_REG (word_mode, reg);
4055 rtx basereg = gen_rtx_REG (Pmode, base);
4056 rtx src;
4057
4058 if (VAL_14_BITS_P (disp))
4059 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4060 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4061 {
4062 rtx delta = GEN_INT (disp);
4063 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4064
4065 emit_move_insn (tmpreg, delta);
4066 if (TARGET_DISABLE_INDEXING)
4067 {
4068 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4069 src = gen_rtx_MEM (word_mode, tmpreg);
4070 }
4071 else
4072 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4073 }
4074 else
4075 {
4076 rtx delta = GEN_INT (disp);
4077 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4078 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4079
4080 emit_move_insn (tmpreg, high);
4081 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4082 }
4083
4084 emit_move_insn (dest, src);
4085 }
4086
4087 /* Update the total code bytes output to the text section. */
4088
4089 static void
4090 update_total_code_bytes (unsigned int nbytes)
4091 {
4092 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4093 && !IN_NAMED_SECTION_P (cfun->decl))
4094 {
4095 unsigned int old_total = total_code_bytes;
4096
4097 total_code_bytes += nbytes;
4098
4099 /* Be prepared to handle overflows. */
4100 if (old_total > total_code_bytes)
4101 total_code_bytes = UINT_MAX;
4102 }
4103 }
4104
4105 /* This function generates the assembly code for function exit.
4106 Args are as for output_function_prologue ().
4107
4108 The function epilogue should not depend on the current stack
4109 pointer! It should use the frame pointer only. This is mandatory
4110 because of alloca; we also take advantage of it to omit stack
4111 adjustments before returning. */
4112
4113 static void
4114 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4115 {
4116 rtx insn = get_last_insn ();
4117
4118 last_address = 0;
4119
4120 /* pa_expand_epilogue does the dirty work now. We just need
4121 to output the assembler directives which denote the end
4122 of a function.
4123
4124 To make debuggers happy, emit a nop if the epilogue was completely
4125 eliminated due to a volatile call as the last insn in the
4126 current function. That way the return address (in %r2) will
4127 always point to a valid instruction in the current function. */
4128
4129 /* Get the last real insn. */
4130 if (GET_CODE (insn) == NOTE)
4131 insn = prev_real_insn (insn);
4132
4133 /* If it is a sequence, then look inside. */
4134 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4135 insn = XVECEXP (PATTERN (insn), 0, 0);
4136
4137 /* If insn is a CALL_INSN, then it must be a call to a volatile
4138 function (otherwise there would be epilogue insns). */
4139 if (insn && GET_CODE (insn) == CALL_INSN)
4140 {
4141 fputs ("\tnop\n", file);
4142 last_address += 4;
4143 }
4144
4145 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4146
4147 if (TARGET_SOM && TARGET_GAS)
4148 {
4149       /* We're done with this subspace except possibly for some additional
4150 debug information. Forget that we are in this subspace to ensure
4151 that the next function is output in its own subspace. */
4152 in_section = NULL;
4153 cfun->machine->in_nsubspa = 2;
4154 }
4155
4156 if (INSN_ADDRESSES_SET_P ())
4157 {
4158 insn = get_last_nonnote_insn ();
4159 last_address += INSN_ADDRESSES (INSN_UID (insn));
4160 if (INSN_P (insn))
4161 last_address += insn_default_length (insn);
4162 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4163 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4164 }
4165 else
4166 last_address = UINT_MAX;
4167
4168 /* Finally, update the total number of code bytes output so far. */
4169 update_total_code_bytes (last_address);
4170 }
4171
4172 void
4173 pa_expand_epilogue (void)
4174 {
4175 rtx tmpreg;
4176 HOST_WIDE_INT offset;
4177 HOST_WIDE_INT ret_off = 0;
4178 int i;
4179 int merge_sp_adjust_with_load = 0;
4180
4181 /* We will use this often. */
4182 tmpreg = gen_rtx_REG (word_mode, 1);
4183
4184 /* Try to restore RP early to avoid load/use interlocks when
4185 RP gets used in the return (bv) instruction. This appears to still
4186 be necessary even when we schedule the prologue and epilogue. */
4187 if (rp_saved)
4188 {
4189 ret_off = TARGET_64BIT ? -16 : -20;
4190 if (frame_pointer_needed)
4191 {
4192 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4193 ret_off = 0;
4194 }
4195 else
4196 {
4197 /* No frame pointer, and stack is smaller than 8k. */
4198 if (VAL_14_BITS_P (ret_off - actual_fsize))
4199 {
4200 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4201 ret_off = 0;
4202 }
4203 }
4204 }
4205
4206 /* General register restores. */
4207 if (frame_pointer_needed)
4208 {
4209 offset = local_fsize;
4210
4211 /* If the current function calls __builtin_eh_return, then we need
4212 to restore the saved EH data registers. */
4213 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4214 {
4215 unsigned int i, regno;
4216
4217 for (i = 0; ; ++i)
4218 {
4219 regno = EH_RETURN_DATA_REGNO (i);
4220 if (regno == INVALID_REGNUM)
4221 break;
4222
4223 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4224 offset += UNITS_PER_WORD;
4225 }
4226 }
4227
4228 for (i = 18; i >= 4; i--)
4229 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4230 {
4231 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4232 offset += UNITS_PER_WORD;
4233 }
4234 }
4235 else
4236 {
4237 offset = local_fsize - actual_fsize;
4238
4239 /* If the current function calls __builtin_eh_return, then we need
4240 to restore the saved EH data registers. */
4241 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4242 {
4243 unsigned int i, regno;
4244
4245 for (i = 0; ; ++i)
4246 {
4247 regno = EH_RETURN_DATA_REGNO (i);
4248 if (regno == INVALID_REGNUM)
4249 break;
4250
4251              /* Only for the first load.
4252                 merge_sp_adjust_with_load holds the number of the register
4253                 whose load we will merge with the sp adjustment.  */
4254 if (merge_sp_adjust_with_load == 0
4255 && local_fsize == 0
4256 && VAL_14_BITS_P (-actual_fsize))
4257 merge_sp_adjust_with_load = regno;
4258 else
4259 load_reg (regno, offset, STACK_POINTER_REGNUM);
4260 offset += UNITS_PER_WORD;
4261 }
4262 }
4263
4264 for (i = 18; i >= 3; i--)
4265 {
4266 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4267 {
4268            /* Only for the first load.
4269               merge_sp_adjust_with_load holds the number of the register
4270               whose load we will merge with the sp adjustment.  */
4271 if (merge_sp_adjust_with_load == 0
4272 && local_fsize == 0
4273 && VAL_14_BITS_P (-actual_fsize))
4274 merge_sp_adjust_with_load = i;
4275 else
4276 load_reg (i, offset, STACK_POINTER_REGNUM);
4277 offset += UNITS_PER_WORD;
4278 }
4279 }
4280 }
4281
4282 /* Align pointer properly (doubleword boundary). */
4283 offset = (offset + 7) & ~7;
4284
4285 /* FP register restores. */
4286 if (save_fregs)
4287 {
4288 /* Adjust the register to index off of. */
4289 if (frame_pointer_needed)
4290 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4291 else
4292 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4293
4294 /* Actually do the restores now. */
4295 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4296 if (df_regs_ever_live_p (i)
4297 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4298 {
4299 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4300 rtx dest = gen_rtx_REG (DFmode, i);
4301 emit_move_insn (dest, src);
4302 }
4303 }
4304
4305 /* Emit a blockage insn here to keep these insns from being moved to
4306 an earlier spot in the epilogue, or into the main instruction stream.
4307
4308 This is necessary as we must not cut the stack back before all the
4309 restores are finished. */
4310 emit_insn (gen_blockage ());
4311
4312 /* Reset stack pointer (and possibly frame pointer). The stack
4313 pointer is initially set to fp + 64 to avoid a race condition. */
4314 if (frame_pointer_needed)
4315 {
4316 rtx delta = GEN_INT (-64);
4317
4318 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4319 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4320 stack_pointer_rtx, delta));
4321 }
4322 /* If we were deferring a callee register restore, do it now. */
4323 else if (merge_sp_adjust_with_load)
4324 {
4325 rtx delta = GEN_INT (-actual_fsize);
4326 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4327
4328 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4329 }
4330 else if (actual_fsize != 0)
4331 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4332 - actual_fsize, 0);
4333
4334 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4335 frame greater than 8k), do so now. */
4336 if (ret_off != 0)
4337 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4338
4339 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4340 {
4341 rtx sa = EH_RETURN_STACKADJ_RTX;
4342
4343 emit_insn (gen_blockage ());
4344 emit_insn (TARGET_64BIT
4345 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4346 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4347 }
4348 }
4349
4350 bool
4351 pa_can_use_return_insn (void)
4352 {
4353 if (!reload_completed)
4354 return false;
4355
4356 if (frame_pointer_needed)
4357 return false;
4358
4359 if (df_regs_ever_live_p (2))
4360 return false;
4361
4362 if (crtl->profile)
4363 return false;
4364
4365 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4366 }
4367
4368 rtx
4369 hppa_pic_save_rtx (void)
4370 {
4371 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4372 }
4373
4374 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4375 #define NO_DEFERRED_PROFILE_COUNTERS 0
4376 #endif
4377
4378
4379 /* Vector of funcdef numbers. */
4380 static VEC(int,heap) *funcdef_nos;
4381
4382 /* Output deferred profile counters. */
4383 static void
4384 output_deferred_profile_counters (void)
4385 {
4386 unsigned int i;
4387 int align, n;
4388
4389 if (VEC_empty (int, funcdef_nos))
4390 return;
4391
4392 switch_to_section (data_section);
4393 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4394 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4395
4396 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4397 {
4398 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4399 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4400 }
4401
4402 VEC_free (int, heap, funcdef_nos);
4403 }
4404
4405 void
4406 hppa_profile_hook (int label_no)
4407 {
4408 /* We use SImode for the address of the function in both 32 and
4409 64-bit code to avoid having to provide DImode versions of the
4410 lcla2 and load_offset_label_address insn patterns. */
4411 rtx reg = gen_reg_rtx (SImode);
4412 rtx label_rtx = gen_label_rtx ();
4413 rtx begin_label_rtx, call_insn;
4414 char begin_label_name[16];
4415
4416 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4417 label_no);
4418 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4419
4420 if (TARGET_64BIT)
4421 emit_move_insn (arg_pointer_rtx,
4422 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4423 GEN_INT (64)));
4424
4425 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4426
4427 /* The address of the function is loaded into %r25 with an instruction-
4428 relative sequence that avoids the use of relocations. The sequence
4429 is split so that the load_offset_label_address instruction can
4430 occupy the delay slot of the call to _mcount. */
4431 if (TARGET_PA_20)
4432 emit_insn (gen_lcla2 (reg, label_rtx));
4433 else
4434 emit_insn (gen_lcla1 (reg, label_rtx));
4435
4436 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4437 reg, begin_label_rtx, label_rtx));
4438
4439 #if !NO_DEFERRED_PROFILE_COUNTERS
4440 {
4441 rtx count_label_rtx, addr, r24;
4442 char count_label_name[16];
4443
4444 VEC_safe_push (int, heap, funcdef_nos, label_no);
4445 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4446 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4447
4448 addr = force_reg (Pmode, count_label_rtx);
4449 r24 = gen_rtx_REG (Pmode, 24);
4450 emit_move_insn (r24, addr);
4451
4452 call_insn =
4453 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4454 gen_rtx_SYMBOL_REF (Pmode,
4455 "_mcount")),
4456 GEN_INT (TARGET_64BIT ? 24 : 12)));
4457
4458 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4459 }
4460 #else
4461
4462 call_insn =
4463 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4464 gen_rtx_SYMBOL_REF (Pmode,
4465 "_mcount")),
4466 GEN_INT (TARGET_64BIT ? 16 : 8)));
4467
4468 #endif
4469
4470 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4471 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4472
4473 /* Indicate the _mcount call cannot throw, nor will it execute a
4474 non-local goto. */
4475 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4476 }
4477
4478 /* Fetch the return address for the frame COUNT steps up from
4479 the current frame, after the prologue. FRAMEADDR is the
4480 frame pointer of the COUNT frame.
4481
4482 We want to ignore any export stub remnants here. To handle this,
4483 we examine the code at the return address, and if it is an export
4484 stub, we return a memory rtx for the stub return address stored
4485 at frame-24.
4486
4487 The value returned is used in two different ways:
4488
4489 1. To find a function's caller.
4490
4491 2. To change the return address for a function.
4492
4493 This function handles most instances of case 1; however, it will
4494 fail if there are two levels of stubs to execute on the return
4495 path. The only way I believe that can happen is if the return value
4496 needs a parameter relocation, which never happens for C code.
4497
4498 This function handles most instances of case 2; however, it will
4499 fail if we did not originally have stub code on the return path
4500 but will need stub code on the new return path. This can happen if
4501 the caller & callee are both in the main program, but the new
4502 return location is in a shared library. */
4503
4504 rtx
4505 pa_return_addr_rtx (int count, rtx frameaddr)
4506 {
4507 rtx label;
4508 rtx rp;
4509 rtx saved_rp;
4510 rtx ins;
4511
4512 /* The instruction stream at the return address of a PA1.X export stub is:
4513
4514 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4515 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4516 0x00011820 | stub+16: mtsp r1,sr0
4517 0xe0400002 | stub+20: be,n 0(sr0,rp)
4518
4519 0xe0400002 must be specified as -532676606 so that it won't be
4520 rejected as an invalid immediate operand on 64-bit hosts.
4521
4522 The instruction stream at the return address of a PA2.0 export stub is:
4523
4524 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4525 0xe840d002 | stub+12: bve,n (rp)
4526 */
4527
4528 HOST_WIDE_INT insns[4];
4529 int i, len;
4530
4531 if (count != 0)
4532 return NULL_RTX;
4533
4534 rp = get_hard_reg_initial_val (Pmode, 2);
4535
4536 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4537 return rp;
4538
4539 /* If there is no export stub then just use the value saved from
4540 the return pointer register. */
4541
4542 saved_rp = gen_reg_rtx (Pmode);
4543 emit_move_insn (saved_rp, rp);
4544
4545 /* Get pointer to the instruction stream. We have to mask out the
4546 privilege level from the two low order bits of the return address
4547 pointer here so that ins will point to the start of the first
4548 instruction that would have been executed if we returned. */
4549 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4550 label = gen_label_rtx ();
4551
4552 if (TARGET_PA_20)
4553 {
4554 insns[0] = 0x4bc23fd1;
4555 insns[1] = -398405630;
4556 len = 2;
4557 }
4558 else
4559 {
4560 insns[0] = 0x4bc23fd1;
4561 insns[1] = 0x004010a1;
4562 insns[2] = 0x00011820;
4563 insns[3] = -532676606;
4564 len = 4;
4565 }
4566
4567 /* Check the instruction stream at the normal return address for the
4568      export stub.  If it is an export stub, then our return address is
4569 really in -24[frameaddr]. */
4570
4571 for (i = 0; i < len; i++)
4572 {
4573 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4574 rtx op1 = GEN_INT (insns[i]);
4575 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4576 }
4577
4578 /* Here we know that our return address points to an export
4579 stub. We don't want to return the address of the export stub,
4580 but rather the return address of the export stub. That return
4581 address is stored at -24[frameaddr]. */
4582
4583 emit_move_insn (saved_rp,
4584 gen_rtx_MEM (Pmode,
4585 memory_address (Pmode,
4586 plus_constant (Pmode, frameaddr,
4587 -24))));
4588
4589 emit_label (label);
4590
4591 return saved_rp;
4592 }
4593
4594 void
4595 pa_emit_bcond_fp (rtx operands[])
4596 {
4597 enum rtx_code code = GET_CODE (operands[0]);
4598 rtx operand0 = operands[1];
4599 rtx operand1 = operands[2];
4600 rtx label = operands[3];
4601
4602 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4603 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4604
4605 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4606 gen_rtx_IF_THEN_ELSE (VOIDmode,
4607 gen_rtx_fmt_ee (NE,
4608 VOIDmode,
4609 gen_rtx_REG (CCFPmode, 0),
4610 const0_rtx),
4611 gen_rtx_LABEL_REF (VOIDmode, label),
4612 pc_rtx)));
4614 }
4615
4616 /* Adjust the cost of a scheduling dependency. Return the new cost of
4617 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4618
4619 static int
4620 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4621 {
4622 enum attr_type attr_type;
4623
4624   /* Don't adjust costs for a pa8000 chip; also do not adjust any
4625      true dependencies, as they are described with bypasses now.  */
4626 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4627 return cost;
4628
4629 if (! recog_memoized (insn))
4630 return 0;
4631
4632 attr_type = get_attr_type (insn);
4633
4634 switch (REG_NOTE_KIND (link))
4635 {
4636 case REG_DEP_ANTI:
4637 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4638 cycles later. */
4639
4640 if (attr_type == TYPE_FPLOAD)
4641 {
4642 rtx pat = PATTERN (insn);
4643 rtx dep_pat = PATTERN (dep_insn);
4644 if (GET_CODE (pat) == PARALLEL)
4645 {
4646 /* This happens for the fldXs,mb patterns. */
4647 pat = XVECEXP (pat, 0, 0);
4648 }
4649 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4650 /* If this happens, we have to extend this to schedule
4651 optimally. Return 0 for now. */
4652 return 0;
4653
4654 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4655 {
4656 if (! recog_memoized (dep_insn))
4657 return 0;
4658 switch (get_attr_type (dep_insn))
4659 {
4660 case TYPE_FPALU:
4661 case TYPE_FPMULSGL:
4662 case TYPE_FPMULDBL:
4663 case TYPE_FPDIVSGL:
4664 case TYPE_FPDIVDBL:
4665 case TYPE_FPSQRTSGL:
4666 case TYPE_FPSQRTDBL:
4667 /* A fpload can't be issued until one cycle before a
4668 preceding arithmetic operation has finished if
4669 the target of the fpload is any of the sources
4670 (or destination) of the arithmetic operation. */
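		  /* Illustrative arithmetic: if the arithmetic op's default
		     latency is 3 cycles, the adjusted cost here is 2.  */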
4671 return insn_default_latency (dep_insn) - 1;
4672
4673 default:
4674 return 0;
4675 }
4676 }
4677 }
4678 else if (attr_type == TYPE_FPALU)
4679 {
4680 rtx pat = PATTERN (insn);
4681 rtx dep_pat = PATTERN (dep_insn);
4682 if (GET_CODE (pat) == PARALLEL)
4683 {
4684 /* This happens for the fldXs,mb patterns. */
4685 pat = XVECEXP (pat, 0, 0);
4686 }
4687 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4688 /* If this happens, we have to extend this to schedule
4689 optimally. Return 0 for now. */
4690 return 0;
4691
4692 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4693 {
4694 if (! recog_memoized (dep_insn))
4695 return 0;
4696 switch (get_attr_type (dep_insn))
4697 {
4698 case TYPE_FPDIVSGL:
4699 case TYPE_FPDIVDBL:
4700 case TYPE_FPSQRTSGL:
4701 case TYPE_FPSQRTDBL:
4702 /* An ALU flop can't be issued until two cycles before a
4703 preceding divide or sqrt operation has finished if
4704 the target of the ALU flop is any of the sources
4705 (or destination) of the divide or sqrt operation. */
4706 return insn_default_latency (dep_insn) - 2;
4707
4708 default:
4709 return 0;
4710 }
4711 }
4712 }
4713
4714 /* For other anti dependencies, the cost is 0. */
4715 return 0;
4716
4717 case REG_DEP_OUTPUT:
4718 /* Output dependency; DEP_INSN writes a register that INSN writes some
4719 cycles later. */
4720 if (attr_type == TYPE_FPLOAD)
4721 {
4722 rtx pat = PATTERN (insn);
4723 rtx dep_pat = PATTERN (dep_insn);
4724 if (GET_CODE (pat) == PARALLEL)
4725 {
4726 /* This happens for the fldXs,mb patterns. */
4727 pat = XVECEXP (pat, 0, 0);
4728 }
4729 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4730 /* If this happens, we have to extend this to schedule
4731 optimally. Return 0 for now. */
4732 return 0;
4733
4734 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4735 {
4736 if (! recog_memoized (dep_insn))
4737 return 0;
4738 switch (get_attr_type (dep_insn))
4739 {
4740 case TYPE_FPALU:
4741 case TYPE_FPMULSGL:
4742 case TYPE_FPMULDBL:
4743 case TYPE_FPDIVSGL:
4744 case TYPE_FPDIVDBL:
4745 case TYPE_FPSQRTSGL:
4746 case TYPE_FPSQRTDBL:
4747 /* A fpload can't be issued until one cycle before a
4748 preceding arithmetic operation has finished if
4749 the target of the fpload is the destination of the
4750 arithmetic operation.
4751
4752 Exception: For PA7100LC, PA7200 and PA7300, the cost
4753 is 3 cycles, unless they bundle together. We also
4754 pay the penalty if the second insn is a fpload. */
4755 return insn_default_latency (dep_insn) - 1;
4756
4757 default:
4758 return 0;
4759 }
4760 }
4761 }
4762 else if (attr_type == TYPE_FPALU)
4763 {
4764 rtx pat = PATTERN (insn);
4765 rtx dep_pat = PATTERN (dep_insn);
4766 if (GET_CODE (pat) == PARALLEL)
4767 {
4768 /* This happens for the fldXs,mb patterns. */
4769 pat = XVECEXP (pat, 0, 0);
4770 }
4771 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4772 /* If this happens, we have to extend this to schedule
4773 optimally. Return 0 for now. */
4774 return 0;
4775
4776 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4777 {
4778 if (! recog_memoized (dep_insn))
4779 return 0;
4780 switch (get_attr_type (dep_insn))
4781 {
4782 case TYPE_FPDIVSGL:
4783 case TYPE_FPDIVDBL:
4784 case TYPE_FPSQRTSGL:
4785 case TYPE_FPSQRTDBL:
4786 /* An ALU flop can't be issued until two cycles before a
4787 preceding divide or sqrt operation has finished if
4788 the target of the ALU flop is also the target of
4789 the divide or sqrt operation. */
4790 return insn_default_latency (dep_insn) - 2;
4791
4792 default:
4793 return 0;
4794 }
4795 }
4796 }
4797
4798 /* For other output dependencies, the cost is 0. */
4799 return 0;
4800
4801 default:
4802 gcc_unreachable ();
4803 }
4804 }
4805
4806 /* Adjust scheduling priorities. We use this to try and keep addil
4807 and the next use of %r1 close together. */
4808 static int
4809 pa_adjust_priority (rtx insn, int priority)
4810 {
4811 rtx set = single_set (insn);
4812 rtx src, dest;
4813 if (set)
4814 {
4815 src = SET_SRC (set);
4816 dest = SET_DEST (set);
4817 if (GET_CODE (src) == LO_SUM
4818 && symbolic_operand (XEXP (src, 1), VOIDmode)
4819 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4820 priority >>= 3;
4821
4822 else if (GET_CODE (src) == MEM
4823 && GET_CODE (XEXP (src, 0)) == LO_SUM
4824 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4825 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4826 priority >>= 1;
4827
4828 else if (GET_CODE (dest) == MEM
4829 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4830 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4831 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4832 priority >>= 3;
4833 }
4834 return priority;
4835 }
4836
4837 /* The 700 can only issue a single insn at a time.
4838 The 7XXX processors can issue two insns at a time.
4839 The 8000 can issue 4 insns at a time. */
4840 static int
4841 pa_issue_rate (void)
4842 {
4843 switch (pa_cpu)
4844 {
4845 case PROCESSOR_700: return 1;
4846 case PROCESSOR_7100: return 2;
4847 case PROCESSOR_7100LC: return 2;
4848 case PROCESSOR_7200: return 2;
4849 case PROCESSOR_7300: return 2;
4850 case PROCESSOR_8000: return 4;
4851
4852 default:
4853 gcc_unreachable ();
4854 }
4855 }
4856
4857
4858
4859 /* Return any length adjustment needed by INSN which already has its length
4860 computed as LENGTH. Return zero if no adjustment is necessary.
4861
4862 For the PA: function calls, millicode calls, and backwards short
4863 conditional branches with unfilled delay slots need an adjustment by +1
4864    conditional branches with unfilled delay slots need a 4-byte adjustment
4865    (to account for the NOP which will be inserted into the instruction stream).
4866 Also compute the length of an inline block move here as it is too
4867 complicated to express as a length attribute in pa.md. */
4868 int
4869 pa_adjust_insn_length (rtx insn, int length)
4870 {
4871 rtx pat = PATTERN (insn);
4872
4873 /* Jumps inside switch tables which have unfilled delay slots need
4874 adjustment. */
4875 if (GET_CODE (insn) == JUMP_INSN
4876 && GET_CODE (pat) == PARALLEL
4877 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4878 return 4;
4879 /* Millicode insn with an unfilled delay slot. */
4880 else if (GET_CODE (insn) == INSN
4881 && GET_CODE (pat) != SEQUENCE
4882 && GET_CODE (pat) != USE
4883 && GET_CODE (pat) != CLOBBER
4884 && get_attr_type (insn) == TYPE_MILLI)
4885 return 4;
4886 /* Block move pattern. */
4887 else if (GET_CODE (insn) == INSN
4888 && GET_CODE (pat) == PARALLEL
4889 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4890 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4891 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4892 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4893 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4894 return compute_movmem_length (insn) - 4;
4895 /* Block clear pattern. */
4896 else if (GET_CODE (insn) == INSN
4897 && GET_CODE (pat) == PARALLEL
4898 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4899 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4900 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4901 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4902 return compute_clrmem_length (insn) - 4;
4903 /* Conditional branch with an unfilled delay slot. */
4904 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4905 {
4906 /* Adjust a short backwards conditional with an unfilled delay slot. */
4907 if (GET_CODE (pat) == SET
4908 && length == 4
4909 && JUMP_LABEL (insn) != NULL_RTX
4910 && ! forward_branch_p (insn))
4911 return 4;
4912 else if (GET_CODE (pat) == PARALLEL
4913 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4914 && length == 4)
4915 return 4;
4916 /* Adjust dbra insn with short backwards conditional branch with
4917         unfilled delay slot -- only for the case where the counter is
4918         in a general register.  */
4919 else if (GET_CODE (pat) == PARALLEL
4920 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4921 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4922 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4923 && length == 4
4924 && ! forward_branch_p (insn))
4925 return 4;
4926 else
4927 return 0;
4928 }
4929 return 0;
4930 }
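
/* For instance (illustrative): a 4-byte backwards conditional branch whose
   delay slot stays unfilled is emitted as branch + nop, so its effective
   length is 8 bytes -- the extra 4 reported by the adjustment above.  */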
4931
4932 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4933
4934 static bool
4935 pa_print_operand_punct_valid_p (unsigned char code)
4936 {
4937 if (code == '@'
4938 || code == '#'
4939 || code == '*'
4940 || code == '^')
4941 return true;
4942
4943 return false;
4944 }
4945
4946 /* Print operand X (an rtx) in assembler syntax to file FILE.
4947 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4948 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4949
4950 void
4951 pa_print_operand (FILE *file, rtx x, int code)
4952 {
4953 switch (code)
4954 {
4955 case '#':
4956 /* Output a 'nop' if there's nothing for the delay slot. */
4957 if (dbr_sequence_length () == 0)
4958 fputs ("\n\tnop", file);
4959 return;
4960 case '*':
4961       /* Output a nullification completer if there's nothing for the
4962          delay slot or nullification is requested.  */
4963       if (dbr_sequence_length () == 0
4964           || (final_sequence
4965               && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4966 fputs (",n", file);
4967 return;
4968 case 'R':
4969 /* Print out the second register name of a register pair.
4970 I.e., R (6) => 7. */
4971 fputs (reg_names[REGNO (x) + 1], file);
4972 return;
4973 case 'r':
4974 /* A register or zero. */
4975 if (x == const0_rtx
4976 || (x == CONST0_RTX (DFmode))
4977 || (x == CONST0_RTX (SFmode)))
4978 {
4979 fputs ("%r0", file);
4980 return;
4981 }
4982 else
4983 break;
4984 case 'f':
4985 /* A register or zero (floating point). */
4986 if (x == const0_rtx
4987 || (x == CONST0_RTX (DFmode))
4988 || (x == CONST0_RTX (SFmode)))
4989 {
4990 fputs ("%fr0", file);
4991 return;
4992 }
4993 else
4994 break;
4995 case 'A':
4996 {
4997 rtx xoperands[2];
4998
4999 xoperands[0] = XEXP (XEXP (x, 0), 0);
5000 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5001 pa_output_global_address (file, xoperands[1], 0);
5002 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5003 return;
5004 }
5005
5006 case 'C': /* Plain (C)ondition */
5007 case 'X':
5008 switch (GET_CODE (x))
5009 {
5010 case EQ:
5011 fputs ("=", file); break;
5012 case NE:
5013 fputs ("<>", file); break;
5014 case GT:
5015 fputs (">", file); break;
5016 case GE:
5017 fputs (">=", file); break;
5018 case GEU:
5019 fputs (">>=", file); break;
5020 case GTU:
5021 fputs (">>", file); break;
5022 case LT:
5023 fputs ("<", file); break;
5024 case LE:
5025 fputs ("<=", file); break;
5026 case LEU:
5027 fputs ("<<=", file); break;
5028 case LTU:
5029 fputs ("<<", file); break;
5030 default:
5031 gcc_unreachable ();
5032 }
5033 return;
5034 case 'N': /* Condition, (N)egated */
5035 switch (GET_CODE (x))
5036 {
5037 case EQ:
5038 fputs ("<>", file); break;
5039 case NE:
5040 fputs ("=", file); break;
5041 case GT:
5042 fputs ("<=", file); break;
5043 case GE:
5044 fputs ("<", file); break;
5045 case GEU:
5046 fputs ("<<", file); break;
5047 case GTU:
5048 fputs ("<<=", file); break;
5049 case LT:
5050 fputs (">=", file); break;
5051 case LE:
5052 fputs (">", file); break;
5053 case LEU:
5054 fputs (">>", file); break;
5055 case LTU:
5056 fputs (">>=", file); break;
5057 default:
5058 gcc_unreachable ();
5059 }
5060 return;
5061 /* For floating point comparisons. Note that the output
5062 predicates are the complement of the desired mode. The
5063 conditions for GT, GE, LT, LE and LTGT cause an invalid
5064 operation exception if the result is unordered and this
5065 exception is enabled in the floating-point status register. */
5066 case 'Y':
5067 switch (GET_CODE (x))
5068 {
5069 case EQ:
5070 fputs ("!=", file); break;
5071 case NE:
5072 fputs ("=", file); break;
5073 case GT:
5074 fputs ("!>", file); break;
5075 case GE:
5076 fputs ("!>=", file); break;
5077 case LT:
5078 fputs ("!<", file); break;
5079 case LE:
5080 fputs ("!<=", file); break;
5081 case LTGT:
5082 fputs ("!<>", file); break;
5083 case UNLE:
5084 fputs ("!?<=", file); break;
5085 case UNLT:
5086 fputs ("!?<", file); break;
5087 case UNGE:
5088 fputs ("!?>=", file); break;
5089 case UNGT:
5090 fputs ("!?>", file); break;
5091 case UNEQ:
5092 fputs ("!?=", file); break;
5093 case UNORDERED:
5094 fputs ("!?", file); break;
5095 case ORDERED:
5096 fputs ("?", file); break;
5097 default:
5098 gcc_unreachable ();
5099 }
5100 return;
5101 case 'S': /* Condition, operands are (S)wapped. */
5102 switch (GET_CODE (x))
5103 {
5104 case EQ:
5105 fputs ("=", file); break;
5106 case NE:
5107 fputs ("<>", file); break;
5108 case GT:
5109 fputs ("<", file); break;
5110 case GE:
5111 fputs ("<=", file); break;
5112 case GEU:
5113 fputs ("<<=", file); break;
5114 case GTU:
5115 fputs ("<<", file); break;
5116 case LT:
5117 fputs (">", file); break;
5118 case LE:
5119 fputs (">=", file); break;
5120 case LEU:
5121 fputs (">>=", file); break;
5122 case LTU:
5123 fputs (">>", file); break;
5124 default:
5125 gcc_unreachable ();
5126 }
5127 return;
5128     case 'B':  /* Condition, (B)oth swapped and negated.  */
5129 switch (GET_CODE (x))
5130 {
5131 case EQ:
5132 fputs ("<>", file); break;
5133 case NE:
5134 fputs ("=", file); break;
5135 case GT:
5136 fputs (">=", file); break;
5137 case GE:
5138 fputs (">", file); break;
5139 case GEU:
5140 fputs (">>", file); break;
5141 case GTU:
5142 fputs (">>=", file); break;
5143 case LT:
5144 fputs ("<=", file); break;
5145 case LE:
5146 fputs ("<", file); break;
5147 case LEU:
5148 fputs ("<<", file); break;
5149 case LTU:
5150 fputs ("<<=", file); break;
5151 default:
5152 gcc_unreachable ();
5153 }
5154 return;
5155 case 'k':
5156 gcc_assert (GET_CODE (x) == CONST_INT);
5157 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5158 return;
5159 case 'Q':
5160 gcc_assert (GET_CODE (x) == CONST_INT);
5161 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5162 return;
5163 case 'L':
5164 gcc_assert (GET_CODE (x) == CONST_INT);
5165 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5166 return;
5167 case 'O':
5168 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5169 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5170 return;
5171 case 'p':
5172 gcc_assert (GET_CODE (x) == CONST_INT);
5173 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5174 return;
5175 case 'P':
5176 gcc_assert (GET_CODE (x) == CONST_INT);
5177 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5178 return;
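    /* Worked examples for the bit-position codes above (illustrative):
       with INTVAL == 8, %Q prints 56 (64 - 8), %L prints 24 (32 - 8),
       %p prints 55 (63 - 8) and %P prints 23 (31 - 8) -- the complemented
       forms expected by the corresponding instruction fields.  */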
5179 case 'I':
5180 if (GET_CODE (x) == CONST_INT)
5181 fputs ("i", file);
5182 return;
5183 case 'M':
5184 case 'F':
5185 switch (GET_CODE (XEXP (x, 0)))
5186 {
5187 case PRE_DEC:
5188 case PRE_INC:
5189 if (ASSEMBLER_DIALECT == 0)
5190 fputs ("s,mb", file);
5191 else
5192 fputs (",mb", file);
5193 break;
5194 case POST_DEC:
5195 case POST_INC:
5196 if (ASSEMBLER_DIALECT == 0)
5197 fputs ("s,ma", file);
5198 else
5199 fputs (",ma", file);
5200 break;
5201 case PLUS:
5202 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5203 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5204 {
5205 if (ASSEMBLER_DIALECT == 0)
5206 fputs ("x", file);
5207 }
5208 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5209 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5210 {
5211 if (ASSEMBLER_DIALECT == 0)
5212 fputs ("x,s", file);
5213 else
5214 fputs (",s", file);
5215 }
5216 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5217 fputs ("s", file);
5218 break;
5219 default:
5220 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5221 fputs ("s", file);
5222 break;
5223 }
5224 return;
5225 case 'G':
5226 pa_output_global_address (file, x, 0);
5227 return;
5228 case 'H':
5229 pa_output_global_address (file, x, 1);
5230 return;
5231 case 0: /* Don't do anything special */
5232 break;
5233 case 'Z':
5234 {
5235 unsigned op[3];
5236 compute_zdepwi_operands (INTVAL (x), op);
5237 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5238 return;
5239 }
5240 case 'z':
5241 {
5242 unsigned op[3];
5243 compute_zdepdi_operands (INTVAL (x), op);
5244 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5245 return;
5246 }
5247 case 'c':
5248 /* We can get here from a .vtable_inherit due to our
5249 CONSTANT_ADDRESS_P rejecting perfectly good constant
5250 addresses. */
5251 break;
5252 default:
5253 gcc_unreachable ();
5254 }
5255 if (GET_CODE (x) == REG)
5256 {
5257 fputs (reg_names [REGNO (x)], file);
5258 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5259 {
5260 fputs ("R", file);
5261 return;
5262 }
5263 if (FP_REG_P (x)
5264 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5265 && (REGNO (x) & 1) == 0)
5266 fputs ("L", file);
5267 }
5268 else if (GET_CODE (x) == MEM)
5269 {
5270 int size = GET_MODE_SIZE (GET_MODE (x));
5271 rtx base = NULL_RTX;
5272 switch (GET_CODE (XEXP (x, 0)))
5273 {
5274 case PRE_DEC:
5275 case POST_DEC:
5276 base = XEXP (XEXP (x, 0), 0);
5277 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5278 break;
5279 case PRE_INC:
5280 case POST_INC:
5281 base = XEXP (XEXP (x, 0), 0);
5282 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5283 break;
5284 case PLUS:
5285 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5286 fprintf (file, "%s(%s)",
5287 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5288 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5289 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5290 fprintf (file, "%s(%s)",
5291 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5292 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5293 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5294 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5295 {
5296 /* Because the REG_POINTER flag can get lost during reload,
5297 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5298 index and base registers in the combined move patterns. */
5299 rtx base = XEXP (XEXP (x, 0), 1);
5300 rtx index = XEXP (XEXP (x, 0), 0);
5301
5302 fprintf (file, "%s(%s)",
5303 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5304 }
5305 else
5306 output_address (XEXP (x, 0));
5307 break;
5308 default:
5309 output_address (XEXP (x, 0));
5310 break;
5311 }
5312 }
5313 else
5314 output_addr_const (file, x);
5315 }
5316
5317 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF.  */
5318
5319 void
5320 pa_output_global_address (FILE *file, rtx x, int round_constant)
5321 {
5322
5323 /* Imagine (high (const (plus ...))). */
5324 if (GET_CODE (x) == HIGH)
5325 x = XEXP (x, 0);
5326
5327 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5328 output_addr_const (file, x);
5329 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5330 {
5331 output_addr_const (file, x);
5332 fputs ("-$global$", file);
5333 }
5334 else if (GET_CODE (x) == CONST)
5335 {
5336 const char *sep = "";
5337 int offset = 0; /* assembler wants -$global$ at end */
5338 rtx base = NULL_RTX;
5339
5340 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5341 {
5342 case SYMBOL_REF:
5343 base = XEXP (XEXP (x, 0), 0);
5344 output_addr_const (file, base);
5345 break;
5346 case CONST_INT:
5347 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5348 break;
5349 default:
5350 gcc_unreachable ();
5351 }
5352
5353 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5354 {
5355 case SYMBOL_REF:
5356 base = XEXP (XEXP (x, 0), 1);
5357 output_addr_const (file, base);
5358 break;
5359 case CONST_INT:
5360 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5361 break;
5362 default:
5363 gcc_unreachable ();
5364 }
5365
5366 /* How bogus. The compiler is apparently responsible for
5367 rounding the constant if it uses an LR field selector.
5368
5369 The linker and/or assembler seem a better place since
5370 they have to do this kind of thing already.
5371
5372 If we fail to do this, HP's optimizing linker may eliminate
5373 an addil, but not update the ldw/stw/ldo instruction that
5374 uses the result of the addil. */
5375 if (round_constant)
5376 offset = ((offset + 0x1000) & ~0x1fff);
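      /* E.g. (illustrative): offset 0x1800 rounds to 0x2000 and offset
	 0x0fff rounds to 0, i.e. to the nearest multiple of 0x2000 (8k).  */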
5377
5378 switch (GET_CODE (XEXP (x, 0)))
5379 {
5380 case PLUS:
5381 if (offset < 0)
5382 {
5383 offset = -offset;
5384 sep = "-";
5385 }
5386 else
5387 sep = "+";
5388 break;
5389
5390 case MINUS:
5391 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5392 sep = "-";
5393 break;
5394
5395 default:
5396 gcc_unreachable ();
5397 }
5398
5399 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5400 fputs ("-$global$", file);
5401 if (offset)
5402 fprintf (file, "%s%d", sep, offset);
5403 }
5404 else
5405 output_addr_const (file, x);
5406 }
5407
5408 /* Output boilerplate text to appear at the beginning of the file.
5409 There are several possible versions. */
5410 #define aputs(x) fputs(x, asm_out_file)
5411 static inline void
5412 pa_file_start_level (void)
5413 {
5414 if (TARGET_64BIT)
5415 aputs ("\t.LEVEL 2.0w\n");
5416 else if (TARGET_PA_20)
5417 aputs ("\t.LEVEL 2.0\n");
5418 else if (TARGET_PA_11)
5419 aputs ("\t.LEVEL 1.1\n");
5420 else
5421 aputs ("\t.LEVEL 1.0\n");
5422 }
5423
5424 static inline void
5425 pa_file_start_space (int sortspace)
5426 {
5427 aputs ("\t.SPACE $PRIVATE$");
5428 if (sortspace)
5429 aputs (",SORT=16");
5430 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5431 if (flag_tm)
5432 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5433 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5434 "\n\t.SPACE $TEXT$");
5435 if (sortspace)
5436 aputs (",SORT=8");
5437 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5438 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5439 }
5440
5441 static inline void
5442 pa_file_start_file (int want_version)
5443 {
5444 if (write_symbols != NO_DEBUG)
5445 {
5446 output_file_directive (asm_out_file, main_input_filename);
5447 if (want_version)
5448 aputs ("\t.version\t\"01.01\"\n");
5449 }
5450 }
5451
5452 static inline void
5453 pa_file_start_mcount (const char *aswhat)
5454 {
5455 if (profile_flag)
5456 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5457 }
5458
5459 static void
5460 pa_elf_file_start (void)
5461 {
5462 pa_file_start_level ();
5463 pa_file_start_mcount ("ENTRY");
5464 pa_file_start_file (0);
5465 }
5466
5467 static void
5468 pa_som_file_start (void)
5469 {
5470 pa_file_start_level ();
5471 pa_file_start_space (0);
5472 aputs ("\t.IMPORT $global$,DATA\n"
5473 "\t.IMPORT $$dyncall,MILLICODE\n");
5474 pa_file_start_mcount ("CODE");
5475 pa_file_start_file (0);
5476 }
5477
5478 static void
5479 pa_linux_file_start (void)
5480 {
5481 pa_file_start_file (1);
5482 pa_file_start_level ();
5483 pa_file_start_mcount ("CODE");
5484 }
5485
5486 static void
5487 pa_hpux64_gas_file_start (void)
5488 {
5489 pa_file_start_level ();
5490 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5491 if (profile_flag)
5492 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5493 #endif
5494 pa_file_start_file (1);
5495 }
5496
5497 static void
5498 pa_hpux64_hpas_file_start (void)
5499 {
5500 pa_file_start_level ();
5501 pa_file_start_space (1);
5502 pa_file_start_mcount ("CODE");
5503 pa_file_start_file (0);
5504 }
5505 #undef aputs
5506
5507 /* Search the deferred plabel list for SYMBOL and return its internal
5508 label. If an entry for SYMBOL is not found, a new entry is created. */
5509
5510 rtx
5511 pa_get_deferred_plabel (rtx symbol)
5512 {
5513 const char *fname = XSTR (symbol, 0);
5514 size_t i;
5515
5516 /* See if we have already put this function on the list of deferred
5517 plabels. This list is generally small, so a linear search is not
5518 too ugly. If it proves too slow, replace it with something faster. */
5519 for (i = 0; i < n_deferred_plabels; i++)
5520 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5521 break;
5522
5523 /* If the deferred plabel list is empty, or this entry was not found
5524 on the list, create a new entry on the list. */
5525 if (deferred_plabels == NULL || i == n_deferred_plabels)
5526 {
5527 tree id;
5528
5529 if (deferred_plabels == 0)
5530 deferred_plabels = ggc_alloc_deferred_plabel ();
5531 else
5532 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5533 deferred_plabels,
5534 n_deferred_plabels + 1);
5535
5536 i = n_deferred_plabels++;
5537 deferred_plabels[i].internal_label = gen_label_rtx ();
5538 deferred_plabels[i].symbol = symbol;
5539
5540 /* Gross. We have just implicitly taken the address of this
5541 function. Mark it in the same manner as assemble_name. */
5542 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5543 if (id)
5544 mark_referenced (id);
5545 }
5546
5547 return deferred_plabels[i].internal_label;
5548 }
5549
5550 static void
5551 output_deferred_plabels (void)
5552 {
5553 size_t i;
5554
5555 /* If we have some deferred plabels, then we need to switch into the
5556 data or readonly data section, and align it to a 4-byte boundary
5557 (8 bytes in the 64-bit runtime) before outputting the deferred plabels. */
5558 if (n_deferred_plabels)
5559 {
5560 switch_to_section (flag_pic ? data_section : readonly_data_section);
5561 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5562 }
5563
5564 /* Now output the deferred plabels. */
5565 for (i = 0; i < n_deferred_plabels; i++)
5566 {
5567 targetm.asm_out.internal_label (asm_out_file, "L",
5568 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5569 assemble_integer (deferred_plabels[i].symbol,
5570 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5571 }
5572 }
5573
5574 /* Initialize optabs to point to emulation routines. */
5575
5576 static void
5577 pa_init_libfuncs (void)
5578 {
5579 if (HPUX_LONG_DOUBLE_LIBRARY)
5580 {
5581 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5582 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5583 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5584 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5585 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5586 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5587 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5588 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5589 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5590
5591 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5592 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5593 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5594 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5595 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5596 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5597 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5598
5599 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5600 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5601 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5602 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5603
5604 set_conv_libfunc (sfix_optab, SImode, TFmode,
5605 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5606 : "_U_Qfcnvfxt_quad_to_sgl");
5607 set_conv_libfunc (sfix_optab, DImode, TFmode,
5608 "_U_Qfcnvfxt_quad_to_dbl");
5609 set_conv_libfunc (ufix_optab, SImode, TFmode,
5610 "_U_Qfcnvfxt_quad_to_usgl");
5611 set_conv_libfunc (ufix_optab, DImode, TFmode,
5612 "_U_Qfcnvfxt_quad_to_udbl");
5613
5614 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5615 "_U_Qfcnvxf_sgl_to_quad");
5616 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5617 "_U_Qfcnvxf_dbl_to_quad");
5618 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5619 "_U_Qfcnvxf_usgl_to_quad");
5620 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5621 "_U_Qfcnvxf_udbl_to_quad");
5622 }
5623
5624 if (TARGET_SYNC_LIBCALL)
5625 init_sync_libfuncs (UNITS_PER_WORD);
5626 }
5627
5628 /* HP's millicode routines mean something special to the assembler.
5629 Keep track of which ones we have used. */
5630
5631 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5632 static void import_milli (enum millicodes);
5633 static char imported[(int) end1000];
5634 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5635 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5636 #define MILLI_START 10
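/* import_milli below splices the millicode name over the four dots in
IMPORT_STRING at offset MILLI_START. For example, import_milli (mulI)
emits

	.IMPORT $$mulI,MILLICODE

and records the fact so the directive is emitted only once. */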
5637
5638 static void
5639 import_milli (enum millicodes code)
5640 {
5641 char str[sizeof (import_string)];
5642
5643 if (!imported[(int) code])
5644 {
5645 imported[(int) code] = 1;
5646 strcpy (str, import_string);
5647 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5648 output_asm_insn (str, 0);
5649 }
5650 }
5651
5652 /* The register constraints have put the operands and return value in
5653 the proper registers. */
5654
5655 const char *
5656 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5657 {
5658 import_milli (mulI);
5659 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5660 }
5661
5662 /* Emit the rtl for doing a division by a constant. */
5663
5664 /* Do magic division millicodes exist for this value? */
5665 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
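/* With the table above, the divisors that qualify are 3, 5, 6, 7, 9,
10, 12, 14 and 15; pa_output_div_insn then calls millicode such as
$$divI_3 or $$divU_10. Powers of two are absent because those
divisions are done with shifts instead. */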
5666
5667 /* We'll use an array to keep track of the magic millicodes and
5668 whether or not we've used them already. [n][0] is signed, [n][1] is
5669 unsigned. */
5670
5671 static int div_milli[16][2];
5672
5673 int
5674 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5675 {
5676 if (GET_CODE (operands[2]) == CONST_INT
5677 && INTVAL (operands[2]) > 0
5678 && INTVAL (operands[2]) < 16
5679 && pa_magic_milli[INTVAL (operands[2])])
5680 {
5681 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5682
5683 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5684 emit
5685 (gen_rtx_PARALLEL
5686 (VOIDmode,
5687 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5688 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5689 SImode,
5690 gen_rtx_REG (SImode, 26),
5691 operands[2])),
5692 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5693 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5694 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5695 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5696 gen_rtx_CLOBBER (VOIDmode, ret))));
5697 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5698 return 1;
5699 }
5700 return 0;
5701 }
5702
5703 const char *
5704 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5705 {
5706 int divisor;
5707
5708 /* If the divisor is a constant, try to use one of the special
5709 opcodes. */
5710 if (GET_CODE (operands[0]) == CONST_INT)
5711 {
5712 static char buf[100];
5713 divisor = INTVAL (operands[0]);
5714 if (!div_milli[divisor][unsignedp])
5715 {
5716 div_milli[divisor][unsignedp] = 1;
5717 if (unsignedp)
5718 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5719 else
5720 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5721 }
5722 if (unsignedp)
5723 {
5724 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5725 INTVAL (operands[0]));
5726 return pa_output_millicode_call (insn,
5727 gen_rtx_SYMBOL_REF (SImode, buf));
5728 }
5729 else
5730 {
5731 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5732 INTVAL (operands[0]));
5733 return pa_output_millicode_call (insn,
5734 gen_rtx_SYMBOL_REF (SImode, buf));
5735 }
5736 }
5737 /* Divisor isn't a special constant. */
5738 else
5739 {
5740 if (unsignedp)
5741 {
5742 import_milli (divU);
5743 return pa_output_millicode_call (insn,
5744 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5745 }
5746 else
5747 {
5748 import_milli (divI);
5749 return pa_output_millicode_call (insn,
5750 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5751 }
5752 }
5753 }
5754
5755 /* Output a $$rem millicode to do mod. */
5756
5757 const char *
5758 pa_output_mod_insn (int unsignedp, rtx insn)
5759 {
5760 if (unsignedp)
5761 {
5762 import_milli (remU);
5763 return pa_output_millicode_call (insn,
5764 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5765 }
5766 else
5767 {
5768 import_milli (remI);
5769 return pa_output_millicode_call (insn,
5770 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5771 }
5772 }
5773
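/* Output a .CALL directive describing where each argument word lives.
For example, a call whose only argument is a DImode value in %r25
emits ".CALL ARGW0=GR,ARGW1=GR", since the DImode argument occupies
two argument words. */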
5774 void
5775 pa_output_arg_descriptor (rtx call_insn)
5776 {
5777 const char *arg_regs[4];
5778 enum machine_mode arg_mode;
5779 rtx link;
5780 int i, output_flag = 0;
5781 int regno;
5782
5783 /* We neither need nor want argument location descriptors for the
5784 64-bit runtime environment or the ELF32 environment. */
5785 if (TARGET_64BIT || TARGET_ELF32)
5786 return;
5787
5788 for (i = 0; i < 4; i++)
5789 arg_regs[i] = 0;
5790
5791 /* Specify explicitly that no argument relocations should take place
5792 if using the portable runtime calling conventions. */
5793 if (TARGET_PORTABLE_RUNTIME)
5794 {
5795 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5796 asm_out_file);
5797 return;
5798 }
5799
5800 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5801 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5802 link; link = XEXP (link, 1))
5803 {
5804 rtx use = XEXP (link, 0);
5805
5806 if (! (GET_CODE (use) == USE
5807 && GET_CODE (XEXP (use, 0)) == REG
5808 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5809 continue;
5810
5811 arg_mode = GET_MODE (XEXP (use, 0));
5812 regno = REGNO (XEXP (use, 0));
5813 if (regno >= 23 && regno <= 26)
5814 {
5815 arg_regs[26 - regno] = "GR";
5816 if (arg_mode == DImode)
5817 arg_regs[25 - regno] = "GR";
5818 }
5819 else if (regno >= 32 && regno <= 39)
5820 {
5821 if (arg_mode == SFmode)
5822 arg_regs[(regno - 32) / 2] = "FR";
5823 else
5824 {
5825 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5826 arg_regs[(regno - 34) / 2] = "FR";
5827 arg_regs[(regno - 34) / 2 + 1] = "FU";
5828 #else
5829 arg_regs[(regno - 34) / 2] = "FU";
5830 arg_regs[(regno - 34) / 2 + 1] = "FR";
5831 #endif
5832 }
5833 }
5834 }
5835 fputs ("\t.CALL ", asm_out_file);
5836 for (i = 0; i < 4; i++)
5837 {
5838 if (arg_regs[i])
5839 {
5840 if (output_flag++)
5841 fputc (',', asm_out_file);
5842 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5843 }
5844 }
5845 fputc ('\n', asm_out_file);
5846 }
5847 \f
5848 /* Inform reload about cases where moving X with a mode MODE to a register in
5849 RCLASS requires an extra scratch or immediate register. Return the class
5850 needed for the immediate register. */
5851
5852 static reg_class_t
5853 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5854 enum machine_mode mode, secondary_reload_info *sri)
5855 {
5856 int regno;
5857 enum reg_class rclass = (enum reg_class) rclass_i;
5858
5859 /* Handle the easy stuff first. */
5860 if (rclass == R1_REGS)
5861 return NO_REGS;
5862
5863 if (REG_P (x))
5864 {
5865 regno = REGNO (x);
5866 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5867 return NO_REGS;
5868 }
5869 else
5870 regno = -1;
5871
5872 /* If we have something like (mem (mem (...))), we can safely assume the
5873 inner MEM will end up in a general register after reloading, so there's
5874 no need for a secondary reload. */
5875 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5876 return NO_REGS;
5877
5878 /* Trying to load a constant into a FP register during PIC code
5879 generation requires %r1 as a scratch register. */
5880 if (flag_pic
5881 && (mode == SImode || mode == DImode)
5882 && FP_REG_CLASS_P (rclass)
5883 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5884 {
5885 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5886 : CODE_FOR_reload_indi_r1);
5887 return NO_REGS;
5888 }
5889
5890 /* Secondary reloads of symbolic operands require %r1 as a scratch
5891 register when we're generating PIC code and when the operand isn't
5892 readonly. */
5893 if (pa_symbolic_expression_p (x))
5894 {
5895 if (GET_CODE (x) == HIGH)
5896 x = XEXP (x, 0);
5897
5898 if (flag_pic || !read_only_operand (x, VOIDmode))
5899 {
5900 gcc_assert (mode == SImode || mode == DImode);
5901 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5902 : CODE_FOR_reload_indi_r1);
5903 return NO_REGS;
5904 }
5905 }
5906
5907 /* Profiling showed the PA port spends about 1.3% of its compilation
5908 time in true_regnum from calls inside pa_secondary_reload_class. */
5909 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5910 regno = true_regnum (x);
5911
5912 /* In order to allow 14-bit displacements in integer loads and stores,
5913 we need to prevent reload from generating out of range integer mode
5914 loads and stores to the floating point registers. Previously, we
5915 used to call for a secondary reload and have pa_emit_move_sequence()
5916 fix the instruction sequence. However, reload occasionally wouldn't
5917 generate the reload and we would end up with an invalid REG+D memory
5918 address. So, now we use an intermediate general register for most
5919 memory loads and stores. */
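/* For example, "ldw 8188(%r30),%r19" is a valid integer REG+D access,
but the corresponding floating-point load can't encode that
displacement on all processors, so such values are moved through a
general register instead. */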
5920 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5921 && GET_MODE_CLASS (mode) == MODE_INT
5922 && FP_REG_CLASS_P (rclass))
5923 {
5924 /* Reload passes (mem:SI (reg/f:DI 30 %r30)) when it wants to check
5925 the secondary reload needed for a pseudo. It never passes a
5926 REG+D address. */
5927 if (GET_CODE (x) == MEM)
5928 {
5929 x = XEXP (x, 0);
5930
5931 /* We don't need an intermediate for indexed and LO_SUM DLT
5932 memory addresses. When INT14_OK_STRICT is true, it might
5933 appear that we could directly allow register indirect
5934 memory addresses. However, this doesn't work because we
5935 don't support SUBREGs in floating-point register copies
5936 and reload doesn't tell us when it's going to use a SUBREG. */
5937 if (IS_INDEX_ADDR_P (x)
5938 || IS_LO_SUM_DLT_ADDR_P (x))
5939 return NO_REGS;
5940
5941 /* Otherwise, we need an intermediate general register. */
5942 return GENERAL_REGS;
5943 }
5944
5945 /* Request a secondary reload with a general scratch register
5946 for everything else. ??? Could symbolic operands be handled
5947 directly when generating non-pic PA 2.0 code? */
5948 sri->icode = (in_p
5949 ? direct_optab_handler (reload_in_optab, mode)
5950 : direct_optab_handler (reload_out_optab, mode));
5951 return NO_REGS;
5952 }
5953
5954 /* A SAR<->FP register copy requires an intermediate general register
5955 and secondary memory. We need a secondary reload with a general
5956 scratch register for spills. */
5957 if (rclass == SHIFT_REGS)
5958 {
5959 /* Handle spill. */
5960 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
5961 {
5962 sri->icode = (in_p
5963 ? direct_optab_handler (reload_in_optab, mode)
5964 : direct_optab_handler (reload_out_optab, mode));
5965 return NO_REGS;
5966 }
5967
5968 /* Handle FP copy. */
5969 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
5970 return GENERAL_REGS;
5971 }
5972
5973 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5974 && REGNO_REG_CLASS (regno) == SHIFT_REGS
5975 && FP_REG_CLASS_P (rclass))
5976 return GENERAL_REGS;
5977
5978 return NO_REGS;
5979 }
5980
5981 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5982 is only marked as live on entry by df-scan when it is a fixed
5983 register. It isn't a fixed register in the 64-bit runtime,
5984 so we need to mark it here. */
5985
5986 static void
5987 pa_extra_live_on_entry (bitmap regs)
5988 {
5989 if (TARGET_64BIT)
5990 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5991 }
5992
5993 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5994 to prevent it from being deleted. */
5995
5996 rtx
5997 pa_eh_return_handler_rtx (void)
5998 {
5999 rtx tmp;
6000
6001 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6002 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6003 tmp = gen_rtx_MEM (word_mode, tmp);
6004 tmp->volatil = 1;
6005 return tmp;
6006 }
6007
6008 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6009 by invisible reference. As a GCC extension, we also pass anything
6010 with a zero or variable size by reference.
6011
6012 The 64-bit runtime does not describe passing any types by invisible
6013 reference. The internals of GCC can't currently handle passing
6014 empty structures, and zero or variable length arrays when they are
6015 not passed entirely on the stack or by reference. Thus, as a GCC
6016 extension, we pass these types by reference. The HP compiler doesn't
6017 support these types, so hopefully there shouldn't be any compatibility
6018 issues. This may have to be revisited when HP releases a C99 compiler
6019 or updates the ABI. */
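/* For example, under the rules above the 32-bit runtime passes a
16-byte struct by reference and an 8-byte struct by value, while the
64-bit runtime passes both by value and reserves invisible reference
for zero and variable sized objects. */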
6020
6021 static bool
6022 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6023 enum machine_mode mode, const_tree type,
6024 bool named ATTRIBUTE_UNUSED)
6025 {
6026 HOST_WIDE_INT size;
6027
6028 if (type)
6029 size = int_size_in_bytes (type);
6030 else
6031 size = GET_MODE_SIZE (mode);
6032
6033 if (TARGET_64BIT)
6034 return size <= 0;
6035 else
6036 return size <= 0 || size > 8;
6037 }
6038
6039 enum direction
6040 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6041 {
6042 if (mode == BLKmode
6043 || (TARGET_64BIT
6044 && type
6045 && (AGGREGATE_TYPE_P (type)
6046 || TREE_CODE (type) == COMPLEX_TYPE
6047 || TREE_CODE (type) == VECTOR_TYPE)))
6048 {
6049 /* Return none if justification is not required. */
6050 if (type
6051 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6052 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6053 return none;
6054
6055 /* The directions set here are ignored when a BLKmode argument larger
6056 than a word is placed in a register. Different code is used for
6057 the stack and registers. This makes it difficult to have a
6058 consistent data representation for both the stack and registers.
6059 For both runtimes, the justification and padding for arguments on
6060 the stack and in registers should be identical. */
6061 if (TARGET_64BIT)
6062 /* The 64-bit runtime specifies left justification for aggregates. */
6063 return upward;
6064 else
6065 /* The 32-bit runtime architecture specifies right justification.
6066 When the argument is passed on the stack, the argument is padded
6067 with garbage on the left. The HP compiler pads with zeros. */
6068 return downward;
6069 }
6070
6071 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6072 return downward;
6073 else
6074 return none;
6075 }
6076
6077 \f
6078 /* Do what is necessary for `va_start'. We look at the current function
6079 to determine if stdargs or varargs is used and fill in an initial
6080 va_list. A pointer to this constructor is returned. */
6081
6082 static rtx
6083 hppa_builtin_saveregs (void)
6084 {
6085 rtx offset, dest;
6086 tree fntype = TREE_TYPE (current_function_decl);
6087 int argadj = ((!stdarg_p (fntype))
6088 ? UNITS_PER_WORD : 0);
6089
6090 if (argadj)
6091 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6092 else
6093 offset = crtl->args.arg_offset_rtx;
6094
6095 if (TARGET_64BIT)
6096 {
6097 int i, off;
6098
6099 /* Adjust for varargs/stdarg differences. */
6100 if (argadj)
6101 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6102 else
6103 offset = crtl->args.arg_offset_rtx;
6104
6105 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6106 from the incoming arg pointer and growing to larger addresses. */
6107 for (i = 26, off = -64; i >= 19; i--, off += 8)
6108 emit_move_insn (gen_rtx_MEM (word_mode,
6109 plus_constant (Pmode,
6110 arg_pointer_rtx, off)),
6111 gen_rtx_REG (word_mode, i));
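/* This stores %r26 at -64, %r25 at -56, and so on through
%r19 at -8. */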
6112
6113 /* The incoming args pointer points just beyond the flushback area;
6114 normally this is not a serious concern. However, when we are doing
6115 varargs/stdargs we want to make the arg pointer point to the start
6116 of the incoming argument area. */
6117 emit_move_insn (virtual_incoming_args_rtx,
6118 plus_constant (Pmode, arg_pointer_rtx, -64));
6119
6120 /* Now return a pointer to the first anonymous argument. */
6121 return copy_to_reg (expand_binop (Pmode, add_optab,
6122 virtual_incoming_args_rtx,
6123 offset, 0, 0, OPTAB_LIB_WIDEN));
6124 }
6125
6126 /* Store general registers on the stack. */
6127 dest = gen_rtx_MEM (BLKmode,
6128 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6129 -16));
6130 set_mem_alias_set (dest, get_varargs_alias_set ());
6131 set_mem_align (dest, BITS_PER_WORD);
6132 move_block_from_reg (23, dest, 4);
6133
6134 /* move_block_from_reg will emit code to store the argument registers
6135 individually as scalar stores.
6136
6137 However, other insns may later load from the same addresses for
6138 a structure load (passing a struct to a varargs routine).
6139
6140 The alias code assumes that such aliasing can never happen, so we
6141 have to keep memory referencing insns from moving up beyond the
6142 last argument register store. So we emit a blockage insn here. */
6143 emit_insn (gen_blockage ());
6144
6145 return copy_to_reg (expand_binop (Pmode, add_optab,
6146 crtl->args.internal_arg_pointer,
6147 offset, 0, 0, OPTAB_LIB_WIDEN));
6148 }
6149
6150 static void
6151 hppa_va_start (tree valist, rtx nextarg)
6152 {
6153 nextarg = expand_builtin_saveregs ();
6154 std_expand_builtin_va_start (valist, nextarg);
6155 }
6156
6157 static tree
6158 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6159 gimple_seq *post_p)
6160 {
6161 if (TARGET_64BIT)
6162 {
6163 /* Args grow upward. We can use the generic routines. */
6164 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6165 }
6166 else /* !TARGET_64BIT */
6167 {
6168 tree ptr = build_pointer_type (type);
6169 tree valist_type;
6170 tree t, u;
6171 unsigned int size, ofs;
6172 bool indirect;
6173
6174 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6175 if (indirect)
6176 {
6177 type = ptr;
6178 ptr = build_pointer_type (type);
6179 }
6180 size = int_size_in_bytes (type);
6181 valist_type = TREE_TYPE (valist);
6182
6183 /* Args grow down. Not handled by generic routines. */
6184
6185 u = fold_convert (sizetype, size_in_bytes (type));
6186 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6187 t = fold_build_pointer_plus (valist, u);
6188
6189 /* Align to 4 or 8 byte boundary depending on argument size. */
6190
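/* The mask below rounds the decremented pointer down: with a
6-byte argument the mask is -8, so 0x7ffff3fd becomes 0x7ffff3f8,
and the later (8 - size) % 4 adjustment of 2 then skips the pad
bytes of the right-justified value. */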
6191 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6192 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6193 t = fold_convert (valist_type, t);
6194
6195 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6196
6197 ofs = (8 - size) % 4;
6198 if (ofs != 0)
6199 t = fold_build_pointer_plus_hwi (t, ofs);
6200
6201 t = fold_convert (ptr, t);
6202 t = build_va_arg_indirect_ref (t);
6203
6204 if (indirect)
6205 t = build_va_arg_indirect_ref (t);
6206
6207 return t;
6208 }
6209 }
6210
6211 /* True if MODE is valid for the target. By "valid", we mean able to
6212 be manipulated in non-trivial ways. In particular, this means all
6213 the arithmetic is supported.
6214
6215 Currently, TImode is not valid as the HP 64-bit runtime documentation
6216 doesn't document the alignment and calling conventions for this type.
6217 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6218 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6219
6220 static bool
6221 pa_scalar_mode_supported_p (enum machine_mode mode)
6222 {
6223 int precision = GET_MODE_PRECISION (mode);
6224
6225 switch (GET_MODE_CLASS (mode))
6226 {
6227 case MODE_PARTIAL_INT:
6228 case MODE_INT:
6229 if (precision == CHAR_TYPE_SIZE)
6230 return true;
6231 if (precision == SHORT_TYPE_SIZE)
6232 return true;
6233 if (precision == INT_TYPE_SIZE)
6234 return true;
6235 if (precision == LONG_TYPE_SIZE)
6236 return true;
6237 if (precision == LONG_LONG_TYPE_SIZE)
6238 return true;
6239 return false;
6240
6241 case MODE_FLOAT:
6242 if (precision == FLOAT_TYPE_SIZE)
6243 return true;
6244 if (precision == DOUBLE_TYPE_SIZE)
6245 return true;
6246 if (precision == LONG_DOUBLE_TYPE_SIZE)
6247 return true;
6248 return false;
6249
6250 case MODE_DECIMAL_FLOAT:
6251 return false;
6252
6253 default:
6254 gcc_unreachable ();
6255 }
6256 }
6257
6258 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6259 it branches into the delay slot. Otherwise, return FALSE. */
6260
6261 static bool
6262 branch_to_delay_slot_p (rtx insn)
6263 {
6264 rtx jump_insn;
6265
6266 if (dbr_sequence_length ())
6267 return FALSE;
6268
6269 jump_insn = next_active_insn (JUMP_LABEL (insn));
6270 while (insn)
6271 {
6272 insn = next_active_insn (insn);
6273 if (jump_insn == insn)
6274 return TRUE;
6275
6276 /* We can't rely on the length of asms. So, we return FALSE when
6277 the branch is followed by an asm. */
6278 if (!insn
6279 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6280 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6281 || get_attr_length (insn) > 0)
6282 break;
6283 }
6284
6285 return FALSE;
6286 }
6287
6288 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6289
6290 This occurs when INSN has an unfilled delay slot and is followed
6291 by an asm. Disaster can occur if the asm is empty and the jump
6292 branches into the delay slot. So, we add a nop in the delay slot
6293 when this occurs. */
6294
6295 static bool
6296 branch_needs_nop_p (rtx insn)
6297 {
6298 rtx jump_insn;
6299
6300 if (dbr_sequence_length ())
6301 return FALSE;
6302
6303 jump_insn = next_active_insn (JUMP_LABEL (insn));
6304 while (insn)
6305 {
6306 insn = next_active_insn (insn);
6307 if (!insn || jump_insn == insn)
6308 return TRUE;
6309
6310 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6311 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6312 && get_attr_length (insn) > 0)
6313 break;
6314 }
6315
6316 return FALSE;
6317 }
6318
6319 /* Return TRUE if INSN, a forward jump insn, can use nullification
6320 to skip the following instruction. This avoids an extra cycle due
6321 to a mis-predicted branch when we fall through. */
6322
6323 static bool
6324 use_skip_p (rtx insn)
6325 {
6326 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6327
6328 while (insn)
6329 {
6330 insn = next_active_insn (insn);
6331
6332 /* We can't rely on the length of asms, so we can't skip asms. */
6333 if (!insn
6334 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6335 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6336 break;
6337 if (get_attr_length (insn) == 4
6338 && jump_insn == next_active_insn (insn))
6339 return TRUE;
6340 if (get_attr_length (insn) > 0)
6341 break;
6342 }
6343
6344 return FALSE;
6345 }
6346
6347 /* This routine handles all the normal conditional branch sequences we
6348 might need to generate. It handles compare immediate vs compare
6349 register, nullification of delay slots, varying length branches,
6350 negated branches, and all combinations of the above. It returns the
6351 output appropriate to emit the branch corresponding to all given
6352 parameters. */
6353
6354 const char *
6355 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6356 {
6357 static char buf[100];
6358 bool useskip;
6359 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6360 int length = get_attr_length (insn);
6361 int xdelay;
6362
6363 /* A conditional branch to the following instruction (e.g. the delay slot)
6364 is asking for a disaster. This can happen when not optimizing and
6365 when jump optimization fails.
6366
6367 While it is usually safe to emit nothing, this can fail if the
6368 preceding instruction is a nullified branch with an empty delay
6369 slot and the same branch target as this branch. We could check
6370 for this but jump optimization should eliminate nop jumps. It
6371 is always safe to emit a nop. */
6372 if (branch_to_delay_slot_p (insn))
6373 return "nop";
6374
6375 /* The doubleword form of the cmpib instruction doesn't have the LEU
6376 and GTU conditions while the cmpb instruction does. Since we accept
6377 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6378 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6379 operands[2] = gen_rtx_REG (DImode, 0);
6380 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6381 operands[1] = gen_rtx_REG (DImode, 0);
6382
6383 /* If this is a long branch with its delay slot unfilled, set `nullify'
6384 as it can nullify the delay slot and save a nop. */
6385 if (length == 8 && dbr_sequence_length () == 0)
6386 nullify = 1;
6387
6388 /* If this is a short forward conditional branch which did not get
6389 its delay slot filled, the delay slot can still be nullified. */
6390 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6391 nullify = forward_branch_p (insn);
6392
6393 /* A forward branch over a single nullified insn can be done with a
6394 comclr instruction. This avoids a single cycle penalty due to
6395 mis-predicted branch if we fall through (branch not taken). */
6396 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6397
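/* In the templates below, "{x|y}" selects between assembler dialects:
output_asm_insn emits the first alternative for PA 1.x assemblers
("com" mnemonics) and the second for PA 2.0 ("cmp" mnemonics),
according to ASSEMBLER_DIALECT. */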
6398 switch (length)
6399 {
6400 /* All short conditional branches except backwards with an unfilled
6401 delay slot. */
6402 case 4:
6403 if (useskip)
6404 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6405 else
6406 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6407 if (GET_MODE (operands[1]) == DImode)
6408 strcat (buf, "*");
6409 if (negated)
6410 strcat (buf, "%B3");
6411 else
6412 strcat (buf, "%S3");
6413 if (useskip)
6414 strcat (buf, " %2,%r1,%%r0");
6415 else if (nullify)
6416 {
6417 if (branch_needs_nop_p (insn))
6418 strcat (buf, ",n %2,%r1,%0%#");
6419 else
6420 strcat (buf, ",n %2,%r1,%0");
6421 }
6422 else
6423 strcat (buf, " %2,%r1,%0");
6424 break;
6425
6426 /* All long conditionals. Note a short backward branch with an
6427 unfilled delay slot is treated just like a long backward branch
6428 with an unfilled delay slot. */
6429 case 8:
6430 /* Handle weird backwards branch with a filled delay slot
6431 which is nullified. */
6432 if (dbr_sequence_length () != 0
6433 && ! forward_branch_p (insn)
6434 && nullify)
6435 {
6436 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6437 if (GET_MODE (operands[1]) == DImode)
6438 strcat (buf, "*");
6439 if (negated)
6440 strcat (buf, "%S3");
6441 else
6442 strcat (buf, "%B3");
6443 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6444 }
6445 /* Handle short backwards branch with an unfilled delay slot.
6446 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6447 taken and untaken branches. */
6448 else if (dbr_sequence_length () == 0
6449 && ! forward_branch_p (insn)
6450 && INSN_ADDRESSES_SET_P ()
6451 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6452 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6453 {
6454 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6455 if (GET_MODE (operands[1]) == DImode)
6456 strcat (buf, "*");
6457 if (negated)
6458 strcat (buf, "%B3 %2,%r1,%0%#");
6459 else
6460 strcat (buf, "%S3 %2,%r1,%0%#");
6461 }
6462 else
6463 {
6464 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6465 if (GET_MODE (operands[1]) == DImode)
6466 strcat (buf, "*");
6467 if (negated)
6468 strcat (buf, "%S3");
6469 else
6470 strcat (buf, "%B3");
6471 if (nullify)
6472 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6473 else
6474 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6475 }
6476 break;
6477
6478 default:
6479 /* The reversed conditional branch must branch over one additional
6480 instruction if the delay slot is filled and needs to be extracted
6481 by pa_output_lbranch. If the delay slot is empty or this is a
6482 nullified forward branch, the instruction after the reversed
6483 condition branch must be nullified. */
6484 if (dbr_sequence_length () == 0
6485 || (nullify && forward_branch_p (insn)))
6486 {
6487 nullify = 1;
6488 xdelay = 0;
6489 operands[4] = GEN_INT (length);
6490 }
6491 else
6492 {
6493 xdelay = 1;
6494 operands[4] = GEN_INT (length + 4);
6495 }
6496
6497 /* Create a reversed conditional branch which branches around
6498 the following insns. */
6499 if (GET_MODE (operands[1]) != DImode)
6500 {
6501 if (nullify)
6502 {
6503 if (negated)
6504 strcpy (buf,
6505 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6506 else
6507 strcpy (buf,
6508 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6509 }
6510 else
6511 {
6512 if (negated)
6513 strcpy (buf,
6514 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6515 else
6516 strcpy (buf,
6517 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6518 }
6519 }
6520 else
6521 {
6522 if (nullify)
6523 {
6524 if (negated)
6525 strcpy (buf,
6526 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6527 else
6528 strcpy (buf,
6529 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6530 }
6531 else
6532 {
6533 if (negated)
6534 strcpy (buf,
6535 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6536 else
6537 strcpy (buf,
6538 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6539 }
6540 }
6541
6542 output_asm_insn (buf, operands);
6543 return pa_output_lbranch (operands[0], insn, xdelay);
6544 }
6545 return buf;
6546 }
6547
6548 /* This routine handles output of long unconditional branches that
6549 exceed the maximum range of a simple branch instruction. Since
6550 we don't have a register available for the branch, we save register
6551 %r1 in the frame marker, load the branch destination DEST into %r1,
6552 execute the branch, and restore %r1 in the delay slot of the branch.
6553
6554 Since long branches may have an insn in the delay slot and the
6555 delay slot is used to restore %r1, we in general need to extract
6556 this insn and execute it before the branch. However, to facilitate
6557 use of this function by conditional branches, we also provide an
6558 option to not extract the delay insn so that it will be emitted
6559 after the long branch. So, if there is an insn in the delay slot,
6560 it is extracted if XDELAY is nonzero.
6561
6562 The lengths of the various long-branch sequences are 20, 16 and 24
6563 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
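/* For example, with an empty frame the 16-byte non-PIC sequence is

	stw %r1,-20(%r30)
	ldil L'target,%r1
	be R'target(%sr4,%r1)
	ldw -20(%r30),%r1

where the final load executes in the branch's delay slot and
restores %r1. */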
6564
6565 const char *
6566 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6567 {
6568 rtx xoperands[2];
6569
6570 xoperands[0] = dest;
6571
6572 /* First, free up the delay slot. */
6573 if (xdelay && dbr_sequence_length () != 0)
6574 {
6575 /* We can't handle a jump in the delay slot. */
6576 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6577
6578 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6579 optimize, 0, NULL);
6580
6581 /* Now delete the delay insn. */
6582 SET_INSN_DELETED (NEXT_INSN (insn));
6583 }
6584
6585 /* Output an insn to save %r1. The runtime documentation doesn't
6586 specify whether the "Clean Up" slot in the caller's frame can
6587 be clobbered by the callee. It isn't copied by HP's builtin
6588 alloca, so this suggests that it can be clobbered if necessary.
6589 The "Static Link" location is copied by HP builtin alloca, so
6590 we avoid using it. Using the cleanup slot might be a problem
6591 if we have to interoperate with languages that pass cleanup
6592 information. However, it should be possible to handle these
6593 situations with GCC's asm feature.
6594
6595 The "Current RP" slot is reserved for the called procedure, so
6596 we try to use it when we don't have a frame of our own. It's
6597 rather unlikely that we won't have a frame when we need to emit
6598 a very long branch.
6599
6600 Really the way to go long term is a register scavenger; go to
6601 the target of the jump and find a register which we can use
6602 as a scratch to hold the value in %r1. Then, we wouldn't have
6603 to free up the delay slot or clobber a slot that may be needed
6604 for other purposes. */
6605 if (TARGET_64BIT)
6606 {
6607 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6608 /* Use the return pointer slot in the frame marker. */
6609 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6610 else
6611 /* Use the slot at -40 in the frame marker since HP builtin
6612 alloca doesn't copy it. */
6613 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6614 }
6615 else
6616 {
6617 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6618 /* Use the return pointer slot in the frame marker. */
6619 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6620 else
6621 /* Use the "Clean Up" slot in the frame marker. In GCC,
6622 the only other use of this location is for copying a
6623 floating point double argument from a floating-point
6624 register to two general registers. The copy is done
6625 as an "atomic" operation when outputting a call, so it
6626 won't interfere with our using the location here. */
6627 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6628 }
6629
6630 if (TARGET_PORTABLE_RUNTIME)
6631 {
6632 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6633 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6634 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6635 }
6636 else if (flag_pic)
6637 {
6638 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6639 if (TARGET_SOM || !TARGET_GAS)
6640 {
6641 xoperands[1] = gen_label_rtx ();
6642 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6643 targetm.asm_out.internal_label (asm_out_file, "L",
6644 CODE_LABEL_NUMBER (xoperands[1]));
6645 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6646 }
6647 else
6648 {
6649 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6650 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6651 }
6652 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6653 }
6654 else
6655 /* Now output a very long branch to the original target. */
6656 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6657
6658 /* Now restore the value of %r1 in the delay slot. */
6659 if (TARGET_64BIT)
6660 {
6661 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6662 return "ldd -16(%%r30),%%r1";
6663 else
6664 return "ldd -40(%%r30),%%r1";
6665 }
6666 else
6667 {
6668 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6669 return "ldw -20(%%r30),%%r1";
6670 else
6671 return "ldw -12(%%r30),%%r1";
6672 }
6673 }
6674
6675 /* This routine handles all the branch-on-bit conditional branch sequences we
6676 might need to generate. It handles nullification of delay slots,
6677 varying length branches, negated branches and all combinations of the
6678 above. It returns the appropriate output template to emit the branch. */
6679
6680 const char *
6681 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6682 {
6683 static char buf[100];
6684 bool useskip;
6685 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6686 int length = get_attr_length (insn);
6687 int xdelay;
6688
6689 /* A conditional branch to the following instruction (e.g. the delay slot) is
6690 asking for a disaster. I do not think this can happen as this pattern
6691 is only used when optimizing; jump optimization should eliminate the
6692 jump. But be prepared just in case. */
6693
6694 if (branch_to_delay_slot_p (insn))
6695 return "nop";
6696
6697 /* If this is a long branch with its delay slot unfilled, set `nullify'
6698 as it can nullify the delay slot and save a nop. */
6699 if (length == 8 && dbr_sequence_length () == 0)
6700 nullify = 1;
6701
6702 /* If this is a short forward conditional branch which did not get
6703 its delay slot filled, the delay slot can still be nullified. */
6704 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6705 nullify = forward_branch_p (insn);
6706
6707 /* A forward branch over a single nullified insn can be done with an
6708 extrs instruction. This avoids a single cycle penalty due to
6709 mis-predicted branch if we fall through (branch not taken). */
6710 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6711
6712 switch (length)
6713 {
6714
6715 /* All short conditional branches except backwards with an unfilled
6716 delay slot. */
6717 case 4:
6718 if (useskip)
6719 strcpy (buf, "{extrs,|extrw,s,}");
6720 else
6721 strcpy (buf, "bb,");
6722 if (useskip && GET_MODE (operands[0]) == DImode)
6723 strcpy (buf, "extrd,s,*");
6724 else if (GET_MODE (operands[0]) == DImode)
6725 strcpy (buf, "bb,*");
6726 if ((which == 0 && negated)
6727 || (which == 1 && ! negated))
6728 strcat (buf, ">=");
6729 else
6730 strcat (buf, "<");
6731 if (useskip)
6732 strcat (buf, " %0,%1,1,%%r0");
6733 else if (nullify && negated)
6734 {
6735 if (branch_needs_nop_p (insn))
6736 strcat (buf, ",n %0,%1,%3%#");
6737 else
6738 strcat (buf, ",n %0,%1,%3");
6739 }
6740 else if (nullify && ! negated)
6741 {
6742 if (branch_needs_nop_p (insn))
6743 strcat (buf, ",n %0,%1,%2%#");
6744 else
6745 strcat (buf, ",n %0,%1,%2");
6746 }
6747 else if (! nullify && negated)
6748 strcat (buf, " %0,%1,%3");
6749 else if (! nullify && ! negated)
6750 strcat (buf, " %0,%1,%2");
6751 break;
6752
6753 /* All long conditionals. Note a short backward branch with an
6754 unfilled delay slot is treated just like a long backward branch
6755 with an unfilled delay slot. */
6756 case 8:
6757 /* Handle weird backwards branch with a filled delay slot
6758 which is nullified. */
6759 if (dbr_sequence_length () != 0
6760 && ! forward_branch_p (insn)
6761 && nullify)
6762 {
6763 strcpy (buf, "bb,");
6764 if (GET_MODE (operands[0]) == DImode)
6765 strcat (buf, "*");
6766 if ((which == 0 && negated)
6767 || (which == 1 && ! negated))
6768 strcat (buf, "<");
6769 else
6770 strcat (buf, ">=");
6771 if (negated)
6772 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6773 else
6774 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6775 }
6776 /* Handle short backwards branch with an unfilled delay slot.
6777 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6778 taken and untaken branches. */
6779 else if (dbr_sequence_length () == 0
6780 && ! forward_branch_p (insn)
6781 && INSN_ADDRESSES_SET_P ()
6782 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6783 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6784 {
6785 strcpy (buf, "bb,");
6786 if (GET_MODE (operands[0]) == DImode)
6787 strcat (buf, "*");
6788 if ((which == 0 && negated)
6789 || (which == 1 && ! negated))
6790 strcat (buf, ">=");
6791 else
6792 strcat (buf, "<");
6793 if (negated)
6794 strcat (buf, " %0,%1,%3%#");
6795 else
6796 strcat (buf, " %0,%1,%2%#");
6797 }
6798 else
6799 {
6800 if (GET_MODE (operands[0]) == DImode)
6801 strcpy (buf, "extrd,s,*");
6802 else
6803 strcpy (buf, "{extrs,|extrw,s,}");
6804 if ((which == 0 && negated)
6805 || (which == 1 && ! negated))
6806 strcat (buf, "<");
6807 else
6808 strcat (buf, ">=");
6809 if (nullify && negated)
6810 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6811 else if (nullify && ! negated)
6812 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6813 else if (negated)
6814 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6815 else
6816 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6817 }
6818 break;
6819
6820 default:
6821 /* The reversed conditional branch must branch over one additional
6822 instruction if the delay slot is filled and needs to be extracted
6823 by pa_output_lbranch. If the delay slot is empty or this is a
6824 nullified forward branch, the instruction after the reversed
6825 condition branch must be nullified. */
6826 if (dbr_sequence_length () == 0
6827 || (nullify && forward_branch_p (insn)))
6828 {
6829 nullify = 1;
6830 xdelay = 0;
6831 operands[4] = GEN_INT (length);
6832 }
6833 else
6834 {
6835 xdelay = 1;
6836 operands[4] = GEN_INT (length + 4);
6837 }
6838
6839 if (GET_MODE (operands[0]) == DImode)
6840 strcpy (buf, "bb,*");
6841 else
6842 strcpy (buf, "bb,");
6843 if ((which == 0 && negated)
6844 || (which == 1 && !negated))
6845 strcat (buf, "<");
6846 else
6847 strcat (buf, ">=");
6848 if (nullify)
6849 strcat (buf, ",n %0,%1,.+%4");
6850 else
6851 strcat (buf, " %0,%1,.+%4");
6852 output_asm_insn (buf, operands);
6853 return pa_output_lbranch (negated ? operands[3] : operands[2],
6854 insn, xdelay);
6855 }
6856 return buf;
6857 }
6858
6859 /* This routine handles all the branch-on-variable-bit conditional branch
6860 sequences we might need to generate. It handles nullification of delay
6861 slots, varying length branches, negated branches and all combinations
6862 of the above. It returns the appropriate output template to emit the
6863 branch. */
6864
6865 const char *
6866 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6867 int which)
6868 {
6869 static char buf[100];
6870 bool useskip;
6871 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6872 int length = get_attr_length (insn);
6873 int xdelay;
6874
6875 /* A conditional branch to the following instruction (e.g. the delay slot) is
6876 asking for a disaster. I do not think this can happen as this pattern
6877 is only used when optimizing; jump optimization should eliminate the
6878 jump. But be prepared just in case. */
6879
6880 if (branch_to_delay_slot_p (insn))
6881 return "nop";
6882
6883 /* If this is a long branch with its delay slot unfilled, set `nullify'
6884 as it can nullify the delay slot and save a nop. */
6885 if (length == 8 && dbr_sequence_length () == 0)
6886 nullify = 1;
6887
6888 /* If this is a short forward conditional branch which did not get
6889 its delay slot filled, the delay slot can still be nullified. */
6890 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6891 nullify = forward_branch_p (insn);
6892
6893 /* A forward branch over a single nullified insn can be done with an
6894 extrs instruction. This avoids a single cycle penalty due to
6895 mis-predicted branch if we fall through (branch not taken). */
6896 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6897
6898 switch (length)
6899 {
6900
6901 /* All short conditional branches except backwards with an unfilled
6902 delay slot. */
6903 case 4:
6904 if (useskip)
6905 strcpy (buf, "{vextrs,|extrw,s,}");
6906 else
6907 strcpy (buf, "{bvb,|bb,}");
6908 if (useskip && GET_MODE (operands[0]) == DImode)
6909 strcpy (buf, "extrd,s,*");
6910 else if (GET_MODE (operands[0]) == DImode)
6911 strcpy (buf, "bb,*");
6912 if ((which == 0 && negated)
6913 || (which == 1 && ! negated))
6914 strcat (buf, ">=");
6915 else
6916 strcat (buf, "<");
6917 if (useskip)
6918 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6919 else if (nullify && negated)
6920 {
6921 if (branch_needs_nop_p (insn))
6922 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6923 else
6924 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6925 }
6926 else if (nullify && ! negated)
6927 {
6928 if (branch_needs_nop_p (insn))
6929 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6930 else
6931 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6932 }
6933 else if (! nullify && negated)
6934 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6935 else if (! nullify && ! negated)
6936 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6937 break;
6938
6939 /* All long conditionals. Note a short backward branch with an
6940 unfilled delay slot is treated just like a long backward branch
6941 with an unfilled delay slot. */
6942 case 8:
6943 /* Handle weird backwards branch with a filled delay slot
6944 which is nullified. */
6945 if (dbr_sequence_length () != 0
6946 && ! forward_branch_p (insn)
6947 && nullify)
6948 {
6949 strcpy (buf, "{bvb,|bb,}");
6950 if (GET_MODE (operands[0]) == DImode)
6951 strcat (buf, "*");
6952 if ((which == 0 && negated)
6953 || (which == 1 && ! negated))
6954 strcat (buf, "<");
6955 else
6956 strcat (buf, ">=");
6957 if (negated)
6958 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6959 else
6960 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6961 }
6962 /* Handle short backwards branch with an unfilled delay slot.
6963 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6964 taken and untaken branches. */
6965 else if (dbr_sequence_length () == 0
6966 && ! forward_branch_p (insn)
6967 && INSN_ADDRESSES_SET_P ()
6968 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6969 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6970 {
6971 strcpy (buf, "{bvb,|bb,}");
6972 if (GET_MODE (operands[0]) == DImode)
6973 strcat (buf, "*");
6974 if ((which == 0 && negated)
6975 || (which == 1 && ! negated))
6976 strcat (buf, ">=");
6977 else
6978 strcat (buf, "<");
6979 if (negated)
6980 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6981 else
6982 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6983 }
6984 else
6985 {
6986 strcpy (buf, "{vextrs,|extrw,s,}");
6987 if (GET_MODE (operands[0]) == DImode)
6988 strcpy (buf, "extrd,s,*");
6989 if ((which == 0 && negated)
6990 || (which == 1 && ! negated))
6991 strcat (buf, "<");
6992 else
6993 strcat (buf, ">=");
6994 if (nullify && negated)
6995 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6996 else if (nullify && ! negated)
6997 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6998 else if (negated)
6999 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7000 else
7001 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7002 }
7003 break;
7004
7005 default:
7006 /* The reversed conditional branch must branch over one additional
7007 instruction if the delay slot is filled and needs to be extracted
7008 by pa_output_lbranch. If the delay slot is empty or this is a
7009 nullified forward branch, the instruction after the reversed
7010 condition branch must be nullified. */
7011 if (dbr_sequence_length () == 0
7012 || (nullify && forward_branch_p (insn)))
7013 {
7014 nullify = 1;
7015 xdelay = 0;
7016 operands[4] = GEN_INT (length);
7017 }
7018 else
7019 {
7020 xdelay = 1;
7021 operands[4] = GEN_INT (length + 4);
7022 }
7023
7024 if (GET_MODE (operands[0]) == DImode)
7025 strcpy (buf, "bb,*");
7026 else
7027 strcpy (buf, "{bvb,|bb,}");
7028 if ((which == 0 && negated)
7029 || (which == 1 && !negated))
7030 strcat (buf, "<");
7031 else
7032 strcat (buf, ">=");
7033 if (nullify)
7034 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7035 else
7036 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7037 output_asm_insn (buf, operands);
7038 return pa_output_lbranch (negated ? operands[3] : operands[2],
7039 insn, xdelay);
7040 }
7041 return buf;
7042 }
7043
7044 /* Return the output template for emitting a dbra type insn.
7045
7046 Note it may perform some output operations on its own before
7047 returning the final output string. */
7048 const char *
7049 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7050 {
7051 int length = get_attr_length (insn);
7052
7053 /* A conditional branch to the following instruction (e.g. the delay slot) is
7054 asking for a disaster. Be prepared! */
7055
7056 if (branch_to_delay_slot_p (insn))
7057 {
7058 if (which_alternative == 0)
7059 return "ldo %1(%0),%0";
7060 else if (which_alternative == 1)
7061 {
7062 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7063 output_asm_insn ("ldw -16(%%r30),%4", operands);
7064 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7065 return "{fldws|fldw} -16(%%r30),%0";
7066 }
7067 else
7068 {
7069 output_asm_insn ("ldw %0,%4", operands);
7070 return "ldo %1(%4),%4\n\tstw %4,%0";
7071 }
7072 }
7073
7074 if (which_alternative == 0)
7075 {
7076 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7077 int xdelay;
7078
7079 /* If this is a long branch with its delay slot unfilled, set `nullify'
7080 as it can nullify the delay slot and save a nop. */
7081 if (length == 8 && dbr_sequence_length () == 0)
7082 nullify = 1;
7083
7084 /* If this is a short forward conditional branch which did not get
7085 its delay slot filled, the delay slot can still be nullified. */
7086 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7087 nullify = forward_branch_p (insn);
7088
7089 switch (length)
7090 {
7091 case 4:
7092 if (nullify)
7093 {
7094 if (branch_needs_nop_p (insn))
7095 return "addib,%C2,n %1,%0,%3%#";
7096 else
7097 return "addib,%C2,n %1,%0,%3";
7098 }
7099 else
7100 return "addib,%C2 %1,%0,%3";
7101
7102 case 8:
7103 /* Handle weird backwards branch with a filled delay slot
7104 which is nullified. */
7105 if (dbr_sequence_length () != 0
7106 && ! forward_branch_p (insn)
7107 && nullify)
7108 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7109 /* Handle short backwards branch with an unfilled delay slot.
7110 Using an addb;nop rather than addi;bl saves 1 cycle for both
7111 taken and untaken branches. */
7112 else if (dbr_sequence_length () == 0
7113 && ! forward_branch_p (insn)
7114 && INSN_ADDRESSES_SET_P ()
7115 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7116 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7117 return "addib,%C2 %1,%0,%3%#";
7118
7119 /* Handle normal cases. */
7120 if (nullify)
7121 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7122 else
7123 return "addi,%N2 %1,%0,%0\n\tb %3";
7124
7125 default:
7126 /* The reversed conditional branch must branch over one additional
7127 instruction if the delay slot is filled and needs to be extracted
7128 by pa_output_lbranch. If the delay slot is empty or this is a
7129 nullified forward branch, the instruction after the reversed
7130 condition branch must be nullified. */
7131 if (dbr_sequence_length () == 0
7132 || (nullify && forward_branch_p (insn)))
7133 {
7134 nullify = 1;
7135 xdelay = 0;
7136 operands[4] = GEN_INT (length);
7137 }
7138 else
7139 {
7140 xdelay = 1;
7141 operands[4] = GEN_INT (length + 4);
7142 }
7143
7144 if (nullify)
7145 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7146 else
7147 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7148
7149 return pa_output_lbranch (operands[3], insn, xdelay);
7150 }
7151
7152 }
7153 /* Deal with gross reload from FP register case. */
7154 else if (which_alternative == 1)
7155 {
7156 /* Move loop counter from FP register to MEM then into a GR,
7157 increment the GR, store the GR into MEM, and finally reload
7158 the FP register from MEM from within the branch's delay slot. */
7159 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7160 operands);
7161 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7162 if (length == 24)
7163 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7164 else if (length == 28)
7165 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7166 else
7167 {
7168 operands[5] = GEN_INT (length - 16);
7169 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7170 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7171 return pa_output_lbranch (operands[3], insn, 0);
7172 }
7173 }
7174 /* Deal with gross reload from memory case. */
7175 else
7176 {
7177 /* Reload loop counter from memory, the store back to memory
7178 happens in the branch's delay slot. */
7179 output_asm_insn ("ldw %0,%4", operands);
7180 if (length == 12)
7181 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7182 else if (length == 16)
7183 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7184 else
7185 {
7186 operands[5] = GEN_INT (length - 4);
7187 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7188 return pa_output_lbranch (operands[3], insn, 0);
7189 }
7190 }
7191 }
7192
7193 /* Return the output template for emitting a movb type insn.
7194
7195 Note it may perform some output operations on its own before
7196 returning the final output string. */
7197 const char *
7198 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7199 int reverse_comparison)
7200 {
7201 int length = get_attr_length (insn);
7202
7203 /* A conditional branch to the following instruction (e.g. the delay slot) is
7204 asking for a disaster. Be prepared! */
7205
7206 if (branch_to_delay_slot_p (insn))
7207 {
7208 if (which_alternative == 0)
7209 return "copy %1,%0";
7210 else if (which_alternative == 1)
7211 {
7212 output_asm_insn ("stw %1,-16(%%r30)", operands);
7213 return "{fldws|fldw} -16(%%r30),%0";
7214 }
7215 else if (which_alternative == 2)
7216 return "stw %1,%0";
7217 else
7218 return "mtsar %r1";
7219 }
7220
7221 /* Support the second variant. */
7222 if (reverse_comparison)
7223 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7224
7225 if (which_alternative == 0)
7226 {
7227 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7228 int xdelay;
7229
7230 /* If this is a long branch with its delay slot unfilled, set `nullify'
7231 as it can nullify the delay slot and save a nop. */
7232 if (length == 8 && dbr_sequence_length () == 0)
7233 nullify = 1;
7234
7235 /* If this is a short forward conditional branch which did not get
7236 its delay slot filled, the delay slot can still be nullified. */
7237 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7238 nullify = forward_branch_p (insn);
7239
7240 switch (length)
7241 {
7242 case 4:
7243 if (nullify)
7244 {
7245 if (branch_needs_nop_p (insn))
7246 return "movb,%C2,n %1,%0,%3%#";
7247 else
7248 return "movb,%C2,n %1,%0,%3";
7249 }
7250 else
7251 return "movb,%C2 %1,%0,%3";
7252
7253 case 8:
7254 /* Handle weird backwards branch with a filled delay slot
7255 which is nullified. */
7256 if (dbr_sequence_length () != 0
7257 && ! forward_branch_p (insn)
7258 && nullify)
7259 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7260
7261 /* Handle short backwards branch with an unfilled delay slot.
7262 Using a movb;nop rather than or;bl saves 1 cycle for both
7263 taken and untaken branches. */
7264 else if (dbr_sequence_length () == 0
7265 && ! forward_branch_p (insn)
7266 && INSN_ADDRESSES_SET_P ()
7267 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7268 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7269 return "movb,%C2 %1,%0,%3%#";
7270 /* Handle normal cases. */
7271 if (nullify)
7272 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7273 else
7274 return "or,%N2 %1,%%r0,%0\n\tb %3";
7275
7276 default:
7277 /* The reversed conditional branch must branch over one additional
7278 instruction if the delay slot is filled and needs to be extracted
7279 by pa_output_lbranch. If the delay slot is empty or this is a
7280 nullified forward branch, the instruction after the reversed
7281 conditional branch must be nullified. */
7282 if (dbr_sequence_length () == 0
7283 || (nullify && forward_branch_p (insn)))
7284 {
7285 nullify = 1;
7286 xdelay = 0;
7287 operands[4] = GEN_INT (length);
7288 }
7289 else
7290 {
7291 xdelay = 1;
7292 operands[4] = GEN_INT (length + 4);
7293 }
7294
7295 if (nullify)
7296 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7297 else
7298 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7299
7300 return pa_output_lbranch (operands[3], insn, xdelay);
7301 }
7302 }
7303 /* Deal with gross reload for FP destination register case. */
7304 else if (which_alternative == 1)
7305 {
7306 /* Move source register to MEM, perform the branch test, then
7307 finally load the FP register from MEM from within the branch's
7308 delay slot. */
7309 output_asm_insn ("stw %1,-16(%%r30)", operands);
7310 if (length == 12)
7311 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7312 else if (length == 16)
7313 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7314 else
7315 {
7316 operands[4] = GEN_INT (length - 4);
7317 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7318 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7319 return pa_output_lbranch (operands[3], insn, 0);
7320 }
7321 }
7322 /* Deal with gross reload from memory case. */
7323 else if (which_alternative == 2)
7324 {
7325 /* Reload loop counter from memory, the store back to memory
7326 happens in the branch's delay slot. */
7327 if (length == 8)
7328 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7329 else if (length == 12)
7330 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7331 else
7332 {
7333 operands[4] = GEN_INT (length);
7334 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7335 operands);
7336 return pa_output_lbranch (operands[3], insn, 0);
7337 }
7338 }
7339 /* Handle SAR as a destination. */
7340 else
7341 {
7342 if (length == 8)
7343 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7344 else if (length == 12)
7345 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7346 else
7347 {
7348 operands[4] = GEN_INT (length);
7349 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7350 operands);
7351 return pa_output_lbranch (operands[3], insn, 0);
7352 }
7353 }
7354 }
7355
7356 /* Copy any FP arguments in INSN into integer registers. */
7357 static void
7358 copy_fp_args (rtx insn)
7359 {
7360 rtx link;
7361 rtx xoperands[2];
7362
7363 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7364 {
7365 int arg_mode, regno;
7366 rtx use = XEXP (link, 0);
7367
7368 if (! (GET_CODE (use) == USE
7369 && GET_CODE (XEXP (use, 0)) == REG
7370 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7371 continue;
7372
7373 arg_mode = GET_MODE (XEXP (use, 0));
7374 regno = REGNO (XEXP (use, 0));
7375
7376 /* Is it a floating point register? */
7377 if (regno >= 32 && regno <= 39)
7378 {
7379 /* Copy the FP register into an integer register via memory. */
7380 if (arg_mode == SFmode)
7381 {
7382 xoperands[0] = XEXP (use, 0);
7383 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7384 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7385 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7386 }
7387 else
7388 {
7389 xoperands[0] = XEXP (use, 0);
7390 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7391 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7392 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7393 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7394 }
7395 }
7396 }
7397 }
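
/* Illustrative sketch (not part of the original source): for a single
   SFmode argument in an FP register, the sequence emitted above is
   roughly

       fstw %fr?,-16(%sr0,%r30)   ; spill the FP register to the stack
       ldw -16(%sr0,%r30),%r?     ; reload it into the matching GR

   with the GR chosen by the 26 - (regno - 32) / 2 mapping; the register
   numbers shown are placeholders.  */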
7398
7399 /* Compute length of the FP argument copy sequence for INSN. */
7400 static int
7401 length_fp_args (rtx insn)
7402 {
7403 int length = 0;
7404 rtx link;
7405
7406 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7407 {
7408 int arg_mode, regno;
7409 rtx use = XEXP (link, 0);
7410
7411 if (! (GET_CODE (use) == USE
7412 && GET_CODE (XEXP (use, 0)) == REG
7413 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7414 continue;
7415
7416 arg_mode = GET_MODE (XEXP (use, 0));
7417 regno = REGNO (XEXP (use, 0));
7418
7419 /* Is it a floating point register? */
7420 if (regno >= 32 && regno <= 39)
7421 {
7422 if (arg_mode == SFmode)
7423 length += 8;
7424 else
7425 length += 12;
7426 }
7427 }
7428
7429 return length;
7430 }
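
/* Worked example (added for illustration): a call passing one SFmode and
   one DFmode value in FP registers yields 8 + 12 = 20 bytes -- two insns
   for the single word and three for the double word, matching the
   sequences emitted by copy_fp_args above.  */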
7431
7432 /* Return the attribute length for the millicode call instruction INSN.
7433 The length must match the code generated by pa_output_millicode_call.
7434 We include the delay slot in the returned length as it is better to
7435 overestimate the length than to underestimate it. */
7436
7437 int
7438 pa_attr_length_millicode_call (rtx insn)
7439 {
7440 unsigned long distance = -1;
7441 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7442
7443 if (INSN_ADDRESSES_SET_P ())
7444 {
7445 distance = (total + insn_current_reference_address (insn));
7446 if (distance < total)
7447 distance = -1;
7448 }
7449
7450 if (TARGET_64BIT)
7451 {
7452 if (!TARGET_LONG_CALLS && distance < 7600000)
7453 return 8;
7454
7455 return 20;
7456 }
7457 else if (TARGET_PORTABLE_RUNTIME)
7458 return 24;
7459 else
7460 {
7461 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7462 return 8;
7463
7464 if (TARGET_LONG_ABS_CALL && !flag_pic)
7465 return 12;
7466
7467 return 24;
7468 }
7469 }
7470
7471 /* INSN is a function call. It may have an unconditional jump
7472 in its delay slot.
7473
7474 CALL_DEST is the routine we are calling. */
7475
7476 const char *
7477 pa_output_millicode_call (rtx insn, rtx call_dest)
7478 {
7479 int attr_length = get_attr_length (insn);
7480 int seq_length = dbr_sequence_length ();
7481 int distance;
7482 rtx seq_insn;
7483 rtx xoperands[3];
7484
7485 xoperands[0] = call_dest;
7486 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7487
7488 /* Handle the common case where we are sure that the branch will
7489 reach the beginning of the $CODE$ subspace. The within reach
7490 form of the $$sh_func_adrs call has a length of 28. Because
7491 it has an attribute type of multi, it never has a nonzero
7492 sequence length. The length of the $$sh_func_adrs call is the same
7493 as certain out of reach PIC calls to other routines. */
7494 if (!TARGET_LONG_CALLS
7495 && ((seq_length == 0
7496 && (attr_length == 12
7497 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7498 || (seq_length != 0 && attr_length == 8)))
7499 {
7500 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7501 }
7502 else
7503 {
7504 if (TARGET_64BIT)
7505 {
7506 /* It might seem that one insn could be saved by accessing
7507 the millicode function using the linkage table. However,
7508 this doesn't work in shared libraries and other dynamically
7509 loaded objects. Using a pc-relative sequence also avoids
7510 problems related to the implicit use of the gp register. */
7511 output_asm_insn ("b,l .+8,%%r1", xoperands);
7512
7513 if (TARGET_GAS)
7514 {
7515 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7516 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7517 }
7518 else
7519 {
7520 xoperands[1] = gen_label_rtx ();
7521 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7522 targetm.asm_out.internal_label (asm_out_file, "L",
7523 CODE_LABEL_NUMBER (xoperands[1]));
7524 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7525 }
7526
7527 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7528 }
7529 else if (TARGET_PORTABLE_RUNTIME)
7530 {
7531 /* Pure portable runtime doesn't allow be/ble; we also don't
7532 have PIC support in the assembler/linker, so this sequence
7533 is needed. */
7534
7535 /* Get the address of our target into %r1. */
7536 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7537 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7538
7539 /* Get our return address into %r31. */
7540 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7541 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7542
7543 /* Jump to our target address in %r1. */
7544 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7545 }
7546 else if (!flag_pic)
7547 {
7548 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7549 if (TARGET_PA_20)
7550 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7551 else
7552 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7553 }
7554 else
7555 {
7556 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7557 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7558
7559 if (TARGET_SOM || !TARGET_GAS)
7560 {
7561 /* The HP assembler can generate relocations for the
7562 difference of two symbols. GAS can do this for a
7563 millicode symbol but not an arbitrary external
7564 symbol when generating SOM output. */
7565 xoperands[1] = gen_label_rtx ();
7566 targetm.asm_out.internal_label (asm_out_file, "L",
7567 CODE_LABEL_NUMBER (xoperands[1]));
7568 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7569 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7570 }
7571 else
7572 {
7573 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7574 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7575 xoperands);
7576 }
7577
7578 /* Jump to our target address in %r1. */
7579 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7580 }
7581 }
7582
7583 if (seq_length == 0)
7584 output_asm_insn ("nop", xoperands);
7585
7586 /* We are done if there isn't a jump in the delay slot. */
7587 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7588 return "";
7589
7590 /* This call has an unconditional jump in its delay slot. */
7591 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7592
7593 /* See if the return address can be adjusted. Use the containing
7594 sequence insn's address. */
7595 if (INSN_ADDRESSES_SET_P ())
7596 {
7597 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7598 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7599 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7600
7601 if (VAL_14_BITS_P (distance))
7602 {
7603 xoperands[1] = gen_label_rtx ();
7604 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7605 targetm.asm_out.internal_label (asm_out_file, "L",
7606 CODE_LABEL_NUMBER (xoperands[1]));
7607 }
7608 else
7609 /* ??? This branch may not reach its target. */
7610 output_asm_insn ("nop\n\tb,n %0", xoperands);
7611 }
7612 else
7613 /* ??? This branch may not reach its target. */
7614 output_asm_insn ("nop\n\tb,n %0", xoperands);
7615
7616 /* Delete the jump. */
7617 SET_INSN_DELETED (NEXT_INSN (insn));
7618
7619 return "";
7620 }
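
/* Hedged example (not from the original source): in the common 32-bit
   short-reach case above, a millicode call such as the multiply helper
   comes out roughly as

       bl $$mulI,%r31   ; branch and link, return address in %r31
       nop              ; only when the delay slot is unfilled

   $$mulI is used here merely as a representative millicode name.  */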
7621
7622 /* Return the attribute length of the call instruction INSN. The SIBCALL
7623 flag indicates whether INSN is a regular call or a sibling call. The
7624 length returned must be longer than the code actually generated by
7625 pa_output_call. Since branch shortening is done before delay branch
7626 sequencing, there is no way to determine whether or not the delay
7627 slot will be filled during branch shortening. Even when the delay
7628 slot is filled, we may have to add a nop if the delay slot contains
7629 a branch that can't reach its target. Thus, we always have to include
7630 the delay slot in the length estimate. This used to be done in
7631 pa_adjust_insn_length but we do it here now as some sequences always
7632 fill the delay slot and we can save four bytes in the estimate for
7633 these sequences. */
7634
7635 int
7636 pa_attr_length_call (rtx insn, int sibcall)
7637 {
7638 int local_call;
7639 rtx call, call_dest;
7640 tree call_decl;
7641 int length = 0;
7642 rtx pat = PATTERN (insn);
7643 unsigned long distance = -1;
7644
7645 gcc_assert (GET_CODE (insn) == CALL_INSN);
7646
7647 if (INSN_ADDRESSES_SET_P ())
7648 {
7649 unsigned long total;
7650
7651 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7652 distance = (total + insn_current_reference_address (insn));
7653 if (distance < total)
7654 distance = -1;
7655 }
7656
7657 gcc_assert (GET_CODE (pat) == PARALLEL);
7658
7659 /* Get the call rtx. */
7660 call = XVECEXP (pat, 0, 0);
7661 if (GET_CODE (call) == SET)
7662 call = SET_SRC (call);
7663
7664 gcc_assert (GET_CODE (call) == CALL);
7665
7666 /* Determine if this is a local call. */
7667 call_dest = XEXP (XEXP (call, 0), 0);
7668 call_decl = SYMBOL_REF_DECL (call_dest);
7669 local_call = call_decl && targetm.binds_local_p (call_decl);
7670
7671 /* pc-relative branch. */
7672 if (!TARGET_LONG_CALLS
7673 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7674 || distance < MAX_PCREL17F_OFFSET))
7675 length += 8;
7676
7677 /* 64-bit plabel sequence. */
7678 else if (TARGET_64BIT && !local_call)
7679 length += sibcall ? 28 : 24;
7680
7681 /* non-pic long absolute branch sequence. */
7682 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7683 length += 12;
7684
7685 /* long pc-relative branch sequence. */
7686 else if (TARGET_LONG_PIC_SDIFF_CALL
7687 || (TARGET_GAS && !TARGET_SOM
7688 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7689 {
7690 length += 20;
7691
7692 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7693 length += 8;
7694 }
7695
7696 /* 32-bit plabel sequence. */
7697 else
7698 {
7699 length += 32;
7700
7701 if (TARGET_SOM)
7702 length += length_fp_args (insn);
7703
7704 if (flag_pic)
7705 length += 4;
7706
7707 if (!TARGET_PA_20)
7708 {
7709 if (!sibcall)
7710 length += 8;
7711
7712 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7713 length += 8;
7714 }
7715 }
7716
7717 return length;
7718 }
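
/* Worked example (added for illustration): a non-local PIC call on a
   pre-2.0 target that falls through to the 32-bit plabel case above is
   estimated at 32 + 4 (PIC) + 8 (not a sibcall) + 8 (space registers in
   use) = 52 bytes, assuming a non-SOM configuration so no FP argument
   copies are counted.  */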
7719
7720 /* INSN is a function call. It may have an unconditional jump
7721 in its delay slot.
7722
7723 CALL_DEST is the routine we are calling. */
7724
7725 const char *
7726 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7727 {
7728 int delay_insn_deleted = 0;
7729 int delay_slot_filled = 0;
7730 int seq_length = dbr_sequence_length ();
7731 tree call_decl = SYMBOL_REF_DECL (call_dest);
7732 int local_call = call_decl && targetm.binds_local_p (call_decl);
7733 rtx xoperands[2];
7734
7735 xoperands[0] = call_dest;
7736
7737 /* Handle the common case where we're sure that the branch will reach
7738 the beginning of the "$CODE$" subspace. This is the beginning of
7739 the current function if we are in a named section. */
7740 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7741 {
7742 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7743 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7744 }
7745 else
7746 {
7747 if (TARGET_64BIT && !local_call)
7748 {
7749 /* ??? As far as I can tell, the HP linker doesn't support the
7750 long pc-relative sequence described in the 64-bit runtime
7751 architecture. So, we use a slightly longer indirect call. */
7752 xoperands[0] = pa_get_deferred_plabel (call_dest);
7753 xoperands[1] = gen_label_rtx ();
7754
7755 /* If this isn't a sibcall, we put the load of %r27 into the
7756 delay slot. We can't do this in a sibcall as we don't
7757 have a second call-clobbered scratch register available. */
7758 if (seq_length != 0
7759 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7760 && !sibcall)
7761 {
7762 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7763 optimize, 0, NULL);
7764
7765 /* Now delete the delay insn. */
7766 SET_INSN_DELETED (NEXT_INSN (insn));
7767 delay_insn_deleted = 1;
7768 }
7769
7770 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7771 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7772 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7773
7774 if (sibcall)
7775 {
7776 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7777 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7778 output_asm_insn ("bve (%%r1)", xoperands);
7779 }
7780 else
7781 {
7782 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7783 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7784 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7785 delay_slot_filled = 1;
7786 }
7787 }
7788 else
7789 {
7790 int indirect_call = 0;
7791
7792 /* Emit a long call. There are several different sequences
7793 of increasing length and complexity. In most cases,
7794 they don't allow an instruction in the delay slot. */
7795 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7796 && !TARGET_LONG_PIC_SDIFF_CALL
7797 && !(TARGET_GAS && !TARGET_SOM
7798 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7799 && !TARGET_64BIT)
7800 indirect_call = 1;
7801
7802 if (seq_length != 0
7803 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7804 && !sibcall
7805 && (!TARGET_PA_20
7806 || indirect_call
7807 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7808 {
7809 /* A non-jump insn in the delay slot. By definition we can
7810 emit this insn before the call (and in fact before argument
7811 relocating). */
7812 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7813 NULL);
7814
7815 /* Now delete the delay insn. */
7816 SET_INSN_DELETED (NEXT_INSN (insn));
7817 delay_insn_deleted = 1;
7818 }
7819
7820 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7821 {
7822 /* This is the best sequence for making long calls in
7823 non-pic code. Unfortunately, GNU ld doesn't provide
7824 the stub needed for external calls, and GAS's support
7825 for this with the SOM linker is buggy. It is safe
7826 to use this for local calls. */
7827 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7828 if (sibcall)
7829 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7830 else
7831 {
7832 if (TARGET_PA_20)
7833 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7834 xoperands);
7835 else
7836 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7837
7838 output_asm_insn ("copy %%r31,%%r2", xoperands);
7839 delay_slot_filled = 1;
7840 }
7841 }
7842 else
7843 {
7844 if (TARGET_LONG_PIC_SDIFF_CALL)
7845 {
7846 /* The HP assembler and linker can handle relocations
7847 for the difference of two symbols. The HP assembler
7848 recognizes the sequence as a pc-relative call and
7849 the linker provides stubs when needed. */
7850 xoperands[1] = gen_label_rtx ();
7851 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7852 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7853 targetm.asm_out.internal_label (asm_out_file, "L",
7854 CODE_LABEL_NUMBER (xoperands[1]));
7855 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7856 }
7857 else if (TARGET_GAS && !TARGET_SOM
7858 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7859 {
7860 /* GAS currently can't generate the relocations that
7861 are needed for the SOM linker under HP-UX using this
7862 sequence. The GNU linker doesn't generate the stubs
7863 that are needed for external calls on TARGET_ELF32
7864 with this sequence. For now, we have to use a
7865 longer plabel sequence when using GAS. */
7866 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7867 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7868 xoperands);
7869 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7870 xoperands);
7871 }
7872 else
7873 {
7874 /* Emit a long plabel-based call sequence. This is
7875 essentially an inline implementation of $$dyncall.
7876 We don't actually try to call $$dyncall as this is
7877 as difficult as calling the function itself. */
7878 xoperands[0] = pa_get_deferred_plabel (call_dest);
7879 xoperands[1] = gen_label_rtx ();
7880
7881 /* Since the call is indirect, FP arguments in registers
7882 need to be copied to the general registers. Then, the
7883 argument relocation stub will copy them back. */
7884 if (TARGET_SOM)
7885 copy_fp_args (insn);
7886
7887 if (flag_pic)
7888 {
7889 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7890 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7891 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7892 }
7893 else
7894 {
7895 output_asm_insn ("addil LR'%0-$global$,%%r27",
7896 xoperands);
7897 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7898 xoperands);
7899 }
7900
7901 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7902 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7903 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7904 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7905
7906 if (!sibcall && !TARGET_PA_20)
7907 {
7908 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7909 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7910 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7911 else
7912 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7913 }
7914 }
7915
7916 if (TARGET_PA_20)
7917 {
7918 if (sibcall)
7919 output_asm_insn ("bve (%%r1)", xoperands);
7920 else
7921 {
7922 if (indirect_call)
7923 {
7924 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7925 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7926 delay_slot_filled = 1;
7927 }
7928 else
7929 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7930 }
7931 }
7932 else
7933 {
7934 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7935 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7936 xoperands);
7937
7938 if (sibcall)
7939 {
7940 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7941 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7942 else
7943 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7944 }
7945 else
7946 {
7947 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7948 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7949 else
7950 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7951
7952 if (indirect_call)
7953 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7954 else
7955 output_asm_insn ("copy %%r31,%%r2", xoperands);
7956 delay_slot_filled = 1;
7957 }
7958 }
7959 }
7960 }
7961 }
7962
7963 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7964 output_asm_insn ("nop", xoperands);
7965
7966 /* We are done if there isn't a jump in the delay slot. */
7967 if (seq_length == 0
7968 || delay_insn_deleted
7969 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7970 return "";
7971
7972 /* A sibcall should never have a branch in the delay slot. */
7973 gcc_assert (!sibcall);
7974
7975 /* This call has an unconditional jump in its delay slot. */
7976 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7977
7978 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7979 {
7980 /* See if the return address can be adjusted. Use the containing
7981 sequence insn's address. This would break the regular call/return
7982 relationship assumed by the table-based eh unwinder, so only do that
7983 if the call is not possibly throwing. */
7984 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7985 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7986 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7987
7988 if (VAL_14_BITS_P (distance)
7989 && !(can_throw_internal (insn) || can_throw_external (insn)))
7990 {
7991 xoperands[1] = gen_label_rtx ();
7992 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7993 targetm.asm_out.internal_label (asm_out_file, "L",
7994 CODE_LABEL_NUMBER (xoperands[1]));
7995 }
7996 else
7997 output_asm_insn ("nop\n\tb,n %0", xoperands);
7998 }
7999 else
8000 output_asm_insn ("b,n %0", xoperands);
8001
8002 /* Delete the jump. */
8003 SET_INSN_DELETED (NEXT_INSN (insn));
8004
8005 return "";
8006 }
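
/* Hedged example (not from the original source): the long absolute call
   sequence selected above for non-PIC code looks roughly like

       ldil L'foo,%r1        ; high part of the target address
       ble R'foo(%sr4,%r1)   ; inter-space branch and link via %r31
       copy %r31,%r2         ; delay slot: fix up the return pointer

   where foo stands for an arbitrary callee.  */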
8007
8008 /* Return the attribute length of the indirect call instruction INSN.
8009 The length must match the code generated by pa_output_indirect_call.
8010 The returned length includes the delay slot. Currently, the delay
8011 slot of an indirect call sequence is not exposed and it is used by
8012 the sequence itself. */
8013
8014 int
8015 pa_attr_length_indirect_call (rtx insn)
8016 {
8017 unsigned long distance = -1;
8018 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8019
8020 if (INSN_ADDRESSES_SET_P ())
8021 {
8022 distance = (total + insn_current_reference_address (insn));
8023 if (distance < total)
8024 distance = -1;
8025 }
8026
8027 if (TARGET_64BIT)
8028 return 12;
8029
8030 if (TARGET_FAST_INDIRECT_CALLS
8031 || (!TARGET_PORTABLE_RUNTIME
8032 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8033 || distance < MAX_PCREL17F_OFFSET)))
8034 return 8;
8035
8036 if (flag_pic)
8037 return 24;
8038
8039 if (TARGET_PORTABLE_RUNTIME)
8040 return 20;
8041
8042 /* Out of reach, can use ble. */
8043 return 12;
8044 }
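
/* Note added for clarity: the return values above correspond one-to-one
   to the sequences in pa_output_indirect_call below -- 8 for the single
   bl/ble pair, 12 for the non-PIC ldil/ble/copy sequence, 20 for the
   portable runtime sequence, and 24 for the long PIC sequence.  */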
8045
8046 const char *
8047 pa_output_indirect_call (rtx insn, rtx call_dest)
8048 {
8049 rtx xoperands[1];
8050
8051 if (TARGET_64BIT)
8052 {
8053 xoperands[0] = call_dest;
8054 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8055 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8056 return "";
8057 }
8058
8059 /* First the special case for kernels, level 0 systems, etc. */
8060 if (TARGET_FAST_INDIRECT_CALLS)
8061 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8062
8063 /* Now the normal case -- we can reach $$dyncall directly or
8064 we're sure that we can get there via a long-branch stub.
8065
8066 No need to check target flags as the length uniquely identifies
8067 the remaining cases. */
8068 if (pa_attr_length_indirect_call (insn) == 8)
8069 {
8070 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8071 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8072 variant of the B,L instruction can't be used on the SOM target. */
8073 if (TARGET_PA_20 && !TARGET_SOM)
8074 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8075 else
8076 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8077 }
8078
8079 /* Long millicode call, but we are not generating PIC or portable runtime
8080 code. */
8081 if (pa_attr_length_indirect_call (insn) == 12)
8082 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8083
8084 /* Long millicode call for portable runtime. */
8085 if (pa_attr_length_indirect_call (insn) == 20)
8086 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
8087
8088 /* We need a long PIC call to $$dyncall. */
8089 xoperands[0] = NULL_RTX;
8090 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8091 if (TARGET_SOM || !TARGET_GAS)
8092 {
8093 xoperands[0] = gen_label_rtx ();
8094 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
8095 targetm.asm_out.internal_label (asm_out_file, "L",
8096 CODE_LABEL_NUMBER (xoperands[0]));
8097 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8098 }
8099 else
8100 {
8101 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
8102 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8103 xoperands);
8104 }
8105 output_asm_insn ("blr %%r0,%%r2", xoperands);
8106 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
8107 return "";
8108 }
8109
8110 /* In HP-UX 8.0's shared library scheme, special relocations are needed
8111 for function labels if they might be passed to a function
8112 in a shared library (because shared libraries don't live in code
8113 space), and special magic is needed to construct their address. */
8114
8115 void
8116 pa_encode_label (rtx sym)
8117 {
8118 const char *str = XSTR (sym, 0);
8119 int len = strlen (str) + 1;
8120 char *newstr, *p;
8121
8122 p = newstr = XALLOCAVEC (char, len + 1);
8123 *p++ = '@';
8124 strcpy (p, str);
8125
8126 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8127 }
8128
8129 static void
8130 pa_encode_section_info (tree decl, rtx rtl, int first)
8131 {
8132 int old_referenced = 0;
8133
8134 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8135 old_referenced
8136 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8137
8138 default_encode_section_info (decl, rtl, first);
8139
8140 if (first && TEXT_SPACE_P (decl))
8141 {
8142 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8143 if (TREE_CODE (decl) == FUNCTION_DECL)
8144 pa_encode_label (XEXP (rtl, 0));
8145 }
8146 else if (old_referenced)
8147 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8148 }
8149
8150 /* This is sort of inverse to pa_encode_section_info. */
8151
8152 static const char *
8153 pa_strip_name_encoding (const char *str)
8154 {
8155 str += (*str == '@');
8156 str += (*str == '*');
8157 return str;
8158 }
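
/* Usage sketch (added for illustration): pa_encode_label turns the
   symbol string "foo" into "@foo", and pa_strip_name_encoding recovers
   "foo" by skipping the leading '@' (and any '*' prefix used for
   verbatim names).  */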
8159
8160 /* Returns 1 if OP is a function label involved in a simple addition
8161 with a constant. Used to keep certain patterns from matching
8162 during instruction combination. */
8163 int
8164 pa_is_function_label_plus_const (rtx op)
8165 {
8166 /* Strip off any CONST. */
8167 if (GET_CODE (op) == CONST)
8168 op = XEXP (op, 0);
8169
8170 return (GET_CODE (op) == PLUS
8171 && function_label_operand (XEXP (op, 0), VOIDmode)
8172 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8173 }
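
/* Example (added for illustration): this matches RTL of the form
   (const (plus (symbol_ref "@foo") (const_int 4))), i.e. an encoded
   function label plus a constant offset; "@foo" is a placeholder.  */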
8174
8175 /* Output assembly code for a thunk to FUNCTION. */
8176
8177 static void
8178 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8179 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8180 tree function)
8181 {
8182 static unsigned int current_thunk_number;
8183 int val_14 = VAL_14_BITS_P (delta);
8184 unsigned int old_last_address = last_address, nbytes = 0;
8185 char label[16];
8186 rtx xoperands[4];
8187
8188 xoperands[0] = XEXP (DECL_RTL (function), 0);
8189 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8190 xoperands[2] = GEN_INT (delta);
8191
8192 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
8193 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8194
8195 /* Output the thunk. We know that the function is in the same
8196 translation unit (i.e., the same space) as the thunk, and that
8197 thunks are output after their method. Thus, we don't need an
8198 external branch to reach the function. With SOM and GAS,
8199 functions and thunks are effectively in different sections.
8200 Thus, we can always use an IA-relative branch and the linker
8201 will add a long branch stub if necessary.
8202
8203 However, we have to be careful when generating PIC code on the
8204 SOM port to ensure that the sequence does not transfer to an
8205 import stub for the target function as this could clobber the
8206 return value saved at SP-24. This would also apply to the
8207 32-bit linux port if the multi-space model is implemented. */
8208 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8209 && !(flag_pic && TREE_PUBLIC (function))
8210 && (TARGET_GAS || last_address < 262132))
8211 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8212 && ((targetm_common.have_named_sections
8213 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8214 /* The GNU 64-bit linker has rather poor stub management.
8215 So, we use a long branch from thunks that aren't in
8216 the same section as the target function. */
8217 && ((!TARGET_64BIT
8218 && (DECL_SECTION_NAME (thunk_fndecl)
8219 != DECL_SECTION_NAME (function)))
8220 || ((DECL_SECTION_NAME (thunk_fndecl)
8221 == DECL_SECTION_NAME (function))
8222 && last_address < 262132)))
8223 || (targetm_common.have_named_sections
8224 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8225 && DECL_SECTION_NAME (function) == NULL
8226 && last_address < 262132)
8227 || (!targetm_common.have_named_sections
8228 && last_address < 262132))))
8229 {
8230 if (!val_14)
8231 output_asm_insn ("addil L'%2,%%r26", xoperands);
8232
8233 output_asm_insn ("b %0", xoperands);
8234
8235 if (val_14)
8236 {
8237 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8238 nbytes += 8;
8239 }
8240 else
8241 {
8242 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8243 nbytes += 12;
8244 }
8245 }
8246 else if (TARGET_64BIT)
8247 {
8248 /* We only have one call-clobbered scratch register, so we can't
8249 make use of the delay slot if delta doesn't fit in 14 bits. */
8250 if (!val_14)
8251 {
8252 output_asm_insn ("addil L'%2,%%r26", xoperands);
8253 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8254 }
8255
8256 output_asm_insn ("b,l .+8,%%r1", xoperands);
8257
8258 if (TARGET_GAS)
8259 {
8260 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8261 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8262 }
8263 else
8264 {
8265 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8266 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8267 }
8268
8269 if (val_14)
8270 {
8271 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8272 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8273 nbytes += 20;
8274 }
8275 else
8276 {
8277 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8278 nbytes += 24;
8279 }
8280 }
8281 else if (TARGET_PORTABLE_RUNTIME)
8282 {
8283 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8284 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8285
8286 if (!val_14)
8287 output_asm_insn ("addil L'%2,%%r26", xoperands);
8288
8289 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8290
8291 if (val_14)
8292 {
8293 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8294 nbytes += 16;
8295 }
8296 else
8297 {
8298 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8299 nbytes += 20;
8300 }
8301 }
8302 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8303 {
8304 /* The function is accessible from outside this module. The only
8305 way to avoid an import stub between the thunk and function is to
8306 call the function directly with an indirect sequence similar to
8307 that used by $$dyncall. This is possible because $$dyncall acts
8308 as the import stub in an indirect call. */
8309 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8310 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8311 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8312 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8313 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8314 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8315 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8316 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8317 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8318
8319 if (!val_14)
8320 {
8321 output_asm_insn ("addil L'%2,%%r26", xoperands);
8322 nbytes += 4;
8323 }
8324
8325 if (TARGET_PA_20)
8326 {
8327 output_asm_insn ("bve (%%r22)", xoperands);
8328 nbytes += 36;
8329 }
8330 else if (TARGET_NO_SPACE_REGS)
8331 {
8332 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8333 nbytes += 36;
8334 }
8335 else
8336 {
8337 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8338 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8339 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8340 nbytes += 44;
8341 }
8342
8343 if (val_14)
8344 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8345 else
8346 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8347 }
8348 else if (flag_pic)
8349 {
8350 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8351
8352 if (TARGET_SOM || !TARGET_GAS)
8353 {
8354 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8355 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8356 }
8357 else
8358 {
8359 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8360 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8361 }
8362
8363 if (!val_14)
8364 output_asm_insn ("addil L'%2,%%r26", xoperands);
8365
8366 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8367
8368 if (val_14)
8369 {
8370 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8371 nbytes += 20;
8372 }
8373 else
8374 {
8375 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8376 nbytes += 24;
8377 }
8378 }
8379 else
8380 {
8381 if (!val_14)
8382 output_asm_insn ("addil L'%2,%%r26", xoperands);
8383
8384 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8385 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8386
8387 if (val_14)
8388 {
8389 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8390 nbytes += 12;
8391 }
8392 else
8393 {
8394 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8395 nbytes += 16;
8396 }
8397 }
8398
8399 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8400
8401 if (TARGET_SOM && TARGET_GAS)
8402 {
8403 /* We're done with this subspace except possibly for some additional
8404 debug information. Forget that we are in this subspace to ensure
8405 that the next function is output in its own subspace. */
8406 in_section = NULL;
8407 cfun->machine->in_nsubspa = 2;
8408 }
8409
8410 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8411 {
8412 switch_to_section (data_section);
8413 output_asm_insn (".align 4", xoperands);
8414 ASM_OUTPUT_LABEL (file, label);
8415 output_asm_insn (".word P'%0", xoperands);
8416 }
8417
8418 current_thunk_number++;
8419 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8420 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8421 last_address += nbytes;
8422 if (old_last_address > last_address)
8423 last_address = UINT_MAX;
8424 update_total_code_bytes (nbytes);
8425 }
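
/* Hedged example (not from the original source): in the simplest case
   above (short reach, delta fits in 14 bits) the thunk body is just

       b function               ; IA-relative branch to the target
       ldo delta(%r26),%r26     ; delay slot: adjust the this pointer

   with "function" and "delta" standing in for the real operands.  */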
8426
8427 /* Only direct calls to static functions are allowed to be sibling (tail)
8428 call optimized.
8429
8430 This restriction is necessary because some linker-generated stubs will,
8431 in some cases, store return pointers into rp', which might clobber a
8432 live value already in rp'.
8433
8434 In a sibcall the current function and the target function share stack
8435 space. Thus if the path to the current function and the path to the
8436 target function save a value in rp', they save the value into the
8437 same stack slot, which has undesirable consequences.
8438
8439 Because of the deferred binding nature of shared libraries any function
8440 with external scope could be in a different load module and thus require
8441 rp' to be saved when calling that function. So sibcall optimizations
8442 can only be safe for static functions.
8443
8444 Note that GCC never needs return value relocations, so we don't have to
8445 worry about static calls with return value relocations (which require
8446 saving rp').
8447
8448 It is safe to perform a sibcall optimization when the target function
8449 will never return. */
8450 static bool
8451 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8452 {
8453 if (TARGET_PORTABLE_RUNTIME)
8454 return false;
8455
8456 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8457 single subspace mode and the call is not indirect. As far as I know,
8458 there is no operating system support for the multiple subspace mode.
8459 It might be possible to support indirect calls if we didn't use
8460 $$dyncall (see the indirect sequence generated in pa_output_call). */
8461 if (TARGET_ELF32)
8462 return (decl != NULL_TREE);
8463
8464 /* Sibcalls are not ok because the arg pointer register is not a fixed
8465 register. This prevents the sibcall optimization from occurring. In
8466 addition, there are problems with stub placement using GNU ld. This
8467 is because a normal sibcall branch uses a 17-bit relocation while
8468 a regular call branch uses a 22-bit relocation. As a result, more
8469 care needs to be taken in the placement of long-branch stubs. */
8470 if (TARGET_64BIT)
8471 return false;
8472
8473 /* Sibcalls are only ok within a translation unit. */
8474 return (decl && !TREE_PUBLIC (decl));
8475 }
8476
8477 /* ??? Addition is not commutative on the PA due to the weird implicit
8478 space register selection rules for memory addresses. Therefore, we
8479 don't consider a + b == b + a, as this might be inside a MEM. */
8480 static bool
8481 pa_commutative_p (const_rtx x, int outer_code)
8482 {
8483 return (COMMUTATIVE_P (x)
8484 && (TARGET_NO_SPACE_REGS
8485 || (outer_code != UNKNOWN && outer_code != MEM)
8486 || GET_CODE (x) != PLUS));
8487 }
8488
8489 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8490 use in fmpyadd instructions. */
8491 int
8492 pa_fmpyaddoperands (rtx *operands)
8493 {
8494 enum machine_mode mode = GET_MODE (operands[0]);
8495
8496 /* Must be a floating point mode. */
8497 if (mode != SFmode && mode != DFmode)
8498 return 0;
8499
8500 /* All modes must be the same. */
8501 if (! (mode == GET_MODE (operands[1])
8502 && mode == GET_MODE (operands[2])
8503 && mode == GET_MODE (operands[3])
8504 && mode == GET_MODE (operands[4])
8505 && mode == GET_MODE (operands[5])))
8506 return 0;
8507
8508 /* All operands must be registers. */
8509 if (! (GET_CODE (operands[1]) == REG
8510 && GET_CODE (operands[2]) == REG
8511 && GET_CODE (operands[3]) == REG
8512 && GET_CODE (operands[4]) == REG
8513 && GET_CODE (operands[5]) == REG))
8514 return 0;
8515
8516 /* Only 2 real operands to the addition. One of the input operands must
8517 be the same as the output operand. */
8518 if (! rtx_equal_p (operands[3], operands[4])
8519 && ! rtx_equal_p (operands[3], operands[5]))
8520 return 0;
8521
8522 /* Inout operand of add cannot conflict with any operands from multiply. */
8523 if (rtx_equal_p (operands[3], operands[0])
8524 || rtx_equal_p (operands[3], operands[1])
8525 || rtx_equal_p (operands[3], operands[2]))
8526 return 0;
8527
8528 /* multiply cannot feed into addition operands. */
8529 if (rtx_equal_p (operands[4], operands[0])
8530 || rtx_equal_p (operands[5], operands[0]))
8531 return 0;
8532
8533 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8534 if (mode == SFmode
8535 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8536 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8537 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8538 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8539 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8540 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8541 return 0;
8542
8543 /* Passed. Operands are suitable for fmpyadd. */
8544 return 1;
8545 }
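
/* Example (added for illustration): a qualifying operand set pairs an
   independent multiply t = x * y (operands 0..2) with an accumulate
   a = a + b (operands 3..5, with operands[3] == operands[4]), where t,
   x and y are disjoint from a and b so neither operation feeds the
   other within the combined insn.  */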
8546
8547 #if !defined(USE_COLLECT2)
8548 static void
8549 pa_asm_out_constructor (rtx symbol, int priority)
8550 {
8551 if (!function_label_operand (symbol, VOIDmode))
8552 pa_encode_label (symbol);
8553
8554 #ifdef CTORS_SECTION_ASM_OP
8555 default_ctor_section_asm_out_constructor (symbol, priority);
8556 #else
8557 # ifdef TARGET_ASM_NAMED_SECTION
8558 default_named_section_asm_out_constructor (symbol, priority);
8559 # else
8560 default_stabs_asm_out_constructor (symbol, priority);
8561 # endif
8562 #endif
8563 }
8564
8565 static void
8566 pa_asm_out_destructor (rtx symbol, int priority)
8567 {
8568 if (!function_label_operand (symbol, VOIDmode))
8569 pa_encode_label (symbol);
8570
8571 #ifdef DTORS_SECTION_ASM_OP
8572 default_dtor_section_asm_out_destructor (symbol, priority);
8573 #else
8574 # ifdef TARGET_ASM_NAMED_SECTION
8575 default_named_section_asm_out_destructor (symbol, priority);
8576 # else
8577 default_stabs_asm_out_destructor (symbol, priority);
8578 # endif
8579 #endif
8580 }
8581 #endif
8582
8583 /* This function places uninitialized global data in the bss section.
8584 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8585 function on the SOM port to prevent uninitialized global data from
8586 being placed in the data section. */
8587
8588 void
8589 pa_asm_output_aligned_bss (FILE *stream,
8590 const char *name,
8591 unsigned HOST_WIDE_INT size,
8592 unsigned int align)
8593 {
8594 switch_to_section (bss_section);
8595 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8596
8597 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8598 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8599 #endif
8600
8601 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8602 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8603 #endif
8604
8605 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8606 ASM_OUTPUT_LABEL (stream, name);
8607 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8608 }
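
/* Sample output (added for illustration): for name "buf", size 256 and
   a 64-bit alignment request this emits roughly

       .align 8
       ; (type/size directives, when the configuration defines them)
       .align 8
   buf
       .block 256

   with the exact label syntax coming from ASM_OUTPUT_LABEL.  */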
8609
8610 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8611 that doesn't allow the alignment of global common storage to be directly
8612 specified. The SOM linker aligns common storage based on the rounded
8613 value of the NUM_BYTES parameter in the .comm directive. It's not
8614 possible to use the .align directive as it doesn't affect the alignment
8615 of the label associated with a .comm directive. */
8616
8617 void
8618 pa_asm_output_aligned_common (FILE *stream,
8619 const char *name,
8620 unsigned HOST_WIDE_INT size,
8621 unsigned int align)
8622 {
8623 unsigned int max_common_align;
8624
8625 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8626 if (align > max_common_align)
8627 {
8628 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8629 "for global common data. Using %u",
8630 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8631 align = max_common_align;
8632 }
8633
8634 switch_to_section (bss_section);
8635
8636 assemble_name (stream, name);
8637 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8638 MAX (size, align / BITS_PER_UNIT));
8639 }
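
/* Worked example (added for illustration): for size 16 and a 256-bit
   (32-byte) alignment request this emits

   buf	.comm 32

   i.e. MAX (16, 32) -- the block is over-allocated so that the SOM
   linker's size-based rounding yields the requested alignment.  */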
8640
8641 /* We can't use .comm for local common storage as the SOM linker effectively
8642 treats the symbol as universal and uses the same storage for local symbols
8643 with the same name in different object files. The .block directive
8644 reserves an uninitialized block of storage. However, it's not common
8645 storage. Fortunately, GCC never requests common storage with the same
8646 name in any given translation unit. */
8647
8648 void
8649 pa_asm_output_aligned_local (FILE *stream,
8650 const char *name,
8651 unsigned HOST_WIDE_INT size,
8652 unsigned int align)
8653 {
8654 switch_to_section (bss_section);
8655 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8656
8657 #ifdef LOCAL_ASM_OP
8658 fprintf (stream, "%s", LOCAL_ASM_OP);
8659 assemble_name (stream, name);
8660 fprintf (stream, "\n");
8661 #endif
8662
8663 ASM_OUTPUT_LABEL (stream, name);
8664 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8665 }
8666
8667 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8668 use in fmpysub instructions. */
8669 int
8670 pa_fmpysuboperands (rtx *operands)
8671 {
8672 enum machine_mode mode = GET_MODE (operands[0]);
8673
8674 /* Must be a floating point mode. */
8675 if (mode != SFmode && mode != DFmode)
8676 return 0;
8677
8678 /* All modes must be the same. */
8679 if (! (mode == GET_MODE (operands[1])
8680 && mode == GET_MODE (operands[2])
8681 && mode == GET_MODE (operands[3])
8682 && mode == GET_MODE (operands[4])
8683 && mode == GET_MODE (operands[5])))
8684 return 0;
8685
8686 /* All operands must be registers. */
8687 if (! (GET_CODE (operands[1]) == REG
8688 && GET_CODE (operands[2]) == REG
8689 && GET_CODE (operands[3]) == REG
8690 && GET_CODE (operands[4]) == REG
8691 && GET_CODE (operands[5]) == REG))
8692 return 0;
8693
8694 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8695 operation, so operands[4] must be the same as operands[3]. */
8696 if (! rtx_equal_p (operands[3], operands[4]))
8697 return 0;
8698
8699 /* multiply cannot feed into subtraction. */
8700 if (rtx_equal_p (operands[5], operands[0]))
8701 return 0;
8702
8703 /* Inout operand of sub cannot conflict with any operands from multiply. */
8704 if (rtx_equal_p (operands[3], operands[0])
8705 || rtx_equal_p (operands[3], operands[1])
8706 || rtx_equal_p (operands[3], operands[2]))
8707 return 0;
8708
8709 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8710 if (mode == SFmode
8711 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8712 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8713 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8714 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8715 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8716 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8717 return 0;
8718
8719 /* Passed. Operands are suitable for fmpysub. */
8720 return 1;
8721 }
8722
8723 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8724 constants for shadd instructions. */
8725 int
8726 pa_shadd_constant_p (int val)
8727 {
8728 if (val == 2 || val == 4 || val == 8)
8729 return 1;
8730 else
8731 return 0;
8732 }
8733
8734 /* Return TRUE if INSN branches forward. */
8735
8736 static bool
8737 forward_branch_p (rtx insn)
8738 {
8739 rtx lab = JUMP_LABEL (insn);
8740
8741 /* The INSN must have a jump label. */
8742 gcc_assert (lab != NULL_RTX);
8743
8744 if (INSN_ADDRESSES_SET_P ())
8745 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8746
8747 while (insn)
8748 {
8749 if (insn == lab)
8750 return true;
8751 else
8752 insn = NEXT_INSN (insn);
8753 }
8754
8755 return false;
8756 }
8757
8758 /* Return 1 if INSN is in the delay slot of a call instruction. */
8759 int
8760 pa_jump_in_call_delay (rtx insn)
8761 {
8762
8763 if (GET_CODE (insn) != JUMP_INSN)
8764 return 0;
8765
8766 if (PREV_INSN (insn)
8767 && PREV_INSN (PREV_INSN (insn))
8768 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8769 {
8770 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8771
8772 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8773 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8774
8775 }
8776 else
8777 return 0;
8778 }
8779
8780 /* Output an unconditional move and branch insn. */
8781
8782 const char *
8783 pa_output_parallel_movb (rtx *operands, rtx insn)
8784 {
8785 int length = get_attr_length (insn);
8786
8787 /* These are the cases in which we win. */
8788 if (length == 4)
8789 return "mov%I1b,tr %1,%0,%2";
8790
8791 /* None of the following cases win, but they don't lose either. */
8792 if (length == 8)
8793 {
8794 if (dbr_sequence_length () == 0)
8795 {
8796 /* Nothing in the delay slot, fake it by putting the combined
8797 insn (the copy or add) in the delay slot of a bl. */
8798 if (GET_CODE (operands[1]) == CONST_INT)
8799 return "b %2\n\tldi %1,%0";
8800 else
8801 return "b %2\n\tcopy %1,%0";
8802 }
8803 else
8804 {
8805 /* Something in the delay slot, but we've got a long branch. */
8806 if (GET_CODE (operands[1]) == CONST_INT)
8807 return "ldi %1,%0\n\tb %2";
8808 else
8809 return "copy %1,%0\n\tb %2";
8810 }
8811 }
8812
8813 if (GET_CODE (operands[1]) == CONST_INT)
8814 output_asm_insn ("ldi %1,%0", operands);
8815 else
8816 output_asm_insn ("copy %1,%0", operands);
8817 return pa_output_lbranch (operands[2], insn, 1);
8818 }
8819
8820 /* Output an unconditional add and branch insn. */
8821
8822 const char *
8823 pa_output_parallel_addb (rtx *operands, rtx insn)
8824 {
8825 int length = get_attr_length (insn);
8826
8827 /* To make life easy, we want operand0 to be the shared input/output
8828 operand and operand1 to be the readonly operand. */
8829 if (operands[0] == operands[1])
8830 operands[1] = operands[2];
8831
8832 /* These are the cases in which we win. */
8833 if (length == 4)
8834 return "add%I1b,tr %1,%0,%3";
8835
8836 /* None of the following cases win, but they don't lose either. */
8837 if (length == 8)
8838 {
8839 if (dbr_sequence_length () == 0)
8840 /* Nothing in the delay slot, fake it by putting the combined
8841 insn (the copy or add) in the delay slot of a bl. */
8842 return "b %3\n\tadd%I1 %1,%0,%0";
8843 else
8844 /* Something in the delay slot, but we've got a long branch. */
8845 return "add%I1 %1,%0,%0\n\tb %3";
8846 }
8847
8848 output_asm_insn ("add%I1 %1,%0,%0", operands);
8849 return pa_output_lbranch (operands[3], insn, 1);
8850 }
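
/* Hedged example (not from the original source): the winning length-4
   case above emits a single combined insn such as

       addb,tr %r5,%r3,L$42   ; add and branch on the "true" condition

   where the ,tr completer makes the branch unconditional; the register
   and label names are placeholders.  */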
8851
8852 /* Return nonzero if INSN (a jump insn) immediately follows a call
8853 to a named function. This is used to avoid filling the delay slot
8854 of the jump since it can usually be eliminated by modifying RP in
8855 the delay slot of the call. */
8856
8857 int
8858 pa_following_call (rtx insn)
8859 {
8860 if (! TARGET_JUMP_IN_DELAY)
8861 return 0;
8862
8863 /* Find the previous real insn, skipping NOTEs. */
8864 insn = PREV_INSN (insn);
8865 while (insn && GET_CODE (insn) == NOTE)
8866 insn = PREV_INSN (insn);
8867
8868 /* Check for CALL_INSNs and millicode calls. */
8869 if (insn
8870 && ((GET_CODE (insn) == CALL_INSN
8871 && get_attr_type (insn) != TYPE_DYNCALL)
8872 || (GET_CODE (insn) == INSN
8873 && GET_CODE (PATTERN (insn)) != SEQUENCE
8874 && GET_CODE (PATTERN (insn)) != USE
8875 && GET_CODE (PATTERN (insn)) != CLOBBER
8876 && get_attr_type (insn) == TYPE_MILLI)))
8877 return 1;
8878
8879 return 0;
8880 }
8881
8882 /* We use this hook to perform a PA-specific optimization which is difficult
8883 to do in earlier passes.
8884
8885 We want the delay slots of branches within jump tables to be filled.
8886 None of the compiler passes at the moment even has the notion that a
8887 PA jump table doesn't contain addresses, but instead contains actual
8888 instructions!
8889
8890 Because we actually jump into the table, the addresses of each entry
8891 must stay constant in relation to the beginning of the table (which
8892 itself must stay constant relative to the instruction to jump into
8893 it). I don't believe we can guarantee earlier passes of the compiler
8894 will adhere to those rules.
8895
8896 So, late in the compilation process we find all the jump tables, and
8897 expand them into real code -- e.g. each entry in the jump table vector
8898 will get an appropriate label followed by a jump to the final target.
8899
8900 Reorg and the final jump pass can then optimize these branches and
8901 fill their delay slots. We end up with smaller, more efficient code.
8902
8903 The jump instructions within the table are special; we must be able
8904 to identify them during assembly output (if the jumps don't get filled
8905 we need to emit a nop rather than nullifying the delay slot). We
8906 identify jumps in switch tables by using insns with the attribute
8907 type TYPE_BTABLE_BRANCH.
8908
8909 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8911 insns. This serves two purposes: first, it prevents jump.c from
8911 noticing that the last N entries in the table jump to the instruction
8912 immediately after the table and deleting the jumps. Second, those
8913 insns mark where we should emit .begin_brtab and .end_brtab directives
8914 when using GAS (this allows for better link-time optimizations). */
8915
8916 static void
8917 pa_reorg (void)
8918 {
8919 rtx insn;
8920
8921 remove_useless_addtr_insns (1);
8922
8923 if (pa_cpu < PROCESSOR_8000)
8924 pa_combine_instructions ();
8925
8927 /* This is fairly cheap, so always run it if optimizing. */
8928 if (optimize > 0 && !TARGET_BIG_SWITCH)
8929 {
8930 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8931 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8932 {
8933 rtx pattern, tmp, location, label;
8934 unsigned int length, i;
8935
8936 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8937 if (GET_CODE (insn) != JUMP_INSN
8938 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8939 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8940 continue;
8941
8942 /* Emit marker for the beginning of the branch table. */
8943 emit_insn_before (gen_begin_brtab (), insn);
8944
8945 pattern = PATTERN (insn);
8946 location = PREV_INSN (insn);
8947 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8948
8949 for (i = 0; i < length; i++)
8950 {
8951 /* Emit a label before each jump to keep jump.c from
8952 removing this code. */
8953 tmp = gen_label_rtx ();
8954 LABEL_NUSES (tmp) = 1;
8955 emit_label_after (tmp, location);
8956 location = NEXT_INSN (location);
8957
8958 if (GET_CODE (pattern) == ADDR_VEC)
8959 label = XEXP (XVECEXP (pattern, 0, i), 0);
8960 else
8961 label = XEXP (XVECEXP (pattern, 1, i), 0);
8962
8963 tmp = gen_short_jump (label);
8964
8965 /* Emit the jump itself. */
8966 tmp = emit_jump_insn_after (tmp, location);
8967 JUMP_LABEL (tmp) = label;
8968 LABEL_NUSES (label)++;
8969 location = NEXT_INSN (location);
8970
8971 /* Emit a BARRIER after the jump. */
8972 emit_barrier_after (location);
8973 location = NEXT_INSN (location);
8974 }
8975
8976 /* Emit marker for the end of the branch table. */
8977 emit_insn_before (gen_end_brtab (), location);
8978 location = NEXT_INSN (location);
8979 emit_barrier_after (location);
8980
8981 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8982 delete_insn (insn);
8983 }
8984 }
8985 else
8986 {
8987 /* Still need brtab marker insns. FIXME: the presence of these
8988 markers disables output of the branch table to readonly memory,
8989 and any alignment directives that might be needed. Possibly,
8990 the begin_brtab insn should be output before the label for the
8991 table. This doesn't matter at the moment since the tables are
8992 always output in the text section. */
8993 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8994 {
8995 /* Find an ADDR_VEC insn. */
8996 if (GET_CODE (insn) != JUMP_INSN
8997 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8998 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8999 continue;
9000
9001 /* Now generate markers for the beginning and end of the
9002 branch table. */
9003 emit_insn_before (gen_begin_brtab (), insn);
9004 emit_insn_after (gen_end_brtab (), insn);
9005 }
9006 }
9007 }
9008
9009 /* The PA has a number of odd instructions which can perform multiple
9010 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9011 it may be profitable to combine two instructions into one instruction
9012 with two outputs. It's not profitable on PA2.0 machines because the
9013 two outputs would take two slots in the reorder buffers.
9014
9015 This routine finds instructions which can be combined and combines
9016 them. We only support some of the potential combinations, and we
9017 only try common ways to find suitable instructions.
9018
9019 * addb can add two registers or a register and a small integer
9020 and jump to a nearby (+-8k) location. Normally the jump to the
9021 nearby location is conditional on the result of the add, but by
9022 using the "true" condition we can make the jump unconditional.
9023 Thus addb can perform two independent operations in one insn.
9024
9025 * movb is similar to addb in that it can perform a reg->reg
9026 or small immediate->reg copy and jump to a nearby (+-8k) location.
9027
9028 * fmpyadd and fmpysub can perform a FP multiply and either an
9029 FP add or FP sub if the operands of the multiply and add/sub are
9030 independent (there are other minor restrictions). Note both
9031 the fmpy and fadd/fsub can in theory move to better spots according
9032 to data dependencies, but for now we require the fmpy stay at a
9033 fixed location.
9034
9035 * Many of the memory operations can perform pre & post updates
9036 of index registers. GCC's pre/post increment/decrement addressing
9037 is far too simple to take advantage of all the possibilities. This
9038 pass may not be suitable since those insns may not be independent.
9039
9040 * comclr can compare two registers or a register and a small integer, nullify
9041 the following instruction and zero some other register. This
9042 is more difficult to use as it's harder to find an insn which
9043 will generate a comclr than finding something like an unconditional
9044 branch. (conditional moves & long branches create comclr insns).
9045
9046 * Most arithmetic operations can conditionally skip the next
9047 instruction. They can be viewed as "perform this operation
9048 and conditionally jump to this nearby location" (where nearby
9049 is one insn away). These are difficult to use due to the
9050 branch length restrictions. */
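
/* An illustrative sketch of the addb case: an add followed by an
   unconditional branch to a nearby label,

       add %r4,%r3,%r3
       b,n L$5

   can be rewritten as a single insn using the always-true condition,

       addb,tr %r4,%r3,L$5

   which is the "add%I1b,tr %1,%0,%3" template used above when the
   combined insn is within branch range (length == 4).  */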
9051
9052 static void
9053 pa_combine_instructions (void)
9054 {
9055 rtx anchor, new_rtx;
9056
9057 /* This can get expensive since the basic algorithm is on the
9058 order of O(n^2) (or worse). Only do it for -O2 or higher
9059 levels of optimization. */
9060 if (optimize < 2)
9061 return;
9062
9063 /* Walk down the list of insns looking for "anchor" insns which
9064 may be combined with "floating" insns. As the name implies,
9065 "anchor" instructions don't move, while "floating" insns may
9066 move around. */
9067 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9068 new_rtx = make_insn_raw (new_rtx);
9069
9070 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9071 {
9072 enum attr_pa_combine_type anchor_attr;
9073 enum attr_pa_combine_type floater_attr;
9074
9075 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9076 Also ignore any special USE insns. */
9077 if ((GET_CODE (anchor) != INSN
9078 && GET_CODE (anchor) != JUMP_INSN
9079 && GET_CODE (anchor) != CALL_INSN)
9080 || GET_CODE (PATTERN (anchor)) == USE
9081 || GET_CODE (PATTERN (anchor)) == CLOBBER
9082 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
9083 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
9084 continue;
9085
9086 anchor_attr = get_attr_pa_combine_type (anchor);
9087 /* See if anchor is an insn suitable for combination. */
9088 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9089 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9090 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9091 && ! forward_branch_p (anchor)))
9092 {
9093 rtx floater;
9094
9095 for (floater = PREV_INSN (anchor);
9096 floater;
9097 floater = PREV_INSN (floater))
9098 {
9099 if (GET_CODE (floater) == NOTE
9100 || (GET_CODE (floater) == INSN
9101 && (GET_CODE (PATTERN (floater)) == USE
9102 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9103 continue;
9104
9105 /* Anything except a regular INSN will stop our search. */
9106 if (GET_CODE (floater) != INSN
9107 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9108 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9109 {
9110 floater = NULL_RTX;
9111 break;
9112 }
9113
9114 /* See if FLOATER is suitable for combination with the
9115 anchor. */
9116 floater_attr = get_attr_pa_combine_type (floater);
9117 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9118 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9119 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9120 && floater_attr == PA_COMBINE_TYPE_FMPY))
9121 {
9122 /* If ANCHOR and FLOATER can be combined, then we're
9123 done with this pass. */
9124 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9125 SET_DEST (PATTERN (floater)),
9126 XEXP (SET_SRC (PATTERN (floater)), 0),
9127 XEXP (SET_SRC (PATTERN (floater)), 1)))
9128 break;
9129 }
9130
9131 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9132 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9133 {
9134 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9135 {
9136 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9137 SET_DEST (PATTERN (floater)),
9138 XEXP (SET_SRC (PATTERN (floater)), 0),
9139 XEXP (SET_SRC (PATTERN (floater)), 1)))
9140 break;
9141 }
9142 else
9143 {
9144 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9145 SET_DEST (PATTERN (floater)),
9146 SET_SRC (PATTERN (floater)),
9147 SET_SRC (PATTERN (floater))))
9148 break;
9149 }
9150 }
9151 }
9152
9153 /* If we didn't find anything on the backwards scan, try forwards. */
9154 if (!floater
9155 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9156 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9157 {
9158 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9159 {
9160 if (GET_CODE (floater) == NOTE
9161 || (GET_CODE (floater) == INSN
9162 && (GET_CODE (PATTERN (floater)) == USE
9163 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9165 continue;
9166
9167 /* Anything except a regular INSN will stop our search. */
9168 if (GET_CODE (floater) != INSN
9169 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9170 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9171 {
9172 floater = NULL_RTX;
9173 break;
9174 }
9175
9176 /* See if FLOATER is suitable for combination with the
9177 anchor. */
9178 floater_attr = get_attr_pa_combine_type (floater);
9179 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9180 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9181 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9182 && floater_attr == PA_COMBINE_TYPE_FMPY))
9183 {
9184 /* If ANCHOR and FLOATER can be combined, then we're
9185 done with this pass. */
9186 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9187 SET_DEST (PATTERN (floater)),
9188 XEXP (SET_SRC (PATTERN (floater)),
9189 0),
9190 XEXP (SET_SRC (PATTERN (floater)),
9191 1)))
9192 break;
9193 }
9194 }
9195 }
9196
9197 /* FLOATER will be nonzero if we found a suitable floating
9198 insn for combination with ANCHOR. */
9199 if (floater
9200 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9201 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9202 {
9203 /* Emit the new instruction and delete the old anchor. */
9204 emit_insn_before (gen_rtx_PARALLEL
9205 (VOIDmode,
9206 gen_rtvec (2, PATTERN (anchor),
9207 PATTERN (floater))),
9208 anchor);
9209
9210 SET_INSN_DELETED (anchor);
9211
9212 /* Emit a special USE insn for FLOATER, then delete
9213 the floating insn. */
9214 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9215 delete_insn (floater);
9216
9217 continue;
9218 }
9219 else if (floater
9220 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9221 {
9222 rtx temp;
9223 /* Emit the new jump instruction and delete the old anchor. */
9224 temp
9225 = emit_jump_insn_before (gen_rtx_PARALLEL
9226 (VOIDmode,
9227 gen_rtvec (2, PATTERN (anchor),
9228 PATTERN (floater))),
9229 anchor);
9230
9231 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9232 SET_INSN_DELETED (anchor);
9233
9234 /* Emit a special USE insn for FLOATER, then delete
9235 the floating insn. */
9236 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9237 delete_insn (floater);
9238 continue;
9239 }
9240 }
9241 }
9242 }
9243
9244 static int
9245 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9246 rtx src1, rtx src2)
9247 {
9248 int insn_code_number;
9249 rtx start, end;
9250
9251 /* Create a PARALLEL with the patterns of ANCHOR and
9252 FLOATER, try to recognize it, then test constraints
9253 for the resulting pattern.
9254
9255 If the pattern doesn't match or the constraints
9256 aren't met keep searching for a suitable floater
9257 insn. */
9258 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9259 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9260 INSN_CODE (new_rtx) = -1;
9261 insn_code_number = recog_memoized (new_rtx);
9262 if (insn_code_number < 0
9263 || (extract_insn (new_rtx), ! constrain_operands (1)))
9264 return 0;
9265
9266 if (reversed)
9267 {
9268 start = anchor;
9269 end = floater;
9270 }
9271 else
9272 {
9273 start = floater;
9274 end = anchor;
9275 }
9276
9277 /* There are up to three operands to consider: one
9278 output and two inputs.
9279
9280 The output must not be used between FLOATER & ANCHOR
9281 exclusive. The inputs must not be set between
9282 FLOATER and ANCHOR exclusive. */
9283
9284 if (reg_used_between_p (dest, start, end))
9285 return 0;
9286
9287 if (reg_set_between_p (src1, start, end))
9288 return 0;
9289
9290 if (reg_set_between_p (src2, start, end))
9291 return 0;
9292
9293 /* If we get here, then everything is good. */
9294 return 1;
9295 }
9296
9297 /* Return nonzero if references for INSN are delayed.
9298
9299 Millicode insns are actually function calls with some special
9300 constraints on arguments and register usage.
9301
9302 Millicode calls always expect their arguments in the integer argument
9303 registers, and always return their result in %r29 (ret1). They
9304 are expected to clobber their arguments, %r1, %r29, and the return
9305 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9306
9307 This function tells reorg that the references to arguments and
9308 millicode calls do not appear to happen until after the millicode call.
9309 This allows reorg to put insns which set the argument registers into the
9310 delay slot of the millicode call -- thus they act more like traditional
9311 CALL_INSNs.
9312
9313 Note we cannot consider side effects of the insn to be delayed because
9314 the branch and link insn will clobber the return pointer. If we happened
9315 to use the return pointer in the delay slot of the call, then we lose.
9316
9317 get_attr_type will try to recognize the given insn, so make sure to
9318 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9319 in particular. */
9320 int
9321 pa_insn_refs_are_delayed (rtx insn)
9322 {
9323 return ((GET_CODE (insn) == INSN
9324 && GET_CODE (PATTERN (insn)) != SEQUENCE
9325 && GET_CODE (PATTERN (insn)) != USE
9326 && GET_CODE (PATTERN (insn)) != CLOBBER
9327 && get_attr_type (insn) == TYPE_MILLI));
9328 }
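/* An illustrative sketch (registers and values hypothetical): because
   argument references are treated as delayed, reorg may transform

       ldi 10,%r26          ; set up millicode argument
       bl $$mulI,%r31       ; millicode call
       nop

   into

       bl $$mulI,%r31
       ldi 10,%r26          ; argument set up in the delay slot

   just as it would for a traditional CALL_INSN.  */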
9329
9330 /* Promote the return value, but not the arguments. */
9331
9332 static enum machine_mode
9333 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9334 enum machine_mode mode,
9335 int *punsignedp ATTRIBUTE_UNUSED,
9336 const_tree fntype ATTRIBUTE_UNUSED,
9337 int for_return)
9338 {
9339 if (for_return == 0)
9340 return mode;
9341 return promote_mode (type, mode, punsignedp);
9342 }
9343
9344 /* On the HP-PA the value is found in register(s) 28(-29), unless
9345 the mode is SF or DF. Then the value is returned in fr4 (32).
9346
9347 This must perform the same promotions as PROMOTE_MODE, else promoting
9348 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9349
9350 Small structures must be returned in a PARALLEL on PA64 in order
9351 to match the HP Compiler ABI. */
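
/* For instance (an illustrative sketch), a 12-byte struct on PA64 has
   int_size_in_bytes > UNITS_PER_WORD, so ub is 2 and the result is

       (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                      (expr_list (reg:DI 29) (const_int 8))])

   i.e. the value is left justified in GRs 28 and 29.  */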
9352
9353 static rtx
9354 pa_function_value (const_tree valtype,
9355 const_tree func ATTRIBUTE_UNUSED,
9356 bool outgoing ATTRIBUTE_UNUSED)
9357 {
9358 enum machine_mode valmode;
9359
9360 if (AGGREGATE_TYPE_P (valtype)
9361 || TREE_CODE (valtype) == COMPLEX_TYPE
9362 || TREE_CODE (valtype) == VECTOR_TYPE)
9363 {
9364 if (TARGET_64BIT)
9365 {
9366 /* Aggregates with a size less than or equal to 128 bits are
9367 returned in GR 28(-29). They are left justified. The pad
9368 bits are undefined. Larger aggregates are returned in
9369 memory. */
9370 rtx loc[2];
9371 int i, offset = 0;
9372 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9373
9374 for (i = 0; i < ub; i++)
9375 {
9376 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9377 gen_rtx_REG (DImode, 28 + i),
9378 GEN_INT (offset));
9379 offset += 8;
9380 }
9381
9382 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9383 }
9384 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9385 {
9386 /* Aggregates 5 to 8 bytes in size are returned in general
9387 registers r28-r29 in the same manner as other non
9388 floating-point objects. The data is right-justified and
9389 zero-extended to 64 bits. This is opposite to the normal
9390 justification used on big endian targets and requires
9391 special treatment. */
9392 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9393 gen_rtx_REG (DImode, 28), const0_rtx);
9394 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9395 }
9396 }
9397
9398 if ((INTEGRAL_TYPE_P (valtype)
9399 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9400 || POINTER_TYPE_P (valtype))
9401 valmode = word_mode;
9402 else
9403 valmode = TYPE_MODE (valtype);
9404
9405 if (TREE_CODE (valtype) == REAL_TYPE
9406 && !AGGREGATE_TYPE_P (valtype)
9407 && TYPE_MODE (valtype) != TFmode
9408 && !TARGET_SOFT_FLOAT)
9409 return gen_rtx_REG (valmode, 32);
9410
9411 return gen_rtx_REG (valmode, 28);
9412 }
9413
9414 /* Implement the TARGET_LIBCALL_VALUE hook. */
9415
9416 static rtx
9417 pa_libcall_value (enum machine_mode mode,
9418 const_rtx fun ATTRIBUTE_UNUSED)
9419 {
9420 if (! TARGET_SOFT_FLOAT
9421 && (mode == SFmode || mode == DFmode))
9422 return gen_rtx_REG (mode, 32);
9423 else
9424 return gen_rtx_REG (mode, 28);
9425 }
9426
9427 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9428
9429 static bool
9430 pa_function_value_regno_p (const unsigned int regno)
9431 {
9432 if (regno == 28
9433 || (! TARGET_SOFT_FLOAT && regno == 32))
9434 return true;
9435
9436 return false;
9437 }
9438
9439 /* Update the data in CUM to advance over an argument
9440 of mode MODE and data type TYPE.
9441 (TYPE is null for libcalls where that information may not be available.) */
9442
9443 static void
9444 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9445 const_tree type, bool named ATTRIBUTE_UNUSED)
9446 {
9447 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9448 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9449
9450 cum->nargs_prototype--;
9451 cum->words += (arg_size
9452 + ((cum->words & 01)
9453 && type != NULL_TREE
9454 && arg_size > 1));
9455 }
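/* Worked example (illustrative): with cum->words == 1, a DFmode
   argument with a type (arg_size == 2) advances cum->words by 3 --
   one pad word to reach a double-word boundary plus the two argument
   words -- whereas a SImode argument advances it by just 1.  */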
9456
9457 /* Return the location of a parameter that is passed in a register or NULL
9458 if the parameter has any component that is passed in memory.
9459
9460 This is new code and will be pushed into the net sources after
9461 further testing.
9462
9463 ??? We might want to restructure this so that it looks more like other
9464 ports. */
9465 static rtx
9466 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9467 const_tree type, bool named ATTRIBUTE_UNUSED)
9468 {
9469 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9470 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9471 int alignment = 0;
9472 int arg_size;
9473 int fpr_reg_base;
9474 int gpr_reg_base;
9475 rtx retval;
9476
9477 if (mode == VOIDmode)
9478 return NULL_RTX;
9479
9480 arg_size = FUNCTION_ARG_SIZE (mode, type);
9481
9482 /* If this arg would be passed partially or totally on the stack, then
9483 this routine should return zero. pa_arg_partial_bytes will
9484 handle arguments which are split between regs and stack slots if
9485 the ABI mandates split arguments. */
9486 if (!TARGET_64BIT)
9487 {
9488 /* The 32-bit ABI does not split arguments. */
9489 if (cum->words + arg_size > max_arg_words)
9490 return NULL_RTX;
9491 }
9492 else
9493 {
9494 if (arg_size > 1)
9495 alignment = cum->words & 1;
9496 if (cum->words + alignment >= max_arg_words)
9497 return NULL_RTX;
9498 }
9499
9500 /* The 32-bit and 64-bit ABIs are rather different,
9501 particularly in their handling of FP registers. We might
9502 be able to cleverly share code between them, but I'm not
9503 going to bother in the hope that splitting them up results
9504 in code that is more easily understood. */
9505
9506 if (TARGET_64BIT)
9507 {
9508 /* Advance the base registers to their current locations.
9509
9510 Remember, gprs grow towards smaller register numbers while
9511 fprs grow to higher register numbers. Also remember that
9512 although FP regs are 32-bit addressable, we pretend that
9513 the registers are 64 bits wide.
9514 gpr_reg_base = 26 - cum->words;
9515 fpr_reg_base = 32 + cum->words;
9516
9517 /* Arguments wider than one word and small aggregates need special
9518 treatment. */
9519 if (arg_size > 1
9520 || mode == BLKmode
9521 || (type && (AGGREGATE_TYPE_P (type)
9522 || TREE_CODE (type) == COMPLEX_TYPE
9523 || TREE_CODE (type) == VECTOR_TYPE)))
9524 {
9525 /* Double-extended precision (80-bit), quad-precision (128-bit)
9526 and aggregates including complex numbers are aligned on
9527 128-bit boundaries. The first eight 64-bit argument slots
9528 are associated one-to-one, with general registers r26
9529 through r19, and also with floating-point registers fr4
9530 through fr11. Arguments larger than one word are always
9531 passed in general registers.
9532
9533 Using a PARALLEL with a word mode register results in left
9534 justified data on a big-endian target. */
9535
9536 rtx loc[8];
9537 int i, offset = 0, ub = arg_size;
9538
9539 /* Align the base register. */
9540 gpr_reg_base -= alignment;
9541
9542 ub = MIN (ub, max_arg_words - cum->words - alignment);
9543 for (i = 0; i < ub; i++)
9544 {
9545 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9546 gen_rtx_REG (DImode, gpr_reg_base),
9547 GEN_INT (offset));
9548 gpr_reg_base -= 1;
9549 offset += 8;
9550 }
9551
9552 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9553 }
9554 }
9555 else
9556 {
9557 /* If the argument is larger than a word, then we know precisely
9558 which registers we must use. */
9559 if (arg_size > 1)
9560 {
9561 if (cum->words)
9562 {
9563 gpr_reg_base = 23;
9564 fpr_reg_base = 38;
9565 }
9566 else
9567 {
9568 gpr_reg_base = 25;
9569 fpr_reg_base = 34;
9570 }
9571
9572 /* Structures 5 to 8 bytes in size are passed in the general
9573 registers in the same manner as other non floating-point
9574 objects. The data is right-justified and zero-extended
9575 to 64 bits. This is opposite to the normal justification
9576 used on big endian targets and requires special treatment.
9577 We now define BLOCK_REG_PADDING to pad these objects.
9578 Aggregates, complex and vector types are passed in the same
9579 manner as structures. */
9580 if (mode == BLKmode
9581 || (type && (AGGREGATE_TYPE_P (type)
9582 || TREE_CODE (type) == COMPLEX_TYPE
9583 || TREE_CODE (type) == VECTOR_TYPE)))
9584 {
9585 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9586 gen_rtx_REG (DImode, gpr_reg_base),
9587 const0_rtx);
9588 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9589 }
9590 }
9591 else
9592 {
9593 /* We have a single word (32 bits). A simple computation
9594 will get us the register #s we need. */
9595 gpr_reg_base = 26 - cum->words;
9596 fpr_reg_base = 32 + 2 * cum->words;
9597 }
9598 }
9599
9600 /* Determine if the argument needs to be passed in both general and
9601 floating point registers. */
9602 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9603 /* If we are doing soft-float with portable runtime, then there
9604 is no need to worry about FP regs. */
9605 && !TARGET_SOFT_FLOAT
9606 /* The parameter must be some kind of scalar float, else we just
9607 pass it in integer registers. */
9608 && GET_MODE_CLASS (mode) == MODE_FLOAT
9609 /* The target function must not have a prototype. */
9610 && cum->nargs_prototype <= 0
9611 /* libcalls do not need to pass items in both FP and general
9612 registers. */
9613 && type != NULL_TREE
9614 /* All this hair applies to "outgoing" args only. This includes
9615 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9616 && !cum->incoming)
9617 /* Also pass outgoing floating arguments in both registers in indirect
9618 calls with the 32-bit ABI and the HP assembler since there is no
9619 way to specify argument locations in static functions.
9620 || (!TARGET_64BIT
9621 && !TARGET_GAS
9622 && !cum->incoming
9623 && cum->indirect
9624 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9625 {
9626 retval
9627 = gen_rtx_PARALLEL
9628 (mode,
9629 gen_rtvec (2,
9630 gen_rtx_EXPR_LIST (VOIDmode,
9631 gen_rtx_REG (mode, fpr_reg_base),
9632 const0_rtx),
9633 gen_rtx_EXPR_LIST (VOIDmode,
9634 gen_rtx_REG (mode, gpr_reg_base),
9635 const0_rtx)));
9636 }
9637 else
9638 {
9639 /* See if we should pass this parameter in a general register. */
9640 if (TARGET_SOFT_FLOAT
9641 /* Indirect calls in the normal 32-bit ABI require all arguments
9642 to be passed in general registers. */
9643 || (!TARGET_PORTABLE_RUNTIME
9644 && !TARGET_64BIT
9645 && !TARGET_ELF32
9646 && cum->indirect)
9647 /* If the parameter is not a scalar floating-point parameter,
9648 then it belongs in GPRs. */
9649 || GET_MODE_CLASS (mode) != MODE_FLOAT
9650 /* Structure with single SFmode field belongs in GPR. */
9651 || (type && AGGREGATE_TYPE_P (type)))
9652 retval = gen_rtx_REG (mode, gpr_reg_base);
9653 else
9654 retval = gen_rtx_REG (mode, fpr_reg_base);
9655 }
9656 return retval;
9657 }
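/* For example (an illustrative sketch): for an outgoing, unprototyped
   32-bit ELF call, the first DFmode argument (arg_size == 2, giving
   gpr_reg_base == 25 and fpr_reg_base == 34) yields

       (parallel:DF [(expr_list (reg:DF 34) (const_int 0))
                     (expr_list (reg:DF 25) (const_int 0))])

   so the value is available under both the FP and the general
   register conventions, whichever the callee actually uses.  */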
9658
9659 /* Arguments larger than one word are double word aligned. */
9660
9661 static unsigned int
9662 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9663 {
9664 bool singleword = (type
9665 ? (integer_zerop (TYPE_SIZE (type))
9666 || !TREE_CONSTANT (TYPE_SIZE (type))
9667 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9668 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9669
9670 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9671 }
9672
9673 /* If this arg would be passed totally in registers or totally on the stack,
9674 then this routine should return zero. */
9675
9676 static int
9677 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9678 tree type, bool named ATTRIBUTE_UNUSED)
9679 {
9680 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9681 unsigned int max_arg_words = 8;
9682 unsigned int offset = 0;
9683
9684 if (!TARGET_64BIT)
9685 return 0;
9686
9687 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9688 offset = 1;
9689
9690 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9691 /* Arg fits fully into registers. */
9692 return 0;
9693 else if (cum->words + offset >= max_arg_words)
9694 /* Arg fully on the stack. */
9695 return 0;
9696 else
9697 /* Arg is split. */
9698 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9699 }
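/* Worked example (illustrative): on the 64-bit port with
   cum->words == 6 and a four-word argument, 6 + 4 > 8 while 6 < 8, so
   the argument is split; (8 - 6) * UNITS_PER_WORD = 16 bytes go in
   registers and the remainder is passed on the stack.  */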
9700
9701
9702 /* A get_unnamed_section callback for switching to the text section.
9703
9704 This function is only used with SOM. Because we don't support
9705 named subspaces, we can only create a new subspace or switch back
9706 to the default text subspace. */
9707
9708 static void
9709 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9710 {
9711 gcc_assert (TARGET_SOM);
9712 if (TARGET_GAS)
9713 {
9714 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9715 {
9716 /* We only want to emit a .nsubspa directive once at the
9717 start of the function. */
9718 cfun->machine->in_nsubspa = 1;
9719
9720 /* Create a new subspace for the text. This provides
9721 better stub placement and one-only functions. */
9722 if (cfun->decl
9723 && DECL_ONE_ONLY (cfun->decl)
9724 && !DECL_WEAK (cfun->decl))
9725 {
9726 output_section_asm_op ("\t.SPACE $TEXT$\n"
9727 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9728 "ACCESS=44,SORT=24,COMDAT");
9729 return;
9730 }
9731 }
9732 else
9733 {
9734 /* There isn't a current function or the body of the current
9735 function has been completed. So, we are changing to the
9736 text section to output debugging information. Thus, we
9737 need to forget that we are in the text section so that
9738 varasm.c will call us when text_section is selected again. */
9739 gcc_assert (!cfun || !cfun->machine
9740 || cfun->machine->in_nsubspa == 2);
9741 in_section = NULL;
9742 }
9743 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9744 return;
9745 }
9746 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9747 }
9748
9749 /* A get_unnamed_section callback for switching to comdat data
9750 sections. This function is only used with SOM. */
9751
9752 static void
9753 som_output_comdat_data_section_asm_op (const void *data)
9754 {
9755 in_section = NULL;
9756 output_section_asm_op (data);
9757 }
9758
9759 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
9760
9761 static void
9762 pa_som_asm_init_sections (void)
9763 {
9764 text_section
9765 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9766
9767 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9768 is not being generated. */
9769 som_readonly_data_section
9770 = get_unnamed_section (0, output_section_asm_op,
9771 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9772
9773 /* When secondary definitions are not supported, SOM makes readonly
9774 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9775 the comdat flag. */
9776 som_one_only_readonly_data_section
9777 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9778 "\t.SPACE $TEXT$\n"
9779 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9780 "ACCESS=0x2c,SORT=16,COMDAT");
9781
9783 /* When secondary definitions are not supported, SOM makes data one-only
9784 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9785 som_one_only_data_section
9786 = get_unnamed_section (SECTION_WRITE,
9787 som_output_comdat_data_section_asm_op,
9788 "\t.SPACE $PRIVATE$\n"
9789 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9790 "ACCESS=31,SORT=24,COMDAT");
9791
9792 if (flag_tm)
9793 som_tm_clone_table_section
9794 = get_unnamed_section (0, output_section_asm_op,
9795 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9796
9797 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9798 which reference data within the $TEXT$ space (for example constant
9799 strings in the $LIT$ subspace).
9800
9801 The assemblers (GAS and HP as) both have problems with handling
9802 the difference of two symbols which is the other correct way to
9803 reference constant data during PIC code generation.
9804
9805 So, there's no way to reference constant data which is in the
9806 $TEXT$ space during PIC generation. Instead place all constant
9807 data into the $PRIVATE$ subspace (this reduces sharing, but it
9808 works correctly). */
9809 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9810
9811 /* We must not have a reference to an external symbol defined in a
9812 shared library in a readonly section, else the SOM linker will
9813 complain.
9814
9815 So, we force exception information into the data section. */
9816 exception_section = data_section;
9817 }
9818
9819 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9820
9821 static section *
9822 pa_som_tm_clone_table_section (void)
9823 {
9824 return som_tm_clone_table_section;
9825 }
9826
9827 /* On hpux10, the linker will give an error if we have a reference
9828 in the read-only data section to a symbol defined in a shared
9829 library. Therefore, expressions that might require a reloc cannot
9830 be placed in the read-only data section. */
9831
9832 static section *
9833 pa_select_section (tree exp, int reloc,
9834 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9835 {
9836 if (TREE_CODE (exp) == VAR_DECL
9837 && TREE_READONLY (exp)
9838 && !TREE_THIS_VOLATILE (exp)
9839 && DECL_INITIAL (exp)
9840 && (DECL_INITIAL (exp) == error_mark_node
9841 || TREE_CONSTANT (DECL_INITIAL (exp)))
9842 && !reloc)
9843 {
9844 if (TARGET_SOM
9845 && DECL_ONE_ONLY (exp)
9846 && !DECL_WEAK (exp))
9847 return som_one_only_readonly_data_section;
9848 else
9849 return readonly_data_section;
9850 }
9851 else if (CONSTANT_CLASS_P (exp) && !reloc)
9852 return readonly_data_section;
9853 else if (TARGET_SOM
9854 && TREE_CODE (exp) == VAR_DECL
9855 && DECL_ONE_ONLY (exp)
9856 && !DECL_WEAK (exp))
9857 return som_one_only_data_section;
9858 else
9859 return data_section;
9860 }
9861
9862 static void
9863 pa_globalize_label (FILE *stream, const char *name)
9864 {
9865 /* We only handle DATA objects here, functions are globalized in
9866 ASM_DECLARE_FUNCTION_NAME. */
9867 if (! FUNCTION_NAME_P (name))
9868 {
9869 fputs ("\t.EXPORT ", stream);
9870 assemble_name (stream, name);
9871 fputs (",DATA\n", stream);
9872 }
9873 }
9874
9875 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9876
9877 static rtx
9878 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9879 int incoming ATTRIBUTE_UNUSED)
9880 {
9881 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9882 }
9883
9884 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9885
9886 bool
9887 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9888 {
9889 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9890 PA64 ABI says that objects larger than 128 bits are returned in memory.
9891 Note, int_size_in_bytes can return -1 if the size of the object is
9892 variable or larger than the maximum value that can be expressed as
9893 a HOST_WIDE_INT. It can also return zero for an empty type. The
9894 simplest way to handle variable and empty types is to pass them in
9895 memory. This avoids problems in defining the boundaries of argument
9896 slots, allocating registers, etc. */
9897 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9898 || int_size_in_bytes (type) <= 0);
9899 }
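/* E.g. (illustrative): a 12-byte struct is returned in memory on the
   32-bit port (12 > 8) but in registers on PA64 (12 <= 16), while a
   variable-sized or empty type (size -1 or 0) always goes in
   memory.  */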
9900
9901 /* Structure to hold declaration and name of external symbols that are
9902 emitted by GCC. We generate a vector of these symbols and output them
9903 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9904 This avoids putting out names that are never really used. */
9905
9906 typedef struct GTY(()) extern_symbol
9907 {
9908 tree decl;
9909 const char *name;
9910 } extern_symbol;
9911
9912 /* Define gc'd vector type for extern_symbol. */
9913 DEF_VEC_O(extern_symbol);
9914 DEF_VEC_ALLOC_O(extern_symbol,gc);
9915
9916 /* Vector of extern_symbol pointers. */
9917 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9918
9919 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9920 /* Mark DECL (name NAME) as an external reference (assembler output
9921 file FILE). This saves the names to output at the end of the file
9922 if actually referenced. */
9923
9924 void
9925 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9926 {
9927 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9928
9929 gcc_assert (file == asm_out_file);
9930 p->decl = decl;
9931 p->name = name;
9932 }
9933
9934 /* Output text required at the end of an assembler file.
9935 This includes deferred plabels and .import directives for
9936 all external symbols that were actually referenced. */
9937
9938 static void
9939 pa_hpux_file_end (void)
9940 {
9941 unsigned int i;
9942 extern_symbol *p;
9943
9944 if (!NO_DEFERRED_PROFILE_COUNTERS)
9945 output_deferred_profile_counters ();
9946
9947 output_deferred_plabels ();
9948
9949 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9950 {
9951 tree decl = p->decl;
9952
9953 if (!TREE_ASM_WRITTEN (decl)
9954 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9955 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9956 }
9957
9958 VEC_free (extern_symbol, gc, extern_symbols);
9959 }
9960 #endif
9961
9962 /* Return true if a change from mode FROM to mode TO for a register
9963 in register class RCLASS is invalid. */
9964
9965 bool
9966 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9967 enum reg_class rclass)
9968 {
9969 if (from == to)
9970 return false;
9971
9972 /* Reject changes to/from complex and vector modes. */
9973 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9974 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9975 return true;
9976
9977 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9978 return false;
9979
9980 /* There is no way to load QImode or HImode values directly from
9981 memory. SImode loads to the FP registers are not zero extended.
9982 On the 64-bit target, this conflicts with the definition of
9983 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9984 with different sizes in the floating-point registers. */
9985 if (MAYBE_FP_REG_CLASS_P (rclass))
9986 return true;
9987
9988 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9989 in specific sets of registers. Thus, we cannot allow changing
9990 to a larger mode when it's larger than a word. */
9991 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9992 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9993 return true;
9994
9995 return false;
9996 }
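/* Example (illustrative): an SFmode to DFmode change is rejected for
   any class that may contain FP registers because the sizes differ,
   whereas an SImode to SFmode change (same size) is always OK.  */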
9997
9998 /* Returns TRUE if it is a good idea to tie two pseudo registers
9999 when one has mode MODE1 and one has mode MODE2.
10000 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
10001 for any hard reg, then this must be FALSE for correct output.
10002
10003 We should return FALSE for QImode and HImode because these modes
10004 are not ok in the floating-point registers. However, this prevents
10005 tying these modes to SImode and DImode in the general registers.
10006 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
10007 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10008 in the floating-point registers. */
10009
10010 bool
10011 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
10012 {
10013 /* Don't tie modes in different classes. */
10014 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10015 return false;
10016
10017 return true;
10018 }
10019
10020 \f
10021 /* Length in units of the trampoline instruction code. */
10022
10023 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10024
10025
10026 /* Output assembler code for a block containing the constant parts
10027 of a trampoline, leaving space for the variable parts.
10028
10029 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10030 and then branches to the specified routine.
10031
10032 This code template is copied from the text segment to a stack
10033 location and then patched by pa_trampoline_init to contain valid
10034 values, before being entered as a subroutine.
10035
10036 It is best to keep this as small as possible to avoid having to
10037 flush multiple lines in the cache. */
10038
10039 static void
10040 pa_asm_trampoline_template (FILE *f)
10041 {
10042 if (!TARGET_64BIT)
10043 {
10044 fputs ("\tldw 36(%r22),%r21\n", f);
10045 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10046 if (ASSEMBLER_DIALECT == 0)
10047 fputs ("\tdepi 0,31,2,%r21\n", f);
10048 else
10049 fputs ("\tdepwi 0,31,2,%r21\n", f);
10050 fputs ("\tldw 4(%r21),%r19\n", f);
10051 fputs ("\tldw 0(%r21),%r21\n", f);
10052 if (TARGET_PA_20)
10053 {
10054 fputs ("\tbve (%r21)\n", f);
10055 fputs ("\tldw 40(%r22),%r29\n", f);
10056 fputs ("\t.word 0\n", f);
10057 fputs ("\t.word 0\n", f);
10058 }
10059 else
10060 {
10061 fputs ("\tldsid (%r21),%r1\n", f);
10062 fputs ("\tmtsp %r1,%sr0\n", f);
10063 fputs ("\tbe 0(%sr0,%r21)\n", f);
10064 fputs ("\tldw 40(%r22),%r29\n", f);
10065 }
10066 fputs ("\t.word 0\n", f);
10067 fputs ("\t.word 0\n", f);
10068 fputs ("\t.word 0\n", f);
10069 fputs ("\t.word 0\n", f);
10070 }
10071 else
10072 {
10073 fputs ("\t.dword 0\n", f);
10074 fputs ("\t.dword 0\n", f);
10075 fputs ("\t.dword 0\n", f);
10076 fputs ("\t.dword 0\n", f);
10077 fputs ("\tmfia %r31\n", f);
10078 fputs ("\tldd 24(%r31),%r1\n", f);
10079 fputs ("\tldd 24(%r1),%r27\n", f);
10080 fputs ("\tldd 16(%r1),%r1\n", f);
10081 fputs ("\tbve (%r1)\n", f);
10082 fputs ("\tldd 32(%r31),%r31\n", f);
10083 fputs ("\t.dword 0 ; fptr\n", f);
10084 fputs ("\t.dword 0 ; static link\n", f);
10085 }
10086 }
10087
10088 /* Emit RTL insns to initialize the variable parts of a trampoline.
10089 FNADDR is an RTX for the address of the function's pure code.
10090 CXT is an RTX for the static chain value for the function.
10091
10092 Move the function address to the trampoline template at offset 36.
10093 Move the static chain value to the trampoline template at offset 40.
10094 Move the trampoline address to the trampoline template at offset 44.
10095 Move r19 to trampoline template at offset 48. The latter two
10096 words create a plabel for the indirect call to the trampoline.
10097
10098 A similar sequence is used for the 64-bit port but the plabel is
10099 at the beginning of the trampoline.
10100
10101 Finally, the cache entries for the trampoline code are flushed.
10102 This is necessary to ensure that the trampoline instruction sequence
10103 is written to memory prior to any attempts at prefetching the code
10104 sequence. */
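
/* Resulting 32-bit trampoline layout (an illustrative summary):

     offset  0-35   code copied from the template above
     offset 36      function address
     offset 40      static chain value
     offset 44      trampoline address  \  these two words form the
     offset 48      %r19                /  plabel for indirect calls

   On the 64-bit port the plabel words are instead initialized at
   offsets 16 and 24, ahead of the code which begins at offset 32.  */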
10105
10106 static void
10107 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10108 {
10109 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10110 rtx start_addr = gen_reg_rtx (Pmode);
10111 rtx end_addr = gen_reg_rtx (Pmode);
10112 rtx line_length = gen_reg_rtx (Pmode);
10113 rtx r_tramp, tmp;
10114
10115 emit_block_move (m_tramp, assemble_trampoline_template (),
10116 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10117 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10118
10119 if (!TARGET_64BIT)
10120 {
10121 tmp = adjust_address (m_tramp, Pmode, 36);
10122 emit_move_insn (tmp, fnaddr);
10123 tmp = adjust_address (m_tramp, Pmode, 40);
10124 emit_move_insn (tmp, chain_value);
10125
10126 /* Create a fat pointer for the trampoline. */
10127 tmp = adjust_address (m_tramp, Pmode, 44);
10128 emit_move_insn (tmp, r_tramp);
10129 tmp = adjust_address (m_tramp, Pmode, 48);
10130 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10131
10132 /* fdc and fic only use registers for the address to flush,
10133 they do not accept integer displacements. We align the
10134 start and end addresses to the beginning of their respective
10135 cache lines to minimize the number of lines flushed. */
10136 emit_insn (gen_andsi3 (start_addr, r_tramp,
10137 GEN_INT (-MIN_CACHELINE_SIZE)));
10138 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10139 TRAMPOLINE_CODE_SIZE-1));
10140 emit_insn (gen_andsi3 (end_addr, tmp,
10141 GEN_INT (-MIN_CACHELINE_SIZE)));
10142 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10143 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10144 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10145 gen_reg_rtx (Pmode),
10146 gen_reg_rtx (Pmode)));
10147 }
10148 else
10149 {
10150 tmp = adjust_address (m_tramp, Pmode, 56);
10151 emit_move_insn (tmp, fnaddr);
10152 tmp = adjust_address (m_tramp, Pmode, 64);
10153 emit_move_insn (tmp, chain_value);
10154
10155 /* Create a fat pointer for the trampoline. */
10156 tmp = adjust_address (m_tramp, Pmode, 16);
10157 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10158 r_tramp, 32)));
10159 tmp = adjust_address (m_tramp, Pmode, 24);
10160 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10161
10162 /* fdc and fic only use registers for the address to flush,
10163 they do not accept integer displacements. We align the
10164 start and end addresses to the beginning of their respective
10165 cache lines to minimize the number of lines flushed. */
10166 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10167 emit_insn (gen_anddi3 (start_addr, tmp,
10168 GEN_INT (-MIN_CACHELINE_SIZE)));
10169 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10170 TRAMPOLINE_CODE_SIZE - 1));
10171 emit_insn (gen_anddi3 (end_addr, tmp,
10172 GEN_INT (-MIN_CACHELINE_SIZE)));
10173 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10174 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10175 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10176 gen_reg_rtx (Pmode),
10177 gen_reg_rtx (Pmode)));
10178 }
10179 }
10180
10181 /* Perform any machine-specific adjustment in the address of the trampoline.
10182 ADDR contains the address that was passed to pa_trampoline_init.
10183 Adjust the trampoline address to point to the plabel at offset 44;
10184 the extra 2 in the adjustment below marks the address as a plabel. */
10184
10185 static rtx
10186 pa_trampoline_adjust_address (rtx addr)
10187 {
10188 if (!TARGET_64BIT)
10189 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10190 return addr;
10191 }
10192
10193 static rtx
10194 pa_delegitimize_address (rtx orig_x)
10195 {
10196 rtx x = delegitimize_mem_from_attrs (orig_x);
10197
10198 if (GET_CODE (x) == LO_SUM
10199 && GET_CODE (XEXP (x, 1)) == UNSPEC
10200 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10201 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10202 return x;
10203 }
10204 \f
10205 static rtx
10206 pa_internal_arg_pointer (void)
10207 {
10208 /* The argument pointer and the hard frame pointer are the same in
10209 the 32-bit runtime, so we don't need a copy. */
10210 if (TARGET_64BIT)
10211 return copy_to_reg (virtual_incoming_args_rtx);
10212 else
10213 return virtual_incoming_args_rtx;
10214 }
10215
10216 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10217 Frame pointer elimination is automatically handled. */
10218
10219 static bool
10220 pa_can_eliminate (const int from, const int to)
10221 {
10222 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10223 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10224 return false;
10225
10226 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10227 ? ! frame_pointer_needed
10228 : true);
10229 }
10230
10231 /* Define the offset between two registers, FROM to be eliminated and its
10232 replacement TO, at the start of a routine. */
10233 HOST_WIDE_INT
10234 pa_initial_elimination_offset (int from, int to)
10235 {
10236 HOST_WIDE_INT offset;
10237
10238 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10239 && to == STACK_POINTER_REGNUM)
10240 offset = -pa_compute_frame_size (get_frame_size (), 0);
10241 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10242 offset = 0;
10243 else
10244 gcc_unreachable ();
10245
10246 return offset;
10247 }
10248
10249 static void
10250 pa_conditional_register_usage (void)
10251 {
10252 int i;
10253
10254 if (!TARGET_64BIT && !TARGET_PA_11)
10255 {
10256 for (i = 56; i <= FP_REG_LAST; i++)
10257 fixed_regs[i] = call_used_regs[i] = 1;
10258 for (i = 33; i < 56; i += 2)
10259 fixed_regs[i] = call_used_regs[i] = 1;
10260 }
10261 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10262 {
10263 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10264 fixed_regs[i] = call_used_regs[i] = 1;
10265 }
10266 if (flag_pic)
10267 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10268 }
10269
10270 /* Target hook for c_mode_for_suffix. */
10271
10272 static enum machine_mode
10273 pa_c_mode_for_suffix (char suffix)
10274 {
10275 if (HPUX_LONG_DOUBLE_LIBRARY)
10276 {
10277 if (suffix == 'q')
10278 return TFmode;
10279 }
10280
10281 return VOIDmode;
10282 }
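/* E.g. (illustrative): with an HP-UX long-double library, a constant
   written as 1.0q is given TFmode (128 bits); any other suffix gets
   VOIDmode, which selects the default handling.  */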
10283
10284 /* Target hook for function_section. */
10285
10286 static section *
10287 pa_function_section (tree decl, enum node_frequency freq,
10288 bool startup, bool exit)
10289 {
10290 /* Put functions in the text section if the target doesn't have named sections. */
10291 if (!targetm_common.have_named_sections)
10292 return text_section;
10293
10294 /* Force nested functions into the same section as the containing
10295 function. */
10296 if (decl
10297 && DECL_SECTION_NAME (decl) == NULL_TREE
10298 && DECL_CONTEXT (decl) != NULL_TREE
10299 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10300 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
10301 return function_section (DECL_CONTEXT (decl));
10302
10303 /* Otherwise, use the default function section. */
10304 return default_function_section (decl, freq, startup, exit);
10305 }
10306
10307 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10308
10309 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10310 that need more than three instructions to load prior to reload. This
10311 limit is somewhat arbitrary. It takes three instructions to load a
10312 CONST_INT from memory but two are memory accesses. It may be better
10313 to increase the allowed range for CONST_INTS. We may also be able
10314 to handle CONST_DOUBLES. */
10315
10316 static bool
10317 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10318 {
10319 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10320 return false;
10321
10322 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10323 return false;
10324
10325 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10326 legitimate constants. */
10327 if (PA_SYMBOL_REF_TLS_P (x))
10328 {
10329 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
10330
10331 if (model == TLS_MODEL_GLOBAL_DYNAMIC || model == TLS_MODEL_LOCAL_DYNAMIC)
10332 return false;
10333 }
10334
10335 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10336 return false;
10337
10338 if (TARGET_64BIT
10339 && HOST_BITS_PER_WIDE_INT > 32
10340 && GET_CODE (x) == CONST_INT
10341 && !reload_in_progress
10342 && !reload_completed
10343 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10344 && !pa_cint_ok_for_move (INTVAL (x)))
10345 return false;
10346
10347 if (function_label_operand (x, mode))
10348 return false;
10349
10350 return true;
10351 }
10352
10353 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10354
10355 static unsigned int
10356 pa_section_type_flags (tree decl, const char *name, int reloc)
10357 {
10358 unsigned int flags;
10359
10360 flags = default_section_type_flags (decl, name, reloc);
10361
10362 /* Function labels are placed in the constant pool. This can
10363 cause a section conflict if decls are put in ".data.rel.ro"
10364 or ".data.rel.ro.local" using the __attribute__ construct. */
10365 if (strcmp (name, ".data.rel.ro") == 0
10366 || strcmp (name, ".data.rel.ro.local") == 0)
10367 flags |= SECTION_WRITE | SECTION_RELRO;
10368
10369 return flags;
10370 }
10371
10372 #include "gt-pa.h"