/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "output.h"
#include "except.h"
#include "explow.h"
#include "expr.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "opts.h"
#include "builtins.h"

/* This file should be included last.  */
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  machine_mode store_mode;
  machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static inline rtx force_mode (machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
                             rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx_insn *);
static int compute_clrmem_length (rtx_insn *);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static int pa_reloc_rw_mask (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx_insn *) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static machine_mode pa_promote_function_mode (const_tree,
                                              machine_mode, int *,
                                              const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (machine_mode, rtx);
static bool pa_legitimate_constant_p (machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
\f
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK pa_reloc_rw_mask

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
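
/* Illustrative usage (hypothetical option value, added for clarity):
   with -mfixed-range=%fr4-%fr31 the loop above marks %fr4 through
   %fr31 as fixed and call-used; if that range happens to cover every
   register in FP_REG_FIRST..FP_REG_LAST, MASK_DISABLE_FPREGS is also
   set.  */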

/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mfixed_range_:
            fix_range (opt->arg);
            break;

          default:
            gcc_unreachable ();
          }
      }

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = const_double_from_real_value (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (unsigned HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || pa_ldil_cint_p (ival)
          || pa_zdepi_cint_p (ival));
}
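
/* Illustrative examples (added for clarity, not exhaustive): 5 and
   -42 satisfy VAL_14_BITS_P and load with a single ldo, while
   0x12345800 fails the 14-bit test but has its low 11 bits clear, so
   pa_ldil_cint_p accepts it and a single ldil suffices.  */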
\f
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (unsigned HOST_WIDE_INT ival)
{
  unsigned HOST_WIDE_INT x;

  x = ival & (((unsigned HOST_WIDE_INT) -1 << 31) | 0x7ff);
  return x == 0 || x == ((unsigned HOST_WIDE_INT) -1 << 31);
}
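
/* Worked example of the check above (illustrative): for
   ival = 0x12345800 the mask keeps bits 0-10 and bit 31 upward, all
   of which are zero here, so x == 0 and one ldil loads the value.
   For ival = 0x12345678, x == 0x678, which is neither 0 nor the
   sign-extension pattern, so ldil alone cannot load it.  */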

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
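
/* Worked example (illustrative): x = 0x1f0 is the 5-bit field 0b11111
   deposited at bit 4.  lsb_mask = 0x10, (x >> 4) + lsb_mask
   = 0x1f + 0x10 = 0x2f, and masking with ~(lsb_mask - 1) leaves
   t = 0x20, a power of two, so zdepi can generate the constant.  */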

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
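
/* Example (illustrative): a mask of all ones except the low four bits
   (the 1....10....0 pattern) gives ~mask = 0xf; adding its lowest set
   bit yields 0x10, a power of two, so the test accepts it.  Patterns
   such as 1..10..01..1 pass the same test after the inversion.  */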

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
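
/* Example (illustrative): mask = 0x00ff0000 is a single contiguous
   run of ones; adding its lowest set bit (0x10000) yields 0x01000000,
   a power of two, so depi works.  mask = 0x00ff00ff has two runs and
   fails the test.  */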
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx_insn *insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          /* Make sure we have label and not a note.  */
          if (LABEL_P (orig))
            LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx_insn *insn;
      rtx tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (Pmode, base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, tmp, t1, t2, tp;
  rtx_insn *insn;

  /* Currently, we can't handle anything but a SYMBOL_REF.  */
  if (GET_CODE (addr) != SYMBOL_REF)
    return addr;

  switch (SYMBOL_REF_TLS_MODEL (addr))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
        emit_insn (gen_tgd_load_pic (tmp, addr));
      else
        emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
        emit_insn (gen_tld_load_pic (tmp, addr));
      else
        emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
                          gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                          UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
        emit_insn (gen_tie_load_pic (tmp, addr));
      else
        emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Helper for hppa_legitimize_address.  Given X, return true if it
   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.

   These respectively represent canonical shift-add rtxs or scaled
   memory addresses.  */
static bool
mem_shadd_or_shadd_rtx_p (rtx x)
{
  return ((GET_CODE (x) == ASHIFT
           || GET_CODE (x) == MULT)
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && ((GET_CODE (x) == ASHIFT
               && pa_shadd_constant_p (INTVAL (XEXP (x, 1))))
              || (GET_CODE (x) == MULT
                  && pa_mem_shadd_constant_p (INTVAL (XEXP (x, 1))))));
}
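
/* For instance (illustrative), (ashift (reg) (const_int 2)) and
   (mult (reg) (const_int 4)) both satisfy this predicate: the former
   is the canonical shift-add form and the latter the scaled memory
   address form of the same shadd scale.  */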

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Note that the addresses passed into hppa_legitimize_address always
   come from a MEM, so we only have to match the MULT form on incoming
   addresses.  But to be future proof we also match the ASHIFT form.

   However, this routine always places those shift-add sequences into
   registers, so we have to generate the ASHIFT form as our output.

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
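
/* A worked example of the transformation above (illustrative): for
   memory (X + 0x4123) in an integer mode, mask = 0x3fff and
   0x4123 & 0x3fff = 0x123, less than half the boundary, so we round
   down to Y = 0x4000.  Z = X + 0x4000 goes into a register and the
   reference becomes memory (Z + 0x123), letting CSE share Z among
   nearby references.  */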

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (tls_referenced_p (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (mem_shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      /* If we were given a MULT, we must fix the constant
         as we're going to create the ASHIFT form.  */
      int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));
      if (GET_CODE (XEXP (x, 0)) == MULT)
        shift_val = exact_log2 (shift_val);

      rtx reg1, reg2;
      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_ASHIFT (Pmode, reg2,
                                                      GEN_INT (shift_val)),
                                      reg1));
    }

  /* Similarly for (plus (plus (mult (a) (mem_shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && mem_shadd_or_shadd_rtx_p (XEXP (XEXP (x, 0), 0))
      && (mode == SFmode || mode == DFmode))
    {
      int shift_val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));

      /* If we were given a MULT, we must fix the constant
         as we're going to create the ASHIFT form.  */
      if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
        shift_val = exact_log2 (shift_val);

      /* Try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then pa_emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_ASHIFT (Pmode,
                                              XEXP (XEXP (XEXP (x, 0), 0), 0),
                                              GEN_INT (shift_val)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));
          val /= (1 << shift_val);

          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_ASHIFT (Pmode, reg1,
                                                    GEN_INT (shift_val)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode,
                            gen_rtx_PLUS (Pmode,
                                          gen_rtx_ASHIFT (Pmode, reg2,
                                                          GEN_INT (shift_val)),
                                          reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_ASHIFT (Pmode, reg1,
                                                      GEN_INT (shift_val)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (mem_shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095)
            {
              int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

              /* If we were given a MULT, we must fix the constant
                 as we're going to create the ASHIFT form.  */
              if (GET_CODE (XEXP (x, 0)) == MULT)
                shift_val = exact_log2 (shift_val);

              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_ASHIFT (Pmode,
                                                         reg2,
                                                         GEN_INT (shift_val)),
                                         reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && mem_shadd_or_shadd_rtx_p (XEXP (x, 0))
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % (1 << INTVAL (XEXP (XEXP (x, 0), 1))) == 0)
            {
              int shift_val = INTVAL (XEXP (XEXP (x, 0), 1));

              /* If we were given a MULT, we must fix the constant
                 as we're going to create the ASHIFT form.  */
              if (GET_CODE (XEXP (x, 0)) == MULT)
                shift_val = exact_log2 (shift_val);

              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_ASHIFT (Pmode, regx2,
                                                         GEN_INT (shift_val)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, machine_mode mode ATTRIBUTE_UNUSED,
                   addr_space_t as ATTRIBUTE_UNUSED,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, machine_mode mode, int outer_code,
                int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
        factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = factor * factor * COSTS_N_INSNS (8);
      else
        *total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (mode) / 4;
      if (factor == 0)
        factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (3);
          return true;
        }

      /* A size N times larger than UNITS_PER_WORD needs N times as
         many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
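
/* For instance (illustrative numbers): a DImode integer multiply on a
   32-bit target gets factor = 8 / 4 = 2, so it costs
   2 * 2 * COSTS_N_INSNS (8) when a PA 1.1 FPU is available, which
   reflects the roughly quadratic cost of synthesizing wide
   multiplies.  */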

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
1665 the code which tracks sets/uses for delete_output_reload. */
1666 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1667 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
1668 SUBREG_BYTE (operand1));
1669 operand1 = alter_subreg (&temp, true);
1670 }
1671
1672 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1673 && ((tem = find_replacement (&XEXP (operand0, 0)))
1674 != XEXP (operand0, 0)))
1675 operand0 = replace_equiv_address (operand0, tem);
1676
1677 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1678 && ((tem = find_replacement (&XEXP (operand1, 0)))
1679 != XEXP (operand1, 0)))
1680 operand1 = replace_equiv_address (operand1, tem);
1681
1682 /* Handle secondary reloads for loads/stores of FP registers from
1683 REG+D addresses where D does not fit in 5 or 14 bits, including
1684 (subreg (mem (addr))) cases. */
1685 if (scratch_reg
1686 && FP_REG_P (operand0)
1687 && (MEM_P (operand1)
1688 || (GET_CODE (operand1) == SUBREG
1689 && MEM_P (XEXP (operand1, 0)))))
1690 {
1691 if (GET_CODE (operand1) == SUBREG)
1692 operand1 = XEXP (operand1, 0);
1693
1694 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1695 it in WORD_MODE regardless of what mode it was originally given
1696 to us. */
1697 scratch_reg = force_mode (word_mode, scratch_reg);
1698
1699 /* D might not fit in 14 bits either; for such cases load D into
1700 scratch reg. */
1701 if (reg_plus_base_memory_operand (operand1, GET_MODE (operand1))
1702 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
1703 {
1704 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1705 emit_move_insn (scratch_reg,
1706 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1707 Pmode,
1708 XEXP (XEXP (operand1, 0), 0),
1709 scratch_reg));
1710 }
1711 else
1712 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1713 emit_insn (gen_rtx_SET (operand0,
1714 replace_equiv_address (operand1, scratch_reg)));
1715 return 1;
1716 }
1717 else if (scratch_reg
1718 && FP_REG_P (operand1)
1719 && (MEM_P (operand0)
1720 || (GET_CODE (operand0) == SUBREG
1721 && MEM_P (XEXP (operand0, 0)))))
1722 {
1723 if (GET_CODE (operand0) == SUBREG)
1724 operand0 = XEXP (operand0, 0);
1725
1726 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1727 it in WORD_MODE regardless of what mode it was originally given
1728 to us. */
1729 scratch_reg = force_mode (word_mode, scratch_reg);
1730
1731 /* D might not fit in 14 bits either; for such cases load D into
1732 scratch reg. */
1733 if (reg_plus_base_memory_operand (operand0, GET_MODE (operand0))
1734 && !INT_14_BITS (XEXP (XEXP (operand0, 0), 1)))
1735 {
1736 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1737 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1738 0)),
1739 Pmode,
1740 XEXP (XEXP (operand0, 0),
1741 0),
1742 scratch_reg));
1743 }
1744 else
1745 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1746 emit_insn (gen_rtx_SET (replace_equiv_address (operand0, scratch_reg),
1747 operand1));
1748 return 1;
1749 }
1750 /* Handle secondary reloads for loads of FP registers from constant
1751 expressions by forcing the constant into memory. For the most part,
1752 this is only necessary for SImode and DImode.
1753
1754 Use scratch_reg to hold the address of the memory location. */
1755 else if (scratch_reg
1756 && CONSTANT_P (operand1)
1757 && fp_reg_operand (operand0, mode))
1758 {
1759 rtx const_mem, xoperands[2];
1760
1761 if (operand1 == CONST0_RTX (mode))
1762 {
1763 emit_insn (gen_rtx_SET (operand0, operand1));
1764 return 1;
1765 }
1766
1767 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1768 it in WORD_MODE regardless of what mode it was originally given
1769 to us. */
1770 scratch_reg = force_mode (word_mode, scratch_reg);
1771
1772 /* Force the constant into memory and put the address of the
1773 memory location into scratch_reg. */
1774 const_mem = force_const_mem (mode, operand1);
1775 xoperands[0] = scratch_reg;
1776 xoperands[1] = XEXP (const_mem, 0);
1777 pa_emit_move_sequence (xoperands, Pmode, 0);
1778
1779 /* Now load the destination register. */
1780 emit_insn (gen_rtx_SET (operand0,
1781 replace_equiv_address (const_mem, scratch_reg)));
1782 return 1;
1783 }
1784 /* Handle secondary reloads for SAR. These occur when trying to load
1785 the SAR from memory or a constant. */
1786 else if (scratch_reg
1787 && GET_CODE (operand0) == REG
1788 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1789 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1790 && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
1791 {
1792 /* The memory address might be invalid (e.g., D doesn't fit in 14
1793 bits); for such cases build the address in the scratch reg. */
1794 if (GET_CODE (operand1) == MEM
1795 && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
1796 {
1797 /* We are reloading the address into the scratch register, so we
1798 want to make sure the scratch register is a full register. */
1799 scratch_reg = force_mode (word_mode, scratch_reg);
1800
1801 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1802 emit_move_insn (scratch_reg,
1803 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1804 Pmode,
1805 XEXP (XEXP (operand1, 0), 0),
1806 scratch_reg));
1808
1809 /* Now we are going to load the scratch register from memory;
1810 we want to load it in the same width as the original MEM,
1811 which must be the same as the width of the ultimate destination,
1812 OPERAND0. */
1813 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1814
1815 emit_move_insn (scratch_reg,
1816 replace_equiv_address (operand1, scratch_reg));
1817 }
1818 else
1819 {
1820 /* We want to load the scratch register using the same mode as
1821 the ultimate destination. */
1822 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1823
1824 emit_move_insn (scratch_reg, operand1);
1825 }
1826
1827 /* And emit the insn to set the ultimate destination. We know that
1828 the scratch register has the same mode as the destination at this
1829 point. */
1830 emit_move_insn (operand0, scratch_reg);
1831 return 1;
1832 }
1833 /* Handle the most common case: storing into a register. */
1834 else if (register_operand (operand0, mode))
1835 {
1836 /* Legitimize TLS symbol references. This happens for references
1837 that aren't legitimate constants. */
1838 if (PA_SYMBOL_REF_TLS_P (operand1))
1839 operand1 = legitimize_tls_address (operand1);
1840
1841 if (register_operand (operand1, mode)
1842 || (GET_CODE (operand1) == CONST_INT
1843 && pa_cint_ok_for_move (UINTVAL (operand1)))
1844 || (operand1 == CONST0_RTX (mode))
1845 || (GET_CODE (operand1) == HIGH
1846 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1847 /* Only `general_operands' can come here, so MEM is ok. */
1848 || GET_CODE (operand1) == MEM)
1849 {
1850 /* Various sets are created during RTL generation which don't
1851 have the REG_POINTER flag correctly set. After the CSE pass,
1852 instruction recognition can fail if we don't consistently
1853 set this flag when performing register copies. This should
1854 also improve the opportunities for creating insns that use
1855 unscaled indexing. */
1856 if (REG_P (operand0) && REG_P (operand1))
1857 {
1858 if (REG_POINTER (operand1)
1859 && !REG_POINTER (operand0)
1860 && !HARD_REGISTER_P (operand0))
1861 copy_reg_pointer (operand0, operand1);
1862 }
1863
1864 /* When MEMs are broken out, the REG_POINTER flag doesn't
1865 get set. In some cases, we can set the REG_POINTER flag
1866 from the declaration for the MEM. */
1867 if (REG_P (operand0)
1868 && GET_CODE (operand1) == MEM
1869 && !REG_POINTER (operand0))
1870 {
1871 tree decl = MEM_EXPR (operand1);
1872
1873 /* Set the register pointer flag and register alignment
1874 if the declaration for this memory reference is a
1875 pointer type. */
1876 if (decl)
1877 {
1878 tree type;
1879
1880 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1881 tree operand 1. */
1882 if (TREE_CODE (decl) == COMPONENT_REF)
1883 decl = TREE_OPERAND (decl, 1);
1884
1885 type = TREE_TYPE (decl);
1886 type = strip_array_types (type);
1887
1888 if (POINTER_TYPE_P (type))
1889 {
1890 int align;
1891
1892 type = TREE_TYPE (type);
1893 /* Using TYPE_ALIGN_OK is rather conservative as
1894 only the Ada frontend actually sets it. */
1895 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1896 : BITS_PER_UNIT);
1897 mark_reg_pointer (operand0, align);
1898 }
1899 }
1900 }
1901
1902 emit_insn (gen_rtx_SET (operand0, operand1));
1903 return 1;
1904 }
1905 }
1906 else if (GET_CODE (operand0) == MEM)
1907 {
1908 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1909 && !(reload_in_progress || reload_completed))
1910 {
1911 rtx temp = gen_reg_rtx (DFmode);
1912
1913 emit_insn (gen_rtx_SET (temp, operand1));
1914 emit_insn (gen_rtx_SET (operand0, temp));
1915 return 1;
1916 }
1917 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1918 {
1919 /* Run this case quickly. */
1920 emit_insn (gen_rtx_SET (operand0, operand1));
1921 return 1;
1922 }
1923 if (! (reload_in_progress || reload_completed))
1924 {
1925 operands[0] = validize_mem (operand0);
1926 operands[1] = operand1 = force_reg (mode, operand1);
1927 }
1928 }
1929
1930 /* Simplify the source if we need to.
1931 Note we do have to handle function labels here, even though we do
1932 not consider them legitimate constants. Loop optimizations can
1933 call the emit_move_xxx routines with one as a source. */
1934 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1935 || (GET_CODE (operand1) == HIGH
1936 && symbolic_operand (XEXP (operand1, 0), mode))
1937 || function_label_operand (operand1, VOIDmode)
1938 || tls_referenced_p (operand1))
1939 {
1940 int ishighonly = 0;
1941
1942 if (GET_CODE (operand1) == HIGH)
1943 {
1944 ishighonly = 1;
1945 operand1 = XEXP (operand1, 0);
1946 }
1947 if (symbolic_operand (operand1, mode))
1948 {
1949 /* Argh. The assembler and linker can't handle arithmetic
1950 involving plabels.
1951
1952 So we force the plabel into memory, load operand0 from
1953 the memory location, then add in the constant part. */
1954 if ((GET_CODE (operand1) == CONST
1955 && GET_CODE (XEXP (operand1, 0)) == PLUS
1956 && function_label_operand (XEXP (XEXP (operand1, 0), 0),
1957 VOIDmode))
1958 || function_label_operand (operand1, VOIDmode))
1959 {
1960 rtx temp, const_part;
1961
1962 /* Figure out what (if any) scratch register to use. */
1963 if (reload_in_progress || reload_completed)
1964 {
1965 scratch_reg = scratch_reg ? scratch_reg : operand0;
1966 /* SCRATCH_REG will hold an address and maybe the actual
1967 data. We want it in WORD_MODE regardless of what mode it
1968 was originally given to us. */
1969 scratch_reg = force_mode (word_mode, scratch_reg);
1970 }
1971 else if (flag_pic)
1972 scratch_reg = gen_reg_rtx (Pmode);
1973
1974 if (GET_CODE (operand1) == CONST)
1975 {
1976 /* Save away the constant part of the expression. */
1977 const_part = XEXP (XEXP (operand1, 0), 1);
1978 gcc_assert (GET_CODE (const_part) == CONST_INT);
1979
1980 /* Force the function label into memory. */
1981 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1982 }
1983 else
1984 {
1985 /* No constant part. */
1986 const_part = NULL_RTX;
1987
1988 /* Force the function label into memory. */
1989 temp = force_const_mem (mode, operand1);
1990 }
1991
1992
1993 /* Get the address of the memory location. PIC-ify it if
1994 necessary. */
1995 temp = XEXP (temp, 0);
1996 if (flag_pic)
1997 temp = legitimize_pic_address (temp, mode, scratch_reg);
1998
1999 /* Put the address of the memory location into our destination
2000 register. */
2001 operands[1] = temp;
2002 pa_emit_move_sequence (operands, mode, scratch_reg);
2003
2004 /* Now load from the memory location into our destination
2005 register. */
2006 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2007 pa_emit_move_sequence (operands, mode, scratch_reg);
2008
2009 /* And add back in the constant part. */
2010 if (const_part != NULL_RTX)
2011 expand_inc (operand0, const_part);
2012
2013 return 1;
2014 }
2015
2016 if (flag_pic)
2017 {
2018 rtx_insn *insn;
2019 rtx temp;
2020
2021 if (reload_in_progress || reload_completed)
2022 {
2023 temp = scratch_reg ? scratch_reg : operand0;
2024 /* TEMP will hold an address and maybe the actual
2025 data. We want it in WORD_MODE regardless of what mode it
2026 was originally given to us. */
2027 temp = force_mode (word_mode, temp);
2028 }
2029 else
2030 temp = gen_reg_rtx (Pmode);
2031
2032 /* Force (const (plus (symbol) (const_int))) to memory
2033 if the const_int will not fit in 14 bits. Although
2034 this requires a relocation, the instruction sequence
2035 needed to load the value is shorter. */
2036 if (GET_CODE (operand1) == CONST
2037 && GET_CODE (XEXP (operand1, 0)) == PLUS
2038 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2039 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1)))
2040 {
2041 rtx x, m = force_const_mem (mode, operand1);
2042
2043 x = legitimize_pic_address (XEXP (m, 0), mode, temp);
2044 x = replace_equiv_address (m, x);
2045 insn = emit_move_insn (operand0, x);
2046 }
2047 else
2048 {
2049 operands[1] = legitimize_pic_address (operand1, mode, temp);
2050 if (REG_P (operand0) && REG_P (operands[1]))
2051 copy_reg_pointer (operand0, operands[1]);
2052 insn = emit_move_insn (operand0, operands[1]);
2053 }
2054
2055 /* Put a REG_EQUAL note on this insn. */
2056 set_unique_reg_note (insn, REG_EQUAL, operand1);
2057 }
2058 /* On the HPPA, references to data space are supposed to use dp,
2059 register 27, but showing it in the RTL inhibits various cse
2060 and loop optimizations. */
2061 else
2062 {
2063 rtx temp, set;
2064
2065 if (reload_in_progress || reload_completed)
2066 {
2067 temp = scratch_reg ? scratch_reg : operand0;
2068 /* TEMP will hold an address and maybe the actual
2069 data. We want it in WORD_MODE regardless of what mode it
2070 was originally given to us. */
2071 temp = force_mode (word_mode, temp);
2072 }
2073 else
2074 temp = gen_reg_rtx (mode);
2075
2076 /* Loading a SYMBOL_REF into a register makes that register
2077 safe to be used as the base in an indexed address.
2078
2079 Don't mark hard registers though. That loses. */
2080 if (GET_CODE (operand0) == REG
2081 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2082 mark_reg_pointer (operand0, BITS_PER_UNIT);
2083 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2084 mark_reg_pointer (temp, BITS_PER_UNIT);
2085
2086 if (ishighonly)
2087 set = gen_rtx_SET (operand0, temp);
2088 else
2089 set = gen_rtx_SET (operand0,
2090 gen_rtx_LO_SUM (mode, temp, operand1));
2091
2092 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2093 emit_insn (set);
2094
2095 }
2096 return 1;
2097 }
2098 else if (tls_referenced_p (operand1))
2099 {
2100 rtx tmp = operand1;
2101 rtx addend = NULL;
2102
2103 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2104 {
2105 addend = XEXP (XEXP (tmp, 0), 1);
2106 tmp = XEXP (XEXP (tmp, 0), 0);
2107 }
2108
2109 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2110 tmp = legitimize_tls_address (tmp);
2111 if (addend)
2112 {
2113 tmp = gen_rtx_PLUS (mode, tmp, addend);
2114 tmp = force_operand (tmp, operands[0]);
2115 }
2116 operands[1] = tmp;
2117 }
2118 else if (GET_CODE (operand1) != CONST_INT
2119 || !pa_cint_ok_for_move (UINTVAL (operand1)))
2120 {
2121 rtx temp;
2122 rtx_insn *insn;
2123 rtx op1 = operand1;
2124 HOST_WIDE_INT value = 0;
2125 HOST_WIDE_INT insv = 0;
2126 int insert = 0;
2127
2128 if (GET_CODE (operand1) == CONST_INT)
2129 value = INTVAL (operand1);
2130
2131 if (TARGET_64BIT
2132 && GET_CODE (operand1) == CONST_INT
2133 && HOST_BITS_PER_WIDE_INT > 32
2134 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2135 {
2136 HOST_WIDE_INT nval;
2137
2138 /* Extract the low order 32 bits of the value and sign extend.
2139 If the new value is the same as the original value, we can
2140 use the original value as-is. If the new value is
2141 different, we use it and insert the most significant 32 bits
2142 of the original value into the final result. */
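/* For example, VALUE = 0x123456789 has sign-extended low part
NVAL = 0x23456789; since NVAL != VALUE, we load NVAL first and
then insert INSV = 0x1 (the upper 32 bits of VALUE) into the
upper 32 bits of the result. */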
2143 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2144 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2145 if (value != nval)
2146 {
2147 #if HOST_BITS_PER_WIDE_INT > 32
2148 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2149 #endif
2150 insert = 1;
2151 value = nval;
2152 operand1 = GEN_INT (nval);
2153 }
2154 }
2155
2156 if (reload_in_progress || reload_completed)
2157 temp = scratch_reg ? scratch_reg : operand0;
2158 else
2159 temp = gen_reg_rtx (mode);
2160
2161 /* We don't directly split DImode constants on 32-bit targets
2162 because PLUS uses an 11-bit immediate and the insn sequence
2163 generated is not as efficient as the one using HIGH/LO_SUM. */
2164 if (GET_CODE (operand1) == CONST_INT
2165 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2166 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2167 && !insert)
2168 {
2169 /* Directly break constant into high and low parts. This
2170 provides better optimization opportunities because various
2171 passes recognize constants split with PLUS but not LO_SUM.
2172 We use a 14-bit signed low part except when the addition
2173 of 0x4000 to the high part might change the sign of the
2174 high part. */
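/* As a worked example: VALUE = 0x12343fff gives LOW = 0x3fff
>= 0x2000, so HIGH becomes 0x12340000 + 0x4000 = 0x12344000
and LOW = VALUE - HIGH = -1; HIGH then loads with a single
ldil (its low eleven bits are zero) and LOW folds into a
14-bit immediate. */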
2175 HOST_WIDE_INT low = value & 0x3fff;
2176 HOST_WIDE_INT high = value & ~ 0x3fff;
2177
2178 if (low >= 0x2000)
2179 {
2180 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2181 high += 0x2000;
2182 else
2183 high += 0x4000;
2184 }
2185
2186 low = value - high;
2187
2188 emit_insn (gen_rtx_SET (temp, GEN_INT (high)));
2189 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2190 }
2191 else
2192 {
2193 emit_insn (gen_rtx_SET (temp, gen_rtx_HIGH (mode, operand1)));
2194 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2195 }
2196
2197 insn = emit_move_insn (operands[0], operands[1]);
2198
2199 /* Now insert the most significant 32 bits of the value
2200 into the register. When we don't have a second register
2201 available, it could take up to nine instructions to load
2202 a 64-bit integer constant. Prior to reload, we force
2203 constants that would take more than three instructions
2204 to load to the constant pool. During and after reload,
2205 we have to handle all possible values. */
2206 if (insert)
2207 {
2208 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2209 register and the value to be inserted is outside the
2210 range that can be loaded with three depdi instructions. */
2211 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2212 {
2213 operand1 = GEN_INT (insv);
2214
2215 emit_insn (gen_rtx_SET (temp,
2216 gen_rtx_HIGH (mode, operand1)));
2217 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2218 if (mode == DImode)
2219 insn = emit_insn (gen_insvdi (operand0, GEN_INT (32),
2220 const0_rtx, temp));
2221 else
2222 insn = emit_insn (gen_insvsi (operand0, GEN_INT (32),
2223 const0_rtx, temp));
2224 }
2225 else
2226 {
2227 int len = 5, pos = 27;
2228
2229 /* Insert the bits using the depdi instruction. */
2230 while (pos >= 0)
2231 {
2232 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2233 HOST_WIDE_INT sign = v5 < 0;
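/* ((insv & 31) ^ 16) - 16 sign-extends the low five bits:
e.g. insv = 26 (0b11010) gives (26 ^ 16) - 16 = -6, which
is 0b11010 read as a signed 5-bit value. */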
2234
2235 /* Left extend the insertion. */
2236 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2237 while (pos > 0 && (insv & 1) == sign)
2238 {
2239 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2240 len += 1;
2241 pos -= 1;
2242 }
2243
2244 if (mode == DImode)
2245 insn = emit_insn (gen_insvdi (operand0,
2246 GEN_INT (len),
2247 GEN_INT (pos),
2248 GEN_INT (v5)));
2249 else
2250 insn = emit_insn (gen_insvsi (operand0,
2251 GEN_INT (len),
2252 GEN_INT (pos),
2253 GEN_INT (v5)));
2254
2255 len = pos > 0 && pos < 5 ? pos : 5;
2256 pos -= len;
2257 }
2258 }
2259 }
2260
2261 set_unique_reg_note (insn, REG_EQUAL, op1);
2262
2263 return 1;
2264 }
2265 }
2266 /* Now have insn-emit do whatever it normally does. */
2267 return 0;
2268 }
2269
2270 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2271 it will need a link/runtime reloc). */
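/* For example, a static initializer such as "&foo + 4" (for some object
foo) is a POINTER_PLUS_EXPR over an ADDR_EXPR and thus needs a reloc,
while a CONSTRUCTOR containing only integer constants does not. */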
2272
2273 int
2274 pa_reloc_needed (tree exp)
2275 {
2276 int reloc = 0;
2277
2278 switch (TREE_CODE (exp))
2279 {
2280 case ADDR_EXPR:
2281 return 1;
2282
2283 case POINTER_PLUS_EXPR:
2284 case PLUS_EXPR:
2285 case MINUS_EXPR:
2286 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2287 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2288 break;
2289
2290 CASE_CONVERT:
2291 case NON_LVALUE_EXPR:
2292 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2293 break;
2294
2295 case CONSTRUCTOR:
2296 {
2297 tree value;
2298 unsigned HOST_WIDE_INT ix;
2299
2300 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2301 if (value)
2302 reloc |= pa_reloc_needed (value);
2303 }
2304 break;
2305
2306 case ERROR_MARK:
2307 break;
2308
2309 default:
2310 break;
2311 }
2312 return reloc;
2313 }
2314
2315 \f
2316 /* Return the best assembler insn template
2317 for moving operands[1] into operands[0] as a fullword. */
2318 const char *
2319 pa_singlemove_string (rtx *operands)
2320 {
2321 HOST_WIDE_INT intval;
2322
2323 if (GET_CODE (operands[0]) == MEM)
2324 return "stw %r1,%0";
2325 if (GET_CODE (operands[1]) == MEM)
2326 return "ldw %1,%0";
2327 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2328 {
2329 long i;
2330
2331 gcc_assert (GET_MODE (operands[1]) == SFmode);
2332
2333 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2334 bit pattern. */
2335 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (operands[1]), i);
2336
2337 operands[1] = GEN_INT (i);
2338 /* Fall through to CONST_INT case. */
2339 }
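/* For a CONST_INT, pick the cheapest template: e.g. 0x1234 fits in
14 bits and needs only an ldi; 0x12345000 has its low eleven bits
clear and needs only an ldil; a left-shifted, sign-extended 5-bit
constant can use zdepi; anything else takes the two-insn ldil/ldo
pair. */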
2340 if (GET_CODE (operands[1]) == CONST_INT)
2341 {
2342 intval = INTVAL (operands[1]);
2343
2344 if (VAL_14_BITS_P (intval))
2345 return "ldi %1,%0";
2346 else if ((intval & 0x7ff) == 0)
2347 return "ldil L'%1,%0";
2348 else if (pa_zdepi_cint_p (intval))
2349 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2350 else
2351 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2352 }
2353 return "copy %1,%0";
2354 }
2355 \f
2356
2357 /* Compute position (in OP[1]) and width (in OP[2])
2358 useful for copying IMM to a register using the zdepi
2359 instructions. Store the immediate value to insert in OP[0]. */
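/* For example, IMM = 0x3e0 (binary 1111100000) has lsb = 5 and a
five-bit all-ones field, so this returns OP[0] = -1,
OP[1] = 31 - 5 = 26 and OP[2] = 5; "zdepi -1,26,5" then deposits
five ones into bits 5..9, reconstructing 0x3e0. */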
2360 static void
2361 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2362 {
2363 int lsb, len;
2364
2365 /* Find the least significant set bit in IMM. */
2366 for (lsb = 0; lsb < 32; lsb++)
2367 {
2368 if ((imm & 1) != 0)
2369 break;
2370 imm >>= 1;
2371 }
2372
2373 /* Choose variants based on *sign* of the 5-bit field. */
2374 if ((imm & 0x10) == 0)
2375 len = (lsb <= 28) ? 4 : 32 - lsb;
2376 else
2377 {
2378 /* Find the width of the bitstring in IMM. */
2379 for (len = 5; len < 32 - lsb; len++)
2380 {
2381 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2382 break;
2383 }
2384
2385 /* Sign extend IMM as a 5-bit value. */
2386 imm = (imm & 0xf) - 0x10;
2387 }
2388
2389 op[0] = imm;
2390 op[1] = 31 - lsb;
2391 op[2] = len;
2392 }
2393
2394 /* Compute position (in OP[1]) and width (in OP[2])
2395 useful for copying IMM to a register using the depdi,z
2396 instructions. Store the immediate value to insert in OP[0]. */
2397
2398 static void
2399 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2400 {
2401 int lsb, len, maxlen;
2402
2403 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2404
2405 /* Find the least significant set bit in IMM. */
2406 for (lsb = 0; lsb < maxlen; lsb++)
2407 {
2408 if ((imm & 1) != 0)
2409 break;
2410 imm >>= 1;
2411 }
2412
2413 /* Choose variants based on *sign* of the 5-bit field. */
2414 if ((imm & 0x10) == 0)
2415 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2416 else
2417 {
2418 /* Find the width of the bitstring in IMM. */
2419 for (len = 5; len < maxlen - lsb; len++)
2420 {
2421 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2422 break;
2423 }
2424
2425 /* Extend length if host is narrow and IMM is negative. */
2426 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2427 len += 32;
2428
2429 /* Sign extend IMM as a 5-bit value. */
2430 imm = (imm & 0xf) - 0x10;
2431 }
2432
2433 op[0] = imm;
2434 op[1] = 63 - lsb;
2435 op[2] = len;
2436 }
2437
2438 /* Output assembler code to perform a doubleword move insn
2439 with operands OPERANDS. */
2440
2441 const char *
2442 pa_output_move_double (rtx *operands)
2443 {
2444 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2445 rtx latehalf[2];
2446 rtx addreg0 = 0, addreg1 = 0;
2447 int highonly = 0;
2448
2449 /* First classify both operands. */
2450
2451 if (REG_P (operands[0]))
2452 optype0 = REGOP;
2453 else if (offsettable_memref_p (operands[0]))
2454 optype0 = OFFSOP;
2455 else if (GET_CODE (operands[0]) == MEM)
2456 optype0 = MEMOP;
2457 else
2458 optype0 = RNDOP;
2459
2460 if (REG_P (operands[1]))
2461 optype1 = REGOP;
2462 else if (CONSTANT_P (operands[1]))
2463 optype1 = CNSTOP;
2464 else if (offsettable_memref_p (operands[1]))
2465 optype1 = OFFSOP;
2466 else if (GET_CODE (operands[1]) == MEM)
2467 optype1 = MEMOP;
2468 else
2469 optype1 = RNDOP;
2470
2471 /* Check for cases that the operand constraints are not supposed
2472 to allow. */
2473 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2474
2475 /* Handle copies between general and floating registers. */
2476
2477 if (optype0 == REGOP && optype1 == REGOP
2478 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2479 {
2480 if (FP_REG_P (operands[0]))
2481 {
2482 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2483 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2484 return "{fldds|fldd} -16(%%sp),%0";
2485 }
2486 else
2487 {
2488 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2489 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2490 return "{ldws|ldw} -12(%%sp),%R0";
2491 }
2492 }
2493
2494 /* Handle auto-decrementing and auto-incrementing loads and stores
2495 specifically, since the structure of the function doesn't work
2496 for them without major modification. Do it better when we teach
2497 this port about the general inc/dec addressing of the PA.
2498 (This was written by tege. Chide him if it doesn't work.) */
2499
2500 if (optype0 == MEMOP)
2501 {
2502 /* We have to output the address syntax ourselves, since print_operand
2503 doesn't deal with the addresses we want to use. Fix this later. */
2504
2505 rtx addr = XEXP (operands[0], 0);
2506 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2507 {
2508 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2509
2510 operands[0] = XEXP (addr, 0);
2511 gcc_assert (GET_CODE (operands[1]) == REG
2512 && GET_CODE (operands[0]) == REG);
2513
2514 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2515
2516 /* No overlap between high target register and address
2517 register. (We do this in a non-obvious way to
2518 save a register file writeback) */
2519 if (GET_CODE (addr) == POST_INC)
2520 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2521 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2522 }
2523 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2524 {
2525 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2526
2527 operands[0] = XEXP (addr, 0);
2528 gcc_assert (GET_CODE (operands[1]) == REG
2529 && GET_CODE (operands[0]) == REG);
2530
2531 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2532 /* No overlap between high target register and address
2533 register. (We do this in a non-obvious way to save a
2534 register file writeback) */
2535 if (GET_CODE (addr) == PRE_INC)
2536 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2537 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2538 }
2539 }
2540 if (optype1 == MEMOP)
2541 {
2542 /* We have to output the address syntax ourselves, since print_operand
2543 doesn't deal with the addresses we want to use. Fix this later. */
2544
2545 rtx addr = XEXP (operands[1], 0);
2546 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2547 {
2548 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2549
2550 operands[1] = XEXP (addr, 0);
2551 gcc_assert (GET_CODE (operands[0]) == REG
2552 && GET_CODE (operands[1]) == REG);
2553
2554 if (!reg_overlap_mentioned_p (high_reg, addr))
2555 {
2556 /* No overlap between high target register and address
2557 register. (We do this in a non-obvious way to
2558 save a register file writeback) */
2559 if (GET_CODE (addr) == POST_INC)
2560 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2561 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2562 }
2563 else
2564 {
2565 /* This is an undefined situation. We should load into the
2566 address register *and* update that register. Probably
2567 we don't need to handle this at all. */
2568 if (GET_CODE (addr) == POST_INC)
2569 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2570 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2571 }
2572 }
2573 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2574 {
2575 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2576
2577 operands[1] = XEXP (addr, 0);
2578 gcc_assert (GET_CODE (operands[0]) == REG
2579 && GET_CODE (operands[1]) == REG);
2580
2581 if (!reg_overlap_mentioned_p (high_reg, addr))
2582 {
2583 /* No overlap between high target register and address
2584 register. (We do this in a non-obvious way to
2585 save a register file writeback) */
2586 if (GET_CODE (addr) == PRE_INC)
2587 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2588 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2589 }
2590 else
2591 {
2592 /* This is an undefined situation. We should load into the
2593 address register *and* update that register. Probably
2594 we don't need to handle this at all. */
2595 if (GET_CODE (addr) == PRE_INC)
2596 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2597 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2598 }
2599 }
2600 else if (GET_CODE (addr) == PLUS
2601 && GET_CODE (XEXP (addr, 0)) == MULT)
2602 {
2603 rtx xoperands[4];
2604
2605 /* Load address into left half of destination register. */
2606 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2607 xoperands[1] = XEXP (addr, 1);
2608 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2609 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2610 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2611 xoperands);
2612 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2613 }
2614 else if (GET_CODE (addr) == PLUS
2615 && REG_P (XEXP (addr, 0))
2616 && REG_P (XEXP (addr, 1)))
2617 {
2618 rtx xoperands[3];
2619
2620 /* Load address into left half of destination register. */
2621 xoperands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
2622 xoperands[1] = XEXP (addr, 0);
2623 xoperands[2] = XEXP (addr, 1);
2624 output_asm_insn ("{addl|add,l} %1,%2,%0",
2625 xoperands);
2626 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2627 }
2628 }
2629
2630 /* If an operand is an unoffsettable memory ref, find a register
2631 we can increment temporarily to make it refer to the second word. */
2632
2633 if (optype0 == MEMOP)
2634 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2635
2636 if (optype1 == MEMOP)
2637 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2638
2639 /* Ok, we can do one word at a time.
2640 Normally we do the low-numbered word first.
2641
2642 In either case, set up in LATEHALF the operands to use
2643 for the high-numbered word and in some cases alter the
2644 operands in OPERANDS to be suitable for the low-numbered word. */
2645
2646 if (optype0 == REGOP)
2647 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2648 else if (optype0 == OFFSOP)
2649 latehalf[0] = adjust_address_nv (operands[0], SImode, 4);
2650 else
2651 latehalf[0] = operands[0];
2652
2653 if (optype1 == REGOP)
2654 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2655 else if (optype1 == OFFSOP)
2656 latehalf[1] = adjust_address_nv (operands[1], SImode, 4);
2657 else if (optype1 == CNSTOP)
2658 {
2659 if (GET_CODE (operands[1]) == HIGH)
2660 {
2661 operands[1] = XEXP (operands[1], 0);
2662 highonly = 1;
2663 }
2664 split_double (operands[1], &operands[1], &latehalf[1]);
2665 }
2666 else
2667 latehalf[1] = operands[1];
2668
2669 /* If the first move would clobber the source of the second one,
2670 do them in the other order.
2671
2672 This can happen in two cases:
2673
2674 mem -> register where the first half of the destination register
2675 is the same register used in the memory's address. Reload
2676 can create such insns.
2677
2678 mem in this case will be either register indirect or register
2679 indirect plus a valid offset.
2680
2681 register -> register move where REGNO(dst) == REGNO(src + 1)
2682 someone (Tim/Tege?) claimed this can happen for parameter loads.
2683
2684 Handle mem -> register case first. */
2685 if (optype0 == REGOP
2686 && (optype1 == MEMOP || optype1 == OFFSOP)
2687 && refers_to_regno_p (REGNO (operands[0]), operands[1]))
2688 {
2689 /* Do the late half first. */
2690 if (addreg1)
2691 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2692 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2693
2694 /* Then clobber. */
2695 if (addreg1)
2696 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2697 return pa_singlemove_string (operands);
2698 }
2699
2700 /* Now handle register -> register case. */
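/* For example, copying the pair (r5,r6) to (r6,r7) hits this case:
moving the first word (r6 = r5) would clobber r6, the source of
the late half, so the late half (r7 = r6) must go first. */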
2701 if (optype0 == REGOP && optype1 == REGOP
2702 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2703 {
2704 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2705 return pa_singlemove_string (operands);
2706 }
2707
2708 /* Normal case: do the two words, low-numbered first. */
2709
2710 output_asm_insn (pa_singlemove_string (operands), operands);
2711
2712 /* Make any unoffsettable addresses point at high-numbered word. */
2713 if (addreg0)
2714 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2715 if (addreg1)
2716 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2717
2718 /* Do high-numbered word. */
2719 if (highonly)
2720 output_asm_insn ("ldil L'%1,%0", latehalf);
2721 else
2722 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2723
2724 /* Undo the adds we just did. */
2725 if (addreg0)
2726 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2727 if (addreg1)
2728 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2729
2730 return "";
2731 }
2732 \f
2733 const char *
2734 pa_output_fp_move_double (rtx *operands)
2735 {
2736 if (FP_REG_P (operands[0]))
2737 {
2738 if (FP_REG_P (operands[1])
2739 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2740 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2741 else
2742 output_asm_insn ("fldd%F1 %1,%0", operands);
2743 }
2744 else if (FP_REG_P (operands[1]))
2745 {
2746 output_asm_insn ("fstd%F0 %1,%0", operands);
2747 }
2748 else
2749 {
2750 rtx xoperands[2];
2751
2752 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2753
2754 /* This is a pain. You have to be prepared to deal with an
2755 arbitrary address here including pre/post increment/decrement.
2756
2757 So avoid this in the MD. */
2758 gcc_assert (GET_CODE (operands[0]) == REG);
2759
2760 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2761 xoperands[0] = operands[0];
2762 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2763 }
2764 return "";
2765 }
2766 \f
2767 /* Return a REG that occurs in ADDR with coefficient 1.
2768 ADDR can be effectively incremented by incrementing REG. */
2769
2770 static rtx
2771 find_addr_reg (rtx addr)
2772 {
2773 while (GET_CODE (addr) == PLUS)
2774 {
2775 if (GET_CODE (XEXP (addr, 0)) == REG)
2776 addr = XEXP (addr, 0);
2777 else if (GET_CODE (XEXP (addr, 1)) == REG)
2778 addr = XEXP (addr, 1);
2779 else if (CONSTANT_P (XEXP (addr, 0)))
2780 addr = XEXP (addr, 1);
2781 else if (CONSTANT_P (XEXP (addr, 1)))
2782 addr = XEXP (addr, 0);
2783 else
2784 gcc_unreachable ();
2785 }
2786 gcc_assert (GET_CODE (addr) == REG);
2787 return addr;
2788 }
2789
2790 /* Emit code to perform a block move.
2791
2792 OPERANDS[0] is the destination pointer as a REG, clobbered.
2793 OPERANDS[1] is the source pointer as a REG, clobbered.
2794 OPERANDS[2] is a register for temporary storage.
2795 OPERANDS[3] is a register for temporary storage.
2796 OPERANDS[4] is the size as a CONST_INT
2797 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2798 OPERANDS[6] is another temporary register. */
2799
2800 const char *
2801 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2802 {
2803 int align = INTVAL (operands[5]);
2804 unsigned long n_bytes = INTVAL (operands[4]);
2805
2806 /* We can't move more than a word at a time because the PA
2807 has no integer move insns longer than a word. (Could use fp mem ops?) */
2808 if (align > (TARGET_64BIT ? 8 : 4))
2809 align = (TARGET_64BIT ? 8 : 4);
2810
2811 /* Note that we know each loop below will execute at least twice
2812 (else we would have open-coded the copy). */
2813 switch (align)
2814 {
2815 case 8:
2816 /* Pre-adjust the loop counter. */
2817 operands[4] = GEN_INT (n_bytes - 16);
2818 output_asm_insn ("ldi %4,%2", operands);
2819
2820 /* Copying loop. */
2821 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2822 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2823 output_asm_insn ("std,ma %3,8(%0)", operands);
2824 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2825 output_asm_insn ("std,ma %6,8(%0)", operands);
2826
2827 /* Handle the residual. There could be up to 15 bytes of
2828 residual to copy! */
2829 if (n_bytes % 16 != 0)
2830 {
2831 operands[4] = GEN_INT (n_bytes % 8);
2832 if (n_bytes % 16 >= 8)
2833 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2834 if (n_bytes % 8 != 0)
2835 output_asm_insn ("ldd 0(%1),%6", operands);
2836 if (n_bytes % 16 >= 8)
2837 output_asm_insn ("std,ma %3,8(%0)", operands);
2838 if (n_bytes % 8 != 0)
2839 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2840 }
2841 return "";
2842
2843 case 4:
2844 /* Pre-adjust the loop counter. */
2845 operands[4] = GEN_INT (n_bytes - 8);
2846 output_asm_insn ("ldi %4,%2", operands);
2847
2848 /* Copying loop. */
2849 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2850 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2851 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2852 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2853 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2854
2855 /* Handle the residual. There could be up to 7 bytes of
2856 residual to copy! */
2857 if (n_bytes % 8 != 0)
2858 {
2859 operands[4] = GEN_INT (n_bytes % 4);
2860 if (n_bytes % 8 >= 4)
2861 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2862 if (n_bytes % 4 != 0)
2863 output_asm_insn ("ldw 0(%1),%6", operands);
2864 if (n_bytes % 8 >= 4)
2865 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2866 if (n_bytes % 4 != 0)
2867 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2868 }
2869 return "";
2870
2871 case 2:
2872 /* Pre-adjust the loop counter. */
2873 operands[4] = GEN_INT (n_bytes - 4);
2874 output_asm_insn ("ldi %4,%2", operands);
2875
2876 /* Copying loop. */
2877 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2878 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2879 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2880 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2881 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2882
2883 /* Handle the residual. */
2884 if (n_bytes % 4 != 0)
2885 {
2886 if (n_bytes % 4 >= 2)
2887 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2888 if (n_bytes % 2 != 0)
2889 output_asm_insn ("ldb 0(%1),%6", operands);
2890 if (n_bytes % 4 >= 2)
2891 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2892 if (n_bytes % 2 != 0)
2893 output_asm_insn ("stb %6,0(%0)", operands);
2894 }
2895 return "";
2896
2897 case 1:
2898 /* Pre-adjust the loop counter. */
2899 operands[4] = GEN_INT (n_bytes - 2);
2900 output_asm_insn ("ldi %4,%2", operands);
2901
2902 /* Copying loop. */
2903 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2904 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2905 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2906 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2907 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2908
2909 /* Handle the residual. */
2910 if (n_bytes % 2 != 0)
2911 {
2912 output_asm_insn ("ldb 0(%1),%3", operands);
2913 output_asm_insn ("stb %3,0(%0)", operands);
2914 }
2915 return "";
2916
2917 default:
2918 gcc_unreachable ();
2919 }
2920 }
2921
2922 /* Count the number of insns necessary to handle this block move.
2923
2924 Basic structure is the same as emit_block_move, except that we
2925 count insns rather than emit them. */
2926
2927 static int
2928 compute_movmem_length (rtx_insn *insn)
2929 {
2930 rtx pat = PATTERN (insn);
2931 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2932 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2933 unsigned int n_insns = 0;
2934
2935 /* We can't move more than a word at a time because the PA
2936 has no integer move insns longer than a word. (Could use fp mem ops?) */
2937 if (align > (TARGET_64BIT ? 8 : 4))
2938 align = (TARGET_64BIT ? 8 : 4);
2939
2940 /* The basic copying loop. */
2941 n_insns = 6;
2942
2943 /* Residuals. */
2944 if (n_bytes % (2 * align) != 0)
2945 {
2946 if ((n_bytes % (2 * align)) >= align)
2947 n_insns += 2;
2948
2949 if ((n_bytes % align) != 0)
2950 n_insns += 2;
2951 }
2952
2953 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2954 return n_insns * 4;
2955 }
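/* For example, with ALIGN = 4 and N_BYTES = 14, the copying loop is 6
insns; 14 % 8 = 6 >= 4 adds two more, and 14 % 4 = 2 != 0 adds
another two, giving 10 insns or 40 bytes. */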
2956
2957 /* Emit code to perform a block clear.
2958
2959 OPERANDS[0] is the destination pointer as a REG, clobbered.
2960 OPERANDS[1] is a register for temporary storage.
2961 OPERANDS[2] is the size as a CONST_INT
2962 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2963
2964 const char *
2965 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2966 {
2967 int align = INTVAL (operands[3]);
2968 unsigned long n_bytes = INTVAL (operands[2]);
2969
2970 /* We can't clear more than a word at a time because the PA
2971 has no integer move insns longer than a word. */
2972 if (align > (TARGET_64BIT ? 8 : 4))
2973 align = (TARGET_64BIT ? 8 : 4);
2974
2975 /* Note that we know each loop below will execute at least twice
2976 (else we would have open-coded the clear). */
2977 switch (align)
2978 {
2979 case 8:
2980 /* Pre-adjust the loop counter. */
2981 operands[2] = GEN_INT (n_bytes - 16);
2982 output_asm_insn ("ldi %2,%1", operands);
2983
2984 /* Loop. */
2985 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2986 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2987 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2988
2989 /* Handle the residual. There could be up to 15 bytes of
2990 residual to clear! */
2991 if (n_bytes % 16 != 0)
2992 {
2993 operands[2] = GEN_INT (n_bytes % 8);
2994 if (n_bytes % 16 >= 8)
2995 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2996 if (n_bytes % 8 != 0)
2997 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2998 }
2999 return "";
3000
3001 case 4:
3002 /* Pre-adjust the loop counter. */
3003 operands[2] = GEN_INT (n_bytes - 8);
3004 output_asm_insn ("ldi %2,%1", operands);
3005
3006 /* Loop. */
3007 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3008 output_asm_insn ("addib,>= -8,%1,.-4", operands);
3009 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3010
3011 /* Handle the residual. There could be up to 7 bytes of
3012 residual to clear! */
3013 if (n_bytes % 8 != 0)
3014 {
3015 operands[2] = GEN_INT (n_bytes % 4);
3016 if (n_bytes % 8 >= 4)
3017 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
3018 if (n_bytes % 4 != 0)
3019 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
3020 }
3021 return "";
3022
3023 case 2:
3024 /* Pre-adjust the loop counter. */
3025 operands[2] = GEN_INT (n_bytes - 4);
3026 output_asm_insn ("ldi %2,%1", operands);
3027
3028 /* Loop. */
3029 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3030 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3031 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3032
3033 /* Handle the residual. */
3034 if (n_bytes % 4 != 0)
3035 {
3036 if (n_bytes % 4 >= 2)
3037 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3038 if (n_bytes % 2 != 0)
3039 output_asm_insn ("stb %%r0,0(%0)", operands);
3040 }
3041 return "";
3042
3043 case 1:
3044 /* Pre-adjust the loop counter. */
3045 operands[2] = GEN_INT (n_bytes - 2);
3046 output_asm_insn ("ldi %2,%1", operands);
3047
3048 /* Loop. */
3049 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3050 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3051 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3052
3053 /* Handle the residual. */
3054 if (n_bytes % 2 != 0)
3055 output_asm_insn ("stb %%r0,0(%0)", operands);
3056
3057 return "";
3058
3059 default:
3060 gcc_unreachable ();
3061 }
3062 }
3063
3064 /* Count the number of insns necessary to handle this block clear.
3065
3066 Basic structure is the same as pa_output_block_clear, except that
3067 we count insns rather than emit them. */
3068
3069 static int
3070 compute_clrmem_length (rtx_insn *insn)
3071 {
3072 rtx pat = PATTERN (insn);
3073 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3074 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3075 unsigned int n_insns = 0;
3076
3077 /* We can't clear more than a word at a time because the PA
3078 has no integer move insns longer than a word. */
3079 if (align > (TARGET_64BIT ? 8 : 4))
3080 align = (TARGET_64BIT ? 8 : 4);
3081
3082 /* The basic loop. */
3083 n_insns = 4;
3084
3085 /* Residuals. */
3086 if (n_bytes % (2 * align) != 0)
3087 {
3088 if ((n_bytes % (2 * align)) >= align)
3089 n_insns++;
3090
3091 if ((n_bytes % align) != 0)
3092 n_insns++;
3093 }
3094
3095 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3096 return n_insns * 4;
3097 }
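/* For example, with ALIGN = 4 and N_BYTES = 14, the basic loop is 4
insns; 14 % 8 = 6 >= 4 adds one more, and 14 % 4 = 2 != 0 adds
another, giving 6 insns or 24 bytes. */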
3098 \f
3099
3100 const char *
3101 pa_output_and (rtx *operands)
3102 {
3103 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3104 {
3105 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3106 int ls0, ls1, ms0, p, len;
3107
3108 for (ls0 = 0; ls0 < 32; ls0++)
3109 if ((mask & (1 << ls0)) == 0)
3110 break;
3111
3112 for (ls1 = ls0; ls1 < 32; ls1++)
3113 if ((mask & (1 << ls1)) != 0)
3114 break;
3115
3116 for (ms0 = ls1; ms0 < 32; ms0++)
3117 if ((mask & (1 << ms0)) == 0)
3118 break;
3119
3120 gcc_assert (ms0 == 32);
3121
3122 if (ls1 == 32)
3123 {
3124 len = ls0;
3125
3126 gcc_assert (len);
3127
3128 operands[2] = GEN_INT (len);
3129 return "{extru|extrw,u} %1,31,%2,%0";
3130 }
3131 else
3132 {
3133 /* We could use this `depi' for the case above as well, but `depi'
3134 requires one more register file access than an `extru'. */
3135
3136 p = 31 - ls0;
3137 len = ls1 - ls0;
3138
3139 operands[2] = GEN_INT (p);
3140 operands[3] = GEN_INT (len);
3141 return "{depi|depwi} 0,%2,%3,%0";
3142 }
3143 }
3144 else
3145 return "and %1,%2,%0";
3146 }
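/* For example, MASK = 0x0000ffff gives ls0 = 16 and ls1 = 32, so the
low sixteen bits are extracted with "extru %1,31,16,%0"; MASK =
0xffff00ff gives ls0 = 8 and ls1 = 16, so the hole is cleared with
"depi 0,23,8,%0" (position 31 - 8 = 23, length 8). */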
3147
3148 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3149 storing the result in operands[0]. */
3150 const char *
3151 pa_output_64bit_and (rtx *operands)
3152 {
3153 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3154 {
3155 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3156 int ls0, ls1, ms0, p, len;
3157
3158 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3159 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3160 break;
3161
3162 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3163 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3164 break;
3165
3166 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3167 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3168 break;
3169
3170 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3171
3172 if (ls1 == HOST_BITS_PER_WIDE_INT)
3173 {
3174 len = ls0;
3175
3176 gcc_assert (len);
3177
3178 operands[2] = GEN_INT (len);
3179 return "extrd,u %1,63,%2,%0";
3180 }
3181 else
3182 {
3183 /* We could use this `depdi' for the case above as well, but `depdi'
3184 requires one more register file access than an `extrd,u'. */
3185
3186 p = 63 - ls0;
3187 len = ls1 - ls0;
3188
3189 operands[2] = GEN_INT (p);
3190 operands[3] = GEN_INT (len);
3191 return "depdi 0,%2,%3,%0";
3192 }
3193 }
3194 else
3195 return "and %1,%2,%0";
3196 }
3197
3198 const char *
3199 pa_output_ior (rtx *operands)
3200 {
3201 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3202 int bs0, bs1, p, len;
3203
3204 if (INTVAL (operands[2]) == 0)
3205 return "copy %1,%0";
3206
3207 for (bs0 = 0; bs0 < 32; bs0++)
3208 if ((mask & (1 << bs0)) != 0)
3209 break;
3210
3211 for (bs1 = bs0; bs1 < 32; bs1++)
3212 if ((mask & (1 << bs1)) == 0)
3213 break;
3214
3215 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3216
3217 p = 31 - bs0;
3218 len = bs1 - bs0;
3219
3220 operands[2] = GEN_INT (p);
3221 operands[3] = GEN_INT (len);
3222 return "{depi|depwi} -1,%2,%3,%0";
3223 }
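/* For example, MASK = 0x00000ff0 gives bs0 = 4 and bs1 = 12, so the
result is "depi -1,27,8,%0", which sets bits 4..11 of the copy. */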
3224
3225 /* Return a string to perform a bitwise inclusive-or of operands[1] with
3226 operands[2], storing the result in operands[0]. */
3227 const char *
3228 pa_output_64bit_ior (rtx *operands)
3229 {
3230 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3231 int bs0, bs1, p, len;
3232
3233 if (INTVAL (operands[2]) == 0)
3234 return "copy %1,%0";
3235
3236 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3237 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3238 break;
3239
3240 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3241 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3242 break;
3243
3244 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3245 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3246
3247 p = 63 - bs0;
3248 len = bs1 - bs0;
3249
3250 operands[2] = GEN_INT (p);
3251 operands[3] = GEN_INT (len);
3252 return "depdi -1,%2,%3,%0";
3253 }
3254 \f
3255 /* Target hook for assembling integer objects. This code handles
3256 aligned SI and DI integers specially since function references
3257 must be preceded by P%. */
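/* Schematically, a statically initialized pointer to some function foo
is emitted as "\t.word\tP%foo"; the P% prefix makes the linker
resolve the reference to a function descriptor (plabel) rather than
the raw code address. */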
3258
3259 static bool
3260 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3261 {
3262 if (size == UNITS_PER_WORD
3263 && aligned_p
3264 && function_label_operand (x, VOIDmode))
3265 {
3266 fputs (size == 8? "\t.dword\t" : "\t.word\t", asm_out_file);
3267
3268 /* We don't want an OPD when generating fast indirect calls. */
3269 if (!TARGET_FAST_INDIRECT_CALLS)
3270 fputs ("P%", asm_out_file);
3271
3272 output_addr_const (asm_out_file, x);
3273 fputc ('\n', asm_out_file);
3274 return true;
3275 }
3276 return default_assemble_integer (x, size, aligned_p);
3277 }
3278 \f
3279 /* Output an ascii string. */
3280 void
3281 pa_output_ascii (FILE *file, const char *p, int size)
3282 {
3283 int i;
3284 int chars_output;
3285 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3286
3287 /* The HP assembler can only take strings of 256 characters at one
3288 time. This is a limitation on input line length, *not* the
3289 length of the string. Sigh. Even worse, it seems that the
3290 restriction is in number of input characters (see \xnn &
3291 \whatever). So we have to do this very carefully. */
3292
3293 fputs ("\t.STRING \"", file);
3294
3295 chars_output = 0;
3296 for (i = 0; i < size; i += 4)
3297 {
3298 int co = 0;
3299 int io = 0;
3300 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3301 {
3302 register unsigned int c = (unsigned char) p[i + io];
3303
3304 if (c == '\"' || c == '\\')
3305 partial_output[co++] = '\\';
3306 if (c >= ' ' && c < 0177)
3307 partial_output[co++] = c;
3308 else
3309 {
3310 unsigned int hexd;
3311 partial_output[co++] = '\\';
3312 partial_output[co++] = 'x';
3313 hexd = c / 16 - 0 + '0';
3314 if (hexd > '9')
3315 hexd -= '9' - 'a' + 1;
3316 partial_output[co++] = hexd;
3317 hexd = c % 16 - 0 + '0';
3318 if (hexd > '9')
3319 hexd -= '9' - 'a' + 1;
3320 partial_output[co++] = hexd;
3321 }
3322 }
3323 if (chars_output + co > 243)
3324 {
3325 fputs ("\"\n\t.STRING \"", file);
3326 chars_output = 0;
3327 }
3328 fwrite (partial_output, 1, (size_t) co, file);
3329 chars_output += co;
3330 co = 0;
3331 }
3332 fputs ("\"\n", file);
3333 }
3334
3335 /* Try to rewrite floating point comparisons & branches to avoid
3336 useless add,tr insns.
3337
3338 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3339 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3340 first attempt to remove useless add,tr insns. It is zero
3341 for the second pass as reorg sometimes leaves bogus REG_DEAD
3342 notes lying around.
3343
3344 When CHECK_NOTES is zero we can only eliminate add,tr insns
3345 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3346 instructions. */
3347 static void
3348 remove_useless_addtr_insns (int check_notes)
3349 {
3350 rtx_insn *insn;
3351 static int pass = 0;
3352
3353 /* This is fairly cheap, so always run it when optimizing. */
3354 if (optimize > 0)
3355 {
3356 int fcmp_count = 0;
3357 int fbranch_count = 0;
3358
3359 /* Walk all the insns in this function looking for fcmp & fbranch
3360 instructions. Keep track of how many of each we find. */
3361 for (insn = get_insns (); insn; insn = next_insn (insn))
3362 {
3363 rtx tmp;
3364
3365 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3366 if (! NONJUMP_INSN_P (insn) && ! JUMP_P (insn))
3367 continue;
3368
3369 tmp = PATTERN (insn);
3370
3371 /* It must be a set. */
3372 if (GET_CODE (tmp) != SET)
3373 continue;
3374
3375 /* If the destination is CCFP, then we've found an fcmp insn. */
3376 tmp = SET_DEST (tmp);
3377 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3378 {
3379 fcmp_count++;
3380 continue;
3381 }
3382
3383 tmp = PATTERN (insn);
3384 /* If this is an fbranch instruction, bump the fbranch counter. */
3385 if (GET_CODE (tmp) == SET
3386 && SET_DEST (tmp) == pc_rtx
3387 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3388 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3389 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3390 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3391 {
3392 fbranch_count++;
3393 continue;
3394 }
3395 }
3396
3397
3398 /* Find all floating point compare + branch insns. If possible,
3399 reverse the comparison & the branch to avoid add,tr insns. */
3400 for (insn = get_insns (); insn; insn = next_insn (insn))
3401 {
3402 rtx tmp;
3403 rtx_insn *next;
3404
3405 /* Ignore anything that isn't an INSN. */
3406 if (! NONJUMP_INSN_P (insn))
3407 continue;
3408
3409 tmp = PATTERN (insn);
3410
3411 /* It must be a set. */
3412 if (GET_CODE (tmp) != SET)
3413 continue;
3414
3415 /* The destination must be CCFP, which is register zero. */
3416 tmp = SET_DEST (tmp);
3417 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3418 continue;
3419
3420 /* INSN should be a set of CCFP.
3421
3422 See if the result of this insn is used in a reversed FP
3423 conditional branch. If so, reverse our condition and
3424 the branch. Doing so avoids useless add,tr insns. */
3425 next = next_insn (insn);
3426 while (next)
3427 {
3428 /* Jumps, calls and labels stop our search. */
3429 if (JUMP_P (next) || CALL_P (next) || LABEL_P (next))
3430 break;
3431
3432 /* As does another fcmp insn. */
3433 if (NONJUMP_INSN_P (next)
3434 && GET_CODE (PATTERN (next)) == SET
3435 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3436 && REGNO (SET_DEST (PATTERN (next))) == 0)
3437 break;
3438
3439 next = next_insn (next);
3440 }
3441
3442 /* Is NEXT_INSN a branch? */
3443 if (next && JUMP_P (next))
3444 {
3445 rtx pattern = PATTERN (next);
3446
3447 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3448 and CCFP dies, then reverse our conditional and the branch
3449 to avoid the add,tr. */
3450 if (GET_CODE (pattern) == SET
3451 && SET_DEST (pattern) == pc_rtx
3452 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3453 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3454 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3455 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3456 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3457 && (fcmp_count == fbranch_count
3458 || (check_notes
3459 && find_regno_note (next, REG_DEAD, 0))))
3460 {
3461 /* Reverse the branch. */
3462 tmp = XEXP (SET_SRC (pattern), 1);
3463 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3464 XEXP (SET_SRC (pattern), 2) = tmp;
3465 INSN_CODE (next) = -1;
3466
3467 /* Reverse our condition. */
3468 tmp = PATTERN (insn);
3469 PUT_CODE (XEXP (tmp, 1),
3470 (reverse_condition_maybe_unordered
3471 (GET_CODE (XEXP (tmp, 1)))));
3472 }
3473 }
3474 }
3475 }
3476
3477 pass = !pass;
3478
3479 }
3480 \f
3481 /* You may have trouble believing this, but this is the 32-bit HP-PA
3482 stack layout. Wow.
3483
3484 Offset Contents
3485
3486 Variable arguments (optional; any number may be allocated)
3487
3488 SP-(4*(N+9)) arg word N
3489 : :
3490 SP-56 arg word 5
3491 SP-52 arg word 4
3492
3493 Fixed arguments (must be allocated; may remain unused)
3494
3495 SP-48 arg word 3
3496 SP-44 arg word 2
3497 SP-40 arg word 1
3498 SP-36 arg word 0
3499
3500 Frame Marker
3501
3502 SP-32 External Data Pointer (DP)
3503 SP-28 External sr4
3504 SP-24 External/stub RP (RP')
3505 SP-20 Current RP
3506 SP-16 Static Link
3507 SP-12 Clean up
3508 SP-8 Calling Stub RP (RP'')
3509 SP-4 Previous SP
3510
3511 Top of Frame
3512
3513 SP-0 Stack Pointer (points to next available address)
3514
3515 */
3516
3517 /* This function saves registers as follows. Registers marked with ' are
3518 this function's registers (as opposed to the previous function's).
3519 If a frame_pointer isn't needed, r4 is saved as a general register;
3520 the space for the frame pointer is still allocated, though, to keep
3521 things simple.
3522
3523
3524 Top of Frame
3525
3526 SP (FP') Previous FP
3527 SP + 4 Alignment filler (sigh)
3528 SP + 8 Space for locals reserved here.
3529 .
3530 .
3531 .
3532 SP + n All call-saved registers used.
3533 .
3534 .
3535 .
3536 SP + o All call-saved fp registers used.
3537 .
3538 .
3539 .
3540 SP + p (SP') points to next available address.
3541
3542 */
3543
3544 /* Global variables set by output_function_prologue(). */
3545 /* Size of frame. Need to know this to emit return insns from
3546 leaf procedures. */
3547 static HOST_WIDE_INT actual_fsize, local_fsize;
3548 static int save_fregs;
3549
3550 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3551 Handle case where DISP > 8k by using the add_high_const patterns.
3552
3553 Note that in the DISP > 8k case, we will leave the high part of the
3554 address in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
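/* For a concrete (schematic) example: DISP = 20000 is too large for a
14-bit immediate, so the non-64-bit path below emits roughly
"addil L'20000,%base" to form the high part in %r1 and then stores
through the 14-bit low part, "stw %src,R'20000(%r1)". */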
3555
3556 static void
3557 store_reg (int reg, HOST_WIDE_INT disp, int base)
3558 {
3559 rtx dest, src, basereg;
3560 rtx_insn *insn;
3561
3562 src = gen_rtx_REG (word_mode, reg);
3563 basereg = gen_rtx_REG (Pmode, base);
3564 if (VAL_14_BITS_P (disp))
3565 {
3566 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3567 insn = emit_move_insn (dest, src);
3568 }
3569 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3570 {
3571 rtx delta = GEN_INT (disp);
3572 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3573
3574 emit_move_insn (tmpreg, delta);
3575 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3576 if (DO_FRAME_NOTES)
3577 {
3578 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3579 gen_rtx_SET (tmpreg,
3580 gen_rtx_PLUS (Pmode, basereg, delta)));
3581 RTX_FRAME_RELATED_P (insn) = 1;
3582 }
3583 dest = gen_rtx_MEM (word_mode, tmpreg);
3584 insn = emit_move_insn (dest, src);
3585 }
3586 else
3587 {
3588 rtx delta = GEN_INT (disp);
3589 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3590 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3591
3592 emit_move_insn (tmpreg, high);
3593 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3594 insn = emit_move_insn (dest, src);
3595 if (DO_FRAME_NOTES)
3596 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3597 gen_rtx_SET (gen_rtx_MEM (word_mode,
3598 gen_rtx_PLUS (word_mode,
3599 basereg,
3600 delta)),
3601 src));
3602 }
3603
3604 if (DO_FRAME_NOTES)
3605 RTX_FRAME_RELATED_P (insn) = 1;
3606 }
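/* A minimal illustration of the cases above (assembly mnemonics are
   approximate).  store_reg (3, 8, STACK_POINTER_REGNUM) emits the
   single instruction "stw %r3,8(%r30)".  A displacement such as
   0x5000 does not fit in 14 bits, so the HIGH/LO_SUM path is taken
   instead, roughly "addil L'0x5000,%r30" followed by
   "stw %r3,R'0x5000(%r1)", which is why %r1 is left holding the high
   part of the address.  */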
3607
3608 /* Emit RTL to store REG at the memory location specified by BASE and then
3609 add MOD to BASE. MOD must be <= 8k. */
3610
3611 static void
3612 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3613 {
3614 rtx basereg, srcreg, delta;
3615 rtx_insn *insn;
3616
3617 gcc_assert (VAL_14_BITS_P (mod));
3618
3619 basereg = gen_rtx_REG (Pmode, base);
3620 srcreg = gen_rtx_REG (word_mode, reg);
3621 delta = GEN_INT (mod);
3622
3623 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3624 if (DO_FRAME_NOTES)
3625 {
3626 RTX_FRAME_RELATED_P (insn) = 1;
3627
3628 /* RTX_FRAME_RELATED_P must be set on each frame related set
3629 in a parallel with more than one element. */
3630 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3631 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3632 }
3633 }
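/* For example, store_reg_modify (STACK_POINTER_REGNUM, 1, 64) stores
   %r1 at *sp and advances sp by 64 in a single instruction (stwm in
   32-bit code), the atomic store-and-allocate operation relied on by
   pa_expand_prologue below.  */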
3634
3635 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3636 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3637 whether to add a frame note or not.
3638
3639 In the DISP > 8k case, we leave the high part of the address in %r1.
3640 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3641
3642 static void
3643 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3644 {
3645 rtx_insn *insn;
3646
3647 if (VAL_14_BITS_P (disp))
3648 {
3649 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3650 plus_constant (Pmode,
3651 gen_rtx_REG (Pmode, base), disp));
3652 }
3653 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3654 {
3655 rtx basereg = gen_rtx_REG (Pmode, base);
3656 rtx delta = GEN_INT (disp);
3657 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3658
3659 emit_move_insn (tmpreg, delta);
3660 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3661 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3662 if (DO_FRAME_NOTES)
3663 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3664 gen_rtx_SET (tmpreg,
3665 gen_rtx_PLUS (Pmode, basereg, delta)));
3666 }
3667 else
3668 {
3669 rtx basereg = gen_rtx_REG (Pmode, base);
3670 rtx delta = GEN_INT (disp);
3671 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3672
3673 emit_move_insn (tmpreg,
3674 gen_rtx_PLUS (Pmode, basereg,
3675 gen_rtx_HIGH (Pmode, delta)));
3676 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3677 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3678 }
3679
3680 if (DO_FRAME_NOTES && note)
3681 RTX_FRAME_RELATED_P (insn) = 1;
3682 }
3683
3684 HOST_WIDE_INT
3685 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3686 {
3687 int freg_saved = 0;
3688 int i, j;
3689
3690 /* The code in pa_expand_prologue and pa_expand_epilogue must
3691 be consistent with the rounding and size calculation done here.
3692 Change them at the same time. */
3693
3694 /* We do our own stack alignment. First, round the size of the
3695 stack locals up to a word boundary. */
3696 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3697
3698 /* Space for previous frame pointer + filler. If any frame is
3699 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3700 waste some space here for the sake of HP compatibility. The
3701 first slot is only used when the frame pointer is needed. */
3702 if (size || frame_pointer_needed)
3703 size += STARTING_FRAME_OFFSET;
3704
3705 /* If the current function calls __builtin_eh_return, then we need
3706 to allocate stack space for registers that will hold data for
3707 the exception handler. */
3708 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3709 {
3710 unsigned int i;
3711
3712 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3713 continue;
3714 size += i * UNITS_PER_WORD;
3715 }
3716
3717 /* Account for space used by the callee general register saves. */
3718 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3719 if (df_regs_ever_live_p (i))
3720 size += UNITS_PER_WORD;
3721
3722 /* Account for space used by the callee floating point register saves. */
3723 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3724 if (df_regs_ever_live_p (i)
3725 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3726 {
3727 freg_saved = 1;
3728
3729 /* We always save both halves of the FP register, so always
3730 increment the frame size by 8 bytes. */
3731 size += 8;
3732 }
3733
3734 /* If any of the floating registers are saved, account for the
3735 alignment needed for the floating point register save block. */
3736 if (freg_saved)
3737 {
3738 size = (size + 7) & ~7;
3739 if (fregs_live)
3740 *fregs_live = 1;
3741 }
3742
3743 /* The various ABIs include space for the outgoing parameters in the
3744 size of the current function's stack frame. We don't need to align
3745 for the outgoing arguments as their alignment is set by the final
3746 rounding for the frame as a whole. */
3747 size += crtl->outgoing_args_size;
3748
3749 /* Allocate space for the fixed frame marker. This space must be
3750 allocated for any function that makes calls or allocates
3751 stack space. */
3752 if (!crtl->is_leaf || size)
3753 size += TARGET_64BIT ? 48 : 32;
3754
3755 /* Finally, round to the preferred stack boundary. */
3756 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3757 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3758 }
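/* A worked example of the rounding above on a 32-bit target
   (UNITS_PER_WORD == 4): 13 bytes of locals round up to 16 via
   (13 + 4 - 1) & ~3.  A non-leaf function then gains 32 bytes for the
   fixed frame marker, and the return statement rounds the total up to
   the preferred stack boundary.  */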
3759
3760 /* Generate the assembly code for function entry. FILE is a stdio
3761 stream to output the code to. SIZE is an int: how many units of
3762 temporary storage to allocate.
3763
3764 Refer to the array `regs_ever_live' to determine which registers to
3765 save; `regs_ever_live[I]' is nonzero if register number I is ever
3766 used in the function. This function is responsible for knowing
3767 which registers should not be saved even if used. */
3768
3769 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3770 of memory. If any fpu reg is used in the function, we allocate
3771 such a block here, at the bottom of the frame, just in case it's needed.
3772
3773 If this function is a leaf procedure, then we may choose not
3774 to do a "save" insn. The decision about whether or not
3775 to do this is made in regclass.c. */
3776
3777 static void
3778 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3779 {
3780 /* The function's label and associated .PROC must never be
3781 separated and must be output *after* any profiling declarations
3782 to avoid changing spaces/subspaces within a procedure. */
3783 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3784 fputs ("\t.PROC\n", file);
3785
3786 /* pa_expand_prologue does the dirty work now. We just need
3787 to output the assembler directives which denote the start
3788 of a function. */
3789 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3790 if (crtl->is_leaf)
3791 fputs (",NO_CALLS", file);
3792 else
3793 fputs (",CALLS", file);
3794 if (rp_saved)
3795 fputs (",SAVE_RP", file);
3796
3797 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3798 at the beginning of the frame and that it is used as the frame
3799 pointer for the frame. We do this because our current frame
3800 layout doesn't conform to that specified in the HP runtime
3801 documentation and we need a way to indicate to programs such as
3802 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3803 isn't used by HP compilers but is supported by the assembler.
3804 However, SAVE_SP is supposed to indicate that the previous stack
3805 pointer has been saved in the frame marker. */
3806 if (frame_pointer_needed)
3807 fputs (",SAVE_SP", file);
3808
3809 /* Pass on information about the number of callee register saves
3810 performed in the prologue.
3811
3812 The compiler is supposed to pass the highest register number
3813 saved, the assembler then has to adjust that number before
3814 entering it into the unwind descriptor (to account for any
3815 caller saved registers with lower register numbers than the
3816 first callee saved register). */
3817 if (gr_saved)
3818 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3819
3820 if (fr_saved)
3821 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3822
3823 fputs ("\n\t.ENTRY\n", file);
3824
3825 remove_useless_addtr_insns (0);
3826 }
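/* For illustration, a non-leaf function that needs a frame pointer
   and saves RP begins roughly as follows (the FRAME value is just an
   example):

	.PROC
	.CALLINFO FRAME=128,CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=3
	.ENTRY  */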
3827
3828 void
3829 pa_expand_prologue (void)
3830 {
3831 int merge_sp_adjust_with_store = 0;
3832 HOST_WIDE_INT size = get_frame_size ();
3833 HOST_WIDE_INT offset;
3834 int i;
3835 rtx tmpreg;
3836 rtx_insn *insn;
3837
3838 gr_saved = 0;
3839 fr_saved = 0;
3840 save_fregs = 0;
3841
3842 /* Compute total size for frame pointer, filler, locals and rounding to
3843 the next word boundary. Similar code appears in pa_compute_frame_size
3844 and must be changed in tandem with this code. */
3845 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3846 if (local_fsize || frame_pointer_needed)
3847 local_fsize += STARTING_FRAME_OFFSET;
3848
3849 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3850 if (flag_stack_usage_info)
3851 current_function_static_stack_size = actual_fsize;
3852
3853 /* Compute a few things we will use often. */
3854 tmpreg = gen_rtx_REG (word_mode, 1);
3855
3856 /* Save RP first. The calling conventions manual states RP will
3857 always be stored into the caller's frame at sp - 20 or sp - 16
3858 depending on which ABI is in use. */
3859 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3860 {
3861 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3862 rp_saved = true;
3863 }
3864 else
3865 rp_saved = false;
3866
3867 /* Allocate the local frame and set up the frame pointer if needed. */
3868 if (actual_fsize != 0)
3869 {
3870 if (frame_pointer_needed)
3871 {
3872 /* Copy the old frame pointer temporarily into %r1. Set up the
3873 new stack pointer, then store away the saved old frame pointer
3874 into the stack at sp and at the same time update the stack
3875 pointer by actual_fsize bytes. Two versions, first
3876 handles small (<8k) frames. The second handles large (>=8k)
3877 frames. */
3878 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3879 if (DO_FRAME_NOTES)
3880 RTX_FRAME_RELATED_P (insn) = 1;
3881
3882 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3883 if (DO_FRAME_NOTES)
3884 RTX_FRAME_RELATED_P (insn) = 1;
3885
3886 if (VAL_14_BITS_P (actual_fsize))
3887 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3888 else
3889 {
3890 /* It is incorrect to store the saved frame pointer at *sp,
3891 then increment sp (writes beyond the current stack boundary).
3892
3893 So instead use stwm to store at *sp and post-increment the
3894 stack pointer as an atomic operation. Then increment sp to
3895 finish allocating the new frame. */
3896 HOST_WIDE_INT adjust1 = 8192 - 64;
3897 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3898
3899 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3900 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3901 adjust2, 1);
3902 }
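/* For instance, with actual_fsize == 20480 the stwm above allocates
   8192 - 64 == 8128 bytes and set_reg_plus_d adds the remaining
   12352 (roughly an addil/ldo pair, since 12352 also exceeds the
   14-bit range).  */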
3903
3904 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3905 we need to store the previous stack pointer (frame pointer)
3906 into the frame marker on targets that use the HP unwind
3907 library. This allows the HP unwind library to be used to
3908 unwind GCC frames. However, we are not fully compatible
3909 with the HP library because our frame layout differs from
3910 that specified in the HP runtime specification.
3911
3912 We don't want a frame note on this instruction as the frame
3913 marker moves during dynamic stack allocation.
3914
3915 This instruction also serves as a blockage to prevent
3916 register spills from being scheduled before the stack
3917 pointer is raised. This is necessary as we store
3918 registers using the frame pointer as a base register,
3919 and the frame pointer is set before sp is raised. */
3920 if (TARGET_HPUX_UNWIND_LIBRARY)
3921 {
3922 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3923 GEN_INT (TARGET_64BIT ? -8 : -4));
3924
3925 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3926 hard_frame_pointer_rtx);
3927 }
3928 else
3929 emit_insn (gen_blockage ());
3930 }
3931 /* No frame pointer needed. */
3932 else
3933 {
3934 /* In some cases we can perform the first callee register save
3935 and allocating the stack frame at the same time. If so, just
3936 make a note of it and defer allocating the frame until saving
3937 the callee registers. */
3938 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3939 merge_sp_adjust_with_store = 1;
3940 /* Cannot optimize.  Adjust the stack frame by actual_fsize
3941 bytes. */
3942 else
3943 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3944 actual_fsize, 1);
3945 }
3946 }
3947
3948 /* Normal register save.
3949
3950 Do not save the frame pointer in the frame_pointer_needed case. It
3951 was done earlier. */
3952 if (frame_pointer_needed)
3953 {
3954 offset = local_fsize;
3955
3956 /* Saving the EH return data registers in the frame is the simplest
3957 way to get the frame unwind information emitted. We put them
3958 just before the general registers. */
3959 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3960 {
3961 unsigned int i, regno;
3962
3963 for (i = 0; ; ++i)
3964 {
3965 regno = EH_RETURN_DATA_REGNO (i);
3966 if (regno == INVALID_REGNUM)
3967 break;
3968
3969 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3970 offset += UNITS_PER_WORD;
3971 }
3972 }
3973
3974 for (i = 18; i >= 4; i--)
3975 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3976 {
3977 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3978 offset += UNITS_PER_WORD;
3979 gr_saved++;
3980 }
3981 /* Account for %r3 which is saved in a special place. */
3982 gr_saved++;
3983 }
3984 /* No frame pointer needed. */
3985 else
3986 {
3987 offset = local_fsize - actual_fsize;
3988
3989 /* Saving the EH return data registers in the frame is the simplest
3990 way to get the frame unwind information emitted. */
3991 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3992 {
3993 unsigned int i, regno;
3994
3995 for (i = 0; ; ++i)
3996 {
3997 regno = EH_RETURN_DATA_REGNO (i);
3998 if (regno == INVALID_REGNUM)
3999 break;
4000
4001 /* If merge_sp_adjust_with_store is nonzero, then we can
4002 optimize the first save. */
4003 if (merge_sp_adjust_with_store)
4004 {
4005 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4006 merge_sp_adjust_with_store = 0;
4007 }
4008 else
4009 store_reg (regno, offset, STACK_POINTER_REGNUM);
4010 offset += UNITS_PER_WORD;
4011 }
4012 }
4013
4014 for (i = 18; i >= 3; i--)
4015 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4016 {
4017 /* If merge_sp_adjust_with_store is nonzero, then we can
4018 optimize the first GR save. */
4019 if (merge_sp_adjust_with_store)
4020 {
4021 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4022 merge_sp_adjust_with_store = 0;
4023 }
4024 else
4025 store_reg (i, offset, STACK_POINTER_REGNUM);
4026 offset += UNITS_PER_WORD;
4027 gr_saved++;
4028 }
4029
4030 /* If we wanted to merge the SP adjustment with a GR save, but we never
4031 did any GR saves, then just emit the adjustment here. */
4032 if (merge_sp_adjust_with_store)
4033 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4034 actual_fsize, 1);
4035 }
4036
4037 /* The hppa calling conventions say that %r19, the pic offset
4038 register, is saved at sp - 32 (in this function's frame)
4039 when generating PIC code. FIXME: What is the correct thing
4040 to do for functions which make no calls and allocate no
4041 frame? Do we need to allocate a frame, or can we just omit
4042 the save? For now we'll just omit the save.
4043
4044 We don't want a note on this insn as the frame marker can
4045 move if there is a dynamic stack allocation. */
4046 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4047 {
4048 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4049
4050 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4052 }
4053
4054 /* Align pointer properly (doubleword boundary). */
4055 offset = (offset + 7) & ~7;
4056
4057 /* Floating point register store. */
4058 if (save_fregs)
4059 {
4060 rtx base;
4061
4062 /* First get the frame or stack pointer to the start of the FP register
4063 save area. */
4064 if (frame_pointer_needed)
4065 {
4066 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4067 base = hard_frame_pointer_rtx;
4068 }
4069 else
4070 {
4071 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4072 base = stack_pointer_rtx;
4073 }
4074
4075 /* Now actually save the FP registers. */
4076 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4077 {
4078 if (df_regs_ever_live_p (i)
4079 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4080 {
4081 rtx addr, reg;
4082 rtx_insn *insn;
4083 addr = gen_rtx_MEM (DFmode,
4084 gen_rtx_POST_INC (word_mode, tmpreg));
4085 reg = gen_rtx_REG (DFmode, i);
4086 insn = emit_move_insn (addr, reg);
4087 if (DO_FRAME_NOTES)
4088 {
4089 RTX_FRAME_RELATED_P (insn) = 1;
4090 if (TARGET_64BIT)
4091 {
4092 rtx mem = gen_rtx_MEM (DFmode,
4093 plus_constant (Pmode, base,
4094 offset));
4095 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4096 gen_rtx_SET (mem, reg));
4097 }
4098 else
4099 {
4100 rtx meml = gen_rtx_MEM (SFmode,
4101 plus_constant (Pmode, base,
4102 offset));
4103 rtx memr = gen_rtx_MEM (SFmode,
4104 plus_constant (Pmode, base,
4105 offset + 4));
4106 rtx regl = gen_rtx_REG (SFmode, i);
4107 rtx regr = gen_rtx_REG (SFmode, i + 1);
4108 rtx setl = gen_rtx_SET (meml, regl);
4109 rtx setr = gen_rtx_SET (memr, regr);
4110 rtvec vec;
4111
4112 RTX_FRAME_RELATED_P (setl) = 1;
4113 RTX_FRAME_RELATED_P (setr) = 1;
4114 vec = gen_rtvec (2, setl, setr);
4115 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4116 gen_rtx_SEQUENCE (VOIDmode, vec));
4117 }
4118 }
4119 offset += GET_MODE_SIZE (DFmode);
4120 fr_saved++;
4121 }
4122 }
4123 }
4124 }
4125
4126 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4127 Handle case where DISP > 8k by using the add_high_const patterns. */
4128
4129 static void
4130 load_reg (int reg, HOST_WIDE_INT disp, int base)
4131 {
4132 rtx dest = gen_rtx_REG (word_mode, reg);
4133 rtx basereg = gen_rtx_REG (Pmode, base);
4134 rtx src;
4135
4136 if (VAL_14_BITS_P (disp))
4137 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4138 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4139 {
4140 rtx delta = GEN_INT (disp);
4141 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4142
4143 emit_move_insn (tmpreg, delta);
4144 if (TARGET_DISABLE_INDEXING)
4145 {
4146 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4147 src = gen_rtx_MEM (word_mode, tmpreg);
4148 }
4149 else
4150 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4151 }
4152 else
4153 {
4154 rtx delta = GEN_INT (disp);
4155 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4156 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4157
4158 emit_move_insn (tmpreg, high);
4159 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4160 }
4161
4162 emit_move_insn (dest, src);
4163 }
4164
4165 /* Update the total code bytes output to the text section. */
4166
4167 static void
4168 update_total_code_bytes (unsigned int nbytes)
4169 {
4170 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4171 && !IN_NAMED_SECTION_P (cfun->decl))
4172 {
4173 unsigned int old_total = total_code_bytes;
4174
4175 total_code_bytes += nbytes;
4176
4177 /* Be prepared to handle overflows. */
4178 if (old_total > total_code_bytes)
4179 total_code_bytes = UINT_MAX;
4180 }
4181 }
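/* A sketch of the saturation above: once total_code_bytes is within
   NBYTES of UINT_MAX, the addition wraps, old_total compares greater
   than the new total, and the counter sticks at UINT_MAX rather than
   silently restarting from zero.  */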
4182
4183 /* This function generates the assembly code for function exit.
4184 Args are as for output_function_prologue ().
4185
4186 The function epilogue should not depend on the current stack
4187 pointer! It should use the frame pointer only. This is mandatory
4188 because of alloca; we also take advantage of it to omit stack
4189 adjustments before returning. */
4190
4191 static void
4192 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4193 {
4194 rtx_insn *insn = get_last_insn ();
4195 bool extra_nop;
4196
4197 /* pa_expand_epilogue does the dirty work now. We just need
4198 to output the assembler directives which denote the end
4199 of a function.
4200
4201 To make debuggers happy, emit a nop if the epilogue was completely
4202 eliminated due to a volatile call as the last insn in the
4203 current function. That way the return address (in %r2) will
4204 always point to a valid instruction in the current function. */
4205
4206 /* Get the last real insn. */
4207 if (NOTE_P (insn))
4208 insn = prev_real_insn (insn);
4209
4210 /* If it is a sequence, then look inside. */
4211 if (insn && NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
4212 insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
4213
4214 /* If insn is a CALL_INSN, then it must be a call to a volatile
4215 function (otherwise there would be epilogue insns). */
4216 if (insn && CALL_P (insn))
4217 {
4218 fputs ("\tnop\n", file);
4219 extra_nop = true;
4220 }
4221 else
4222 extra_nop = false;
4223
4224 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4225
4226 if (TARGET_SOM && TARGET_GAS)
4227 {
4228 /* We are done with this subspace except possibly for some additional
4229 debug information. Forget that we are in this subspace to ensure
4230 that the next function is output in its own subspace. */
4231 in_section = NULL;
4232 cfun->machine->in_nsubspa = 2;
4233 }
4234
4235 /* Thunks do their own insn accounting. */
4236 if (cfun->is_thunk)
4237 return;
4238
4239 if (INSN_ADDRESSES_SET_P ())
4240 {
4241 last_address = extra_nop ? 4 : 0;
4242 insn = get_last_nonnote_insn ();
4243 if (insn)
4244 {
4245 last_address += INSN_ADDRESSES (INSN_UID (insn));
4246 if (INSN_P (insn))
4247 last_address += insn_default_length (insn);
4248 }
4249 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4250 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4251 }
4252 else
4253 last_address = UINT_MAX;
4254
4255 /* Finally, update the total number of code bytes output so far. */
4256 update_total_code_bytes (last_address);
4257 }
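/* As an example of the rounding above, assuming FUNCTION_BOUNDARY is
   32 bits: a last_address of 0x1235 becomes (0x1235 + 3) & ~3
   == 0x1238, keeping the running byte count word-aligned between
   functions.  */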
4258
4259 void
4260 pa_expand_epilogue (void)
4261 {
4262 rtx tmpreg;
4263 HOST_WIDE_INT offset;
4264 HOST_WIDE_INT ret_off = 0;
4265 int i;
4266 int merge_sp_adjust_with_load = 0;
4267
4268 /* We will use this often. */
4269 tmpreg = gen_rtx_REG (word_mode, 1);
4270
4271 /* Try to restore RP early to avoid load/use interlocks when
4272 RP gets used in the return (bv) instruction. This appears to still
4273 be necessary even when we schedule the prologue and epilogue. */
4274 if (rp_saved)
4275 {
4276 ret_off = TARGET_64BIT ? -16 : -20;
4277 if (frame_pointer_needed)
4278 {
4279 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4280 ret_off = 0;
4281 }
4282 else
4283 {
4284 /* No frame pointer, and stack is smaller than 8k. */
4285 if (VAL_14_BITS_P (ret_off - actual_fsize))
4286 {
4287 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4288 ret_off = 0;
4289 }
4290 }
4291 }
4292
4293 /* General register restores. */
4294 if (frame_pointer_needed)
4295 {
4296 offset = local_fsize;
4297
4298 /* If the current function calls __builtin_eh_return, then we need
4299 to restore the saved EH data registers. */
4300 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4301 {
4302 unsigned int i, regno;
4303
4304 for (i = 0; ; ++i)
4305 {
4306 regno = EH_RETURN_DATA_REGNO (i);
4307 if (regno == INVALID_REGNUM)
4308 break;
4309
4310 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4311 offset += UNITS_PER_WORD;
4312 }
4313 }
4314
4315 for (i = 18; i >= 4; i--)
4316 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4317 {
4318 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4319 offset += UNITS_PER_WORD;
4320 }
4321 }
4322 else
4323 {
4324 offset = local_fsize - actual_fsize;
4325
4326 /* If the current function calls __builtin_eh_return, then we need
4327 to restore the saved EH data registers. */
4328 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4329 {
4330 unsigned int i, regno;
4331
4332 for (i = 0; ; ++i)
4333 {
4334 regno = EH_RETURN_DATA_REGNO (i);
4335 if (regno == INVALID_REGNUM)
4336 break;
4337
4338 /* Only for the first load.
4339 merge_sp_adjust_with_load holds the register load
4340 with which we will merge the sp adjustment. */
4341 if (merge_sp_adjust_with_load == 0
4342 && local_fsize == 0
4343 && VAL_14_BITS_P (-actual_fsize))
4344 merge_sp_adjust_with_load = regno;
4345 else
4346 load_reg (regno, offset, STACK_POINTER_REGNUM);
4347 offset += UNITS_PER_WORD;
4348 }
4349 }
4350
4351 for (i = 18; i >= 3; i--)
4352 {
4353 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4354 {
4355 /* Only for the first load.
4356 merge_sp_adjust_with_load holds the register load
4357 with which we will merge the sp adjustment. */
4358 if (merge_sp_adjust_with_load == 0
4359 && local_fsize == 0
4360 && VAL_14_BITS_P (-actual_fsize))
4361 merge_sp_adjust_with_load = i;
4362 else
4363 load_reg (i, offset, STACK_POINTER_REGNUM);
4364 offset += UNITS_PER_WORD;
4365 }
4366 }
4367 }
4368
4369 /* Align pointer properly (doubleword boundary). */
4370 offset = (offset + 7) & ~7;
4371
4372 /* FP register restores. */
4373 if (save_fregs)
4374 {
4375 /* Adjust the register to index off of. */
4376 if (frame_pointer_needed)
4377 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4378 else
4379 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4380
4381 /* Actually do the restores now. */
4382 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4383 if (df_regs_ever_live_p (i)
4384 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4385 {
4386 rtx src = gen_rtx_MEM (DFmode,
4387 gen_rtx_POST_INC (word_mode, tmpreg));
4388 rtx dest = gen_rtx_REG (DFmode, i);
4389 emit_move_insn (dest, src);
4390 }
4391 }
4392
4393 /* Emit a blockage insn here to keep these insns from being moved to
4394 an earlier spot in the epilogue, or into the main instruction stream.
4395
4396 This is necessary as we must not cut the stack back before all the
4397 restores are finished. */
4398 emit_insn (gen_blockage ());
4399
4400 /* Reset stack pointer (and possibly frame pointer). The stack
4401 pointer is initially set to fp + 64 to avoid a race condition. */
4402 if (frame_pointer_needed)
4403 {
4404 rtx delta = GEN_INT (-64);
4405
4406 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4407 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4408 stack_pointer_rtx, delta));
4409 }
4410 /* If we were deferring a callee register restore, do it now. */
4411 else if (merge_sp_adjust_with_load)
4412 {
4413 rtx delta = GEN_INT (-actual_fsize);
4414 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4415
4416 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4417 }
4418 else if (actual_fsize != 0)
4419 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4420 - actual_fsize, 0);
4421
4422 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4423 frame greater than 8k), do so now. */
4424 if (ret_off != 0)
4425 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4426
4427 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4428 {
4429 rtx sa = EH_RETURN_STACKADJ_RTX;
4430
4431 emit_insn (gen_blockage ());
4432 emit_insn (TARGET_64BIT
4433 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4434 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4435 }
4436 }
4437
4438 bool
4439 pa_can_use_return_insn (void)
4440 {
4441 if (!reload_completed)
4442 return false;
4443
4444 if (frame_pointer_needed)
4445 return false;
4446
4447 if (df_regs_ever_live_p (2))
4448 return false;
4449
4450 if (crtl->profile)
4451 return false;
4452
4453 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4454 }
4455
4456 rtx
4457 hppa_pic_save_rtx (void)
4458 {
4459 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4460 }
4461
4462 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4463 #define NO_DEFERRED_PROFILE_COUNTERS 0
4464 #endif
4465
4466
4467 /* Vector of funcdef numbers. */
4468 static vec<int> funcdef_nos;
4469
4470 /* Output deferred profile counters. */
4471 static void
4472 output_deferred_profile_counters (void)
4473 {
4474 unsigned int i;
4475 int align, n;
4476
4477 if (funcdef_nos.is_empty ())
4478 return;
4479
4480 switch_to_section (data_section);
4481 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4482 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4483
4484 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4485 {
4486 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4487 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4488 }
4489
4490 funcdef_nos.release ();
4491 }
4492
4493 void
4494 hppa_profile_hook (int label_no)
4495 {
4496 /* We use SImode for the address of the function in both 32 and
4497 64-bit code to avoid having to provide DImode versions of the
4498 lcla2 and load_offset_label_address insn patterns. */
4499 rtx reg = gen_reg_rtx (SImode);
4500 rtx_code_label *label_rtx = gen_label_rtx ();
4501 rtx begin_label_rtx;
4502 rtx_insn *call_insn;
4503 char begin_label_name[16];
4504
4505 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4506 label_no);
4507 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4508
4509 if (TARGET_64BIT)
4510 emit_move_insn (arg_pointer_rtx,
4511 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4512 GEN_INT (64)));
4513
4514 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4515
4516 /* The address of the function is loaded into %r25 with an instruction-
4517 relative sequence that avoids the use of relocations. The sequence
4518 is split so that the load_offset_label_address instruction can
4519 occupy the delay slot of the call to _mcount. */
4520 if (TARGET_PA_20)
4521 emit_insn (gen_lcla2 (reg, label_rtx));
4522 else
4523 emit_insn (gen_lcla1 (reg, label_rtx));
4524
4525 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4526 reg, begin_label_rtx, label_rtx));
4527
4528 #if !NO_DEFERRED_PROFILE_COUNTERS
4529 {
4530 rtx count_label_rtx, addr, r24;
4531 char count_label_name[16];
4532
4533 funcdef_nos.safe_push (label_no);
4534 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4535 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4536
4537 addr = force_reg (Pmode, count_label_rtx);
4538 r24 = gen_rtx_REG (Pmode, 24);
4539 emit_move_insn (r24, addr);
4540
4541 call_insn =
4542 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4543 gen_rtx_SYMBOL_REF (Pmode,
4544 "_mcount")),
4545 GEN_INT (TARGET_64BIT ? 24 : 12)));
4546
4547 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4548 }
4549 #else
4550
4551 call_insn =
4552 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4553 gen_rtx_SYMBOL_REF (Pmode,
4554 "_mcount")),
4555 GEN_INT (TARGET_64BIT ? 16 : 8)));
4556
4557 #endif
4558
4559 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4560 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4561
4562 /* Indicate the _mcount call cannot throw, nor will it execute a
4563 non-local goto. */
4564 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4565 }
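/* To summarize the convention set up above: _mcount receives the
   caller's return pointer in %r26 and the address of the current
   function in %r25; when deferred counters are enabled, %r24 also
   points at the counter word later emitted by
   output_deferred_profile_counters.  */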
4566
4567 /* Fetch the return address for the frame COUNT steps up from
4568 the current frame, after the prologue. FRAMEADDR is the
4569 frame pointer of the COUNT frame.
4570
4571 We want to ignore any export stub remnants here. To handle this,
4572 we examine the code at the return address, and if it is an export
4573 stub, we return a memory rtx for the stub return address stored
4574 at frame-24.
4575
4576 The value returned is used in two different ways:
4577
4578 1. To find a function's caller.
4579
4580 2. To change the return address for a function.
4581
4582 This function handles most instances of case 1; however, it will
4583 fail if there are two levels of stubs to execute on the return
4584 path. The only way I believe that can happen is if the return value
4585 needs a parameter relocation, which never happens for C code.
4586
4587 This function handles most instances of case 2; however, it will
4588 fail if we did not originally have stub code on the return path
4589 but will need stub code on the new return path. This can happen if
4590 the caller & callee are both in the main program, but the new
4591 return location is in a shared library. */
4592
4593 rtx
4594 pa_return_addr_rtx (int count, rtx frameaddr)
4595 {
4596 rtx label;
4597 rtx rp;
4598 rtx saved_rp;
4599 rtx ins;
4600
4601 /* The instruction stream at the return address of a PA1.X export stub is:
4602
4603 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4604 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4605 0x00011820 | stub+16: mtsp r1,sr0
4606 0xe0400002 | stub+20: be,n 0(sr0,rp)
4607
4608 0xe0400002 must be specified as -532676606 so that it won't be
4609 rejected as an invalid immediate operand on 64-bit hosts.
4610
4611 The instruction stream at the return address of a PA2.0 export stub is:
4612
4613 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4614 0xe840d002 | stub+12: bve,n (rp)
4615 */
4616
4617 HOST_WIDE_INT insns[4];
4618 int i, len;
4619
4620 if (count != 0)
4621 return NULL_RTX;
4622
4623 rp = get_hard_reg_initial_val (Pmode, 2);
4624
4625 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4626 return rp;
4627
4628 /* If there is no export stub then just use the value saved from
4629 the return pointer register. */
4630
4631 saved_rp = gen_reg_rtx (Pmode);
4632 emit_move_insn (saved_rp, rp);
4633
4634 /* Get pointer to the instruction stream. We have to mask out the
4635 privilege level from the two low order bits of the return address
4636 pointer here so that ins will point to the start of the first
4637 instruction that would have been executed if we returned. */
4638 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4639 label = gen_label_rtx ();
4640
4641 if (TARGET_PA_20)
4642 {
4643 insns[0] = 0x4bc23fd1;
4644 insns[1] = -398405630;
4645 len = 2;
4646 }
4647 else
4648 {
4649 insns[0] = 0x4bc23fd1;
4650 insns[1] = 0x004010a1;
4651 insns[2] = 0x00011820;
4652 insns[3] = -532676606;
4653 len = 4;
4654 }
4655
4656 /* Check the instruction stream at the normal return address for the
4657 export stub.  If it is an export stub, then our return address is
4658 really in -24[frameaddr]. */
4659
4660 for (i = 0; i < len; i++)
4661 {
4662 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4663 rtx op1 = GEN_INT (insns[i]);
4664 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4665 }
4666
4667 /* Here we know that our return address points to an export
4668 stub. We don't want to return the address of the export stub,
4669 but rather the return address of the export stub. That return
4670 address is stored at -24[frameaddr]. */
4671
4672 emit_move_insn (saved_rp,
4673 gen_rtx_MEM (Pmode,
4674 memory_address (Pmode,
4675 plus_constant (Pmode, frameaddr,
4676 -24))));
4677
4678 emit_label (label);
4679
4680 return saved_rp;
4681 }
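/* A short illustration of the masking above, assuming
   MASK_RETURN_ADDR clears the two low-order privilege bits: a return
   address of 0x40001003 yields ins == 0x40001000, so the comparison
   loop reads the stub words starting at the true instruction
   address.  */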
4682
4683 void
4684 pa_emit_bcond_fp (rtx operands[])
4685 {
4686 enum rtx_code code = GET_CODE (operands[0]);
4687 rtx operand0 = operands[1];
4688 rtx operand1 = operands[2];
4689 rtx label = operands[3];
4690
4691 emit_insn (gen_rtx_SET (gen_rtx_REG (CCFPmode, 0),
4692 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4693
4694 emit_jump_insn (gen_rtx_SET (pc_rtx,
4695 gen_rtx_IF_THEN_ELSE (VOIDmode,
4696 gen_rtx_fmt_ee (NE,
4697 VOIDmode,
4698 gen_rtx_REG (CCFPmode, 0),
4699 const0_rtx),
4700 gen_rtx_LABEL_REF (VOIDmode, label),
4701 pc_rtx)));
4703 }
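/* The two insns emitted above follow the usual PA scheme of setting
   the floating-point condition with a compare and branching on the
   result; in rough assembly terms (mnemonics approximate), an fcmp
   followed by an ftest and a conditional branch to LABEL.  */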
4704
4705 /* Adjust the cost of a scheduling dependency. Return the new cost of
4706 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4707
4708 static int
4709 pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
4710 {
4711 enum attr_type attr_type;
4712
4713 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4714 true dependencies, as they are described with bypasses now. */
4715 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4716 return cost;
4717
4718 if (! recog_memoized (insn))
4719 return 0;
4720
4721 attr_type = get_attr_type (insn);
4722
4723 switch (REG_NOTE_KIND (link))
4724 {
4725 case REG_DEP_ANTI:
4726 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4727 cycles later. */
4728
4729 if (attr_type == TYPE_FPLOAD)
4730 {
4731 rtx pat = PATTERN (insn);
4732 rtx dep_pat = PATTERN (dep_insn);
4733 if (GET_CODE (pat) == PARALLEL)
4734 {
4735 /* This happens for the fldXs,mb patterns. */
4736 pat = XVECEXP (pat, 0, 0);
4737 }
4738 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4739 /* If this happens, we have to extend this to schedule
4740 optimally. Return 0 for now. */
4741 return 0;
4742
4743 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4744 {
4745 if (! recog_memoized (dep_insn))
4746 return 0;
4747 switch (get_attr_type (dep_insn))
4748 {
4749 case TYPE_FPALU:
4750 case TYPE_FPMULSGL:
4751 case TYPE_FPMULDBL:
4752 case TYPE_FPDIVSGL:
4753 case TYPE_FPDIVDBL:
4754 case TYPE_FPSQRTSGL:
4755 case TYPE_FPSQRTDBL:
4756 /* A fpload can't be issued until one cycle before a
4757 preceding arithmetic operation has finished if
4758 the target of the fpload is any of the sources
4759 (or destination) of the arithmetic operation. */
4760 return insn_default_latency (dep_insn) - 1;
4761
4762 default:
4763 return 0;
4764 }
4765 }
4766 }
4767 else if (attr_type == TYPE_FPALU)
4768 {
4769 rtx pat = PATTERN (insn);
4770 rtx dep_pat = PATTERN (dep_insn);
4771 if (GET_CODE (pat) == PARALLEL)
4772 {
4773 /* This happens for the fldXs,mb patterns. */
4774 pat = XVECEXP (pat, 0, 0);
4775 }
4776 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4777 /* If this happens, we have to extend this to schedule
4778 optimally. Return 0 for now. */
4779 return 0;
4780
4781 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4782 {
4783 if (! recog_memoized (dep_insn))
4784 return 0;
4785 switch (get_attr_type (dep_insn))
4786 {
4787 case TYPE_FPDIVSGL:
4788 case TYPE_FPDIVDBL:
4789 case TYPE_FPSQRTSGL:
4790 case TYPE_FPSQRTDBL:
4791 /* An ALU flop can't be issued until two cycles before a
4792 preceding divide or sqrt operation has finished if
4793 the target of the ALU flop is any of the sources
4794 (or destination) of the divide or sqrt operation. */
4795 return insn_default_latency (dep_insn) - 2;
4796
4797 default:
4798 return 0;
4799 }
4800 }
4801 }
4802
4803 /* For other anti dependencies, the cost is 0. */
4804 return 0;
4805
4806 case REG_DEP_OUTPUT:
4807 /* Output dependency; DEP_INSN writes a register that INSN writes some
4808 cycles later. */
4809 if (attr_type == TYPE_FPLOAD)
4810 {
4811 rtx pat = PATTERN (insn);
4812 rtx dep_pat = PATTERN (dep_insn);
4813 if (GET_CODE (pat) == PARALLEL)
4814 {
4815 /* This happens for the fldXs,mb patterns. */
4816 pat = XVECEXP (pat, 0, 0);
4817 }
4818 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4819 /* If this happens, we have to extend this to schedule
4820 optimally. Return 0 for now. */
4821 return 0;
4822
4823 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4824 {
4825 if (! recog_memoized (dep_insn))
4826 return 0;
4827 switch (get_attr_type (dep_insn))
4828 {
4829 case TYPE_FPALU:
4830 case TYPE_FPMULSGL:
4831 case TYPE_FPMULDBL:
4832 case TYPE_FPDIVSGL:
4833 case TYPE_FPDIVDBL:
4834 case TYPE_FPSQRTSGL:
4835 case TYPE_FPSQRTDBL:
4836 /* A fpload can't be issued until one cycle before a
4837 preceding arithmetic operation has finished if
4838 the target of the fpload is the destination of the
4839 arithmetic operation.
4840
4841 Exception: For PA7100LC, PA7200 and PA7300, the cost
4842 is 3 cycles, unless they bundle together. We also
4843 pay the penalty if the second insn is a fpload. */
4844 return insn_default_latency (dep_insn) - 1;
4845
4846 default:
4847 return 0;
4848 }
4849 }
4850 }
4851 else if (attr_type == TYPE_FPALU)
4852 {
4853 rtx pat = PATTERN (insn);
4854 rtx dep_pat = PATTERN (dep_insn);
4855 if (GET_CODE (pat) == PARALLEL)
4856 {
4857 /* This happens for the fldXs,mb patterns. */
4858 pat = XVECEXP (pat, 0, 0);
4859 }
4860 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4861 /* If this happens, we have to extend this to schedule
4862 optimally. Return 0 for now. */
4863 return 0;
4864
4865 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4866 {
4867 if (! recog_memoized (dep_insn))
4868 return 0;
4869 switch (get_attr_type (dep_insn))
4870 {
4871 case TYPE_FPDIVSGL:
4872 case TYPE_FPDIVDBL:
4873 case TYPE_FPSQRTSGL:
4874 case TYPE_FPSQRTDBL:
4875 /* An ALU flop can't be issued until two cycles before a
4876 preceding divide or sqrt operation has finished if
4877 the target of the ALU flop is also the target of
4878 the divide or sqrt operation. */
4879 return insn_default_latency (dep_insn) - 2;
4880
4881 default:
4882 return 0;
4883 }
4884 }
4885 }
4886
4887 /* For other output dependencies, the cost is 0. */
4888 return 0;
4889
4890 default:
4891 gcc_unreachable ();
4892 }
4893 }
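/* A worked example of the adjustment above: if DEP_INSN is an FP
   multiply with, say, a default latency of 3, and INSN is an fpload
   whose target appears among the multiply's sources, the
   anti-dependency cost becomes insn_default_latency (dep_insn) - 1
   == 2, so the load issues no earlier than one cycle before the
   multiply completes.  */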
4894
4895 /* Adjust scheduling priorities.  We use this to try to keep addil
4896 and the next use of %r1 close together. */
4897 static int
4898 pa_adjust_priority (rtx_insn *insn, int priority)
4899 {
4900 rtx set = single_set (insn);
4901 rtx src, dest;
4902 if (set)
4903 {
4904 src = SET_SRC (set);
4905 dest = SET_DEST (set);
4906 if (GET_CODE (src) == LO_SUM
4907 && symbolic_operand (XEXP (src, 1), VOIDmode)
4908 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4909 priority >>= 3;
4910
4911 else if (GET_CODE (src) == MEM
4912 && GET_CODE (XEXP (src, 0)) == LO_SUM
4913 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4914 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4915 priority >>= 1;
4916
4917 else if (GET_CODE (dest) == MEM
4918 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4919 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4920 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4921 priority >>= 3;
4922 }
4923 return priority;
4924 }
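/* A small example of the scaling above: an insn whose SET_SRC is a
   LO_SUM of a writable symbolic operand and whose priority starts at
   32 drops to 32 >> 3 == 4, keeping it close to the addil that left
   the high part of the address in %r1.  */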
4925
4926 /* The 700 can only issue a single insn at a time.
4927 The 7XXX processors can issue two insns at a time.
4928 The 8000 can issue four insns at a time. */
4929 static int
4930 pa_issue_rate (void)
4931 {
4932 switch (pa_cpu)
4933 {
4934 case PROCESSOR_700: return 1;
4935 case PROCESSOR_7100: return 2;
4936 case PROCESSOR_7100LC: return 2;
4937 case PROCESSOR_7200: return 2;
4938 case PROCESSOR_7300: return 2;
4939 case PROCESSOR_8000: return 4;
4940
4941 default:
4942 gcc_unreachable ();
4943 }
4944 }
4945
4946
4947
4948 /* Return any length plus adjustment needed by INSN which already has
4949 its length computed as LENGTH. Return LENGTH if no adjustment is
4950 necessary.
4951
4952 Also compute the length of an inline block move here as it is too
4953 complicated to express as a length attribute in pa.md. */
4954 int
4955 pa_adjust_insn_length (rtx_insn *insn, int length)
4956 {
4957 rtx pat = PATTERN (insn);
4958
4959 /* If length is negative or undefined, provide initial length. */
4960 if ((unsigned int) length >= INT_MAX)
4961 {
4962 if (GET_CODE (pat) == SEQUENCE)
4963 insn = as_a <rtx_insn *> (XVECEXP (pat, 0, 0));
4964
4965 switch (get_attr_type (insn))
4966 {
4967 case TYPE_MILLI:
4968 length = pa_attr_length_millicode_call (insn);
4969 break;
4970 case TYPE_CALL:
4971 length = pa_attr_length_call (insn, 0);
4972 break;
4973 case TYPE_SIBCALL:
4974 length = pa_attr_length_call (insn, 1);
4975 break;
4976 case TYPE_DYNCALL:
4977 length = pa_attr_length_indirect_call (insn);
4978 break;
4979 case TYPE_SH_FUNC_ADRS:
4980 length = pa_attr_length_millicode_call (insn) + 20;
4981 break;
4982 default:
4983 gcc_unreachable ();
4984 }
4985 }
4986
4987 /* Block move pattern. */
4988 if (NONJUMP_INSN_P (insn)
4989 && GET_CODE (pat) == PARALLEL
4990 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4991 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4992 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4993 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4994 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4995 length += compute_movmem_length (insn) - 4;
4996 /* Block clear pattern. */
4997 else if (NONJUMP_INSN_P (insn)
4998 && GET_CODE (pat) == PARALLEL
4999 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
5000 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
5001 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
5002 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
5003 length += compute_clrmem_length (insn) - 4;
5004 /* Conditional branch with an unfilled delay slot. */
5005 else if (JUMP_P (insn) && ! simplejump_p (insn))
5006 {
5007 /* Adjust a short backwards conditional with an unfilled delay slot. */
5008 if (GET_CODE (pat) == SET
5009 && length == 4
5010 && JUMP_LABEL (insn) != NULL_RTX
5011 && ! forward_branch_p (insn))
5012 length += 4;
5013 else if (GET_CODE (pat) == PARALLEL
5014 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
5015 && length == 4)
5016 length += 4;
5017 /* Adjust dbra insn with short backwards conditional branch with
5018 unfilled delay slot -- only for the case where the counter is
5019 in a general register. */
5020 else if (GET_CODE (pat) == PARALLEL
5021 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
5022 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
5023 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
5024 && length == 4
5025 && ! forward_branch_p (insn))
5026 length += 4;
5027 }
5028 return length;
5029 }
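/* For example, a short backwards conditional branch with an unfilled
   delay slot enters this function with LENGTH == 4 and leaves
   reporting 8, accounting for the extra word the unfilled slot
   costs.  */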
5030
5031 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
5032
5033 static bool
5034 pa_print_operand_punct_valid_p (unsigned char code)
5035 {
5036 if (code == '@'
5037 || code == '#'
5038 || code == '*'
5039 || code == '^')
5040 return true;
5041
5042 return false;
5043 }
5044
5045 /* Print operand X (an rtx) in assembler syntax to file FILE.
5046 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5047 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5048
5049 void
5050 pa_print_operand (FILE *file, rtx x, int code)
5051 {
5052 switch (code)
5053 {
5054 case '#':
5055 /* Output a 'nop' if there's nothing for the delay slot. */
5056 if (dbr_sequence_length () == 0)
5057 fputs ("\n\tnop", file);
5058 return;
5059 case '*':
5060 /* Output a nullification completer if there's nothing for the
5061 delay slot or nullification is requested. */
5062 if (dbr_sequence_length () == 0
5063 || (final_sequence
5064 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5065 fputs (",n", file);
5066 return;
5067 case 'R':
5068 /* Print out the second register name of a register pair.
5069 I.e., R (6) => 7. */
5070 fputs (reg_names[REGNO (x) + 1], file);
5071 return;
5072 case 'r':
5073 /* A register or zero. */
5074 if (x == const0_rtx
5075 || (x == CONST0_RTX (DFmode))
5076 || (x == CONST0_RTX (SFmode)))
5077 {
5078 fputs ("%r0", file);
5079 return;
5080 }
5081 else
5082 break;
5083 case 'f':
5084 /* A register or zero (floating point). */
5085 if (x == const0_rtx
5086 || (x == CONST0_RTX (DFmode))
5087 || (x == CONST0_RTX (SFmode)))
5088 {
5089 fputs ("%fr0", file);
5090 return;
5091 }
5092 else
5093 break;
5094 case 'A':
5095 {
5096 rtx xoperands[2];
5097
5098 xoperands[0] = XEXP (XEXP (x, 0), 0);
5099 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5100 pa_output_global_address (file, xoperands[1], 0);
5101 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5102 return;
5103 }
5104
5105 case 'C': /* Plain (C)ondition */
5106 case 'X':
5107 switch (GET_CODE (x))
5108 {
5109 case EQ:
5110 fputs ("=", file); break;
5111 case NE:
5112 fputs ("<>", file); break;
5113 case GT:
5114 fputs (">", file); break;
5115 case GE:
5116 fputs (">=", file); break;
5117 case GEU:
5118 fputs (">>=", file); break;
5119 case GTU:
5120 fputs (">>", file); break;
5121 case LT:
5122 fputs ("<", file); break;
5123 case LE:
5124 fputs ("<=", file); break;
5125 case LEU:
5126 fputs ("<<=", file); break;
5127 case LTU:
5128 fputs ("<<", file); break;
5129 default:
5130 gcc_unreachable ();
5131 }
5132 return;
5133 case 'N': /* Condition, (N)egated */
5134 switch (GET_CODE (x))
5135 {
5136 case EQ:
5137 fputs ("<>", file); break;
5138 case NE:
5139 fputs ("=", file); break;
5140 case GT:
5141 fputs ("<=", file); break;
5142 case GE:
5143 fputs ("<", file); break;
5144 case GEU:
5145 fputs ("<<", file); break;
5146 case GTU:
5147 fputs ("<<=", file); break;
5148 case LT:
5149 fputs (">=", file); break;
5150 case LE:
5151 fputs (">", file); break;
5152 case LEU:
5153 fputs (">>", file); break;
5154 case LTU:
5155 fputs (">>=", file); break;
5156 default:
5157 gcc_unreachable ();
5158 }
5159 return;
5160 /* For floating point comparisons. Note that the output
5161 predicates are the complement of the desired mode. The
5162 conditions for GT, GE, LT, LE and LTGT cause an invalid
5163 operation exception if the result is unordered and this
5164 exception is enabled in the floating-point status register. */
5165 case 'Y':
5166 switch (GET_CODE (x))
5167 {
5168 case EQ:
5169 fputs ("!=", file); break;
5170 case NE:
5171 fputs ("=", file); break;
5172 case GT:
5173 fputs ("!>", file); break;
5174 case GE:
5175 fputs ("!>=", file); break;
5176 case LT:
5177 fputs ("!<", file); break;
5178 case LE:
5179 fputs ("!<=", file); break;
5180 case LTGT:
5181 fputs ("!<>", file); break;
5182 case UNLE:
5183 fputs ("!?<=", file); break;
5184 case UNLT:
5185 fputs ("!?<", file); break;
5186 case UNGE:
5187 fputs ("!?>=", file); break;
5188 case UNGT:
5189 fputs ("!?>", file); break;
5190 case UNEQ:
5191 fputs ("!?=", file); break;
5192 case UNORDERED:
5193 fputs ("!?", file); break;
5194 case ORDERED:
5195 fputs ("?", file); break;
5196 default:
5197 gcc_unreachable ();
5198 }
5199 return;
5200 case 'S': /* Condition, operands are (S)wapped. */
5201 switch (GET_CODE (x))
5202 {
5203 case EQ:
5204 fputs ("=", file); break;
5205 case NE:
5206 fputs ("<>", file); break;
5207 case GT:
5208 fputs ("<", file); break;
5209 case GE:
5210 fputs ("<=", file); break;
5211 case GEU:
5212 fputs ("<<=", file); break;
5213 case GTU:
5214 fputs ("<<", file); break;
5215 case LT:
5216 fputs (">", file); break;
5217 case LE:
5218 fputs (">=", file); break;
5219 case LEU:
5220 fputs (">>=", file); break;
5221 case LTU:
5222 fputs (">>", file); break;
5223 default:
5224 gcc_unreachable ();
5225 }
5226 return;
5227 case 'B': /* Condition, (B)oth swapped and negated. */
5228 switch (GET_CODE (x))
5229 {
5230 case EQ:
5231 fputs ("<>", file); break;
5232 case NE:
5233 fputs ("=", file); break;
5234 case GT:
5235 fputs (">=", file); break;
5236 case GE:
5237 fputs (">", file); break;
5238 case GEU:
5239 fputs (">>", file); break;
5240 case GTU:
5241 fputs (">>=", file); break;
5242 case LT:
5243 fputs ("<=", file); break;
5244 case LE:
5245 fputs ("<", file); break;
5246 case LEU:
5247 fputs ("<<", file); break;
5248 case LTU:
5249 fputs ("<<=", file); break;
5250 default:
5251 gcc_unreachable ();
5252 }
5253 return;
5254 case 'k':
5255 gcc_assert (GET_CODE (x) == CONST_INT);
5256 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5257 return;
5258 case 'Q':
5259 gcc_assert (GET_CODE (x) == CONST_INT);
5260 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5261 return;
5262 case 'L':
5263 gcc_assert (GET_CODE (x) == CONST_INT);
5264 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5265 return;
5266 case 'o':
5267 gcc_assert (GET_CODE (x) == CONST_INT
5268 && (INTVAL (x) == 1 || INTVAL (x) == 2 || INTVAL (x) == 3));
5269 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5270 return;
5271 case 'O':
5272 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5273 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5274 return;
5275 case 'p':
5276 gcc_assert (GET_CODE (x) == CONST_INT);
5277 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5278 return;
5279 case 'P':
5280 gcc_assert (GET_CODE (x) == CONST_INT);
5281 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5282 return;
5283 case 'I':
5284 if (GET_CODE (x) == CONST_INT)
5285 fputs ("i", file);
5286 return;
5287 case 'M':
5288 case 'F':
5289 switch (GET_CODE (XEXP (x, 0)))
5290 {
5291 case PRE_DEC:
5292 case PRE_INC:
5293 if (ASSEMBLER_DIALECT == 0)
5294 fputs ("s,mb", file);
5295 else
5296 fputs (",mb", file);
5297 break;
5298 case POST_DEC:
5299 case POST_INC:
5300 if (ASSEMBLER_DIALECT == 0)
5301 fputs ("s,ma", file);
5302 else
5303 fputs (",ma", file);
5304 break;
5305 case PLUS:
5306 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5307 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5308 {
5309 if (ASSEMBLER_DIALECT == 0)
5310 fputs ("x", file);
5311 }
5312 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5313 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5314 {
5315 if (ASSEMBLER_DIALECT == 0)
5316 fputs ("x,s", file);
5317 else
5318 fputs (",s", file);
5319 }
5320 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5321 fputs ("s", file);
5322 break;
5323 default:
5324 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5325 fputs ("s", file);
5326 break;
5327 }
5328 return;
5329 case 'G':
5330 pa_output_global_address (file, x, 0);
5331 return;
5332 case 'H':
5333 pa_output_global_address (file, x, 1);
5334 return;
5335 case 0: /* Don't do anything special */
5336 break;
5337 case 'Z':
5338 {
5339 unsigned op[3];
5340 compute_zdepwi_operands (INTVAL (x), op);
5341 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5342 return;
5343 }
5344 case 'z':
5345 {
5346 unsigned op[3];
5347 compute_zdepdi_operands (INTVAL (x), op);
5348 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5349 return;
5350 }
5351 case 'c':
5352 /* We can get here from a .vtable_inherit due to our
5353 CONSTANT_ADDRESS_P rejecting perfectly good constant
5354 addresses. */
5355 break;
5356 default:
5357 gcc_unreachable ();
5358 }
5359 if (GET_CODE (x) == REG)
5360 {
5361 fputs (reg_names [REGNO (x)], file);
5362 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5363 {
5364 fputs ("R", file);
5365 return;
5366 }
5367 if (FP_REG_P (x)
5368 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5369 && (REGNO (x) & 1) == 0)
5370 fputs ("L", file);
5371 }
5372 else if (GET_CODE (x) == MEM)
5373 {
5374 int size = GET_MODE_SIZE (GET_MODE (x));
5375 rtx base = NULL_RTX;
5376 switch (GET_CODE (XEXP (x, 0)))
5377 {
5378 case PRE_DEC:
5379 case POST_DEC:
5380 base = XEXP (XEXP (x, 0), 0);
5381 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5382 break;
5383 case PRE_INC:
5384 case POST_INC:
5385 base = XEXP (XEXP (x, 0), 0);
5386 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5387 break;
5388 case PLUS:
5389 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5390 fprintf (file, "%s(%s)",
5391 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5392 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5393 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5394 fprintf (file, "%s(%s)",
5395 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5396 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5397 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5398 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5399 {
5400 /* Because the REG_POINTER flag can get lost during reload,
5401 pa_legitimate_address_p canonicalizes the order of the
5402 index and base registers in the combined move patterns. */
5403 rtx base = XEXP (XEXP (x, 0), 1);
5404 rtx index = XEXP (XEXP (x, 0), 0);
5405
5406 fprintf (file, "%s(%s)",
5407 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5408 }
5409 else
5410 output_address (GET_MODE (x), XEXP (x, 0));
5411 break;
5412 default:
5413 output_address (GET_MODE (x), XEXP (x, 0));
5414 break;
5415 }
5416 }
5417 else
5418 output_addr_const (file, x);
5419 }
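/* As an example of the condition letters above, an EQ comparison
   prints "=" with %C, "<>" with %N, and "=" again with %S, while a
   GT comparison prints ">" with %C but "<" with %S, since %S swaps
   the operand order.  */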
5420
5421 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5422
5423 void
5424 pa_output_global_address (FILE *file, rtx x, int round_constant)
5425 {
5426
5427 /* Imagine (high (const (plus ...))). */
5428 if (GET_CODE (x) == HIGH)
5429 x = XEXP (x, 0);
5430
5431 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5432 output_addr_const (file, x);
5433 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5434 {
5435 output_addr_const (file, x);
5436 fputs ("-$global$", file);
5437 }
5438 else if (GET_CODE (x) == CONST)
5439 {
5440 const char *sep = "";
5441 int offset = 0; /* assembler wants -$global$ at end */
5442 rtx base = NULL_RTX;
5443
5444 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5445 {
5446 case LABEL_REF:
5447 case SYMBOL_REF:
5448 base = XEXP (XEXP (x, 0), 0);
5449 output_addr_const (file, base);
5450 break;
5451 case CONST_INT:
5452 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5453 break;
5454 default:
5455 gcc_unreachable ();
5456 }
5457
5458 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5459 {
5460 case LABEL_REF:
5461 case SYMBOL_REF:
5462 base = XEXP (XEXP (x, 0), 1);
5463 output_addr_const (file, base);
5464 break;
5465 case CONST_INT:
5466 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5467 break;
5468 default:
5469 gcc_unreachable ();
5470 }
5471
5472 /* How bogus. The compiler is apparently responsible for
5473 rounding the constant if it uses an LR field selector.
5474
5475 The linker and/or assembler seem a better place since
5476 they have to do this kind of thing already.
5477
5478 If we fail to do this, HP's optimizing linker may eliminate
5479 an addil, but not update the ldw/stw/ldo instruction that
5480 uses the result of the addil. */
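/* Concretely, the rounding below snaps the offset to the nearest
   multiple of 0x2000 (8k); e.g., an offset of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff == 0x2000.  */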
5481 if (round_constant)
5482 offset = ((offset + 0x1000) & ~0x1fff);
5483
5484 switch (GET_CODE (XEXP (x, 0)))
5485 {
5486 case PLUS:
5487 if (offset < 0)
5488 {
5489 offset = -offset;
5490 sep = "-";
5491 }
5492 else
5493 sep = "+";
5494 break;
5495
5496 case MINUS:
5497 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5498 sep = "-";
5499 break;
5500
5501 default:
5502 gcc_unreachable ();
5503 }
5504
5505 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5506 fputs ("-$global$", file);
5507 if (offset)
5508 fprintf (file, "%s%d", sep, offset);
5509 }
5510 else
5511 output_addr_const (file, x);
5512 }
5513
5514 /* Output boilerplate text to appear at the beginning of the file.
5515 There are several possible versions. */
5516 #define aputs(x) fputs(x, asm_out_file)
5517 static inline void
5518 pa_file_start_level (void)
5519 {
5520 if (TARGET_64BIT)
5521 aputs ("\t.LEVEL 2.0w\n");
5522 else if (TARGET_PA_20)
5523 aputs ("\t.LEVEL 2.0\n");
5524 else if (TARGET_PA_11)
5525 aputs ("\t.LEVEL 1.1\n");
5526 else
5527 aputs ("\t.LEVEL 1.0\n");
5528 }
5529
5530 static inline void
5531 pa_file_start_space (int sortspace)
5532 {
5533 aputs ("\t.SPACE $PRIVATE$");
5534 if (sortspace)
5535 aputs (",SORT=16");
5536 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5537 if (flag_tm)
5538 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5539 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5540 "\n\t.SPACE $TEXT$");
5541 if (sortspace)
5542 aputs (",SORT=8");
5543 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5544 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5545 }
5546
5547 static inline void
5548 pa_file_start_file (int want_version)
5549 {
5550 if (write_symbols != NO_DEBUG)
5551 {
5552 output_file_directive (asm_out_file, main_input_filename);
5553 if (want_version)
5554 aputs ("\t.version\t\"01.01\"\n");
5555 }
5556 }
5557
5558 static inline void
5559 pa_file_start_mcount (const char *aswhat)
5560 {
5561 if (profile_flag)
5562 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5563 }
5564
5565 static void
5566 pa_elf_file_start (void)
5567 {
5568 pa_file_start_level ();
5569 pa_file_start_mcount ("ENTRY");
5570 pa_file_start_file (0);
5571 }
5572
5573 static void
5574 pa_som_file_start (void)
5575 {
5576 pa_file_start_level ();
5577 pa_file_start_space (0);
5578 aputs ("\t.IMPORT $global$,DATA\n"
5579 "\t.IMPORT $$dyncall,MILLICODE\n");
5580 pa_file_start_mcount ("CODE");
5581 pa_file_start_file (0);
5582 }
5583
5584 static void
5585 pa_linux_file_start (void)
5586 {
5587 pa_file_start_file (1);
5588 pa_file_start_level ();
5589 pa_file_start_mcount ("CODE");
5590 }
5591
5592 static void
5593 pa_hpux64_gas_file_start (void)
5594 {
5595 pa_file_start_level ();
5596 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5597 if (profile_flag)
5598 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5599 #endif
5600 pa_file_start_file (1);
5601 }
5602
5603 static void
5604 pa_hpux64_hpas_file_start (void)
5605 {
5606 pa_file_start_level ();
5607 pa_file_start_space (1);
5608 pa_file_start_mcount ("CODE");
5609 pa_file_start_file (0);
5610 }
5611 #undef aputs
5612
5613 /* Search the deferred plabel list for SYMBOL and return its internal
5614 label. If an entry for SYMBOL is not found, a new entry is created. */
5615
5616 rtx
5617 pa_get_deferred_plabel (rtx symbol)
5618 {
5619 const char *fname = XSTR (symbol, 0);
5620 size_t i;
5621
5622 /* See if we have already put this function on the list of deferred
5623 plabels. This list is generally small, so a linear search is not
5624 too ugly. If it proves too slow, replace it with something faster. */
5625 for (i = 0; i < n_deferred_plabels; i++)
5626 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5627 break;
5628
5629 /* If the deferred plabel list is empty, or this entry was not found
5630 on the list, create a new entry on the list. */
5631 if (deferred_plabels == NULL || i == n_deferred_plabels)
5632 {
5633 tree id;
5634
5635 if (deferred_plabels == 0)
5636 deferred_plabels = ggc_alloc<deferred_plabel> ();
5637 else
5638 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5639 deferred_plabels,
5640 n_deferred_plabels + 1);
5641
5642 i = n_deferred_plabels++;
5643 deferred_plabels[i].internal_label = gen_label_rtx ();
5644 deferred_plabels[i].symbol = symbol;
5645
5646 /* Gross. We have just implicitly taken the address of this
5647 function. Mark it in the same manner as assemble_name. */
5648 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5649 if (id)
5650 mark_referenced (id);
5651 }
5652
5653 return deferred_plabels[i].internal_label;
5654 }
5655
5656 static void
5657 output_deferred_plabels (void)
5658 {
5659 size_t i;
5660
5661 /* If we have some deferred plabels, then we need to switch into the
5662 data or readonly data section, and align it to a 4 or 8 byte boundary
5663 before outputting the deferred plabels. */
5664 if (n_deferred_plabels)
5665 {
5666 switch_to_section (flag_pic ? data_section : readonly_data_section);
5667 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
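/* The alignment argument of ASM_OUTPUT_ALIGN is a log2 value, so 3
   requests an 8 byte boundary for the 64-bit runtime and 2 a 4 byte
   boundary for the 32-bit runtime.  */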
5668 }
5669
5670 /* Now output the deferred plabels. */
5671 for (i = 0; i < n_deferred_plabels; i++)
5672 {
5673 targetm.asm_out.internal_label (asm_out_file, "L",
5674 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5675 assemble_integer (deferred_plabels[i].symbol,
5676 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5677 }
5678 }
5679
5680 /* Initialize optabs to point to emulation routines. */
5681
5682 static void
5683 pa_init_libfuncs (void)
5684 {
5685 if (HPUX_LONG_DOUBLE_LIBRARY)
5686 {
5687 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5688 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5689 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5690 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5691 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5692 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5693 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5694 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5695 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5696
5697 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5698 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5699 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5700 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5701 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5702 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5703 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5704
5705 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5706 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5707 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5708 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5709
5710 set_conv_libfunc (sfix_optab, SImode, TFmode,
5711 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5712 : "_U_Qfcnvfxt_quad_to_sgl");
5713 set_conv_libfunc (sfix_optab, DImode, TFmode,
5714 "_U_Qfcnvfxt_quad_to_dbl");
5715 set_conv_libfunc (ufix_optab, SImode, TFmode,
5716 "_U_Qfcnvfxt_quad_to_usgl");
5717 set_conv_libfunc (ufix_optab, DImode, TFmode,
5718 "_U_Qfcnvfxt_quad_to_udbl");
5719
5720 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5721 "_U_Qfcnvxf_sgl_to_quad");
5722 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5723 "_U_Qfcnvxf_dbl_to_quad");
5724 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5725 "_U_Qfcnvxf_usgl_to_quad");
5726 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5727 "_U_Qfcnvxf_udbl_to_quad");
5728 }
5729
5730 if (TARGET_SYNC_LIBCALL)
5731 init_sync_libfuncs (8);
5732 }
5733
5734 /* HP's millicode routines mean something special to the assembler.
5735 Keep track of which ones we have used. */
5736
5737 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5738 static void import_milli (enum millicodes);
5739 static char imported[(int) end1000];
5740 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5741 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5742 #define MILLI_START 10
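/* MILLI_START is the offset of the "...." placeholder in
   import_string; import_milli overwrites those four characters with
   the routine name, e.g. producing ".IMPORT $$mulI,MILLICODE".  */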
5743
5744 static void
5745 import_milli (enum millicodes code)
5746 {
5747 char str[sizeof (import_string)];
5748
5749 if (!imported[(int) code])
5750 {
5751 imported[(int) code] = 1;
5752 strcpy (str, import_string);
5753 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5754 output_asm_insn (str, 0);
5755 }
5756 }
5757
5758 /* The register constraints have put the operands and return value in
5759 the proper registers. */
5760
5761 const char *
5762 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
5763 {
5764 import_milli (mulI);
5765 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5766 }
5767
5768 /* Emit the rtl for doing a division by a constant. */
5769
5770 /* Do magic division millicodes exist for this value? */
5771 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
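/* I.e., magic millicode routines exist for the divisors 3, 5, 6, 7,
   9, 10, 12, 14 and 15; pa_output_div_insn emits a call to
   $$divI_<n> or $$divU_<n> for these values.  */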
5772
5773 /* We'll use an array to keep track of the magic millicodes and
5774 whether or not we've used them already. [n][0] is signed, [n][1] is
5775 unsigned. */
5776
5777 static int div_milli[16][2];
5778
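/* Emit RTL for a division by a small constant using one of the magic
   millicode routines.  The expansion below places the dividend in
   %r26 and copies the quotient from %r29.  Returns nonzero when a
   magic millicode exists for the divisor and zero otherwise.  */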
5779 int
5780 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5781 {
5782 if (GET_CODE (operands[2]) == CONST_INT
5783 && INTVAL (operands[2]) > 0
5784 && INTVAL (operands[2]) < 16
5785 && pa_magic_milli[INTVAL (operands[2])])
5786 {
5787 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5788
5789 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5790 emit
5791 (gen_rtx_PARALLEL
5792 (VOIDmode,
5793 gen_rtvec (6, gen_rtx_SET (gen_rtx_REG (SImode, 29),
5794 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5795 SImode,
5796 gen_rtx_REG (SImode, 26),
5797 operands[2])),
5798 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5799 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5800 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5801 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5802 gen_rtx_CLOBBER (VOIDmode, ret))));
5803 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5804 return 1;
5805 }
5806 return 0;
5807 }
5808
5809 const char *
5810 pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
5811 {
5812 int divisor;
5813
5814 /* If the divisor is a constant, try to use one of the special
5815 opcodes. */
5816 if (GET_CODE (operands[0]) == CONST_INT)
5817 {
5818 static char buf[100];
5819 divisor = INTVAL (operands[0]);
5820 if (!div_milli[divisor][unsignedp])
5821 {
5822 div_milli[divisor][unsignedp] = 1;
5823 if (unsignedp)
5824 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5825 else
5826 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5827 }
5828 if (unsignedp)
5829 {
5830 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5831 INTVAL (operands[0]));
5832 return pa_output_millicode_call (insn,
5833 gen_rtx_SYMBOL_REF (SImode, buf));
5834 }
5835 else
5836 {
5837 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5838 INTVAL (operands[0]));
5839 return pa_output_millicode_call (insn,
5840 gen_rtx_SYMBOL_REF (SImode, buf));
5841 }
5842 }
5843 /* Divisor isn't a special constant. */
5844 else
5845 {
5846 if (unsignedp)
5847 {
5848 import_milli (divU);
5849 return pa_output_millicode_call (insn,
5850 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5851 }
5852 else
5853 {
5854 import_milli (divI);
5855 return pa_output_millicode_call (insn,
5856 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5857 }
5858 }
5859 }
5860
5861 /* Output a $$rem millicode to do mod. */
5862
5863 const char *
5864 pa_output_mod_insn (int unsignedp, rtx_insn *insn)
5865 {
5866 if (unsignedp)
5867 {
5868 import_milli (remU);
5869 return pa_output_millicode_call (insn,
5870 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5871 }
5872 else
5873 {
5874 import_milli (remI);
5875 return pa_output_millicode_call (insn,
5876 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5877 }
5878 }
5879
5880 void
5881 pa_output_arg_descriptor (rtx_insn *call_insn)
5882 {
5883 const char *arg_regs[4];
5884 machine_mode arg_mode;
5885 rtx link;
5886 int i, output_flag = 0;
5887 int regno;
5888
5889 /* We neither need nor want argument location descriptors for the
5890 64-bit runtime environment or the ELF32 environment. */
5891 if (TARGET_64BIT || TARGET_ELF32)
5892 return;
5893
5894 for (i = 0; i < 4; i++)
5895 arg_regs[i] = 0;
5896
5897 /* Specify explicitly that no argument relocations should take place
5898 if using the portable runtime calling conventions. */
5899 if (TARGET_PORTABLE_RUNTIME)
5900 {
5901 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5902 asm_out_file);
5903 return;
5904 }
5905
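/* Scan the call's USEs for argument registers.  The general argument
   registers map to descriptor words in reverse order: regno 26 is
   ARGW0, 25 is ARGW1, 24 is ARGW2 and 23 is ARGW3, which is what the
   arg_regs[26 - regno] indexing below implements.  */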
5906 gcc_assert (CALL_P (call_insn));
5907 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5908 link; link = XEXP (link, 1))
5909 {
5910 rtx use = XEXP (link, 0);
5911
5912 if (! (GET_CODE (use) == USE
5913 && GET_CODE (XEXP (use, 0)) == REG
5914 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5915 continue;
5916
5917 arg_mode = GET_MODE (XEXP (use, 0));
5918 regno = REGNO (XEXP (use, 0));
5919 if (regno >= 23 && regno <= 26)
5920 {
5921 arg_regs[26 - regno] = "GR";
5922 if (arg_mode == DImode)
5923 arg_regs[25 - regno] = "GR";
5924 }
5925 else if (regno >= 32 && regno <= 39)
5926 {
5927 if (arg_mode == SFmode)
5928 arg_regs[(regno - 32) / 2] = "FR";
5929 else
5930 {
5931 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5932 arg_regs[(regno - 34) / 2] = "FR";
5933 arg_regs[(regno - 34) / 2 + 1] = "FU";
5934 #else
5935 arg_regs[(regno - 34) / 2] = "FU";
5936 arg_regs[(regno - 34) / 2 + 1] = "FR";
5937 #endif
5938 }
5939 }
5940 }
5941 fputs ("\t.CALL ", asm_out_file);
5942 for (i = 0; i < 4; i++)
5943 {
5944 if (arg_regs[i])
5945 {
5946 if (output_flag++)
5947 fputc (',', asm_out_file);
5948 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5949 }
5950 }
5951 fputc ('\n', asm_out_file);
5952 }
5953 \f
5954 /* Inform reload about cases where moving X with a mode MODE to or from
5955 a register in RCLASS requires an extra scratch or immediate register.
5956 Return the class needed for the immediate register. */
5957
5958 static reg_class_t
5959 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5960 machine_mode mode, secondary_reload_info *sri)
5961 {
5962 int regno;
5963 enum reg_class rclass = (enum reg_class) rclass_i;
5964
5965 /* Handle the easy stuff first. */
5966 if (rclass == R1_REGS)
5967 return NO_REGS;
5968
5969 if (REG_P (x))
5970 {
5971 regno = REGNO (x);
5972 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5973 return NO_REGS;
5974 }
5975 else
5976 regno = -1;
5977
5978 /* If we have something like (mem (mem (...)), we can safely assume the
5979 inner MEM will end up in a general register after reloading, so there's
5980 no need for a secondary reload. */
5981 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5982 return NO_REGS;
5983
5984 /* Trying to load a constant into a FP register during PIC code
5985 generation requires %r1 as a scratch register. For float modes,
5986 the only legitimate constant is CONST0_RTX. However, there are
5987 a few patterns that accept constant double operands. */
5988 if (flag_pic
5989 && FP_REG_CLASS_P (rclass)
5990 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5991 {
5992 switch (mode)
5993 {
5994 case SImode:
5995 sri->icode = CODE_FOR_reload_insi_r1;
5996 break;
5997
5998 case DImode:
5999 sri->icode = CODE_FOR_reload_indi_r1;
6000 break;
6001
6002 case SFmode:
6003 sri->icode = CODE_FOR_reload_insf_r1;
6004 break;
6005
6006 case DFmode:
6007 sri->icode = CODE_FOR_reload_indf_r1;
6008 break;
6009
6010 default:
6011 gcc_unreachable ();
6012 }
6013 return NO_REGS;
6014 }
6015
6016 /* Secondary reloads of symbolic expressions require %r1 as a scratch
6017 register when we're generating PIC code or when the operand isn't
6018 readonly. */
6019 if (pa_symbolic_expression_p (x))
6020 {
6021 if (GET_CODE (x) == HIGH)
6022 x = XEXP (x, 0);
6023
6024 if (flag_pic || !read_only_operand (x, VOIDmode))
6025 {
6026 switch (mode)
6027 {
6028 case SImode:
6029 sri->icode = CODE_FOR_reload_insi_r1;
6030 break;
6031
6032 case DImode:
6033 sri->icode = CODE_FOR_reload_indi_r1;
6034 break;
6035
6036 default:
6037 gcc_unreachable ();
6038 }
6039 return NO_REGS;
6040 }
6041 }
6042
6043 /* Profiling showed the PA port spends about 1.3% of its compilation
6044 time in true_regnum from calls inside pa_secondary_reload_class. */
6045 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
6046 regno = true_regnum (x);
6047
6048 /* Handle reloads for floating point loads and stores. */
6049 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6050 && FP_REG_CLASS_P (rclass))
6051 {
6052 if (MEM_P (x))
6053 {
6054 x = XEXP (x, 0);
6055
6056 /* We don't need a secondary reload for indexed memory addresses.
6057
6058 When INT14_OK_STRICT is true, it might appear that we could
6059 directly allow register indirect memory addresses. However,
6060 this doesn't work because we don't support SUBREGs in
6061 floating-point register copies and reload doesn't tell us
6062 when it's going to use a SUBREG. */
6063 if (IS_INDEX_ADDR_P (x))
6064 return NO_REGS;
6065 }
6066
6067 /* Request a secondary reload with a general scratch register
6068 for everything else. ??? Could symbolic operands be handled
6069 directly when generating non-pic PA 2.0 code? */
6070 sri->icode = (in_p
6071 ? direct_optab_handler (reload_in_optab, mode)
6072 : direct_optab_handler (reload_out_optab, mode));
6073 return NO_REGS;
6074 }
6075
6076 /* A SAR<->FP register copy requires an intermediate general register
6077 and secondary memory. We need a secondary reload with a general
6078 scratch register for spills. */
6079 if (rclass == SHIFT_REGS)
6080 {
6081 /* Handle spill. */
6082 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6083 {
6084 sri->icode = (in_p
6085 ? direct_optab_handler (reload_in_optab, mode)
6086 : direct_optab_handler (reload_out_optab, mode));
6087 return NO_REGS;
6088 }
6089
6090 /* Handle FP copy. */
6091 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6092 return GENERAL_REGS;
6093 }
6094
6095 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6096 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6097 && FP_REG_CLASS_P (rclass))
6098 return GENERAL_REGS;
6099
6100 return NO_REGS;
6101 }
6102
6103 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6104 is only marked as live on entry by df-scan when it is a fixed
6105 register. It isn't a fixed register in the 64-bit runtime,
6106 so we need to mark it here. */
6107
6108 static void
6109 pa_extra_live_on_entry (bitmap regs)
6110 {
6111 if (TARGET_64BIT)
6112 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6113 }
6114
6115 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6116 to prevent it from being deleted. */
6117
6118 rtx
6119 pa_eh_return_handler_rtx (void)
6120 {
6121 rtx tmp;
6122
6123 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6124 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6125 tmp = gen_rtx_MEM (word_mode, tmp);
6126 tmp->volatil = 1;
6127 return tmp;
6128 }
6129
6130 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6131 by invisible reference. As a GCC extension, we also pass anything
6132 with a zero or variable size by reference.
6133
6134 The 64-bit runtime does not describe passing any types by invisible
6135 reference. The internals of GCC can't currently handle passing
6136 empty structures, and zero or variable length arrays when they are
6137 not passed entirely on the stack or by reference. Thus, as a GCC
6138 extension, we pass these types by reference. The HP compiler doesn't
6139 support these types, so hopefully there shouldn't be any compatibility
6140 issues. This may have to be revisited when HP releases a C99 compiler
6141 or updates the ABI. */
6142
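/* For example, under the 32-bit runtime a 12-byte struct (size > 8)
   is passed by invisible reference while an 8-byte double is passed
   by value; under the 64-bit runtime only zero and variable sized
   types take the by-reference path.  */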
6143 static bool
6144 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6145 machine_mode mode, const_tree type,
6146 bool named ATTRIBUTE_UNUSED)
6147 {
6148 HOST_WIDE_INT size;
6149
6150 if (type)
6151 size = int_size_in_bytes (type);
6152 else
6153 size = GET_MODE_SIZE (mode);
6154
6155 if (TARGET_64BIT)
6156 return size <= 0;
6157 else
6158 return size <= 0 || size > 8;
6159 }
6160
6161 enum direction
6162 pa_function_arg_padding (machine_mode mode, const_tree type)
6163 {
6164 if (mode == BLKmode
6165 || (TARGET_64BIT
6166 && type
6167 && (AGGREGATE_TYPE_P (type)
6168 || TREE_CODE (type) == COMPLEX_TYPE
6169 || TREE_CODE (type) == VECTOR_TYPE)))
6170 {
6171 /* Return none if justification is not required. */
6172 if (type
6173 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6174 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6175 return none;
6176
6177 /* The directions set here are ignored when a BLKmode argument larger
6178 than a word is placed in a register. Different code is used for
6179 the stack and registers. This makes it difficult to have a
6180 consistent data representation for both the stack and registers.
6181 For both runtimes, the justification and padding for arguments on
6182 the stack and in registers should be identical. */
6183 if (TARGET_64BIT)
6184 /* The 64-bit runtime specifies left justification for aggregates. */
6185 return upward;
6186 else
6187 /* The 32-bit runtime architecture specifies right justification.
6188 When the argument is passed on the stack, the argument is padded
6189 with garbage on the left. The HP compiler pads with zeros. */
6190 return downward;
6191 }
6192
6193 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6194 return downward;
6195 else
6196 return none;
6197 }
6198
6199 \f
6200 /* Do what is necessary for `va_start'. We look at the current function
6201 to determine if stdargs or varargs is used and fill in an initial
6202 va_list. A pointer to this constructor is returned. */
6203
6204 static rtx
6205 hppa_builtin_saveregs (void)
6206 {
6207 rtx offset, dest;
6208 tree fntype = TREE_TYPE (current_function_decl);
6209 int argadj = ((!stdarg_p (fntype))
6210 ? UNITS_PER_WORD : 0);
6211
6212 if (argadj)
6213 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6214 else
6215 offset = crtl->args.arg_offset_rtx;
6216
6217 if (TARGET_64BIT)
6218 {
6219 int i, off;
6220
6221 /* Adjust for varargs/stdarg differences. */
6222 if (argadj)
6223 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6224 else
6225 offset = crtl->args.arg_offset_rtx;
6226
6227 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6228 from the incoming arg pointer and growing to larger addresses. */
6229 for (i = 26, off = -64; i >= 19; i--, off += 8)
6230 emit_move_insn (gen_rtx_MEM (word_mode,
6231 plus_constant (Pmode,
6232 arg_pointer_rtx, off)),
6233 gen_rtx_REG (word_mode, i));
6234
6235 /* The incoming args pointer points just beyond the flushback area;
6236 normally this is not a serious concern. However, when we are doing
6237 varargs/stdargs we want to make the arg pointer point to the start
6238 of the incoming argument area. */
6239 emit_move_insn (virtual_incoming_args_rtx,
6240 plus_constant (Pmode, arg_pointer_rtx, -64));
6241
6242 /* Now return a pointer to the first anonymous argument. */
6243 return copy_to_reg (expand_binop (Pmode, add_optab,
6244 virtual_incoming_args_rtx,
6245 offset, 0, 0, OPTAB_LIB_WIDEN));
6246 }
6247
6248 /* Store general registers on the stack. */
6249 dest = gen_rtx_MEM (BLKmode,
6250 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6251 -16));
6252 set_mem_alias_set (dest, get_varargs_alias_set ());
6253 set_mem_align (dest, BITS_PER_WORD);
6254 move_block_from_reg (23, dest, 4);
6255
6256 /* move_block_from_reg will emit code to store the argument registers
6257 individually as scalar stores.
6258
6259 However, other insns may later load from the same addresses for
6260 a structure load (passing a struct to a varargs routine).
6261
6262 The alias code assumes that such aliasing can never happen, so we
6263 have to keep memory referencing insns from moving up beyond the
6264 last argument register store. So we emit a blockage insn here. */
6265 emit_insn (gen_blockage ());
6266
6267 return copy_to_reg (expand_binop (Pmode, add_optab,
6268 crtl->args.internal_arg_pointer,
6269 offset, 0, 0, OPTAB_LIB_WIDEN));
6270 }
6271
6272 static void
6273 hppa_va_start (tree valist, rtx nextarg)
6274 {
6275 nextarg = expand_builtin_saveregs ();
6276 std_expand_builtin_va_start (valist, nextarg);
6277 }
6278
6279 static tree
6280 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6281 gimple_seq *post_p)
6282 {
6283 if (TARGET_64BIT)
6284 {
6285 /* Args grow upward. We can use the generic routines. */
6286 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6287 }
6288 else /* !TARGET_64BIT */
6289 {
6290 tree ptr = build_pointer_type (type);
6291 tree valist_type;
6292 tree t, u;
6293 unsigned int size, ofs;
6294 bool indirect;
6295
6296 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6297 if (indirect)
6298 {
6299 type = ptr;
6300 ptr = build_pointer_type (type);
6301 }
6302 size = int_size_in_bytes (type);
6303 valist_type = TREE_TYPE (valist);
6304
6305 /* Args grow down. Not handled by generic routines. */
6306
6307 u = fold_convert (sizetype, size_in_bytes (type));
6308 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6309 t = fold_build_pointer_plus (valist, u);
6310
6311 /* Align to 4 or 8 byte boundary depending on argument size. */
6312
6313 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6314 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
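/* AND-ing the address with -8 or -4 clears its low bits, rounding it
   down to the required 8 or 4 byte boundary.  */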
6315 t = fold_convert (valist_type, t);
6316
6317 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6318
6319 ofs = (8 - size) % 4;
6320 if (ofs != 0)
6321 t = fold_build_pointer_plus_hwi (t, ofs);
6322
6323 t = fold_convert (ptr, t);
6324 t = build_va_arg_indirect_ref (t);
6325
6326 if (indirect)
6327 t = build_va_arg_indirect_ref (t);
6328
6329 return t;
6330 }
6331 }
6332
6333 /* True if MODE is valid for the target. By "valid", we mean able to
6334 be manipulated in non-trivial ways. In particular, this means all
6335 the arithmetic is supported.
6336
6337 Currently, TImode is not valid as the HP 64-bit runtime documentation
6338 doesn't document the alignment and calling conventions for this type.
6339 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6340 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6341
6342 static bool
6343 pa_scalar_mode_supported_p (machine_mode mode)
6344 {
6345 int precision = GET_MODE_PRECISION (mode);
6346
6347 switch (GET_MODE_CLASS (mode))
6348 {
6349 case MODE_PARTIAL_INT:
6350 case MODE_INT:
6351 if (precision == CHAR_TYPE_SIZE)
6352 return true;
6353 if (precision == SHORT_TYPE_SIZE)
6354 return true;
6355 if (precision == INT_TYPE_SIZE)
6356 return true;
6357 if (precision == LONG_TYPE_SIZE)
6358 return true;
6359 if (precision == LONG_LONG_TYPE_SIZE)
6360 return true;
6361 return false;
6362
6363 case MODE_FLOAT:
6364 if (precision == FLOAT_TYPE_SIZE)
6365 return true;
6366 if (precision == DOUBLE_TYPE_SIZE)
6367 return true;
6368 if (precision == LONG_DOUBLE_TYPE_SIZE)
6369 return true;
6370 return false;
6371
6372 case MODE_DECIMAL_FLOAT:
6373 return false;
6374
6375 default:
6376 gcc_unreachable ();
6377 }
6378 }
6379
6380 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6381 it branches into the delay slot. Otherwise, return FALSE. */
6382
6383 static bool
6384 branch_to_delay_slot_p (rtx_insn *insn)
6385 {
6386 rtx_insn *jump_insn;
6387
6388 if (dbr_sequence_length ())
6389 return FALSE;
6390
6391 jump_insn = next_active_insn (JUMP_LABEL (insn));
6392 while (insn)
6393 {
6394 insn = next_active_insn (insn);
6395 if (jump_insn == insn)
6396 return TRUE;
6397
6398 /* We can't rely on the length of asms. So, we return FALSE when
6399 the branch is followed by an asm. */
6400 if (!insn
6401 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6402 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6403 || get_attr_length (insn) > 0)
6404 break;
6405 }
6406
6407 return FALSE;
6408 }
6409
6410 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6411
6412 This occurs when INSN has an unfilled delay slot and is followed
6413 by an asm. Disaster can occur if the asm is empty and the jump
6414 branches into the delay slot. So, we add a nop in the delay slot
6415 when this occurs. */
6416
6417 static bool
6418 branch_needs_nop_p (rtx_insn *insn)
6419 {
6420 rtx_insn *jump_insn;
6421
6422 if (dbr_sequence_length ())
6423 return FALSE;
6424
6425 jump_insn = next_active_insn (JUMP_LABEL (insn));
6426 while (insn)
6427 {
6428 insn = next_active_insn (insn);
6429 if (!insn || jump_insn == insn)
6430 return TRUE;
6431
6432 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6433 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6434 && get_attr_length (insn) > 0)
6435 break;
6436 }
6437
6438 return FALSE;
6439 }
6440
6441 /* Return TRUE if INSN, a forward jump insn, can use nullification
6442 to skip the following instruction. This avoids an extra cycle due
6443 to a mis-predicted branch when we fall through. */
6444
6445 static bool
6446 use_skip_p (rtx_insn *insn)
6447 {
6448 rtx_insn *jump_insn = next_active_insn (JUMP_LABEL (insn));
6449
6450 while (insn)
6451 {
6452 insn = next_active_insn (insn);
6453
6454 /* We can't rely on the length of asms, so we can't skip asms. */
6455 if (!insn
6456 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6457 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6458 break;
6459 if (get_attr_length (insn) == 4
6460 && jump_insn == next_active_insn (insn))
6461 return TRUE;
6462 if (get_attr_length (insn) > 0)
6463 break;
6464 }
6465
6466 return FALSE;
6467 }
6468
6469 /* This routine handles all the normal conditional branch sequences we
6470 might need to generate. It handles compare immediate vs compare
6471 register, nullification of delay slots, varying length branches,
6472 negated branches, and all combinations of the above. It returns the
6473 output appropriate to emit the branch corresponding to all given
6474 parameters. */
6475
6476 const char *
6477 pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
6478 {
6479 static char buf[100];
6480 bool useskip;
6481 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6482 int length = get_attr_length (insn);
6483 int xdelay;
6484
6485 /* A conditional branch to the following instruction (e.g. the delay slot)
6486 is asking for a disaster. This can happen when not optimizing and
6487 when jump optimization fails.
6488
6489 While it is usually safe to emit nothing, this can fail if the
6490 preceding instruction is a nullified branch with an empty delay
6491 slot and the same branch target as this branch. We could check
6492 for this but jump optimization should eliminate nop jumps. It
6493 is always safe to emit a nop. */
6494 if (branch_to_delay_slot_p (insn))
6495 return "nop";
6496
6497 /* The doubleword form of the cmpib instruction doesn't have the LEU
6498 and GTU conditions while the cmpb instruction does. Since we accept
6499 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6500 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6501 operands[2] = gen_rtx_REG (DImode, 0);
6502 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6503 operands[1] = gen_rtx_REG (DImode, 0);
6504
6505 /* If this is a long branch with its delay slot unfilled, set `nullify'
6506 as it can nullify the delay slot and save a nop. */
6507 if (length == 8 && dbr_sequence_length () == 0)
6508 nullify = 1;
6509
6510 /* If this is a short forward conditional branch which did not get
6511 its delay slot filled, the delay slot can still be nullified. */
6512 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6513 nullify = forward_branch_p (insn);
6514
6515 /* A forward branch over a single nullified insn can be done with a
6516 comclr instruction. This avoids a single cycle penalty due to
6517 mis-predicted branch if we fall through (branch not taken). */
6518 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6519
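/* In the templates below, the "{com...|cmp...}" braces pick between
   the two assembler dialects (the old mnemonics vs. the PA 2.0
   ones), and "%#" appends a nop when the delay slot is empty.  */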
6520 switch (length)
6521 {
6522 /* All short conditional branches except backwards with an unfilled
6523 delay slot. */
6524 case 4:
6525 if (useskip)
6526 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6527 else
6528 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6529 if (GET_MODE (operands[1]) == DImode)
6530 strcat (buf, "*");
6531 if (negated)
6532 strcat (buf, "%B3");
6533 else
6534 strcat (buf, "%S3");
6535 if (useskip)
6536 strcat (buf, " %2,%r1,%%r0");
6537 else if (nullify)
6538 {
6539 if (branch_needs_nop_p (insn))
6540 strcat (buf, ",n %2,%r1,%0%#");
6541 else
6542 strcat (buf, ",n %2,%r1,%0");
6543 }
6544 else
6545 strcat (buf, " %2,%r1,%0");
6546 break;
6547
6548 /* All long conditionals. Note a short backward branch with an
6549 unfilled delay slot is treated just like a long backward branch
6550 with an unfilled delay slot. */
6551 case 8:
6552 /* Handle weird backwards branch with a filled delay slot
6553 which is nullified. */
6554 if (dbr_sequence_length () != 0
6555 && ! forward_branch_p (insn)
6556 && nullify)
6557 {
6558 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6559 if (GET_MODE (operands[1]) == DImode)
6560 strcat (buf, "*");
6561 if (negated)
6562 strcat (buf, "%S3");
6563 else
6564 strcat (buf, "%B3");
6565 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6566 }
6567 /* Handle short backwards branch with an unfilled delay slot.
6568 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6569 taken and untaken branches. */
6570 else if (dbr_sequence_length () == 0
6571 && ! forward_branch_p (insn)
6572 && INSN_ADDRESSES_SET_P ()
6573 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6574 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6575 {
6576 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6577 if (GET_MODE (operands[1]) == DImode)
6578 strcat (buf, "*");
6579 if (negated)
6580 strcat (buf, "%B3 %2,%r1,%0%#");
6581 else
6582 strcat (buf, "%S3 %2,%r1,%0%#");
6583 }
6584 else
6585 {
6586 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6587 if (GET_MODE (operands[1]) == DImode)
6588 strcat (buf, "*");
6589 if (negated)
6590 strcat (buf, "%S3");
6591 else
6592 strcat (buf, "%B3");
6593 if (nullify)
6594 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6595 else
6596 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6597 }
6598 break;
6599
6600 default:
6601 /* The reversed conditional branch must branch over one additional
6602 instruction if the delay slot is filled and needs to be extracted
6603 by pa_output_lbranch. If the delay slot is empty or this is a
6604 nullified forward branch, the instruction after the reversed
6605 condition branch must be nullified. */
6606 if (dbr_sequence_length () == 0
6607 || (nullify && forward_branch_p (insn)))
6608 {
6609 nullify = 1;
6610 xdelay = 0;
6611 operands[4] = GEN_INT (length);
6612 }
6613 else
6614 {
6615 xdelay = 1;
6616 operands[4] = GEN_INT (length + 4);
6617 }
6618
6619 /* Create a reversed conditional branch which branches around
6620 the following insns. */
6621 if (GET_MODE (operands[1]) != DImode)
6622 {
6623 if (nullify)
6624 {
6625 if (negated)
6626 strcpy (buf,
6627 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6628 else
6629 strcpy (buf,
6630 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6631 }
6632 else
6633 {
6634 if (negated)
6635 strcpy (buf,
6636 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6637 else
6638 strcpy (buf,
6639 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6640 }
6641 }
6642 else
6643 {
6644 if (nullify)
6645 {
6646 if (negated)
6647 strcpy (buf,
6648 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6649 else
6650 strcpy (buf,
6651 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6652 }
6653 else
6654 {
6655 if (negated)
6656 strcpy (buf,
6657 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6658 else
6659 strcpy (buf,
6660 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6661 }
6662 }
6663
6664 output_asm_insn (buf, operands);
6665 return pa_output_lbranch (operands[0], insn, xdelay);
6666 }
6667 return buf;
6668 }
6669
6670 /* This routine handles output of long unconditional branches that
6671 exceed the maximum range of a simple branch instruction. Since
6672 we don't have a register available for the branch, we save register
6673 %r1 in the frame marker, load the branch destination DEST into %r1,
6674 execute the branch, and restore %r1 in the delay slot of the branch.
6675
6676 Since long branches may have an insn in the delay slot and the
6677 delay slot is used to restore %r1, we in general need to extract
6678 this insn and execute it before the branch. However, to facilitate
6679 use of this function by conditional branches, we also provide an
6680 option to not extract the delay insn so that it will be emitted
6681 after the long branch. So, if there is an insn in the delay slot,
6682 it is extracted if XDELAY is nonzero.
6683
6684 The lengths of the various long-branch sequences are 20, 16 and 24
6685 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
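/* E.g., the 16-byte non-PIC sequence is four instructions: the store
   that saves %r1, "ldil L'dest,%r1", "be R'dest(%sr4,%r1)", and the
   load that restores %r1 in the delay slot.  */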
6686
6687 const char *
6688 pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
6689 {
6690 rtx xoperands[2];
6691
6692 xoperands[0] = dest;
6693
6694 /* First, free up the delay slot. */
6695 if (xdelay && dbr_sequence_length () != 0)
6696 {
6697 /* We can't handle a jump in the delay slot. */
6698 gcc_assert (! JUMP_P (NEXT_INSN (insn)));
6699
6700 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6701 optimize, 0, NULL);
6702
6703 /* Now delete the delay insn. */
6704 SET_INSN_DELETED (NEXT_INSN (insn));
6705 }
6706
6707 /* Output an insn to save %r1. The runtime documentation doesn't
6708 specify whether the "Clean Up" slot in the caller's frame can
6709 be clobbered by the callee. It isn't copied by HP's builtin
6710 alloca, so this suggests that it can be clobbered if necessary.
6711 The "Static Link" location is copied by HP builtin alloca, so
6712 we avoid using it. Using the cleanup slot might be a problem
6713 if we have to interoperate with languages that pass cleanup
6714 information. However, it should be possible to handle these
6715 situations with GCC's asm feature.
6716
6717 The "Current RP" slot is reserved for the called procedure, so
6718 we try to use it when we don't have a frame of our own. It's
6719 rather unlikely that we won't have a frame when we need to emit
6720 a very long branch.
6721
6722 Really the way to go long term is a register scavenger; goto
6723 the target of the jump and find a register which we can use
6724 as a scratch to hold the value in %r1. Then, we wouldn't have
6725 to free up the delay slot or clobber a slot that may be needed
6726 for other purposes. */
6727 if (TARGET_64BIT)
6728 {
6729 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6730 /* Use the return pointer slot in the frame marker. */
6731 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6732 else
6733 /* Use the slot at -40 in the frame marker since HP builtin
6734 alloca doesn't copy it. */
6735 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6736 }
6737 else
6738 {
6739 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6740 /* Use the return pointer slot in the frame marker. */
6741 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6742 else
6743 /* Use the "Clean Up" slot in the frame marker. In GCC,
6744 the only other use of this location is for copying a
6745 floating point double argument from a floating-point
6746 register to two general registers. The copy is done
6747 as an "atomic" operation when outputting a call, so it
6748 won't interfere with our using the location here. */
6749 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6750 }
6751
6752 if (TARGET_PORTABLE_RUNTIME)
6753 {
6754 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6755 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6756 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6757 }
6758 else if (flag_pic)
6759 {
6760 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6761 if (TARGET_SOM || !TARGET_GAS)
6762 {
6763 xoperands[1] = gen_label_rtx ();
6764 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6765 targetm.asm_out.internal_label (asm_out_file, "L",
6766 CODE_LABEL_NUMBER (xoperands[1]));
6767 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6768 }
6769 else
6770 {
6771 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6772 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6773 }
6774 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6775 }
6776 else
6777 /* Now output a very long branch to the original target. */
6778 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6779
6780 /* Now restore the value of %r1 in the delay slot. */
6781 if (TARGET_64BIT)
6782 {
6783 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6784 return "ldd -16(%%r30),%%r1";
6785 else
6786 return "ldd -40(%%r30),%%r1";
6787 }
6788 else
6789 {
6790 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6791 return "ldw -20(%%r30),%%r1";
6792 else
6793 return "ldw -12(%%r30),%%r1";
6794 }
6795 }
6796
6797 /* This routine handles all the branch-on-bit conditional branch sequences we
6798 might need to generate. It handles nullification of delay slots,
6799 varying length branches, negated branches and all combinations of the
6800 above. It returns the appropriate output template to emit the branch. */
6801
6802 const char *
6803 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
6804 {
6805 static char buf[100];
6806 bool useskip;
6807 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6808 int length = get_attr_length (insn);
6809 int xdelay;
6810
6811 /* A conditional branch to the following instruction (e.g. the delay slot) is
6812 asking for a disaster. I do not think this can happen as this pattern
6813 is only used when optimizing; jump optimization should eliminate the
6814 jump. But be prepared just in case. */
6815
6816 if (branch_to_delay_slot_p (insn))
6817 return "nop";
6818
6819 /* If this is a long branch with its delay slot unfilled, set `nullify'
6820 as it can nullify the delay slot and save a nop. */
6821 if (length == 8 && dbr_sequence_length () == 0)
6822 nullify = 1;
6823
6824 /* If this is a short forward conditional branch which did not get
6825 its delay slot filled, the delay slot can still be nullified. */
6826 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6827 nullify = forward_branch_p (insn);
6828
6829 /* A forward branch over a single nullified insn can be done with an
6830 extrs instruction. This avoids a single cycle penalty due to
6831 mis-predicted branch if we fall through (branch not taken). */
6832 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6833
6834 switch (length)
6835 {
6836
6837 /* All short conditional branches except backwards with an unfilled
6838 delay slot. */
6839 case 4:
6840 if (useskip)
6841 strcpy (buf, "{extrs,|extrw,s,}");
6842 else
6843 strcpy (buf, "bb,");
6844 if (useskip && GET_MODE (operands[0]) == DImode)
6845 strcpy (buf, "extrd,s,*");
6846 else if (GET_MODE (operands[0]) == DImode)
6847 strcpy (buf, "bb,*");
6848 if ((which == 0 && negated)
6849 || (which == 1 && ! negated))
6850 strcat (buf, ">=");
6851 else
6852 strcat (buf, "<");
6853 if (useskip)
6854 strcat (buf, " %0,%1,1,%%r0");
6855 else if (nullify && negated)
6856 {
6857 if (branch_needs_nop_p (insn))
6858 strcat (buf, ",n %0,%1,%3%#");
6859 else
6860 strcat (buf, ",n %0,%1,%3");
6861 }
6862 else if (nullify && ! negated)
6863 {
6864 if (branch_needs_nop_p (insn))
6865 strcat (buf, ",n %0,%1,%2%#");
6866 else
6867 strcat (buf, ",n %0,%1,%2");
6868 }
6869 else if (! nullify && negated)
6870 strcat (buf, " %0,%1,%3");
6871 else if (! nullify && ! negated)
6872 strcat (buf, " %0,%1,%2");
6873 break;
6874
6875 /* All long conditionals. Note a short backward branch with an
6876 unfilled delay slot is treated just like a long backward branch
6877 with an unfilled delay slot. */
6878 case 8:
6879 /* Handle weird backwards branch with a filled delay slot
6880 which is nullified. */
6881 if (dbr_sequence_length () != 0
6882 && ! forward_branch_p (insn)
6883 && nullify)
6884 {
6885 strcpy (buf, "bb,");
6886 if (GET_MODE (operands[0]) == DImode)
6887 strcat (buf, "*");
6888 if ((which == 0 && negated)
6889 || (which == 1 && ! negated))
6890 strcat (buf, "<");
6891 else
6892 strcat (buf, ">=");
6893 if (negated)
6894 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6895 else
6896 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6897 }
6898 /* Handle short backwards branch with an unfilled delay slot.
6899 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6900 taken and untaken branches. */
6901 else if (dbr_sequence_length () == 0
6902 && ! forward_branch_p (insn)
6903 && INSN_ADDRESSES_SET_P ()
6904 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6905 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6906 {
6907 strcpy (buf, "bb,");
6908 if (GET_MODE (operands[0]) == DImode)
6909 strcat (buf, "*");
6910 if ((which == 0 && negated)
6911 || (which == 1 && ! negated))
6912 strcat (buf, ">=");
6913 else
6914 strcat (buf, "<");
6915 if (negated)
6916 strcat (buf, " %0,%1,%3%#");
6917 else
6918 strcat (buf, " %0,%1,%2%#");
6919 }
6920 else
6921 {
6922 if (GET_MODE (operands[0]) == DImode)
6923 strcpy (buf, "extrd,s,*");
6924 else
6925 strcpy (buf, "{extrs,|extrw,s,}");
6926 if ((which == 0 && negated)
6927 || (which == 1 && ! negated))
6928 strcat (buf, "<");
6929 else
6930 strcat (buf, ">=");
6931 if (nullify && negated)
6932 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6933 else if (nullify && ! negated)
6934 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6935 else if (negated)
6936 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6937 else
6938 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6939 }
6940 break;
6941
6942 default:
6943 /* The reversed conditional branch must branch over one additional
6944 instruction if the delay slot is filled and needs to be extracted
6945 by pa_output_lbranch. If the delay slot is empty or this is a
6946 nullified forward branch, the instruction after the reversed
6947 condition branch must be nullified. */
6948 if (dbr_sequence_length () == 0
6949 || (nullify && forward_branch_p (insn)))
6950 {
6951 nullify = 1;
6952 xdelay = 0;
6953 operands[4] = GEN_INT (length);
6954 }
6955 else
6956 {
6957 xdelay = 1;
6958 operands[4] = GEN_INT (length + 4);
6959 }
6960
6961 if (GET_MODE (operands[0]) == DImode)
6962 strcpy (buf, "bb,*");
6963 else
6964 strcpy (buf, "bb,");
6965 if ((which == 0 && negated)
6966 || (which == 1 && !negated))
6967 strcat (buf, "<");
6968 else
6969 strcat (buf, ">=");
6970 if (nullify)
6971 strcat (buf, ",n %0,%1,.+%4");
6972 else
6973 strcat (buf, " %0,%1,.+%4");
6974 output_asm_insn (buf, operands);
6975 return pa_output_lbranch (negated ? operands[3] : operands[2],
6976 insn, xdelay);
6977 }
6978 return buf;
6979 }
6980
6981 /* This routine handles all the branch-on-variable-bit conditional branch
6982 sequences we might need to generate. It handles nullification of delay
6983 slots, varying length branches, negated branches and all combinations
6984 of the above. It returns the appropriate output template to emit the
6985 branch. */
6986
6987 const char *
6988 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
6989 int which)
6990 {
6991 static char buf[100];
6992 bool useskip;
6993 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6994 int length = get_attr_length (insn);
6995 int xdelay;
6996
6997 /* A conditional branch to the following instruction (e.g. the delay slot) is
6998 asking for a disaster. I do not think this can happen as this pattern
6999 is only used when optimizing; jump optimization should eliminate the
7000 jump. But be prepared just in case. */
7001
7002 if (branch_to_delay_slot_p (insn))
7003 return "nop";
7004
7005 /* If this is a long branch with its delay slot unfilled, set `nullify'
7006 as it can nullify the delay slot and save a nop. */
7007 if (length == 8 && dbr_sequence_length () == 0)
7008 nullify = 1;
7009
7010 /* If this is a short forward conditional branch which did not get
7011 its delay slot filled, the delay slot can still be nullified. */
7012 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7013 nullify = forward_branch_p (insn);
7014
7015 /* A forward branch over a single nullified insn can be done with an
7016 extrs instruction. This avoids a single cycle penalty due to
7017 mis-predicted branch if we fall through (branch not taken). */
7018 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
7019
7020 switch (length)
7021 {
7022
7023 /* All short conditional branches except backwards with an unfilled
7024 delay slot. */
7025 case 4:
7026 if (useskip)
7027 strcpy (buf, "{vextrs,|extrw,s,}");
7028 else
7029 strcpy (buf, "{bvb,|bb,}");
7030 if (useskip && GET_MODE (operands[0]) == DImode)
7031 strcpy (buf, "extrd,s,*");
7032 else if (GET_MODE (operands[0]) == DImode)
7033 strcpy (buf, "bb,*");
7034 if ((which == 0 && negated)
7035 || (which == 1 && ! negated))
7036 strcat (buf, ">=");
7037 else
7038 strcat (buf, "<");
7039 if (useskip)
7040 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
7041 else if (nullify && negated)
7042 {
7043 if (branch_needs_nop_p (insn))
7044 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7045 else
7046 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7047 }
7048 else if (nullify && ! negated)
7049 {
7050 if (branch_needs_nop_p (insn))
7051 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7052 else
7053 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7054 }
7055 else if (! nullify && negated)
7056 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7057 else if (! nullify && ! negated)
7058 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7059 break;
7060
7061 /* All long conditionals. Note a short backward branch with an
7062 unfilled delay slot is treated just like a long backward branch
7063 with an unfilled delay slot. */
7064 case 8:
7065 /* Handle weird backwards branch with a filled delay slot
7066 which is nullified. */
7067 if (dbr_sequence_length () != 0
7068 && ! forward_branch_p (insn)
7069 && nullify)
7070 {
7071 strcpy (buf, "{bvb,|bb,}");
7072 if (GET_MODE (operands[0]) == DImode)
7073 strcat (buf, "*");
7074 if ((which == 0 && negated)
7075 || (which == 1 && ! negated))
7076 strcat (buf, "<");
7077 else
7078 strcat (buf, ">=");
7079 if (negated)
7080 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7081 else
7082 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7083 }
7084 /* Handle short backwards branch with an unfilled delay slot.
7085 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7086 taken and untaken branches. */
7087 else if (dbr_sequence_length () == 0
7088 && ! forward_branch_p (insn)
7089 && INSN_ADDRESSES_SET_P ()
7090 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7091 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7092 {
7093 strcpy (buf, "{bvb,|bb,}");
7094 if (GET_MODE (operands[0]) == DImode)
7095 strcat (buf, "*");
7096 if ((which == 0 && negated)
7097 || (which == 1 && ! negated))
7098 strcat (buf, ">=");
7099 else
7100 strcat (buf, "<");
7101 if (negated)
7102 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7103 else
7104 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7105 }
7106 else
7107 {
7108 strcpy (buf, "{vextrs,|extrw,s,}");
7109 if (GET_MODE (operands[0]) == DImode)
7110 strcpy (buf, "extrd,s,*");
7111 if ((which == 0 && negated)
7112 || (which == 1 && ! negated))
7113 strcat (buf, "<");
7114 else
7115 strcat (buf, ">=");
7116 if (nullify && negated)
7117 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7118 else if (nullify && ! negated)
7119 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7120 else if (negated)
7121 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7122 else
7123 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7124 }
7125 break;
7126
7127 default:
7128 /* The reversed conditional branch must branch over one additional
7129 instruction if the delay slot is filled and needs to be extracted
7130 by pa_output_lbranch. If the delay slot is empty or this is a
7131 nullified forward branch, the instruction after the reversed
7132 condition branch must be nullified. */
7133 if (dbr_sequence_length () == 0
7134 || (nullify && forward_branch_p (insn)))
7135 {
7136 nullify = 1;
7137 xdelay = 0;
7138 operands[4] = GEN_INT (length);
7139 }
7140 else
7141 {
7142 xdelay = 1;
7143 operands[4] = GEN_INT (length + 4);
7144 }
7145
7146 if (GET_MODE (operands[0]) == DImode)
7147 strcpy (buf, "bb,*");
7148 else
7149 strcpy (buf, "{bvb,|bb,}");
7150 if ((which == 0 && negated)
7151 || (which == 1 && !negated))
7152 strcat (buf, "<");
7153 else
7154 strcat (buf, ">=");
7155 if (nullify)
7156 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7157 else
7158 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7159 output_asm_insn (buf, operands);
7160 return pa_output_lbranch (negated ? operands[3] : operands[2],
7161 insn, xdelay);
7162 }
7163 return buf;
7164 }
7165
7166 /* Return the output template for emitting a dbra type insn.
7167
7168 Note it may perform some output operations on its own before
7169 returning the final output string. */
7170 const char *
7171 pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
7172 {
7173 int length = get_attr_length (insn);
7174
7175 /* A conditional branch to the following instruction (e.g. the delay slot) is
7176 asking for a disaster. Be prepared! */
7177
7178 if (branch_to_delay_slot_p (insn))
7179 {
7180 if (which_alternative == 0)
7181 return "ldo %1(%0),%0";
7182 else if (which_alternative == 1)
7183 {
7184 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7185 output_asm_insn ("ldw -16(%%r30),%4", operands);
7186 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7187 return "{fldws|fldw} -16(%%r30),%0";
7188 }
7189 else
7190 {
7191 output_asm_insn ("ldw %0,%4", operands);
7192 return "ldo %1(%4),%4\n\tstw %4,%0";
7193 }
7194 }
7195
7196 if (which_alternative == 0)
7197 {
7198 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7199 int xdelay;
7200
7201 /* If this is a long branch with its delay slot unfilled, set `nullify'
7202 as it can nullify the delay slot and save a nop. */
7203 if (length == 8 && dbr_sequence_length () == 0)
7204 nullify = 1;
7205
7206 /* If this is a short forward conditional branch which did not get
7207 its delay slot filled, the delay slot can still be nullified. */
7208 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7209 nullify = forward_branch_p (insn);
7210
7211 switch (length)
7212 {
7213 case 4:
7214 if (nullify)
7215 {
7216 if (branch_needs_nop_p (insn))
7217 return "addib,%C2,n %1,%0,%3%#";
7218 else
7219 return "addib,%C2,n %1,%0,%3";
7220 }
7221 else
7222 return "addib,%C2 %1,%0,%3";
7223
7224 case 8:
7225 /* Handle weird backwards branch with a filled delay slot
7226 which is nullified. */
7227 if (dbr_sequence_length () != 0
7228 && ! forward_branch_p (insn)
7229 && nullify)
7230 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7231 /* Handle short backwards branch with an unfilled delay slot.
7232 Using an addb;nop rather than addi;bl saves 1 cycle for both
7233 taken and untaken branches. */
7234 else if (dbr_sequence_length () == 0
7235 && ! forward_branch_p (insn)
7236 && INSN_ADDRESSES_SET_P ()
7237 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7238 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7239 return "addib,%C2 %1,%0,%3%#";
7240
7241 /* Handle normal cases. */
7242 if (nullify)
7243 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7244 else
7245 return "addi,%N2 %1,%0,%0\n\tb %3";
7246
7247 default:
7248 /* The reversed conditional branch must branch over one additional
7249 instruction if the delay slot is filled and needs to be extracted
7250 by pa_output_lbranch. If the delay slot is empty or this is a
7251 nullified forward branch, the instruction after the reversed
7252 conditional branch must be nullified. */
7253 if (dbr_sequence_length () == 0
7254 || (nullify && forward_branch_p (insn)))
7255 {
7256 nullify = 1;
7257 xdelay = 0;
7258 operands[4] = GEN_INT (length);
7259 }
7260 else
7261 {
7262 xdelay = 1;
7263 operands[4] = GEN_INT (length + 4);
7264 }
7265
7266 if (nullify)
7267 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7268 else
7269 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7270
7271 return pa_output_lbranch (operands[3], insn, xdelay);
7272 }
7273
7274 }
7275 /* Deal with gross reload from FP register case. */
7276 else if (which_alternative == 1)
7277 {
7278 /* Move loop counter from FP register to MEM then into a GR,
7279 increment the GR, store the GR into MEM, and finally reload
7280 the FP register from MEM from within the branch's delay slot. */
7281 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7282 operands);
7283 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7284 if (length == 24)
7285 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7286 else if (length == 28)
7287 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7288 else
7289 {
7290 operands[5] = GEN_INT (length - 16);
7291 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7292 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7293 return pa_output_lbranch (operands[3], insn, 0);
7294 }
7295 }
7296 /* Deal with gross reload from memory case. */
7297 else
7298 {
7299 /* Reload the loop counter from memory; the store back to memory
7300 happens in the branch's delay slot. */
7301 output_asm_insn ("ldw %0,%4", operands);
7302 if (length == 12)
7303 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7304 else if (length == 16)
7305 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7306 else
7307 {
7308 operands[5] = GEN_INT (length - 4);
7309 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7310 return pa_output_lbranch (operands[3], insn, 0);
7311 }
7312 }
7313 }
7314
7315 /* Return the output template for emitting a movb type insn.
7316
7317 Note it may perform some output operations on its own before
7318 returning the final output string. */
7319 const char *
7320 pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
7321 int reverse_comparison)
7322 {
7323 int length = get_attr_length (insn);
7324
7325 /* A conditional branch to the following instruction (i.e., the delay slot) is
7326 asking for a disaster. Be prepared! */
7327
7328 if (branch_to_delay_slot_p (insn))
7329 {
7330 if (which_alternative == 0)
7331 return "copy %1,%0";
7332 else if (which_alternative == 1)
7333 {
7334 output_asm_insn ("stw %1,-16(%%r30)", operands);
7335 return "{fldws|fldw} -16(%%r30),%0";
7336 }
7337 else if (which_alternative == 2)
7338 return "stw %1,%0";
7339 else
7340 return "mtsar %r1";
7341 }
7342
7343 /* Support the second variant. */
7344 if (reverse_comparison)
7345 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7346
7347 if (which_alternative == 0)
7348 {
7349 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7350 int xdelay;
7351
7352 /* If this is a long branch with its delay slot unfilled, set `nullify'
7353 as it can nullify the delay slot and save a nop. */
7354 if (length == 8 && dbr_sequence_length () == 0)
7355 nullify = 1;
7356
7357 /* If this is a short forward conditional branch which did not get
7358 its delay slot filled, the delay slot can still be nullified. */
7359 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7360 nullify = forward_branch_p (insn);
7361
7362 switch (length)
7363 {
7364 case 4:
7365 if (nullify)
7366 {
7367 if (branch_needs_nop_p (insn))
7368 return "movb,%C2,n %1,%0,%3%#";
7369 else
7370 return "movb,%C2,n %1,%0,%3";
7371 }
7372 else
7373 return "movb,%C2 %1,%0,%3";
7374
7375 case 8:
7376 /* Handle weird backwards branch with a filled delay slot
7377 which is nullified. */
7378 if (dbr_sequence_length () != 0
7379 && ! forward_branch_p (insn)
7380 && nullify)
7381 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7382
7383 /* Handle short backwards branch with an unfilled delay slot.
7384 Using a movb;nop rather than or;bl saves 1 cycle for both
7385 taken and untaken branches. */
7386 else if (dbr_sequence_length () == 0
7387 && ! forward_branch_p (insn)
7388 && INSN_ADDRESSES_SET_P ()
7389 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7390 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7391 return "movb,%C2 %1,%0,%3%#";
7392 /* Handle normal cases. */
7393 if (nullify)
7394 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7395 else
7396 return "or,%N2 %1,%%r0,%0\n\tb %3";
7397
7398 default:
7399 /* The reversed conditional branch must branch over one additional
7400 instruction if the delay slot is filled and needs to be extracted
7401 by pa_output_lbranch. If the delay slot is empty or this is a
7402 nullified forward branch, the instruction after the reversed
7403 conditional branch must be nullified. */
7404 if (dbr_sequence_length () == 0
7405 || (nullify && forward_branch_p (insn)))
7406 {
7407 nullify = 1;
7408 xdelay = 0;
7409 operands[4] = GEN_INT (length);
7410 }
7411 else
7412 {
7413 xdelay = 1;
7414 operands[4] = GEN_INT (length + 4);
7415 }
7416
7417 if (nullify)
7418 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7419 else
7420 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7421
7422 return pa_output_lbranch (operands[3], insn, xdelay);
7423 }
7424 }
7425 /* Deal with gross reload for FP destination register case. */
7426 else if (which_alternative == 1)
7427 {
7428 /* Move source register to MEM, perform the branch test, then
7429 finally load the FP register from MEM from within the branch's
7430 delay slot. */
7431 output_asm_insn ("stw %1,-16(%%r30)", operands);
7432 if (length == 12)
7433 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7434 else if (length == 16)
7435 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7436 else
7437 {
7438 operands[4] = GEN_INT (length - 4);
7439 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7440 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7441 return pa_output_lbranch (operands[3], insn, 0);
7442 }
7443 }
7444 /* Deal with gross reload from memory case. */
7445 else if (which_alternative == 2)
7446 {
7447 /* Reload the loop counter from memory; the store back to memory
7448 happens in the branch's delay slot. */
7449 if (length == 8)
7450 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7451 else if (length == 12)
7452 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7453 else
7454 {
7455 operands[4] = GEN_INT (length);
7456 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7457 operands);
7458 return pa_output_lbranch (operands[3], insn, 0);
7459 }
7460 }
7461 /* Handle SAR as a destination. */
7462 else
7463 {
7464 if (length == 8)
7465 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7466 else if (length == 12)
7467 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7468 else
7469 {
7470 operands[4] = GEN_INT (length);
7471 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7472 operands);
7473 return pa_output_lbranch (operands[3], insn, 0);
7474 }
7475 }
7476 }
7477
7478 /* Copy any FP arguments in INSN into integer registers. */
7479 static void
7480 copy_fp_args (rtx_insn *insn)
7481 {
7482 rtx link;
7483 rtx xoperands[2];
7484
7485 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7486 {
7487 int arg_mode, regno;
7488 rtx use = XEXP (link, 0);
7489
7490 if (! (GET_CODE (use) == USE
7491 && GET_CODE (XEXP (use, 0)) == REG
7492 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7493 continue;
7494
7495 arg_mode = GET_MODE (XEXP (use, 0));
7496 regno = REGNO (XEXP (use, 0));
7497
7498 /* Is it a floating point register? */
7499 if (regno >= 32 && regno <= 39)
7500 {
7501 /* Copy the FP register into an integer register via memory. */
7502 if (arg_mode == SFmode)
7503 {
7504 xoperands[0] = XEXP (use, 0);
7505 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7506 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7507 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7508 }
7509 else
7510 {
7511 xoperands[0] = XEXP (use, 0);
7512 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7513 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7514 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7515 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7516 }
7517 }
7518 }
7519 }
7520
7521 /* Compute length of the FP argument copy sequence for INSN. */
7522 static int
7523 length_fp_args (rtx_insn *insn)
7524 {
7525 int length = 0;
7526 rtx link;
7527
7528 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7529 {
7530 int arg_mode, regno;
7531 rtx use = XEXP (link, 0);
7532
7533 if (! (GET_CODE (use) == USE
7534 && GET_CODE (XEXP (use, 0)) == REG
7535 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7536 continue;
7537
7538 arg_mode = GET_MODE (XEXP (use, 0));
7539 regno = REGNO (XEXP (use, 0));
7540
7541 /* Is it a floating point register? */
7542 if (regno >= 32 && regno <= 39)
7543 {
7544 if (arg_mode == SFmode)
7545 length += 8;
7546 else
7547 length += 12;
7548 }
7549 }
7550
7551 return length;
7552 }
7553
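/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): the byte counts above mirror the sequences emitted by
   copy_fp_args -- two 4-byte insns for SFmode (fstw + ldw) and three
   for DFmode (fstd + ldw + ldw).  */
#if 0
static int
fp_arg_copy_length (int arg_is_dfmode)
{
  const int insn_bytes = 4;            /* every PA insn is 4 bytes */
  int ninsns = arg_is_dfmode ? 3 : 2;  /* fstd+2*ldw vs. fstw+ldw */
  return ninsns * insn_bytes;          /* 12 or 8, as above */
}
#endif
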
7554 /* Return the attribute length for the millicode call instruction INSN.
7555 The length must match the code generated by pa_output_millicode_call.
7556 We include the delay slot in the returned length as it is better to
7557 overestimate the length than to underestimate it. */
7558
7559 int
7560 pa_attr_length_millicode_call (rtx_insn *insn)
7561 {
7562 unsigned long distance = -1;
7563 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7564
7565 if (INSN_ADDRESSES_SET_P ())
7566 {
7567 distance = (total + insn_current_reference_address (insn));
7568 if (distance < total)
7569 distance = -1;
7570 }
7571
7572 if (TARGET_64BIT)
7573 {
7574 if (!TARGET_LONG_CALLS && distance < 7600000)
7575 return 8;
7576
7577 return 20;
7578 }
7579 else if (TARGET_PORTABLE_RUNTIME)
7580 return 24;
7581 else
7582 {
7583 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7584 return 8;
7585
7586 if (!flag_pic)
7587 return 12;
7588
7589 return 24;
7590 }
7591 }
7592
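/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): the distance computation above relies on unsigned wraparound.
   If TOTAL plus the reference address overflows, the sum compares less
   than TOTAL, and the distance is pinned at the maximum unsigned value,
   which fails every range test and selects the long sequence.  The same
   idiom reappears in pa_attr_length_call and
   pa_attr_length_indirect_call below.  */
#if 0
static unsigned long
branch_distance_estimate (unsigned long total, unsigned long ref_address)
{
  unsigned long distance = total + ref_address;
  if (distance < total)                 /* addition wrapped around */
    distance = (unsigned long) -1;      /* treat as "out of range" */
  return distance;
}
#endif
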
7593 /* INSN is a function call.
7594
7595 CALL_DEST is the routine we are calling. */
7596
7597 const char *
7598 pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
7599 {
7600 int attr_length = get_attr_length (insn);
7601 int seq_length = dbr_sequence_length ();
7602 rtx xoperands[3];
7603
7604 xoperands[0] = call_dest;
7605 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7606
7607 /* Handle the common case where we are sure that the branch will
7608 reach the beginning of the $CODE$ subspace. The within-reach
7609 form of the $$sh_func_adrs call has a length of 28. Because it
7610 has an attribute type of sh_func_adrs, it never has a nonzero
7611 sequence length (i.e., the delay slot is never filled). */
7612 if (!TARGET_LONG_CALLS
7613 && (attr_length == 8
7614 || (attr_length == 28
7615 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7616 {
7617 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7618 }
7619 else
7620 {
7621 if (TARGET_64BIT)
7622 {
7623 /* It might seem that one insn could be saved by accessing
7624 the millicode function using the linkage table. However,
7625 this doesn't work in shared libraries and other dynamically
7626 loaded objects. Using a pc-relative sequence also avoids
7627 problems related to the implicit use of the gp register. */
7628 output_asm_insn ("b,l .+8,%%r1", xoperands);
7629
7630 if (TARGET_GAS)
7631 {
7632 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7633 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7634 }
7635 else
7636 {
7637 xoperands[1] = gen_label_rtx ();
7638 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7639 targetm.asm_out.internal_label (asm_out_file, "L",
7640 CODE_LABEL_NUMBER (xoperands[1]));
7641 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7642 }
7643
7644 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7645 }
7646 else if (TARGET_PORTABLE_RUNTIME)
7647 {
7648 /* Pure portable runtime doesn't allow be/ble; we also don't
7649 have PIC support in the assembler/linker, so this sequence
7650 is needed. */
7651
7652 /* Get the address of our target into %r1. */
7653 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7654 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7655
7656 /* Get our return address into %r31. */
7657 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7658 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7659
7660 /* Jump to our target address in %r1. */
7661 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7662 }
7663 else if (!flag_pic)
7664 {
7665 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7666 if (TARGET_PA_20)
7667 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7668 else
7669 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7670 }
7671 else
7672 {
7673 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7674 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7675
7676 if (TARGET_SOM || !TARGET_GAS)
7677 {
7678 /* The HP assembler can generate relocations for the
7679 difference of two symbols. GAS can do this for a
7680 millicode symbol but not an arbitrary external
7681 symbol when generating SOM output. */
7682 xoperands[1] = gen_label_rtx ();
7683 targetm.asm_out.internal_label (asm_out_file, "L",
7684 CODE_LABEL_NUMBER (xoperands[1]));
7685 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7686 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7687 }
7688 else
7689 {
7690 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7691 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7692 xoperands);
7693 }
7694
7695 /* Jump to our target address in %r1. */
7696 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7697 }
7698 }
7699
7700 if (seq_length == 0)
7701 output_asm_insn ("nop", xoperands);
7702
7703 return "";
7704 }
7705
7706 /* Return the attribute length of the call instruction INSN. The SIBCALL
7707 flag indicates whether INSN is a regular call or a sibling call. The
7708 length returned must be longer than the code actually generated by
7709 pa_output_call. Since branch shortening is done before delay branch
7710 sequencing, there is no way to determine whether or not the delay
7711 slot will be filled during branch shortening. Even when the delay
7712 slot is filled, we may have to add a nop if the delay slot contains
7713 a branch that can't reach its target. Thus, we always have to include
7714 the delay slot in the length estimate. This used to be done in
7715 pa_adjust_insn_length but we do it here now as some sequences always
7716 fill the delay slot and we can save four bytes in the estimate for
7717 these sequences. */
7718
7719 int
7720 pa_attr_length_call (rtx_insn *insn, int sibcall)
7721 {
7722 int local_call;
7723 rtx call, call_dest;
7724 tree call_decl;
7725 int length = 0;
7726 rtx pat = PATTERN (insn);
7727 unsigned long distance = -1;
7728
7729 gcc_assert (CALL_P (insn));
7730
7731 if (INSN_ADDRESSES_SET_P ())
7732 {
7733 unsigned long total;
7734
7735 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7736 distance = (total + insn_current_reference_address (insn));
7737 if (distance < total)
7738 distance = -1;
7739 }
7740
7741 gcc_assert (GET_CODE (pat) == PARALLEL);
7742
7743 /* Get the call rtx. */
7744 call = XVECEXP (pat, 0, 0);
7745 if (GET_CODE (call) == SET)
7746 call = SET_SRC (call);
7747
7748 gcc_assert (GET_CODE (call) == CALL);
7749
7750 /* Determine if this is a local call. */
7751 call_dest = XEXP (XEXP (call, 0), 0);
7752 call_decl = SYMBOL_REF_DECL (call_dest);
7753 local_call = call_decl && targetm.binds_local_p (call_decl);
7754
7755 /* pc-relative branch. */
7756 if (!TARGET_LONG_CALLS
7757 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7758 || distance < MAX_PCREL17F_OFFSET))
7759 length += 8;
7760
7761 /* 64-bit plabel sequence. */
7762 else if (TARGET_64BIT && !local_call)
7763 length += sibcall ? 28 : 24;
7764
7765 /* non-pic long absolute branch sequence. */
7766 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7767 length += 12;
7768
7769 /* long pc-relative branch sequence. */
7770 else if (TARGET_LONG_PIC_SDIFF_CALL
7771 || (TARGET_GAS && !TARGET_SOM
7772 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7773 {
7774 length += 20;
7775
7776 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7777 length += 8;
7778 }
7779
7780 /* 32-bit plabel sequence. */
7781 else
7782 {
7783 length += 32;
7784
7785 if (TARGET_SOM)
7786 length += length_fp_args (insn);
7787
7788 if (flag_pic)
7789 length += 4;
7790
7791 if (!TARGET_PA_20)
7792 {
7793 if (!sibcall)
7794 length += 8;
7795
7796 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7797 length += 8;
7798 }
7799 }
7800
7801 return length;
7802 }
7803
7804 /* INSN is a function call.
7805
7806 CALL_DEST is the routine we are calling. */
7807
7808 const char *
7809 pa_output_call (rtx_insn *insn, rtx call_dest, int sibcall)
7810 {
7811 int seq_length = dbr_sequence_length ();
7812 tree call_decl = SYMBOL_REF_DECL (call_dest);
7813 int local_call = call_decl && targetm.binds_local_p (call_decl);
7814 rtx xoperands[2];
7815
7816 xoperands[0] = call_dest;
7817
7818 /* Handle the common case where we're sure that the branch will reach
7819 the beginning of the "$CODE$" subspace. This is the beginning of
7820 the current function if we are in a named section. */
7821 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7822 {
7823 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7824 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7825 }
7826 else
7827 {
7828 if (TARGET_64BIT && !local_call)
7829 {
7830 /* ??? As far as I can tell, the HP linker doesn't support the
7831 long pc-relative sequence described in the 64-bit runtime
7832 architecture. So, we use a slightly longer indirect call. */
7833 xoperands[0] = pa_get_deferred_plabel (call_dest);
7834 xoperands[1] = gen_label_rtx ();
7835
7836 /* If this isn't a sibcall, we put the load of %r27 into the
7837 delay slot. We can't do this in a sibcall as we don't
7838 have a second call-clobbered scratch register available.
7839 We don't need to do anything when generating fast indirect
7840 calls. */
7841 if (seq_length != 0 && !sibcall)
7842 {
7843 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7844 optimize, 0, NULL);
7845
7846 /* Now delete the delay insn. */
7847 SET_INSN_DELETED (NEXT_INSN (insn));
7848 seq_length = 0;
7849 }
7850
7851 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7852 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7853 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7854
7855 if (sibcall)
7856 {
7857 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7858 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7859 output_asm_insn ("bve (%%r1)", xoperands);
7860 }
7861 else
7862 {
7863 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7864 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7865 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7866 seq_length = 1;
7867 }
7868 }
7869 else
7870 {
7871 int indirect_call = 0;
7872
7873 /* Emit a long call. There are several different sequences
7874 of increasing length and complexity. In most cases,
7875 they don't allow an instruction in the delay slot. */
7876 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7877 && !TARGET_LONG_PIC_SDIFF_CALL
7878 && !(TARGET_GAS && !TARGET_SOM
7879 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7880 && !TARGET_64BIT)
7881 indirect_call = 1;
7882
7883 if (seq_length != 0
7884 && !sibcall
7885 && (!TARGET_PA_20
7886 || indirect_call
7887 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7888 {
7889 /* A non-jump insn in the delay slot. By definition we can
7890 emit this insn before the call (and in fact before argument
7891 relocating). */
7892 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7893 NULL);
7894
7895 /* Now delete the delay insn. */
7896 SET_INSN_DELETED (NEXT_INSN (insn));
7897 seq_length = 0;
7898 }
7899
7900 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7901 {
7902 /* This is the best sequence for making long calls in
7903 non-pic code. Unfortunately, GNU ld doesn't provide
7904 the stub needed for external calls, and GAS's support
7905 for this with the SOM linker is buggy. It is safe
7906 to use this for local calls. */
7907 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7908 if (sibcall)
7909 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7910 else
7911 {
7912 if (TARGET_PA_20)
7913 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7914 xoperands);
7915 else
7916 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7917
7918 output_asm_insn ("copy %%r31,%%r2", xoperands);
7919 seq_length = 1;
7920 }
7921 }
7922 else
7923 {
7924 if (TARGET_LONG_PIC_SDIFF_CALL)
7925 {
7926 /* The HP assembler and linker can handle relocations
7927 for the difference of two symbols. The HP assembler
7928 recognizes the sequence as a pc-relative call and
7929 the linker provides stubs when needed. */
7930 xoperands[1] = gen_label_rtx ();
7931 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7932 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7933 targetm.asm_out.internal_label (asm_out_file, "L",
7934 CODE_LABEL_NUMBER (xoperands[1]));
7935 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7936 }
7937 else if (TARGET_GAS && !TARGET_SOM
7938 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7939 {
7940 /* GAS currently can't generate the relocations that
7941 are needed for the SOM linker under HP-UX using this
7942 sequence. The GNU linker doesn't generate the stubs
7943 that are needed for external calls on TARGET_ELF32
7944 with this sequence. For now, we have to use a
7945 longer plabel sequence when using GAS. */
7946 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7947 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7948 xoperands);
7949 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7950 xoperands);
7951 }
7952 else
7953 {
7954 /* Emit a long plabel-based call sequence. This is
7955 essentially an inline implementation of $$dyncall.
7956 We don't actually try to call $$dyncall as this is
7957 as difficult as calling the function itself. */
7958 xoperands[0] = pa_get_deferred_plabel (call_dest);
7959 xoperands[1] = gen_label_rtx ();
7960
7961 /* Since the call is indirect, FP arguments in registers
7962 need to be copied to the general registers. Then, the
7963 argument relocation stub will copy them back. */
7964 if (TARGET_SOM)
7965 copy_fp_args (insn);
7966
7967 if (flag_pic)
7968 {
7969 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7970 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7971 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7972 }
7973 else
7974 {
7975 output_asm_insn ("addil LR'%0-$global$,%%r27",
7976 xoperands);
7977 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7978 xoperands);
7979 }
7980
7981 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7982 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7983 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7984 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7985
7986 if (!sibcall && !TARGET_PA_20)
7987 {
7988 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7989 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7990 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7991 else
7992 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7993 }
7994 }
7995
7996 if (TARGET_PA_20)
7997 {
7998 if (sibcall)
7999 output_asm_insn ("bve (%%r1)", xoperands);
8000 else
8001 {
8002 if (indirect_call)
8003 {
8004 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8005 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8006 seq_length = 1;
8007 }
8008 else
8009 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8010 }
8011 }
8012 else
8013 {
8014 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8015 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8016 xoperands);
8017
8018 if (sibcall)
8019 {
8020 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8021 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8022 else
8023 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8024 }
8025 else
8026 {
8027 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8028 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8029 else
8030 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8031
8032 if (indirect_call)
8033 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8034 else
8035 output_asm_insn ("copy %%r31,%%r2", xoperands);
8036 seq_length = 1;
8037 }
8038 }
8039 }
8040 }
8041 }
8042
8043 if (seq_length == 0)
8044 output_asm_insn ("nop", xoperands);
8045
8046 return "";
8047 }
8048
8049 /* Return the attribute length of the indirect call instruction INSN.
8050 The length must match the code generated by pa_output_indirect_call.
8051 The returned length includes the delay slot. Currently, the delay
8052 slot of an indirect call sequence is not exposed and it is used by
8053 the sequence itself. */
8054
8055 int
8056 pa_attr_length_indirect_call (rtx_insn *insn)
8057 {
8058 unsigned long distance = -1;
8059 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8060
8061 if (INSN_ADDRESSES_SET_P ())
8062 {
8063 distance = (total + insn_current_reference_address (insn));
8064 if (distance < total)
8065 distance = -1;
8066 }
8067
8068 if (TARGET_64BIT)
8069 return 12;
8070
8071 if (TARGET_FAST_INDIRECT_CALLS
8072 || (!TARGET_LONG_CALLS
8073 && !TARGET_PORTABLE_RUNTIME
8074 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8075 || distance < MAX_PCREL17F_OFFSET)))
8076 return 8;
8077
8078 if (flag_pic)
8079 return 20;
8080
8081 if (TARGET_PORTABLE_RUNTIME)
8082 return 16;
8083
8084 /* Out of reach, can use ble. */
8085 return 12;
8086 }
8087
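/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): pa_output_indirect_call below dispatches on the length
   computed here, so the two functions must stay in lockstep.  Each PA
   insn is 4 bytes and the delay slot is included in the length.  */
#if 0
static const char *
indirect_call_sequence_kind (int attr_length)
{
  switch (attr_length)
    {
    case 8:  return "short branch to $$dyncall";
    case 12: return "long absolute call to $$dyncall (or 64-bit ldd/bve,l)";
    case 16: return "portable runtime sequence";
    case 20: return "long PIC call to $$dyncall";
    default: return "unexpected length";
    }
}
#endif
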
8088 const char *
8089 pa_output_indirect_call (rtx_insn *insn, rtx call_dest)
8090 {
8091 rtx xoperands[1];
8092
8093 if (TARGET_64BIT)
8094 {
8095 xoperands[0] = call_dest;
8096 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8097 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8098 return "";
8099 }
8100
8101 /* First the special case for kernels, level 0 systems, etc. */
8102 if (TARGET_FAST_INDIRECT_CALLS)
8103 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8104
8105 /* Now the normal case -- we can reach $$dyncall directly or
8106 we're sure that we can get there via a long-branch stub.
8107
8108 No need to check target flags as the length uniquely identifies
8109 the remaining cases. */
8110 if (pa_attr_length_indirect_call (insn) == 8)
8111 {
8112 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8113 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8114 variant of the B,L instruction can't be used on the SOM target. */
8115 if (TARGET_PA_20 && !TARGET_SOM)
8116 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8117 else
8118 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8119 }
8120
8121 /* Long millicode call, but we are not generating PIC or portable runtime
8122 code. */
8123 if (pa_attr_length_indirect_call (insn) == 12)
8124 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8125
8126 /* Long millicode call for portable runtime. */
8127 if (pa_attr_length_indirect_call (insn) == 16)
8128 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)";
8129
8130 /* We need a long PIC call to $$dyncall. */
8131 xoperands[0] = NULL_RTX;
8132 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
8133 if (TARGET_SOM || !TARGET_GAS)
8134 {
8135 xoperands[0] = gen_label_rtx ();
8136 output_asm_insn ("addil L'$$dyncall-%0,%%r2", xoperands);
8137 targetm.asm_out.internal_label (asm_out_file, "L",
8138 CODE_LABEL_NUMBER (xoperands[0]));
8139 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8140 }
8141 else
8142 {
8143 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r2", xoperands);
8144 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8145 xoperands);
8146 }
8147 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8148 output_asm_insn ("ldo 12(%%r2),%%r2", xoperands);
8149 return "";
8150 }
8151
8152 /* In HP-UX 8.0's shared library scheme, special relocations are needed
8153 for function labels if they might be passed to a function
8154 in a shared library (because shared libraries don't live in code
8155 space), and special magic is needed to construct their address. */
8156
8157 void
8158 pa_encode_label (rtx sym)
8159 {
8160 const char *str = XSTR (sym, 0);
8161 int len = strlen (str) + 1;
8162 char *newstr, *p;
8163
8164 p = newstr = XALLOCAVEC (char, len + 1);
8165 *p++ = '@';
8166 strcpy (p, str);
8167
8168 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8169 }
8170
8171 static void
8172 pa_encode_section_info (tree decl, rtx rtl, int first)
8173 {
8174 int old_referenced = 0;
8175
8176 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8177 old_referenced
8178 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8179
8180 default_encode_section_info (decl, rtl, first);
8181
8182 if (first && TEXT_SPACE_P (decl))
8183 {
8184 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8185 if (TREE_CODE (decl) == FUNCTION_DECL)
8186 pa_encode_label (XEXP (rtl, 0));
8187 }
8188 else if (old_referenced)
8189 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8190 }
8191
8192 /* This is sort of inverse to pa_encode_section_info. */
8193
8194 static const char *
8195 pa_strip_name_encoding (const char *str)
8196 {
8197 str += (*str == '@');
8198 str += (*str == '*');
8199 return str;
8200 }
8201
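/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): pa_encode_label prepends '@' and pa_strip_name_encoding
   removes a leading '@' and then a leading '*' (the marker GCC uses
   elsewhere for verbatim assembler names), so stripping an encoded
   name recovers the original:

     "@foo"  -> "foo"
     "*bar"  -> "bar"
     "@*baz" -> "baz"  */
#if 0
static const char *
strip_name_encoding_model (const char *str)
{
  if (*str == '@')      /* function label marker added by pa_encode_label */
    str++;
  if (*str == '*')      /* verbatim-name marker */
    str++;
  return str;
}
#endif
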
8202 /* Returns 1 if OP is a function label involved in a simple addition
8203 with a constant. Used to keep certain patterns from matching
8204 during instruction combination. */
8205 int
8206 pa_is_function_label_plus_const (rtx op)
8207 {
8208 /* Strip off any CONST. */
8209 if (GET_CODE (op) == CONST)
8210 op = XEXP (op, 0);
8211
8212 return (GET_CODE (op) == PLUS
8213 && function_label_operand (XEXP (op, 0), VOIDmode)
8214 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8215 }
8216
8217 /* Output assembly code for a thunk to FUNCTION. */
8218
8219 static void
8220 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8221 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8222 tree function)
8223 {
8224 static unsigned int current_thunk_number;
8225 int val_14 = VAL_14_BITS_P (delta);
8226 unsigned int old_last_address = last_address, nbytes = 0;
8227 char label[16];
8228 rtx xoperands[4];
8229
8230 xoperands[0] = XEXP (DECL_RTL (function), 0);
8231 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8232 xoperands[2] = GEN_INT (delta);
8233
8234 final_start_function (emit_barrier (), file, 1);
8235
8236 /* Output the thunk. We know that the function is in the same
8237 translation unit (i.e., the same space) as the thunk, and that
8238 thunks are output after their method. Thus, we don't need an
8239 external branch to reach the function. With SOM and GAS,
8240 functions and thunks are effectively in different sections.
8241 Thus, we can always use an IA-relative branch and the linker
8242 will add a long branch stub if necessary.
8243
8244 However, we have to be careful when generating PIC code on the
8245 SOM port to ensure that the sequence does not transfer to an
8246 import stub for the target function as this could clobber the
8247 return value saved at SP-24. This would also apply to the
8248 32-bit linux port if the multi-space model is implemented. */
8249 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8250 && !(flag_pic && TREE_PUBLIC (function))
8251 && (TARGET_GAS || last_address < 262132))
8252 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8253 && ((targetm_common.have_named_sections
8254 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8255 /* The GNU 64-bit linker has rather poor stub management.
8256 So, we use a long branch from thunks that aren't in
8257 the same section as the target function. */
8258 && ((!TARGET_64BIT
8259 && (DECL_SECTION_NAME (thunk_fndecl)
8260 != DECL_SECTION_NAME (function)))
8261 || ((DECL_SECTION_NAME (thunk_fndecl)
8262 == DECL_SECTION_NAME (function))
8263 && last_address < 262132)))
8264 /* In this case, we need to be able to reach the start of
8265 the stub table even though the function is likely closer
8266 and can be jumped to directly. */
8267 || (targetm_common.have_named_sections
8268 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8269 && DECL_SECTION_NAME (function) == NULL
8270 && total_code_bytes < MAX_PCREL17F_OFFSET)
8271 /* Likewise. */
8272 || (!targetm_common.have_named_sections
8273 && total_code_bytes < MAX_PCREL17F_OFFSET))))
8274 {
8275 if (!val_14)
8276 output_asm_insn ("addil L'%2,%%r26", xoperands);
8277
8278 output_asm_insn ("b %0", xoperands);
8279
8280 if (val_14)
8281 {
8282 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8283 nbytes += 8;
8284 }
8285 else
8286 {
8287 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8288 nbytes += 12;
8289 }
8290 }
8291 else if (TARGET_64BIT)
8292 {
8293 /* We only have one call-clobbered scratch register, so we can't
8294 make use of the delay slot if delta doesn't fit in 14 bits. */
8295 if (!val_14)
8296 {
8297 output_asm_insn ("addil L'%2,%%r26", xoperands);
8298 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8299 }
8300
8301 output_asm_insn ("b,l .+8,%%r1", xoperands);
8302
8303 if (TARGET_GAS)
8304 {
8305 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8306 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8307 }
8308 else
8309 {
8310 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8311 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8312 }
8313
8314 if (val_14)
8315 {
8316 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8317 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8318 nbytes += 20;
8319 }
8320 else
8321 {
8322 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8323 nbytes += 24;
8324 }
8325 }
8326 else if (TARGET_PORTABLE_RUNTIME)
8327 {
8328 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8329 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8330
8331 if (!val_14)
8332 output_asm_insn ("addil L'%2,%%r26", xoperands);
8333
8334 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8335
8336 if (val_14)
8337 {
8338 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8339 nbytes += 16;
8340 }
8341 else
8342 {
8343 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8344 nbytes += 20;
8345 }
8346 }
8347 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8348 {
8349 /* The function is accessible from outside this module. The only
8350 way to avoid an import stub between the thunk and function is to
8351 call the function directly with an indirect sequence similar to
8352 that used by $$dyncall. This is possible because $$dyncall acts
8353 as the import stub in an indirect call. */
8354 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8355 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8356 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8357 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8358 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8359 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8360 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8361 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8362 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8363
8364 if (!val_14)
8365 {
8366 output_asm_insn ("addil L'%2,%%r26", xoperands);
8367 nbytes += 4;
8368 }
8369
8370 if (TARGET_PA_20)
8371 {
8372 output_asm_insn ("bve (%%r22)", xoperands);
8373 nbytes += 36;
8374 }
8375 else if (TARGET_NO_SPACE_REGS)
8376 {
8377 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8378 nbytes += 36;
8379 }
8380 else
8381 {
8382 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8383 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8384 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8385 nbytes += 44;
8386 }
8387
8388 if (val_14)
8389 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8390 else
8391 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8392 }
8393 else if (flag_pic)
8394 {
8395 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8396
8397 if (TARGET_SOM || !TARGET_GAS)
8398 {
8399 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8400 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8401 }
8402 else
8403 {
8404 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8405 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8406 }
8407
8408 if (!val_14)
8409 output_asm_insn ("addil L'%2,%%r26", xoperands);
8410
8411 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8412
8413 if (val_14)
8414 {
8415 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8416 nbytes += 20;
8417 }
8418 else
8419 {
8420 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8421 nbytes += 24;
8422 }
8423 }
8424 else
8425 {
8426 if (!val_14)
8427 output_asm_insn ("addil L'%2,%%r26", xoperands);
8428
8429 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8430 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8431
8432 if (val_14)
8433 {
8434 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8435 nbytes += 12;
8436 }
8437 else
8438 {
8439 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8440 nbytes += 16;
8441 }
8442 }
8443
8444 final_end_function ();
8445
8446 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8447 {
8448 switch_to_section (data_section);
8449 output_asm_insn (".align 4", xoperands);
8450 ASM_OUTPUT_LABEL (file, label);
8451 output_asm_insn (".word P'%0", xoperands);
8452 }
8453
8454 current_thunk_number++;
8455 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8456 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8457 last_address += nbytes;
8458 if (old_last_address > last_address)
8459 last_address = UINT_MAX;
8460 update_total_code_bytes (nbytes);
8461 }
8462
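/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): the size bookkeeping at the end of pa_asm_output_mi_thunk
   rounds NBYTES up to the next multiple of the function alignment with
   the usual power-of-two mask idiom.  */
#if 0
static unsigned int
round_up_to_function_boundary (unsigned int nbytes, unsigned int boundary)
{
  /* BOUNDARY must be a power of two, as FUNCTION_BOUNDARY is.  */
  return (nbytes + boundary - 1) & ~(boundary - 1);
}
#endif
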
8463 /* Only direct calls to static functions are allowed to be sibling (tail)
8464 call optimized.
8465
8466 This restriction is necessary because some linker-generated stubs will
8467 store return pointers into rp' in some cases which might clobber a
8468 live value already in rp'.
8469
8470 In a sibcall the current function and the target function share stack
8471 space. Thus if the path to the current function and the path to the
8472 target function save a value in rp', they save the value into the
8473 same stack slot, which has undesirable consequences.
8474
8475 Because of the deferred binding nature of shared libraries, any function
8476 with external scope could be in a different load module and thus require
8477 rp' to be saved when calling that function. So sibcall optimizations
8478 can only be safe for static functions.
8479
8480 Note that GCC never needs return value relocations, so we don't have to
8481 worry about static calls with return value relocations (which require
8482 saving rp').
8483
8484 It is safe to perform a sibcall optimization when the target function
8485 will never return. */
8486 static bool
8487 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8488 {
8489 if (TARGET_PORTABLE_RUNTIME)
8490 return false;
8491
8492 /* Sibcalls are not ok because the arg pointer register is not a fixed
8493 register. This prevents the sibcall optimization from occurring. In
8494 addition, there are problems with stub placement using GNU ld. This
8495 is because a normal sibcall branch uses a 17-bit relocation while
8496 a regular call branch uses a 22-bit relocation. As a result, more
8497 care needs to be taken in the placement of long-branch stubs. */
8498 if (TARGET_64BIT)
8499 return false;
8500
8501 /* Sibcalls are only ok within a translation unit. */
8502 return (decl && !TREE_PUBLIC (decl));
8503 }
8504
8505 /* ??? Addition is not commutative on the PA due to the weird implicit
8506 space register selection rules for memory addresses. Therefore, we
8507 don't consider a + b == b + a, as this might be inside a MEM. */
8508 static bool
8509 pa_commutative_p (const_rtx x, int outer_code)
8510 {
8511 return (COMMUTATIVE_P (x)
8512 && (TARGET_NO_SPACE_REGS
8513 || (outer_code != UNKNOWN && outer_code != MEM)
8514 || GET_CODE (x) != PLUS));
8515 }
8516
8517 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8518 use in fmpyadd instructions. */
8519 int
8520 pa_fmpyaddoperands (rtx *operands)
8521 {
8522 machine_mode mode = GET_MODE (operands[0]);
8523
8524 /* Must be a floating point mode. */
8525 if (mode != SFmode && mode != DFmode)
8526 return 0;
8527
8528 /* All modes must be the same. */
8529 if (! (mode == GET_MODE (operands[1])
8530 && mode == GET_MODE (operands[2])
8531 && mode == GET_MODE (operands[3])
8532 && mode == GET_MODE (operands[4])
8533 && mode == GET_MODE (operands[5])))
8534 return 0;
8535
8536 /* All operands must be registers. */
8537 if (! (GET_CODE (operands[1]) == REG
8538 && GET_CODE (operands[2]) == REG
8539 && GET_CODE (operands[3]) == REG
8540 && GET_CODE (operands[4]) == REG
8541 && GET_CODE (operands[5]) == REG))
8542 return 0;
8543
8544 /* Only 2 real operands to the addition. One of the input operands must
8545 be the same as the output operand. */
8546 if (! rtx_equal_p (operands[3], operands[4])
8547 && ! rtx_equal_p (operands[3], operands[5]))
8548 return 0;
8549
8550 /* Inout operand of add cannot conflict with any operands from multiply. */
8551 if (rtx_equal_p (operands[3], operands[0])
8552 || rtx_equal_p (operands[3], operands[1])
8553 || rtx_equal_p (operands[3], operands[2]))
8554 return 0;
8555
8556 /* Multiply cannot feed into addition operands. */
8557 if (rtx_equal_p (operands[4], operands[0])
8558 || rtx_equal_p (operands[5], operands[0]))
8559 return 0;
8560
8561 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8562 if (mode == SFmode
8563 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8564 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8565 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8566 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8567 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8568 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8569 return 0;
8570
8571 /* Passed. Operands are suitable for fmpyadd. */
8572 return 1;
8573 }
8574
8575 #if !defined(USE_COLLECT2)
8576 static void
8577 pa_asm_out_constructor (rtx symbol, int priority)
8578 {
8579 if (!function_label_operand (symbol, VOIDmode))
8580 pa_encode_label (symbol);
8581
8582 #ifdef CTORS_SECTION_ASM_OP
8583 default_ctor_section_asm_out_constructor (symbol, priority);
8584 #else
8585 # ifdef TARGET_ASM_NAMED_SECTION
8586 default_named_section_asm_out_constructor (symbol, priority);
8587 # else
8588 default_stabs_asm_out_constructor (symbol, priority);
8589 # endif
8590 #endif
8591 }
8592
8593 static void
8594 pa_asm_out_destructor (rtx symbol, int priority)
8595 {
8596 if (!function_label_operand (symbol, VOIDmode))
8597 pa_encode_label (symbol);
8598
8599 #ifdef DTORS_SECTION_ASM_OP
8600 default_dtor_section_asm_out_destructor (symbol, priority);
8601 #else
8602 # ifdef TARGET_ASM_NAMED_SECTION
8603 default_named_section_asm_out_destructor (symbol, priority);
8604 # else
8605 default_stabs_asm_out_destructor (symbol, priority);
8606 # endif
8607 #endif
8608 }
8609 #endif
8610
8611 /* This function places uninitialized global data in the bss section.
8612 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8613 function on the SOM port to prevent uninitialized global data from
8614 being placed in the data section. */
8615
8616 void
8617 pa_asm_output_aligned_bss (FILE *stream,
8618 const char *name,
8619 unsigned HOST_WIDE_INT size,
8620 unsigned int align)
8621 {
8622 switch_to_section (bss_section);
8623 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8624
8625 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8626 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8627 #endif
8628
8629 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8630 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8631 #endif
8632
8633 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8634 ASM_OUTPUT_LABEL (stream, name);
8635 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8636 }
8637
8638 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8639 that doesn't allow the alignment of global common storage to be directly
8640 specified. The SOM linker aligns common storage based on the rounded
8641 value of the NUM_BYTES parameter in the .comm directive. It's not
8642 possible to use the .align directive as it doesn't affect the alignment
8643 of the label associated with a .comm directive. */
8644
8645 void
8646 pa_asm_output_aligned_common (FILE *stream,
8647 const char *name,
8648 unsigned HOST_WIDE_INT size,
8649 unsigned int align)
8650 {
8651 unsigned int max_common_align;
8652
8653 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8654 if (align > max_common_align)
8655 {
8656 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8657 "for global common data. Using %u",
8658 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8659 align = max_common_align;
8660 }
8661
8662 switch_to_section (bss_section);
8663
8664 assemble_name (stream, name);
8665 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8666 MAX (size, align / BITS_PER_UNIT));
8667 }
8668
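/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): because the SOM linker derives the alignment of common storage
   from the rounded size operand of .comm, the only way to request a
   larger alignment is to inflate the size.  Hence the
   MAX (size, align / BITS_PER_UNIT) operand above.  */
#if 0
static unsigned HOST_WIDE_INT
som_comm_size_operand (unsigned HOST_WIDE_INT size, unsigned int align_bits)
{
  unsigned HOST_WIDE_INT align_bytes = align_bits / BITS_PER_UNIT;
  return size > align_bytes ? size : align_bytes;
}
#endif
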
8669 /* We can't use .comm for local common storage as the SOM linker effectively
8670 treats the symbol as universal and uses the same storage for local symbols
8671 with the same name in different object files. The .block directive
8672 reserves an uninitialized block of storage. However, it's not common
8673 storage. Fortunately, GCC never requests common storage with the same
8674 name in any given translation unit. */
8675
8676 void
8677 pa_asm_output_aligned_local (FILE *stream,
8678 const char *name,
8679 unsigned HOST_WIDE_INT size,
8680 unsigned int align)
8681 {
8682 switch_to_section (bss_section);
8683 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8684
8685 #ifdef LOCAL_ASM_OP
8686 fprintf (stream, "%s", LOCAL_ASM_OP);
8687 assemble_name (stream, name);
8688 fprintf (stream, "\n");
8689 #endif
8690
8691 ASM_OUTPUT_LABEL (stream, name);
8692 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8693 }
8694
8695 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8696 use in fmpysub instructions. */
8697 int
8698 pa_fmpysuboperands (rtx *operands)
8699 {
8700 machine_mode mode = GET_MODE (operands[0]);
8701
8702 /* Must be a floating point mode. */
8703 if (mode != SFmode && mode != DFmode)
8704 return 0;
8705
8706 /* All modes must be the same. */
8707 if (! (mode == GET_MODE (operands[1])
8708 && mode == GET_MODE (operands[2])
8709 && mode == GET_MODE (operands[3])
8710 && mode == GET_MODE (operands[4])
8711 && mode == GET_MODE (operands[5])))
8712 return 0;
8713
8714 /* All operands must be registers. */
8715 if (! (GET_CODE (operands[1]) == REG
8716 && GET_CODE (operands[2]) == REG
8717 && GET_CODE (operands[3]) == REG
8718 && GET_CODE (operands[4]) == REG
8719 && GET_CODE (operands[5]) == REG))
8720 return 0;
8721
8722 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8723 operation, so operands[4] must be the same as operands[3]. */
8724 if (! rtx_equal_p (operands[3], operands[4]))
8725 return 0;
8726
8727 /* Multiply cannot feed into subtraction. */
8728 if (rtx_equal_p (operands[5], operands[0]))
8729 return 0;
8730
8731 /* Inout operand of sub cannot conflict with any operands from multiply. */
8732 if (rtx_equal_p (operands[3], operands[0])
8733 || rtx_equal_p (operands[3], operands[1])
8734 || rtx_equal_p (operands[3], operands[2]))
8735 return 0;
8736
8737 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8738 if (mode == SFmode
8739 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8740 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8741 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8742 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8743 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8744 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8745 return 0;
8746
8747 /* Passed. Operands are suitable for fmpysub. */
8748 return 1;
8749 }
8750
8751 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8752 constants for a MULT embedded inside a memory address. */
8753 int
8754 pa_mem_shadd_constant_p (int val)
8755 {
8756 if (val == 2 || val == 4 || val == 8)
8757 return 1;
8758 else
8759 return 0;
8760 }
8761
8762 /* Return 1 if the given constant is 1, 2, or 3. These are the valid
8763 constants for shadd instructions. */
8764 int
8765 pa_shadd_constant_p (int val)
8766 {
8767 if (val == 1 || val == 2 || val == 3)
8768 return 1;
8769 else
8770 return 0;
8771 }
8772
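/* A minimal illustrative sketch (hypothetical helper, not part of the
   port): the two predicates above describe the same shift-and-add
   facility in different notations.  A MULT inside a memory address
   carries the scale factor (2, 4 or 8), while the sh1add/sh2add/sh3add
   instructions encode the shift count (1, 2 or 3); the two are related
   by scale == 1 << shift.  */
#if 0
static int
shadd_shift_for_mem_scale (int scale)
{
  switch (scale)
    {
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    default: return -1;         /* not a valid shadd scale */
    }
}
#endif
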
8773 /* Return TRUE if INSN branches forward. */
8774
8775 static bool
8776 forward_branch_p (rtx_insn *insn)
8777 {
8778 rtx lab = JUMP_LABEL (insn);
8779
8780 /* The INSN must have a jump label. */
8781 gcc_assert (lab != NULL_RTX);
8782
8783 if (INSN_ADDRESSES_SET_P ())
8784 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8785
8786 while (insn)
8787 {
8788 if (insn == lab)
8789 return true;
8790 else
8791 insn = NEXT_INSN (insn);
8792 }
8793
8794 return false;
8795 }
8796
8797 /* Output an unconditional move and branch insn. */
8798
8799 const char *
8800 pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
8801 {
8802 int length = get_attr_length (insn);
8803
8804 /* These are the cases in which we win. */
8805 if (length == 4)
8806 return "mov%I1b,tr %1,%0,%2";
8807
8808 /* None of the following cases win, but they don't lose either. */
8809 if (length == 8)
8810 {
8811 if (dbr_sequence_length () == 0)
8812 {
8813 /* Nothing in the delay slot, fake it by putting the combined
8814 insn (the copy or add) in the delay slot of a bl. */
8815 if (GET_CODE (operands[1]) == CONST_INT)
8816 return "b %2\n\tldi %1,%0";
8817 else
8818 return "b %2\n\tcopy %1,%0";
8819 }
8820 else
8821 {
8822 /* Something in the delay slot, but we've got a long branch. */
8823 if (GET_CODE (operands[1]) == CONST_INT)
8824 return "ldi %1,%0\n\tb %2";
8825 else
8826 return "copy %1,%0\n\tb %2";
8827 }
8828 }
8829
8830 if (GET_CODE (operands[1]) == CONST_INT)
8831 output_asm_insn ("ldi %1,%0", operands);
8832 else
8833 output_asm_insn ("copy %1,%0", operands);
8834 return pa_output_lbranch (operands[2], insn, 1);
8835 }
8836
8837 /* Output an unconditional add and branch insn. */
8838
8839 const char *
8840 pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
8841 {
8842 int length = get_attr_length (insn);
8843
8844 /* To make life easy we want operand0 to be the shared input/output
8845 operand and operand1 to be the read-only operand. */
8846 if (operands[0] == operands[1])
8847 operands[1] = operands[2];
8848
8849 /* These are the cases in which we win. */
8850 if (length == 4)
8851 return "add%I1b,tr %1,%0,%3";
8852
8853 /* None of the following cases win, but they don't lose either. */
8854 if (length == 8)
8855 {
8856 if (dbr_sequence_length () == 0)
8857 /* Nothing in the delay slot, fake it by putting the combined
8858 insn (the copy or add) in the delay slot of a bl. */
8859 return "b %3\n\tadd%I1 %1,%0,%0";
8860 else
8861 /* Something in the delay slot, but we've got a long branch. */
8862 return "add%I1 %1,%0,%0\n\tb %3";
8863 }
8864
8865 output_asm_insn ("add%I1 %1,%0,%0", operands);
8866 return pa_output_lbranch (operands[3], insn, 1);
8867 }
8868
8869 /* We use this hook to perform a PA specific optimization which is difficult
8870 to do in earlier passes. */
8871
8872 static void
8873 pa_reorg (void)
8874 {
8875 remove_useless_addtr_insns (1);
8876
8877 if (pa_cpu < PROCESSOR_8000)
8878 pa_combine_instructions ();
8879 }
8880
8881 /* The PA has a number of odd instructions which can perform multiple
8882 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8883 it may be profitable to combine two instructions into one instruction
8884 with two outputs. It's not profitable on PA2.0 machines because the
8885 two outputs would take two slots in the reorder buffers.
8886
8887 This routine finds instructions which can be combined and combines
8888 them. We only support some of the potential combinations, and we
8889 only try common ways to find suitable instructions.
8890
8891 * addb can add two registers or a register and a small integer
8892 and jump to a nearby (+-8k) location. Normally the jump to the
8893 nearby location is conditional on the result of the add, but by
8894 using the "true" condition we can make the jump unconditional.
8895 Thus addb can perform two independent operations in one insn.
8896
8897 * movb is similar to addb in that it can perform a reg->reg
8898 or small immediate->reg copy and jump to a nearby (+-8k) location.
8899
8900 * fmpyadd and fmpysub can perform a FP multiply and either an
8901 FP add or FP sub if the operands of the multiply and add/sub are
8902 independent (there are other minor restrictions). Note that both
8903 the fmpy and fadd/fsub can in theory move to better spots according
8904 to data dependencies, but for now we require that the fmpy stay at a
8905 fixed location.
8906
8907 * Many of the memory operations can perform pre & post updates
8908 of index registers. GCC's pre/post increment/decrement addressing
8909 is far too simple to take advantage of all the possibilities. This
8910 pass may not be suitable since those insns may not be independent.
8911
8912 * comclr can compare two ints or an int and a register, nullify
8913 the following instruction and zero some other register. This
8914 is more difficult to use as it's harder to find an insn which
8915 will generate a comclr than finding something like an unconditional
8916 branch. (Conditional moves & long branches create comclr insns.)
8917
8918 * Most arithmetic operations can conditionally skip the next
8919 instruction. They can be viewed as "perform this operation
8920 and conditionally jump to this nearby location" (where nearby
8921 is an insn away). These are difficult to use due to the
8922 branch length restrictions. */
8923
8924 static void
8925 pa_combine_instructions (void)
8926 {
8927 rtx_insn *anchor;
8928
8929 /* This can get expensive since the basic algorithm is on the
8930 order of O(n^2) (or worse). Only do it for -O2 or higher
8931 levels of optimization. */
8932 if (optimize < 2)
8933 return;
8934
8935 /* Walk down the list of insns looking for "anchor" insns which
8936 may be combined with "floating" insns. As the name implies,
8937 "anchor" instructions don't move, while "floating" insns may
8938 move around. */
8939 rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8940 rtx_insn *new_rtx = make_insn_raw (par);
8941
8942 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8943 {
8944 enum attr_pa_combine_type anchor_attr;
8945 enum attr_pa_combine_type floater_attr;
8946
8947 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8948 Also ignore any special USE insns. */
8949 if ((! NONJUMP_INSN_P (anchor) && ! JUMP_P (anchor) && ! CALL_P (anchor))
8950 || GET_CODE (PATTERN (anchor)) == USE
8951 || GET_CODE (PATTERN (anchor)) == CLOBBER)
8952 continue;
8953
8954 anchor_attr = get_attr_pa_combine_type (anchor);
8955 /* See if anchor is an insn suitable for combination. */
8956 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8957 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8958 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8959 && ! forward_branch_p (anchor)))
8960 {
8961 rtx_insn *floater;
8962
8963 for (floater = PREV_INSN (anchor);
8964 floater;
8965 floater = PREV_INSN (floater))
8966 {
8967 if (NOTE_P (floater)
8968 || (NONJUMP_INSN_P (floater)
8969 && (GET_CODE (PATTERN (floater)) == USE
8970 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8971 continue;
8972
8973 /* Anything except a regular INSN will stop our search. */
8974 if (! NONJUMP_INSN_P (floater))
8975 {
8976 floater = NULL;
8977 break;
8978 }
8979
8980 /* See if FLOATER is suitable for combination with the
8981 anchor. */
8982 floater_attr = get_attr_pa_combine_type (floater);
8983 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8984 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8985 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8986 && floater_attr == PA_COMBINE_TYPE_FMPY))
8987 {
8988 /* If ANCHOR and FLOATER can be combined, then we're
8989 done with this pass. */
8990 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8991 SET_DEST (PATTERN (floater)),
8992 XEXP (SET_SRC (PATTERN (floater)), 0),
8993 XEXP (SET_SRC (PATTERN (floater)), 1)))
8994 break;
8995 }
8996
8997 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8998 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8999 {
9000 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9001 {
9002 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9003 SET_DEST (PATTERN (floater)),
9004 XEXP (SET_SRC (PATTERN (floater)), 0),
9005 XEXP (SET_SRC (PATTERN (floater)), 1)))
9006 break;
9007 }
9008 else
9009 {
9010 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9011 SET_DEST (PATTERN (floater)),
9012 SET_SRC (PATTERN (floater)),
9013 SET_SRC (PATTERN (floater))))
9014 break;
9015 }
9016 }
9017 }
9018
9019 /* If we didn't find anything on the backwards scan, try forwards. */
9020 if (!floater
9021 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9022 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9023 {
9024 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9025 {
9026 if (NOTE_P (floater)
9027 || (NONJUMP_INSN_P (floater)
9028 && (GET_CODE (PATTERN (floater)) == USE
9029 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9030 continue;
9032
9033 /* Anything except a regular INSN will stop our search. */
9034 if (! NONJUMP_INSN_P (floater))
9035 {
9036 floater = NULL;
9037 break;
9038 }
9039
9040 /* See if FLOATER is suitable for combination with the
9041 anchor. */
9042 floater_attr = get_attr_pa_combine_type (floater);
9043 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9044 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9045 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9046 && floater_attr == PA_COMBINE_TYPE_FMPY))
9047 {
9048 /* If ANCHOR and FLOATER can be combined, then we're
9049 done with this pass. */
9050 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9051 SET_DEST (PATTERN (floater)),
9052 XEXP (SET_SRC (PATTERN (floater)),
9053 0),
9054 XEXP (SET_SRC (PATTERN (floater)),
9055 1)))
9056 break;
9057 }
9058 }
9059 }
9060
9061 /* FLOATER will be nonzero if we found a suitable floating
9062 insn for combination with ANCHOR. */
9063 if (floater
9064 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9065 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9066 {
9067 /* Emit the new instruction and delete the old anchor. */
9068 emit_insn_before (gen_rtx_PARALLEL
9069 (VOIDmode,
9070 gen_rtvec (2, PATTERN (anchor),
9071 PATTERN (floater))),
9072 anchor);
9073
9074 SET_INSN_DELETED (anchor);
9075
9076 /* Emit a special USE insn for FLOATER, then delete
9077 the floating insn. */
9078 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9079 delete_insn (floater);
9080
9081 continue;
9082 }
9083 else if (floater
9084 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9085 {
9086 rtx temp;
9087 /* Emit the new_jump instruction and delete the old anchor. */
9088 temp
9089 = emit_jump_insn_before (gen_rtx_PARALLEL
9090 (VOIDmode,
9091 gen_rtvec (2, PATTERN (anchor),
9092 PATTERN (floater))),
9093 anchor);
9094
9095 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9096 SET_INSN_DELETED (anchor);
9097
9098 /* Emit a special USE insn for FLOATER, then delete
9099 the floating insn. */
9100 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9101 delete_insn (floater);
9102 continue;
9103 }
9104 }
9105 }
9106 }
9107
9108 static int
9109 pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
9110 int reversed, rtx dest,
9111 rtx src1, rtx src2)
9112 {
9113 int insn_code_number;
9114 rtx_insn *start, *end;
9115
9116 /* Create a PARALLEL with the patterns of ANCHOR and
9117 FLOATER, try to recognize it, then test constraints
9118 for the resulting pattern.
9119
9120 If the pattern doesn't match or the constraints
9121 aren't met keep searching for a suitable floater
9122 insn. */
9123 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9124 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9125 INSN_CODE (new_rtx) = -1;
9126 insn_code_number = recog_memoized (new_rtx);
9127 basic_block bb = BLOCK_FOR_INSN (anchor);
9128 if (insn_code_number < 0
9129 || (extract_insn (new_rtx),
9130 !constrain_operands (1, get_preferred_alternatives (new_rtx, bb))))
9131 return 0;
9132
9133 if (reversed)
9134 {
9135 start = anchor;
9136 end = floater;
9137 }
9138 else
9139 {
9140 start = floater;
9141 end = anchor;
9142 }
9143
9144 /* There are up to three operands to consider: one
9145 output and two inputs.
9146
9147 The output must not be used between FLOATER & ANCHOR
9148 exclusive. The inputs must not be set between
9149 FLOATER and ANCHOR exclusive. */
9150
9151 if (reg_used_between_p (dest, start, end))
9152 return 0;
9153
9154 if (reg_set_between_p (src1, start, end))
9155 return 0;
9156
9157 if (reg_set_between_p (src2, start, end))
9158 return 0;
9159
9160 /* If we get here, then everything is good. */
9161 return 1;
9162 }
9163
9164 /* Return nonzero if references for INSN are delayed.
9165
9166 Millicode insns are actually function calls with some special
9167 constraints on arguments and register usage.
9168
9169 Millicode calls always expect their arguments in the integer argument
9170 registers, and always return their result in %r29 (ret1). They
9171 are expected to clobber their arguments, %r1, %r29, and the return
9172 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9173
9174 This function tells reorg that the references to arguments and
9175 millicode calls do not appear to happen until after the millicode call.
9176 This allows reorg to put insns which set the argument registers into the
9177 delay slot of the millicode call -- thus they act more like traditional
9178 CALL_INSNs.
9179
9180 Note we cannot consider side effects of the insn to be delayed because
9181 the branch and link insn will clobber the return pointer. If we happened
9182 to use the return pointer in the delay slot of the call, then we lose.
9183
9184 get_attr_type will try to recognize the given insn, so make sure to
9185 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9186 in particular. */
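
/* For example (schematically), reorg can rewrite

	ldi 7,%r26		; set up the millicode argument
	bl $$mulI,%r31		; millicode call

   as

	bl $$mulI,%r31
	ldi 7,%r26		; argument set up in the delay slot

   because this function reports the argument references as delayed.
   ($$mulI takes its arguments in %r26 and %r25 and returns its result
   in %r29.) */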
9187 int
9188 pa_insn_refs_are_delayed (rtx_insn *insn)
9189 {
9190 return ((NONJUMP_INSN_P (insn)
9191 && GET_CODE (PATTERN (insn)) != SEQUENCE
9192 && GET_CODE (PATTERN (insn)) != USE
9193 && GET_CODE (PATTERN (insn)) != CLOBBER
9194 && get_attr_type (insn) == TYPE_MILLI));
9195 }
9196
9197 /* Promote the return value, but not the arguments. */
9198
9199 static machine_mode
9200 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9201 machine_mode mode,
9202 int *punsignedp ATTRIBUTE_UNUSED,
9203 const_tree fntype ATTRIBUTE_UNUSED,
9204 int for_return)
9205 {
9206 if (for_return == 0)
9207 return mode;
9208 return promote_mode (type, mode, punsignedp);
9209 }
9210
9211 /* On the HP-PA the value is found in register(s) 28(-29), unless
9212 the mode is SF or DF. Then the value is returned in fr4 (32).
9213
9214 This must perform the same promotions as PROMOTE_MODE, else promoting
9215 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9216
9217 Small structures must be returned in a PARALLEL on PA64 in order
9218 to match the HP Compiler ABI. */
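
/* For example, on TARGET_64BIT a 12-byte aggregate comes back from
   pa_function_value below as

	(parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
		       (expr_list (reg:DI 29) (const_int 8))])

   i.e. left justified in GRs 28-29, with the final four pad bytes
   undefined. */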
9219
9220 static rtx
9221 pa_function_value (const_tree valtype,
9222 const_tree func ATTRIBUTE_UNUSED,
9223 bool outgoing ATTRIBUTE_UNUSED)
9224 {
9225 machine_mode valmode;
9226
9227 if (AGGREGATE_TYPE_P (valtype)
9228 || TREE_CODE (valtype) == COMPLEX_TYPE
9229 || TREE_CODE (valtype) == VECTOR_TYPE)
9230 {
9231 HOST_WIDE_INT valsize = int_size_in_bytes (valtype);
9232
9233 /* Handle aggregates that fit exactly in a word or double word. */
9234 if ((valsize & (UNITS_PER_WORD - 1)) == 0)
9235 return gen_rtx_REG (TYPE_MODE (valtype), 28);
9236
9237 if (TARGET_64BIT)
9238 {
9239 /* Aggregates with a size less than or equal to 128 bits are
9240 returned in GR 28(-29). They are left justified. The pad
9241 bits are undefined. Larger aggregates are returned in
9242 memory. */
9243 rtx loc[2];
9244 int i, offset = 0;
9245 int ub = valsize <= UNITS_PER_WORD ? 1 : 2;
9246
9247 for (i = 0; i < ub; i++)
9248 {
9249 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9250 gen_rtx_REG (DImode, 28 + i),
9251 GEN_INT (offset));
9252 offset += 8;
9253 }
9254
9255 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9256 }
9257 else if (valsize > UNITS_PER_WORD)
9258 {
9259 /* Aggregates 5 to 8 bytes in size are returned in general
9260 registers r28-r29 in the same manner as other non
9261 floating-point objects. The data is right-justified and
9262 zero-extended to 64 bits. This is opposite to the normal
9263 justification used on big endian targets and requires
9264 special treatment. */
9265 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9266 gen_rtx_REG (DImode, 28), const0_rtx);
9267 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9268 }
9269 }
9270
9271 if ((INTEGRAL_TYPE_P (valtype)
9272 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9273 || POINTER_TYPE_P (valtype))
9274 valmode = word_mode;
9275 else
9276 valmode = TYPE_MODE (valtype);
9277
9278 if (TREE_CODE (valtype) == REAL_TYPE
9279 && !AGGREGATE_TYPE_P (valtype)
9280 && TYPE_MODE (valtype) != TFmode
9281 && !TARGET_SOFT_FLOAT)
9282 return gen_rtx_REG (valmode, 32);
9283
9284 return gen_rtx_REG (valmode, 28);
9285 }
9286
9287 /* Implement the TARGET_LIBCALL_VALUE hook. */
9288
9289 static rtx
9290 pa_libcall_value (machine_mode mode,
9291 const_rtx fun ATTRIBUTE_UNUSED)
9292 {
9293 if (! TARGET_SOFT_FLOAT
9294 && (mode == SFmode || mode == DFmode))
9295 return gen_rtx_REG (mode, 32);
9296 else
9297 return gen_rtx_REG (mode, 28);
9298 }
9299
9300 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9301
9302 static bool
9303 pa_function_value_regno_p (const unsigned int regno)
9304 {
9305 if (regno == 28
9306 || (! TARGET_SOFT_FLOAT && regno == 32))
9307 return true;
9308
9309 return false;
9310 }
9311
9312 /* Update the data in CUM to advance over an argument
9313 of mode MODE and data type TYPE.
9314 (TYPE is null for libcalls where that information may not be available.) */
9315
9316 static void
9317 pa_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
9318 const_tree type, bool named ATTRIBUTE_UNUSED)
9319 {
9320 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9321 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9322
9323 cum->nargs_prototype--;
9324 cum->words += (arg_size
9325 + ((cum->words & 01)
9326 && type != NULL_TREE
9327 && arg_size > 1));
9328 }
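
/* For example, a DFmode argument (arg_size == 2) arriving when
   cum->words is odd, say 1, gets one extra pad word from the
   expression above (for a non-libcall argument, where TYPE is
   non-null): cum->words advances from 1 to 1 + 2 + 1 = 4, keeping
   multiword arguments double-word aligned. */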
9329
9330 /* Return the location of a parameter that is passed in a register or NULL
9331 if the parameter has any component that is passed in memory.
9332
9333 This is new code and will be pushed into the net sources after
9334 further testing.
9335
9336 ??? We might want to restructure this so that it looks more like other
9337 ports. */
9338 static rtx
9339 pa_function_arg (cumulative_args_t cum_v, machine_mode mode,
9340 const_tree type, bool named ATTRIBUTE_UNUSED)
9341 {
9342 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9343 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9344 int alignment = 0;
9345 int arg_size;
9346 int fpr_reg_base;
9347 int gpr_reg_base;
9348 rtx retval;
9349
9350 if (mode == VOIDmode)
9351 return NULL_RTX;
9352
9353 arg_size = FUNCTION_ARG_SIZE (mode, type);
9354
9355 /* If this arg would be passed partially or totally on the stack, then
9356 this routine should return zero. pa_arg_partial_bytes will
9357 handle arguments which are split between regs and stack slots if
9358 the ABI mandates split arguments. */
9359 if (!TARGET_64BIT)
9360 {
9361 /* The 32-bit ABI does not split arguments. */
9362 if (cum->words + arg_size > max_arg_words)
9363 return NULL_RTX;
9364 }
9365 else
9366 {
9367 if (arg_size > 1)
9368 alignment = cum->words & 1;
9369 if (cum->words + alignment >= max_arg_words)
9370 return NULL_RTX;
9371 }
9372
9373 /* The 32-bit and 64-bit ABIs are rather different,
9374 particularly in their handling of FP registers. We might
9375 be able to cleverly share code between them, but I'm not
9376 going to bother in the hope that splitting them up results
9377 in code that is more easily understood. */
9378
9379 if (TARGET_64BIT)
9380 {
9381 /* Advance the base registers to their current locations.
9382
9383 Remember, gprs grow towards smaller register numbers while
9384 fprs grow to higher register numbers. Also remember that
9385 although FP regs are 32-bit addressable, we pretend that
9386 the registers are 64 bits wide. */
9387 gpr_reg_base = 26 - cum->words;
9388 fpr_reg_base = 32 + cum->words;
9389
9390 /* Arguments wider than one word and small aggregates need special
9391 treatment. */
9392 if (arg_size > 1
9393 || mode == BLKmode
9394 || (type && (AGGREGATE_TYPE_P (type)
9395 || TREE_CODE (type) == COMPLEX_TYPE
9396 || TREE_CODE (type) == VECTOR_TYPE)))
9397 {
9398 /* Double-extended precision (80-bit), quad-precision (128-bit)
9399 and aggregates including complex numbers are aligned on
9400 128-bit boundaries. The first eight 64-bit argument slots
9401 are associated one-to-one, with general registers r26
9402 through r19, and also with floating-point registers fr4
9403 through fr11. Arguments larger than one word are always
9404 passed in general registers.
9405
9406 Using a PARALLEL with a word mode register results in left
9407 justified data on a big-endian target. */
9408
9409 rtx loc[8];
9410 int i, offset = 0, ub = arg_size;
9411
9412 /* Align the base register. */
9413 gpr_reg_base -= alignment;
9414
9415 ub = MIN (ub, max_arg_words - cum->words - alignment);
9416 for (i = 0; i < ub; i++)
9417 {
9418 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9419 gen_rtx_REG (DImode, gpr_reg_base),
9420 GEN_INT (offset));
9421 gpr_reg_base -= 1;
9422 offset += 8;
9423 }
9424
9425 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9426 }
9427 }
9428 else
9429 {
9430 /* If the argument is larger than a word, then we know precisely
9431 which registers we must use. */
9432 if (arg_size > 1)
9433 {
9434 if (cum->words)
9435 {
9436 gpr_reg_base = 23;
9437 fpr_reg_base = 38;
9438 }
9439 else
9440 {
9441 gpr_reg_base = 25;
9442 fpr_reg_base = 34;
9443 }
9444
9445 /* Structures 5 to 8 bytes in size are passed in the general
9446 registers in the same manner as other non floating-point
9447 objects. The data is right-justified and zero-extended
9448 to 64 bits. This is opposite to the normal justification
9449 used on big endian targets and requires special treatment.
9450 We now define BLOCK_REG_PADDING to pad these objects.
9451 Aggregates, complex and vector types are passed in the same
9452 manner as structures. */
9453 if (mode == BLKmode
9454 || (type && (AGGREGATE_TYPE_P (type)
9455 || TREE_CODE (type) == COMPLEX_TYPE
9456 || TREE_CODE (type) == VECTOR_TYPE)))
9457 {
9458 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9459 gen_rtx_REG (DImode, gpr_reg_base),
9460 const0_rtx);
9461 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9462 }
9463 }
9464 else
9465 {
9466 /* We have a single word (32 bits). A simple computation
9467 will get us the register #s we need. */
9468 gpr_reg_base = 26 - cum->words;
9469 fpr_reg_base = 32 + 2 * cum->words;
9470 }
9471 }
9472
9473 /* Determine if the argument needs to be passed in both general and
9474 floating point registers. */
9475 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9476 /* If we are doing soft-float with portable runtime, then there
9477 is no need to worry about FP regs. */
9478 && !TARGET_SOFT_FLOAT
9479 /* The parameter must be some kind of scalar float, else we just
9480 pass it in integer registers. */
9481 && GET_MODE_CLASS (mode) == MODE_FLOAT
9482 /* The target function must not have a prototype. */
9483 && cum->nargs_prototype <= 0
9484 /* libcalls do not need to pass items in both FP and general
9485 registers. */
9486 && type != NULL_TREE
9487 /* All this hair applies to "outgoing" args only. This includes
9488 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9489 && !cum->incoming)
9490 /* Also pass outgoing floating arguments in both registers in indirect
9491 calls with the 32-bit ABI and the HP assembler since there is no
9492 way to specify argument locations in static functions. */
9493 || (!TARGET_64BIT
9494 && !TARGET_GAS
9495 && !cum->incoming
9496 && cum->indirect
9497 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9498 {
9499 retval
9500 = gen_rtx_PARALLEL
9501 (mode,
9502 gen_rtvec (2,
9503 gen_rtx_EXPR_LIST (VOIDmode,
9504 gen_rtx_REG (mode, fpr_reg_base),
9505 const0_rtx),
9506 gen_rtx_EXPR_LIST (VOIDmode,
9507 gen_rtx_REG (mode, gpr_reg_base),
9508 const0_rtx)));
9509 }
9510 else
9511 {
9512 /* See if we should pass this parameter in a general register. */
9513 if (TARGET_SOFT_FLOAT
9514 /* Indirect calls in the normal 32-bit ABI require all arguments
9515 to be passed in general registers. */
9516 || (!TARGET_PORTABLE_RUNTIME
9517 && !TARGET_64BIT
9518 && !TARGET_ELF32
9519 && cum->indirect)
9520 /* If the parameter is not a scalar floating-point parameter,
9521 then it belongs in GPRs. */
9522 || GET_MODE_CLASS (mode) != MODE_FLOAT
9523 /* Structure with single SFmode field belongs in GPR. */
9524 || (type && AGGREGATE_TYPE_P (type)))
9525 retval = gen_rtx_REG (mode, gpr_reg_base);
9526 else
9527 retval = gen_rtx_REG (mode, fpr_reg_base);
9528 }
9529 return retval;
9530 }
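
/* For example, with the 32-bit ELF runtime (TARGET_ELF32), an outgoing
   unprototyped DFmode argument in the first slot pair (cum->words == 0)
   takes the PARALLEL path above and is passed both ways:

	(parallel:DF [(expr_list (reg:DF 34) (const_int 0))
		      (expr_list (reg:DF 25) (const_int 0))])

   using the fpr_reg_base and gpr_reg_base values computed earlier in
   this function. */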
9531
9532 /* Arguments larger than one word are double word aligned. */
9533
9534 static unsigned int
9535 pa_function_arg_boundary (machine_mode mode, const_tree type)
9536 {
9537 bool singleword = (type
9538 ? (integer_zerop (TYPE_SIZE (type))
9539 || !TREE_CONSTANT (TYPE_SIZE (type))
9540 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9541 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9542
9543 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
9544 }
9545
9546 /* If this arg would be passed totally in registers or totally on the stack,
9547 then this routine should return zero. */
9548
9549 static int
9550 pa_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
9551 tree type, bool named ATTRIBUTE_UNUSED)
9552 {
9553 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9554 unsigned int max_arg_words = 8;
9555 unsigned int offset = 0;
9556
9557 if (!TARGET_64BIT)
9558 return 0;
9559
9560 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9561 offset = 1;
9562
9563 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9564 /* Arg fits fully into registers. */
9565 return 0;
9566 else if (cum->words + offset >= max_arg_words)
9567 /* Arg fully on the stack. */
9568 return 0;
9569 else
9570 /* Arg is split. */
9571 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9572 }
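
/* A worked example for the split case: on PA64, a 32-byte aggregate
   (FUNCTION_ARG_SIZE == 4) arriving at cum->words == 6 neither fits
   fully in registers (6 + 4 > 8) nor starts on the stack (6 < 8), so
   the function returns (8 - 6) * UNITS_PER_WORD == 16 bytes passed in
   registers, with the rest on the stack. */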
9573
9574
9575 /* A get_unnamed_section callback for switching to the text section.
9576
9577 This function is only used with SOM. Because we don't support
9578 named subspaces, we can only create a new subspace or switch back
9579 to the default text subspace. */
9580
9581 static void
9582 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9583 {
9584 gcc_assert (TARGET_SOM);
9585 if (TARGET_GAS)
9586 {
9587 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9588 {
9589 /* We only want to emit a .nsubspa directive once at the
9590 start of the function. */
9591 cfun->machine->in_nsubspa = 1;
9592
9593 /* Create a new subspace for the text. This provides
9594 better stub placement and one-only functions. */
9595 if (cfun->decl
9596 && DECL_ONE_ONLY (cfun->decl)
9597 && !DECL_WEAK (cfun->decl))
9598 {
9599 output_section_asm_op ("\t.SPACE $TEXT$\n"
9600 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9601 "ACCESS=44,SORT=24,COMDAT");
9602 return;
9603 }
9604 }
9605 else
9606 {
9607 /* Either there is no current function, or the body of the current
9608 function has been completed. So, we are changing to the
9609 text section to output debugging information. Thus, we
9610 need to forget that we are in the text section so that
9611 varasm.c will call us when text_section is selected again. */
9612 gcc_assert (!cfun || !cfun->machine
9613 || cfun->machine->in_nsubspa == 2);
9614 in_section = NULL;
9615 }
9616 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9617 return;
9618 }
9619 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9620 }
9621
9622 /* A get_unnamed_section callback for switching to comdat data
9623 sections. This function is only used with SOM. */
9624
9625 static void
9626 som_output_comdat_data_section_asm_op (const void *data)
9627 {
9628 in_section = NULL;
9629 output_section_asm_op (data);
9630 }
9631
9632 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9633
9634 static void
9635 pa_som_asm_init_sections (void)
9636 {
9637 text_section
9638 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9639
9640 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9641 is not being generated. */
9642 som_readonly_data_section
9643 = get_unnamed_section (0, output_section_asm_op,
9644 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9645
9646 /* When secondary definitions are not supported, SOM makes readonly
9647 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9648 the comdat flag. */
9649 som_one_only_readonly_data_section
9650 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9651 "\t.SPACE $TEXT$\n"
9652 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9653 "ACCESS=0x2c,SORT=16,COMDAT");
9654
9656 /* When secondary definitions are not supported, SOM makes data one-only
9657 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9658 som_one_only_data_section
9659 = get_unnamed_section (SECTION_WRITE,
9660 som_output_comdat_data_section_asm_op,
9661 "\t.SPACE $PRIVATE$\n"
9662 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9663 "ACCESS=31,SORT=24,COMDAT");
9664
9665 if (flag_tm)
9666 som_tm_clone_table_section
9667 = get_unnamed_section (0, output_section_asm_op,
9668 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9669
9670 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9671 which reference data within the $TEXT$ space (for example constant
9672 strings in the $LIT$ subspace).
9673
9674 The assemblers (GAS and HP as) both have problems with handling
9675 the difference of two symbols which is the other correct way to
9676 reference constant data during PIC code generation.
9677
9678 So, there's no way to reference constant data which is in the
9679 $TEXT$ space during PIC generation. Instead place all constant
9680 data into the $PRIVATE$ subspace (this reduces sharing, but it
9681 works correctly). */
9682 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9683
9684 /* We must not have a reference to an external symbol defined in a
9685 shared library in a readonly section, else the SOM linker will
9686 complain.
9687
9688 So, we force exception information into the data section. */
9689 exception_section = data_section;
9690 }
9691
9692 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9693
9694 static section *
9695 pa_som_tm_clone_table_section (void)
9696 {
9697 return som_tm_clone_table_section;
9698 }
9699
9700 /* On hpux10, the linker will give an error if we have a reference
9701 in the read-only data section to a symbol defined in a shared
9702 library. Therefore, expressions that might require a reloc cannot
9703 be placed in the read-only data section. */
9704
9705 static section *
9706 pa_select_section (tree exp, int reloc,
9707 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9708 {
9709 if (TREE_CODE (exp) == VAR_DECL
9710 && TREE_READONLY (exp)
9711 && !TREE_THIS_VOLATILE (exp)
9712 && DECL_INITIAL (exp)
9713 && (DECL_INITIAL (exp) == error_mark_node
9714 || TREE_CONSTANT (DECL_INITIAL (exp)))
9715 && !reloc)
9716 {
9717 if (TARGET_SOM
9718 && DECL_ONE_ONLY (exp)
9719 && !DECL_WEAK (exp))
9720 return som_one_only_readonly_data_section;
9721 else
9722 return readonly_data_section;
9723 }
9724 else if (CONSTANT_CLASS_P (exp) && !reloc)
9725 return readonly_data_section;
9726 else if (TARGET_SOM
9727 && TREE_CODE (exp) == VAR_DECL
9728 && DECL_ONE_ONLY (exp)
9729 && !DECL_WEAK (exp))
9730 return som_one_only_data_section;
9731 else
9732 return data_section;
9733 }
9734
9735 /* Implement pa_reloc_rw_mask. */
9736
9737 static int
9738 pa_reloc_rw_mask (void)
9739 {
9740 /* We force (const (plus (symbol) (const_int))) to memory when the
9741 const_int doesn't fit in a 14-bit integer. The SOM linker can't
9742 handle this construct in read-only memory and we want to avoid
9743 this for ELF. So, we always force an RTX needing relocation to
9744 the data section. */
9745 return 3;
9746 }
9747
9748 static void
9749 pa_globalize_label (FILE *stream, const char *name)
9750 {
9751 /* We only handle DATA objects here, functions are globalized in
9752 ASM_DECLARE_FUNCTION_NAME. */
9753 if (! FUNCTION_NAME_P (name))
9754 {
9755 fputs ("\t.EXPORT ", stream);
9756 assemble_name (stream, name);
9757 fputs (",DATA\n", stream);
9758 }
9759 }
9760
9761 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9762
9763 static rtx
9764 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9765 int incoming ATTRIBUTE_UNUSED)
9766 {
9767 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9768 }
9769
9770 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9771
9772 bool
9773 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9774 {
9775 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9776 PA64 ABI says that objects larger than 128 bits are returned in memory.
9777 Note, int_size_in_bytes can return -1 if the size of the object is
9778 variable or larger than the maximum value that can be expressed as
9779 a HOST_WIDE_INT. It can also return zero for an empty type. The
9780 simplest way to handle variable and empty types is to pass them in
9781 memory. This avoids problems in defining the boundaries of argument
9782 slots, allocating registers, etc. */
9783 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9784 || int_size_in_bytes (type) <= 0);
9785 }
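
/* For example, a 16-byte struct is returned in memory with the 32-bit
   ABI (16 > 8) but in registers with the PA64 ABI (16 <= 16). */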
9786
9787 /* Structure to hold declaration and name of external symbols that are
9788 emitted by GCC. We generate a vector of these symbols and output them
9789 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9790 This avoids putting out names that are never really used. */
9791
9792 typedef struct GTY(()) extern_symbol
9793 {
9794 tree decl;
9795 const char *name;
9796 } extern_symbol;
9797
9798 /* Define gc'd vector type for extern_symbol. */
9799
9800 /* Vector of extern_symbol pointers. */
9801 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9802
9803 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9804 /* Mark DECL (name NAME) as an external reference (assembler output
9805 file FILE). This saves the names to output at the end of the file
9806 if actually referenced. */
9807
9808 void
9809 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9810 {
9811 gcc_assert (file == asm_out_file);
9812 extern_symbol p = {decl, name};
9813 vec_safe_push (extern_symbols, p);
9814 }
9815
9816 /* Output text required at the end of an assembler file.
9817 This includes deferred plabels and .import directives for
9818 all external symbols that were actually referenced. */
9819
9820 static void
9821 pa_hpux_file_end (void)
9822 {
9823 unsigned int i;
9824 extern_symbol *p;
9825
9826 if (!NO_DEFERRED_PROFILE_COUNTERS)
9827 output_deferred_profile_counters ();
9828
9829 output_deferred_plabels ();
9830
9831 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
9832 {
9833 tree decl = p->decl;
9834
9835 if (!TREE_ASM_WRITTEN (decl)
9836 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9837 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9838 }
9839
9840 vec_free (extern_symbols);
9841 }
9842 #endif
9843
9844 /* Return true if a change from mode FROM to mode TO for a register
9845 in register class RCLASS is invalid. */
9846
9847 bool
9848 pa_cannot_change_mode_class (machine_mode from, machine_mode to,
9849 enum reg_class rclass)
9850 {
9851 if (from == to)
9852 return false;
9853
9854 /* Reject changes to/from complex and vector modes. */
9855 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9856 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9857 return true;
9858
9859 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9860 return false;
9861
9862 /* There is no way to load QImode or HImode values directly from
9863 memory. SImode loads to the FP registers are not zero extended.
9864 On the 64-bit target, this conflicts with the definition of
9865 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9866 with different sizes in the floating-point registers. */
9867 if (MAYBE_FP_REG_CLASS_P (rclass))
9868 return true;
9869
9870 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9871 in specific sets of registers. Thus, we cannot allow changing
9872 to a larger mode when it's larger than a word. */
9873 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9874 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9875 return true;
9876
9877 return false;
9878 }
9879
9880 /* Returns TRUE if it is a good idea to tie two pseudo registers
9881 when one has mode MODE1 and one has mode MODE2.
9882 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9883 for any hard reg, then this must be FALSE for correct output.
9884
9885 We should return FALSE for QImode and HImode because these modes
9886 are not ok in the floating-point registers. However, this prevents
9887 tying these modes to SImode and DImode in the general registers.
9888 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9889 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9890 in the floating-point registers. */
9891
9892 bool
9893 pa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
9894 {
9895 /* Don't tie modes in different classes. */
9896 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9897 return false;
9898
9899 return true;
9900 }
9901
9902 \f
9903 /* Length in units of the trampoline instruction code. */
9904
9905 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9906
9908 /* Output assembler code for a block containing the constant parts
9909 of a trampoline, leaving space for the variable parts.
9910
9911 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9912 and then branches to the specified routine.
9913
9914 The code template is copied from the text segment to its stack
9915 location, patched by pa_trampoline_init to contain valid values,
9916 and then entered as a subroutine.
9917
9918 It is best to keep this as small as possible to avoid having to
9919 flush multiple lines in the cache. */
9920
9921 static void
9922 pa_asm_trampoline_template (FILE *f)
9923 {
9924 if (!TARGET_64BIT)
9925 {
9926 fputs ("\tldw 36(%r22),%r21\n", f);
9927 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
9928 if (ASSEMBLER_DIALECT == 0)
9929 fputs ("\tdepi 0,31,2,%r21\n", f);
9930 else
9931 fputs ("\tdepwi 0,31,2,%r21\n", f);
9932 fputs ("\tldw 4(%r21),%r19\n", f);
9933 fputs ("\tldw 0(%r21),%r21\n", f);
9934 if (TARGET_PA_20)
9935 {
9936 fputs ("\tbve (%r21)\n", f);
9937 fputs ("\tldw 40(%r22),%r29\n", f);
9938 fputs ("\t.word 0\n", f);
9939 fputs ("\t.word 0\n", f);
9940 }
9941 else
9942 {
9943 fputs ("\tldsid (%r21),%r1\n", f);
9944 fputs ("\tmtsp %r1,%sr0\n", f);
9945 fputs ("\tbe 0(%sr0,%r21)\n", f);
9946 fputs ("\tldw 40(%r22),%r29\n", f);
9947 }
9948 fputs ("\t.word 0\n", f);
9949 fputs ("\t.word 0\n", f);
9950 fputs ("\t.word 0\n", f);
9951 fputs ("\t.word 0\n", f);
9952 }
9953 else
9954 {
9955 fputs ("\t.dword 0\n", f);
9956 fputs ("\t.dword 0\n", f);
9957 fputs ("\t.dword 0\n", f);
9958 fputs ("\t.dword 0\n", f);
9959 fputs ("\tmfia %r31\n", f);
9960 fputs ("\tldd 24(%r31),%r1\n", f);
9961 fputs ("\tldd 24(%r1),%r27\n", f);
9962 fputs ("\tldd 16(%r1),%r1\n", f);
9963 fputs ("\tbve (%r1)\n", f);
9964 fputs ("\tldd 32(%r31),%r31\n", f);
9965 fputs ("\t.dword 0 ; fptr\n", f);
9966 fputs ("\t.dword 0 ; static link\n", f);
9967 }
9968 }
9969
9970 /* Emit RTL insns to initialize the variable parts of a trampoline.
9971 FNADDR is an RTX for the address of the function's pure code.
9972 CXT is an RTX for the static chain value for the function.
9973
9974 Move the function address to the trampoline template at offset 36.
9975 Move the static chain value to trampoline template at offset 40.
9976 Move the trampoline address to trampoline template at offset 44.
9977 Move r19 to trampoline template at offset 48. The latter two
9978 words create a plabel for the indirect call to the trampoline.
9979
9980 A similar sequence is used for the 64-bit port but the plabel is
9981 at the beginning of the trampoline.
9982
9983 Finally, the cache entries for the trampoline code are flushed.
9984 This is necessary to ensure that the trampoline instruction sequence
9985 is written to memory prior to any attempts at prefetching the code
9986 sequence. */
9987
9988 static void
9989 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
9990 {
9991 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9992 rtx start_addr = gen_reg_rtx (Pmode);
9993 rtx end_addr = gen_reg_rtx (Pmode);
9994 rtx line_length = gen_reg_rtx (Pmode);
9995 rtx r_tramp, tmp;
9996
9997 emit_block_move (m_tramp, assemble_trampoline_template (),
9998 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
9999 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10000
10001 if (!TARGET_64BIT)
10002 {
10003 tmp = adjust_address (m_tramp, Pmode, 36);
10004 emit_move_insn (tmp, fnaddr);
10005 tmp = adjust_address (m_tramp, Pmode, 40);
10006 emit_move_insn (tmp, chain_value);
10007
10008 /* Create a fat pointer for the trampoline. */
10009 tmp = adjust_address (m_tramp, Pmode, 44);
10010 emit_move_insn (tmp, r_tramp);
10011 tmp = adjust_address (m_tramp, Pmode, 48);
10012 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10013
10014 /* fdc and fic only use registers for the address to flush;
10015 they do not accept integer displacements. We align the
10016 start and end addresses to the beginning of their respective
10017 cache lines to minimize the number of lines flushed. */
10018 emit_insn (gen_andsi3 (start_addr, r_tramp,
10019 GEN_INT (-MIN_CACHELINE_SIZE)));
10020 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10021 TRAMPOLINE_CODE_SIZE-1));
10022 emit_insn (gen_andsi3 (end_addr, tmp,
10023 GEN_INT (-MIN_CACHELINE_SIZE)));
10024 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10025 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10026 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10027 gen_reg_rtx (Pmode),
10028 gen_reg_rtx (Pmode)));
10029 }
10030 else
10031 {
10032 tmp = adjust_address (m_tramp, Pmode, 56);
10033 emit_move_insn (tmp, fnaddr);
10034 tmp = adjust_address (m_tramp, Pmode, 64);
10035 emit_move_insn (tmp, chain_value);
10036
10037 /* Create a fat pointer for the trampoline. */
10038 tmp = adjust_address (m_tramp, Pmode, 16);
10039 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10040 r_tramp, 32)));
10041 tmp = adjust_address (m_tramp, Pmode, 24);
10042 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10043
10044 /* fdc and fic only use registers for the address to flush;
10045 they do not accept integer displacements. We align the
10046 start and end addresses to the beginning of their respective
10047 cache lines to minimize the number of lines flushed. */
10048 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10049 emit_insn (gen_anddi3 (start_addr, tmp,
10050 GEN_INT (-MIN_CACHELINE_SIZE)));
10051 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10052 TRAMPOLINE_CODE_SIZE - 1));
10053 emit_insn (gen_anddi3 (end_addr, tmp,
10054 GEN_INT (-MIN_CACHELINE_SIZE)));
10055 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10056 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10057 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10058 gen_reg_rtx (Pmode),
10059 gen_reg_rtx (Pmode)));
10060 }
10061
10062 #ifdef HAVE_ENABLE_EXECUTE_STACK
10063 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
10064 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
10065 #endif
10066 }
10067
10068 /* Perform any machine-specific adjustment in the address of the trampoline.
10069 ADDR contains the address that was passed to pa_trampoline_init.
10070 Adjust the trampoline address to point to the plabel at offset 44 (the extra 2 sets the plabel bit in the address). */
10071
10072 static rtx
10073 pa_trampoline_adjust_address (rtx addr)
10074 {
10075 if (!TARGET_64BIT)
10076 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10077 return addr;
10078 }
10079
10080 static rtx
10081 pa_delegitimize_address (rtx orig_x)
10082 {
10083 rtx x = delegitimize_mem_from_attrs (orig_x);
10084
10085 if (GET_CODE (x) == LO_SUM
10086 && GET_CODE (XEXP (x, 1)) == UNSPEC
10087 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10088 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10089 return x;
10090 }
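
/* For example, a PIC reference loaded from the DLT as

	(lo_sum (reg) (unspec [(symbol_ref "x")] UNSPEC_DLTIND14R))

   is mapped back to (mem (symbol_ref "x")), so that consumers such as
   the debug-info machinery see the underlying symbol rather than the
   DLT indirection. */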
10091 \f
10092 static rtx
10093 pa_internal_arg_pointer (void)
10094 {
10095 /* The argument pointer and the hard frame pointer are the same in
10096 the 32-bit runtime, so we don't need a copy. */
10097 if (TARGET_64BIT)
10098 return copy_to_reg (virtual_incoming_args_rtx);
10099 else
10100 return virtual_incoming_args_rtx;
10101 }
10102
10103 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10104 Frame pointer elimination is automatically handled. */
10105
10106 static bool
10107 pa_can_eliminate (const int from, const int to)
10108 {
10109 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10110 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10111 return false;
10112
10113 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10114 ? ! frame_pointer_needed
10115 : true);
10116 }
10117
10118 /* Define the offset between two registers, FROM to be eliminated and its
10119 replacement TO, at the start of a routine. */
10120 HOST_WIDE_INT
10121 pa_initial_elimination_offset (int from, int to)
10122 {
10123 HOST_WIDE_INT offset;
10124
10125 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10126 && to == STACK_POINTER_REGNUM)
10127 offset = -pa_compute_frame_size (get_frame_size (), 0);
10128 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10129 offset = 0;
10130 else
10131 gcc_unreachable ();
10132
10133 return offset;
10134 }
10135
10136 static void
10137 pa_conditional_register_usage (void)
10138 {
10139 int i;
10140
10141 if (!TARGET_64BIT && !TARGET_PA_11)
10142 {
10143 for (i = 56; i <= FP_REG_LAST; i++)
10144 fixed_regs[i] = call_used_regs[i] = 1;
10145 for (i = 33; i < 56; i += 2)
10146 fixed_regs[i] = call_used_regs[i] = 1;
10147 }
10148 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10149 {
10150 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10151 fixed_regs[i] = call_used_regs[i] = 1;
10152 }
10153 if (flag_pic)
10154 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10155 }
10156
10157 /* Target hook for c_mode_for_suffix. */
10158
10159 static machine_mode
10160 pa_c_mode_for_suffix (char suffix)
10161 {
10162 if (HPUX_LONG_DOUBLE_LIBRARY)
10163 {
10164 if (suffix == 'q')
10165 return TFmode;
10166 }
10167
10168 return VOIDmode;
10169 }
10170
10171 /* Target hook for function_section. */
10172
10173 static section *
10174 pa_function_section (tree decl, enum node_frequency freq,
10175 bool startup, bool exit)
10176 {
10177 /* Put functions in text section if target doesn't have named sections. */
10178 if (!targetm_common.have_named_sections)
10179 return text_section;
10180
10181 /* Force nested functions into the same section as the containing
10182 function. */
10183 if (decl
10184 && DECL_SECTION_NAME (decl) == NULL
10185 && DECL_CONTEXT (decl) != NULL_TREE
10186 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10187 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL)
10188 return function_section (DECL_CONTEXT (decl));
10189
10190 /* Otherwise, use the default function section. */
10191 return default_function_section (decl, freq, startup, exit);
10192 }
10193
10194 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10195
10196 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10197 that need more than three instructions to load prior to reload. This
10198 limit is somewhat arbitrary. It takes three instructions to load a
10199 CONST_INT from memory but two are memory accesses. It may be better
10200 to increase the allowed range for CONST_INTS. We may also be able
10201 to handle CONST_DOUBLES. */
10202
10203 static bool
10204 pa_legitimate_constant_p (machine_mode mode, rtx x)
10205 {
10206 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10207 return false;
10208
10209 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10210 return false;
10211
10212 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10213 legitimate constants. The other variants can't be handled by
10214 the move patterns after reload starts. */
10215 if (tls_referenced_p (x))
10216 return false;
10217
10218 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10219 return false;
10220
10221 if (TARGET_64BIT
10222 && HOST_BITS_PER_WIDE_INT > 32
10223 && GET_CODE (x) == CONST_INT
10224 && !reload_in_progress
10225 && !reload_completed
10226 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10227 && !pa_cint_ok_for_move (UINTVAL (x)))
10228 return false;
10229
10230 if (function_label_operand (x, mode))
10231 return false;
10232
10233 return true;
10234 }
10235
10236 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10237
10238 static unsigned int
10239 pa_section_type_flags (tree decl, const char *name, int reloc)
10240 {
10241 unsigned int flags;
10242
10243 flags = default_section_type_flags (decl, name, reloc);
10244
10245 /* Function labels are placed in the constant pool. This can
10246 cause a section conflict if decls are put in ".data.rel.ro"
10247 or ".data.rel.ro.local" using the __attribute__ construct. */
10248 if (strcmp (name, ".data.rel.ro") == 0
10249 || strcmp (name, ".data.rel.ro.local") == 0)
10250 flags |= SECTION_WRITE | SECTION_RELRO;
10251
10252 return flags;
10253 }
10254
10255 /* pa_legitimate_address_p recognizes an RTL expression that is a
10256 valid memory address for an instruction. The MODE argument is the
10257 machine mode for the MEM expression that wants to use this address.
10258
10259 On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
10260 REG+REG, and REG+(REG*SCALE). The indexed address forms are only
10261 available with floating point loads and stores, and integer loads.
10262 We get better code by allowing indexed addresses in the initial
10263 RTL generation.
10264
10265 The acceptance of indexed addresses as legitimate implies that we
10266 must provide patterns for doing indexed integer stores, or the move
10267 expanders must force the address of an indexed store to a register.
10268 We have adopted the latter approach.
10269
10270 Another function of pa_legitimate_address_p is to ensure that
10271 the base register is a valid pointer for indexed instructions.
10272 On targets that have non-equivalent space registers, we have to
10273 know at the time of assembler output which register in a REG+REG
10274 pair is the base register. The REG_POINTER flag is sometimes lost
10275 in reload and the following passes, so it can't be relied on during
10276 code generation. Thus, we either have to canonicalize the order
10277 of the registers in REG+REG indexed addresses, or treat REG+REG
10278 addresses separately and provide patterns for both permutations.
10279
10280 The latter approach requires several hundred additional lines of
10281 code in pa.md. The downside to canonicalizing is that a PLUS
10282 in the wrong order can't combine to form a scaled indexed
10283 memory operand. As we won't need to canonicalize the operands if
10284 the REG_POINTER lossage can be fixed, it seems better to canonicalize.
10285
10286 We initially break out scaled indexed addresses in canonical order
10287 in pa_emit_move_sequence. LEGITIMIZE_ADDRESS also canonicalizes
10288 scaled indexed addresses during RTL generation. However, fold_rtx
10289 has its own opinion on how the operands of a PLUS should be ordered.
10290 If one of the operands is equivalent to a constant, it will make
10291 that operand the second operand. As the base register is likely to
10292 be equivalent to a SYMBOL_REF, we have made it the second operand.
10293
10294 pa_legitimate_address_p accepts REG+REG as legitimate when the
10295 operands are in the order INDEX+BASE on targets with non-equivalent
10296 space registers, and in any order on targets with equivalent space
10297 registers. It accepts both MULT+BASE and BASE+MULT for scaled indexing.
10298
10299 We treat a SYMBOL_REF as legitimate if it is part of the current
10300 function's constant-pool, because such addresses can actually be
10301 output as REG+SMALLINT. */
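
/* Schematically, on targets with non-equivalent space registers only
   the canonical INDEX+BASE ordering

	(plus (reg index) (reg/f base))

   is accepted for unscaled indexing, so the base is known to be
   XEXP (x, 1) at assembler-output time; scaled SImode indexing looks
   like

	(plus (mult (reg index) (const_int 4)) (reg/f base))

   where the scale factor must equal the mode size. */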
10302
10303 static bool
10304 pa_legitimate_address_p (machine_mode mode, rtx x, bool strict)
10305 {
10306 if ((REG_P (x)
10307 && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
10308 : REG_OK_FOR_BASE_P (x)))
10309 || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
10310 || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
10311 && REG_P (XEXP (x, 0))
10312 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10313 : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
10314 return true;
10315
10316 if (GET_CODE (x) == PLUS)
10317 {
10318 rtx base, index;
10319
10320 /* For REG+REG, the base register should be in XEXP (x, 1),
10321 so check it first. */
10322 if (REG_P (XEXP (x, 1))
10323 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
10324 : REG_OK_FOR_BASE_P (XEXP (x, 1))))
10325 base = XEXP (x, 1), index = XEXP (x, 0);
10326 else if (REG_P (XEXP (x, 0))
10327 && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
10328 : REG_OK_FOR_BASE_P (XEXP (x, 0))))
10329 base = XEXP (x, 0), index = XEXP (x, 1);
10330 else
10331 return false;
10332
10333 if (GET_CODE (index) == CONST_INT)
10334 {
10335 if (INT_5_BITS (index))
10336 return true;
10337
10338 /* When INT14_OK_STRICT is false, a secondary reload is needed
10339 to adjust the displacement of SImode and DImode floating point
10340 instructions but this may fail when the register also needs
10341 reloading. So, we return false when STRICT is true. We
10342 also reject long displacements for float mode addresses since
10343 the majority of accesses will use floating point instructions
10344 that don't support 14-bit offsets. */
10345 if (!INT14_OK_STRICT
10346 && (strict || !(reload_in_progress || reload_completed))
10347 && mode != QImode
10348 && mode != HImode)
10349 return false;
10350
10351 return base14_operand (index, mode);
10352 }
10353
10354 if (!TARGET_DISABLE_INDEXING
10355 /* Only accept the "canonical" INDEX+BASE operand order
10356 on targets with non-equivalent space registers. */
10357 && (TARGET_NO_SPACE_REGS
10358 ? REG_P (index)
10359 : (base == XEXP (x, 1) && REG_P (index)
10360 && (reload_completed
10361 || (reload_in_progress && HARD_REGISTER_P (base))
10362 || REG_POINTER (base))
10363 && (reload_completed
10364 || (reload_in_progress && HARD_REGISTER_P (index))
10365 || !REG_POINTER (index))))
10366 && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
10367 && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
10368 : REG_OK_FOR_INDEX_P (index))
10369 && borx_reg_operand (base, Pmode)
10370 && borx_reg_operand (index, Pmode))
10371 return true;
10372
10373 if (!TARGET_DISABLE_INDEXING
10374 && GET_CODE (index) == MULT
10375 && MODE_OK_FOR_SCALED_INDEXING_P (mode)
10376 && REG_P (XEXP (index, 0))
10377 && GET_MODE (XEXP (index, 0)) == Pmode
10378 && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
10379 : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
10380 && GET_CODE (XEXP (index, 1)) == CONST_INT
10381 && INTVAL (XEXP (index, 1))
10382 == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
10383 && borx_reg_operand (base, Pmode))
10384 return true;
10385
10386 return false;
10387 }
10388
10389 if (GET_CODE (x) == LO_SUM)
10390 {
10391 rtx y = XEXP (x, 0);
10392
10393 if (GET_CODE (y) == SUBREG)
10394 y = SUBREG_REG (y);
10395
10396 if (REG_P (y)
10397 && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
10398 : REG_OK_FOR_BASE_P (y)))
10399 {
10400 /* Needed for -fPIC */
10401 if (mode == Pmode
10402 && GET_CODE (XEXP (x, 1)) == UNSPEC)
10403 return true;
10404
10405 if (!INT14_OK_STRICT
10406 && (strict || !(reload_in_progress || reload_completed))
10407 && mode != QImode
10408 && mode != HImode)
10409 return false;
10410
10411 if (CONSTANT_P (XEXP (x, 1)))
10412 return true;
10413 }
10414 return false;
10415 }
10416
10417 if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
10418 return true;
10419
10420 return false;
10421 }
10422
10423 /* Look for machine dependent ways to make the invalid address AD a
10424 valid address.
10425
10426 For the PA, transform:
10427
10428 memory(X + <large int>)
10429
10430 into:
10431
10432 if ((<large int> & mask) >= (mask + 1) / 2)
10433 Y = (<large int> & ~mask) + mask + 1 Round up.
10434 else
10435 Y = (<large int> & ~mask) Round down.
10436 Z = X + Y
10437 memory (Z + (<large int> - Y));
10438
10439 This makes reload inheritance and reload_cse work better since Z
10440 can be reused.
10441
10442 There may be more opportunities to improve code with this hook. */
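
/* A worked example: for an SFmode access when INT14_OK_STRICT is
   false, MASK is 0x1f, so memory (X + 100) has (100 & 0x1f) == 4,
   below the halfway point of 16, and we round down to Y = 96.
   Reload materializes Z = X + 96 and the access becomes
   memory (Z + 4), where 4 fits the 5-bit FP displacement; a
   neighboring memory (X + 104) can then reuse Z as memory (Z + 8). */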
10443
10444 rtx
10445 pa_legitimize_reload_address (rtx ad, machine_mode mode,
10446 int opnum, int type,
10447 int ind_levels ATTRIBUTE_UNUSED)
10448 {
10449 long offset, newoffset, mask;
10450 rtx new_rtx, temp = NULL_RTX;
10451
10452 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
10453 && !INT14_OK_STRICT ? 0x1f : 0x3fff);
10454
10455 if (optimize && GET_CODE (ad) == PLUS)
10456 temp = simplify_binary_operation (PLUS, Pmode,
10457 XEXP (ad, 0), XEXP (ad, 1));
10458
10459 new_rtx = temp ? temp : ad;
10460
10461 if (optimize
10462 && GET_CODE (new_rtx) == PLUS
10463 && GET_CODE (XEXP (new_rtx, 0)) == REG
10464 && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
10465 {
10466 offset = INTVAL (XEXP ((new_rtx), 1));
10467
10468 /* Choose rounding direction. Round up if we are >= halfway. */
10469 if ((offset & mask) >= ((mask + 1) / 2))
10470 newoffset = (offset & ~mask) + mask + 1;
10471 else
10472 newoffset = offset & ~mask;
10473
10474 /* Ensure that long displacements are aligned. */
10475 if (mask == 0x3fff
10476 && (GET_MODE_CLASS (mode) == MODE_FLOAT
10477 || (TARGET_64BIT && (mode) == DImode)))
10478 newoffset &= ~(GET_MODE_SIZE (mode) - 1);
10479
10480 if (newoffset != 0 && VAL_14_BITS_P (newoffset))
10481 {
10482 temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
10483 GEN_INT (newoffset));
10484 ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
10485 push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
10486 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
10487 opnum, (enum reload_type) type);
10488 return ad;
10489 }
10490 }
10491
10492 return NULL_RTX;
10493 }
10494
10495 /* Output address vector. */
10496
10497 void
10498 pa_output_addr_vec (rtx lab, rtx body)
10499 {
10500 int idx, vlen = XVECLEN (body, 0);
10501
10502 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10503 if (TARGET_GAS)
10504 fputs ("\t.begin_brtab\n", asm_out_file);
10505 for (idx = 0; idx < vlen; idx++)
10506 {
10507 ASM_OUTPUT_ADDR_VEC_ELT
10508 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10509 }
10510 if (TARGET_GAS)
10511 fputs ("\t.end_brtab\n", asm_out_file);
10512 }
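
/* With GAS, the output for a three-entry table looks schematically
   like

	L$0012:
		.begin_brtab
		.word L$0013
		.word L$0014
		.word L$0015
		.end_brtab

   with the .word directives coming from ASM_OUTPUT_ADDR_VEC_ELT. */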
10513
10514 /* Output address difference vector. */
10515
10516 void
10517 pa_output_addr_diff_vec (rtx lab, rtx body)
10518 {
10519 rtx base = XEXP (XEXP (body, 0), 0);
10520 int idx, vlen = XVECLEN (body, 1);
10521
10522 targetm.asm_out.internal_label (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
10523 if (TARGET_GAS)
10524 fputs ("\t.begin_brtab\n", asm_out_file);
10525 for (idx = 0; idx < vlen; idx++)
10526 {
10527 ASM_OUTPUT_ADDR_DIFF_ELT
10528 (asm_out_file,
10529 body,
10530 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
10531 CODE_LABEL_NUMBER (base));
10532 }
10533 if (TARGET_GAS)
10534 fputs ("\t.end_brtab\n", asm_out_file);
10535 }
10536
10537 /* This is a helper function for the other atomic operations. This function
10538 emits a loop that contains SEQ that iterates until a compare-and-swap
10539 operation at the end succeeds. MEM is the memory to be modified. SEQ is
10540 a set of instructions that takes a value from OLD_REG as an input and
10541 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
10542 set to the current contents of MEM. After SEQ, a compare-and-swap will
10543 attempt to update MEM with NEW_REG. The function returns true when the
10544 loop was generated successfully. */
10545
10546 static bool
10547 pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
10548 {
10549 machine_mode mode = GET_MODE (mem);
10550 rtx_code_label *label;
10551 rtx cmp_reg, success, oldval;
10552
10553 /* The loop we want to generate looks like
10554
10555 cmp_reg = mem;
10556 label:
10557 old_reg = cmp_reg;
10558 seq;
10559 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
10560 if (success)
10561 goto label;
10562
10563 Note that we only do the plain load from memory once. Subsequent
10564 iterations use the value loaded by the compare-and-swap pattern. */
10565
10566 label = gen_label_rtx ();
10567 cmp_reg = gen_reg_rtx (mode);
10568
10569 emit_move_insn (cmp_reg, mem);
10570 emit_label (label);
10571 emit_move_insn (old_reg, cmp_reg);
10572 if (seq)
10573 emit_insn (seq);
10574
10575 success = NULL_RTX;
10576 oldval = cmp_reg;
10577 if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
10578 new_reg, false, MEMMODEL_SYNC_SEQ_CST,
10579 MEMMODEL_RELAXED))
10580 return false;
10581
10582 if (oldval != cmp_reg)
10583 emit_move_insn (cmp_reg, oldval);
10584
10585 /* Mark this jump predicted not taken. */
10586 emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
10587 GET_MODE (success), 1, label, 0);
10588 return true;
10589 }
10590
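/* For instance, pa_maybe_emit_compare_and_swap_exchange_loop below
   passes SEQ == NULL_RTX and NEW_REG == VAL, so the generated loop
   degenerates to "old = *mem; while (CAS (mem, old, val) fails)
   old = value observed by the failed CAS;" -- an atomic exchange. */
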
10591 /* This function tries to implement an atomic exchange operation using a
10592 compare_and_swap loop. VAL is written to *MEM. The previous contents of
10593 *MEM are returned, using TARGET if possible. No memory model is required
10594 since a compare_and_swap loop is seq-cst. */
10595
10596 rtx
10597 pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
10598 {
10599 machine_mode mode = GET_MODE (mem);
10600
10601 if (can_compare_and_swap_p (mode, true))
10602 {
10603 if (!target || !register_operand (target, mode))
10604 target = gen_reg_rtx (mode);
10605 if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
10606 return target;
10607 }
10608
10609 return NULL_RTX;
10610 }
10611
10612 #include "gt-pa.h"