/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
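
/* For example (an illustration of the size test above, not a statement
   about the scheduler descriptions): an OUT_INSN producing an SImode
   value that feeds an SFmode fpstore compares 4 bytes with 4 bytes and
   reports a bypass; a DImode result feeding the same SFmode store
   compares 8 with 4 and does not.  */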


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static int shadd_constant_p (int);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (const char *)
     ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *ca, enum machine_mode,
                                  tree, bool);
static struct machine_function * pa_init_machine_status (void);


/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which architecture we are generating code for.  */
enum architecture_type pa_arch;

/* String to hold which architecture we are generating code for.  */
const char *pa_arch_string;

/* String used with the -mfixed-range= option.  */
const char *pa_fixed_range_string;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu;

/* String to hold which cpu we are scheduling for.  */
const char *pa_cpu_string;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  const char *name;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

\f
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END output_deferred_plabels

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}, where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use f32-f127.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning ("value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning ("unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning ("unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning ("%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
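
/* Usage sketch (the register names are illustrative, not mandated by
   this file):

        -mfixed-range=%fr20-%fr23,%fr28-%fr28

   marks %fr20 through %fr23 and %fr28 as fixed and call-used.  Note
   that each comma-separated element must contain a dash, so a single
   register is written as REG-REG.  */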

void
override_options (void)
{
  if (pa_cpu_string == NULL)
    pa_cpu_string = TARGET_SCHED_DEFAULT;

  if (! strcmp (pa_cpu_string, "8000"))
    {
      pa_cpu_string = "8000";
      pa_cpu = PROCESSOR_8000;
    }
  else if (! strcmp (pa_cpu_string, "7100"))
    {
      pa_cpu_string = "7100";
      pa_cpu = PROCESSOR_7100;
    }
  else if (! strcmp (pa_cpu_string, "700"))
    {
      pa_cpu_string = "700";
      pa_cpu = PROCESSOR_700;
    }
  else if (! strcmp (pa_cpu_string, "7100LC"))
    {
      pa_cpu_string = "7100LC";
      pa_cpu = PROCESSOR_7100LC;
    }
  else if (! strcmp (pa_cpu_string, "7200"))
    {
      pa_cpu_string = "7200";
      pa_cpu = PROCESSOR_7200;
    }
  else if (! strcmp (pa_cpu_string, "7300"))
    {
      pa_cpu_string = "7300";
      pa_cpu = PROCESSOR_7300;
    }
  else
    {
      warning ("unknown -mschedule= option (%s).\nValid options are 700, 7100, 7100LC, 7200, 7300, and 8000\n", pa_cpu_string);
    }

  /* Set the instruction architecture.  */
  if (pa_arch_string && ! strcmp (pa_arch_string, "1.0"))
    {
      pa_arch_string = "1.0";
      pa_arch = ARCHITECTURE_10;
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "1.1"))
    {
      pa_arch_string = "1.1";
      pa_arch = ARCHITECTURE_11;
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "2.0"))
    {
      pa_arch_string = "2.0";
      pa_arch = ARCHITECTURE_20;
      target_flags |= MASK_PA_11 | MASK_PA_20;
    }
  else if (pa_arch_string)
    {
      warning ("unknown -march= option (%s).\nValid options are 1.0, 1.1, and 2.0\n", pa_arch_string);
    }

  if (pa_fixed_range_string)
    fix_range (pa_fixed_range_string);

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning ("PIC code generation is not supported in the portable runtime model\n");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning ("PIC code generation is not compatible with fast indirect calls\n");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning ("-g is only supported when using GAS on this processor,");
      warning ("-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return nonzero only if OP is a register of mode MODE,
   or CONST0_RTX.  */
int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  return (op == CONST0_RTX (mode) || register_operand (op, mode));
}

/* Return nonzero if OP is suitable for use in a call to a named
   function.

   For 2.5 try to eliminate either call_operand_address or
   function_label_operand, they perform very similar functions.  */
int
call_operand_address (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_MODE (op) == word_mode
          && CONSTANT_P (op) && ! TARGET_PORTABLE_RUNTIME);
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (op, 0)) == LABEL_REF)
              && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
          || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
}

/* Return 1 if the operand is either a register, zero, or a memory operand
   that is not symbolic.  */

int
reg_or_0_or_nonsymb_mem_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (op == CONST0_RTX (mode))
    return 1;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return (!symbolic_memory_operand (op, mode)
          && memory_address_p (mode, XEXP (op, 0)));
}

/* Return 1 if the operand is a register operand or a non-symbolic memory
   operand after reload.  This predicate is used for branch patterns that
   internally handle register reloading.  We need to accept non-symbolic
   memory operands after reload to ensure that the pattern is still valid
   if reload didn't find a hard register for the operand.  */

int
reg_before_reload_operand (rtx op, enum machine_mode mode)
{
  /* Don't accept a SUBREG since it will need a reload.  */
  if (GET_CODE (op) == SUBREG)
    return 0;

  if (register_operand (op, mode))
    return 1;

  if (reload_completed
      && memory_operand (op, mode)
      && !symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
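
/* For illustration (the constraint-letter semantics are assumptions
   documented in pa.h, not restated here): 8191 fits in 14 bits and is
   accepted via 'J' (ldo), while a value like 0x00f0 is accepted via
   'K' (zdepi; see zdepi_cint_p below).  A constant in none of the
   three classes needs a multi-insn sequence instead.  */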

/* Return 1 iff OP is an indexed memory operand.  */
int
indexed_memory_operand (rtx op, enum machine_mode mode)
{
  if (GET_MODE (op) != mode)
    return 0;

  /* Before reload, a (SUBREG (MEM...)) forces reloading into a register.  */
  if (reload_completed && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op) && IS_INDEX_ADDR_P (op));
}

/* Accept anything that can be used as a destination operand for a
   move instruction.  We don't accept indexed memory operands since
   they are supported only for floating point stores.  */
int
move_dest_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op)
          && !IS_INDEX_ADDR_P (op)
          && !IS_LO_SUM_DLT_ADDR_P (op));
}

/* Accept anything that can be used as a source operand for a move
   instruction.  */
int
move_src_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return cint_ok_for_move (INTVAL (op));

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return memory_address_p (mode, XEXP (op, 0));
}

/* Accept anything that can be used as the source operand for a prefetch
   instruction.  */
int
prefetch_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating prefetch insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return memory_address_p (mode, XEXP (op, 0));
}

/* Accept REG and any CONST_INT that can be moved in one instruction into a
   general register.  */
int
reg_or_cint_move_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  return (GET_CODE (op) == CONST_INT && cint_ok_for_move (INTVAL (op)));
}

int
pic_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (!flag_pic)
    return 0;

  switch (GET_CODE (op))
    {
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return (GET_CODE (XEXP (op, 0)) == LABEL_REF
              && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}

\f

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 14-bit signed integers.  */
int
arith_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && INT_14_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 11-bit signed integers.  */
int
arith11_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && INT_11_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* A constant integer suitable for use in a PRE_MODIFY memory
   reference.  */
int
pre_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) >= -0x2000 && INTVAL (op) < 0x10);
}

/* A constant integer suitable for use in a POST_MODIFY memory
   reference.  */
int
post_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) < 0x2000 && INTVAL (op) >= -0x10);
}
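
/* In other words (derived directly from the two checks above):
   PRE_MODIFY accepts displacements in [-0x2000, 0x10), i.e. -8192..15,
   while POST_MODIFY accepts [-0x10, 0x2000), i.e. -16..8191.  */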

int
arith_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_DOUBLE
              && GET_MODE (op) == mode
              && VAL_14_BITS_P (CONST_DOUBLE_LOW (op))
              && ((CONST_DOUBLE_HIGH (op) >= 0)
                  == ((CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns, or
   is an integer register.  */

int
ireg_or_int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT && INT_5_BITS (op))
          || (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32));
}

/* Return nonzero if OP is an integer register, else return zero.  */
int
ireg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32);
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns.  */

int
int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_5_BITS (op));
}

int
uint5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_U5_BITS (op));
}

int
int11_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_11_BITS (op));
}

int
uint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) >= 0 && INTVAL (op) < (HOST_WIDE_INT) 1 << 32));
#else
  return (GET_CODE (op) == CONST_INT
          || (GET_CODE (op) == CONST_DOUBLE
              && CONST_DOUBLE_HIGH (op) == 0));
#endif
}

int
arith5_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || int5_operand (op, mode);
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
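
/* Worked example (illustrative): for x = 0x00f0, lsb_mask = 0x10 and
   t = ((0x00f0 >> 4) + 0x10) & ~0xf = 0x10, a power of two, so the
   constant is accepted -- it is a 4-bit all-ones field that zdepi can
   deposit.  For x = 0x21, lsb_mask = 0x1 and t = 0x3, not a power of
   two; its two set bits span more than a sign-extended 5-bit immediate
   can cover.  */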

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
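
/* Worked example (illustrative): for mask = 0x7 (0...0111), the
   complement ends in ...1000; adding its lowest set bit carries all
   the way out and leaves 0, so the test succeeds.  For mask = 0x6
   (0...0110), the complement ends in ...1001; adding 1 gives ...1010,
   which still has more than one bit set, so the mask is rejected --
   it matches none of the patterns above.  */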

/* True iff depi or extru can be used to compute (reg & OP).  */
int
and_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && and_mask_p (INTVAL (op))));
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
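
/* For instance (illustrative), mask = 0x6, a single contiguous run of
   ones, passes: 0x6 + 2 = 0x8, a power of two.  mask = 0x5 fails:
   0x5 + 1 = 0x6, which still has two bits set, and indeed no single
   depi can OR in two separated bits.  */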

/* True iff depi can be used to compute (reg | OP).  */
int
ior_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && ior_mask_p (INTVAL (op)));
}

int
lhs_lshift_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || lhs_lshift_cint_operand (op, mode);
}

/* True iff OP is a CONST_INT of the forms 0...0xxxx or 0...01...1xxxx.
   Such values can be the left hand side x in (x << r), using the zvdepi
   instruction.  */
int
lhs_lshift_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT x;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  x = INTVAL (op) >> 4;
  return (x & (x + 1)) == 0;
}
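
/* Examples (illustrative): 0x1f3 >> 4 = 0x1f and 0x1f & 0x20 == 0, so
   0x1f3 (of the form 0...01...1xxxx) is accepted; 0x2f >> 4 = 0x2 and
   0x2 & 0x3 != 0, so 0x2f is rejected -- the bits above the low four
   are not a solid run of ones.  */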

int
arith32_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || GET_CODE (op) == CONST_INT;
}

int
pc_or_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == PC || GET_CODE (op) == LABEL_REF);
}
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      if (reg == 0)
        abort ();

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
                      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
        = gen_rtx_MEM (Pmode,
                       gen_rtx_LO_SUM (Pmode, tmp_reg,
                                       gen_rtx_UNSPEC (Pmode,
                                                       gen_rtvec (1, orig),
                                                       UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      MEM_NOTRAP_P (pic_ref) = 1;
      MEM_READONLY_P (pic_ref) = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      if (reg == 0)
        abort ();

      if (GET_CODE (XEXP (orig, 0)) == PLUS)
        {
          base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
          orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                         base == reg ? 0 : reg);
        }
      else
        abort ();

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
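
/* A concrete instance of the transformation above (numbers chosen for
   illustration): for an SImode reference to X + 100000, the mask is
   0x3fff; 100000 & 0x3fff = 1696, which is below half the boundary, so
   Y = 100000 & ~0x3fff = 98304.  Z = X + 98304 is computed once, and
   the reference becomes memory (Z + 1696), where 1696 fits in a 14-bit
   displacement.  Other offsets near 100000 can then reuse the same Z.  */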

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
    abort ();

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      if (no_new_pseudos)
        abort ();

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand0, 0),
                                                             0),
                                                       scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM
               || GET_CODE (operand1) == CONST_INT
               || (GET_CODE (operand1) == REG
                   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
                                                                       0)),
                                                       Pmode,
                                                       XEXP (XEXP (operand1, 0),
                                                             0),
                                                       scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
                                                    scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
              else if (REG_POINTER (operand0)
                       && !REG_POINTER (operand1)
                       && !HARD_REGISTER_P (operand1))
                copy_reg_pointer (operand1, operand0);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  Fortran indirect argument references
                 are ignored.  */
              if (decl
                  && !(flag_argument_noalias > 1
                       && TREE_CODE (decl) == INDIRECT_REF
                       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
1888 tree operand 1. */
1889 if (TREE_CODE (decl) == COMPONENT_REF)
1890 decl = TREE_OPERAND (decl, 1);
1891
1892 type = TREE_TYPE (decl);
1893 if (TREE_CODE (type) == ARRAY_TYPE)
1894 type = get_inner_array_type (type);
1895
1896 if (POINTER_TYPE_P (type))
1897 {
1898 int align;
1899
1900 type = TREE_TYPE (type);
1901 /* Using TYPE_ALIGN_OK is rather conservative as
1902 only the Ada front end actually sets it. */
1903 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1904 : BITS_PER_UNIT);
1905 mark_reg_pointer (operand0, align);
1906 }
1907 }
1908 }
1909
1910 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1911 return 1;
1912 }
1913 }
1914 else if (GET_CODE (operand0) == MEM)
1915 {
1916 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1917 && !(reload_in_progress || reload_completed))
1918 {
1919 rtx temp = gen_reg_rtx (DFmode);
1920
1921 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1922 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1923 return 1;
1924 }
1925 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1926 {
1927 /* Run this case quickly. */
1928 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1929 return 1;
1930 }
1931 if (! (reload_in_progress || reload_completed))
1932 {
1933 operands[0] = validize_mem (operand0);
1934 operands[1] = operand1 = force_reg (mode, operand1);
1935 }
1936 }
1937
1938 /* Simplify the source if we need to.
1939 Note we do have to handle function labels here, even though we do
1940 not consider them legitimate constants. Loop optimizations can
1941 call the emit_move_xxx with one as a source. */
1942 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1943 || function_label_operand (operand1, mode)
1944 || (GET_CODE (operand1) == HIGH
1945 && symbolic_operand (XEXP (operand1, 0), mode)))
1946 {
1947 int ishighonly = 0;
1948
1949 if (GET_CODE (operand1) == HIGH)
1950 {
1951 ishighonly = 1;
1952 operand1 = XEXP (operand1, 0);
1953 }
1954 if (symbolic_operand (operand1, mode))
1955 {
1956 /* Argh. The assembler and linker can't handle arithmetic
1957 involving plabels.
1958
1959 So we force the plabel into memory, load operand0 from
1960 the memory location, then add in the constant part. */
1961 if ((GET_CODE (operand1) == CONST
1962 && GET_CODE (XEXP (operand1, 0)) == PLUS
1963 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1964 || function_label_operand (operand1, mode))
1965 {
1966 rtx temp, const_part;
1967
1968 /* Figure out what (if any) scratch register to use. */
1969 if (reload_in_progress || reload_completed)
1970 {
1971 scratch_reg = scratch_reg ? scratch_reg : operand0;
1972 /* SCRATCH_REG will hold an address and maybe the actual
1973 data. We want it in WORD_MODE regardless of what mode it
1974 was originally given to us. */
1975 scratch_reg = force_mode (word_mode, scratch_reg);
1976 }
1977 else if (flag_pic)
1978 scratch_reg = gen_reg_rtx (Pmode);
1979
1980 if (GET_CODE (operand1) == CONST)
1981 {
1982 /* Save away the constant part of the expression. */
1983 const_part = XEXP (XEXP (operand1, 0), 1);
1984 if (GET_CODE (const_part) != CONST_INT)
1985 abort ();
1986
1987 /* Force the function label into memory. */
1988 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1989 }
1990 else
1991 {
1992 /* No constant part. */
1993 const_part = NULL_RTX;
1994
1995 /* Force the function label into memory. */
1996 temp = force_const_mem (mode, operand1);
1997 }
1998
1999
2000 /* Get the address of the memory location. PIC-ify it if
2001 necessary. */
2002 temp = XEXP (temp, 0);
2003 if (flag_pic)
2004 temp = legitimize_pic_address (temp, mode, scratch_reg);
2005
2006 /* Put the address of the memory location into our destination
2007 register. */
2008 operands[1] = temp;
2009 emit_move_sequence (operands, mode, scratch_reg);
2010
2011 /* Now load from the memory location into our destination
2012 register. */
2013 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
2014 emit_move_sequence (operands, mode, scratch_reg);
2015
2016 /* And add back in the constant part. */
2017 if (const_part != NULL_RTX)
2018 expand_inc (operand0, const_part);
2019
2020 return 1;
2021 }
2022
2023 if (flag_pic)
2024 {
2025 rtx temp;
2026
2027 if (reload_in_progress || reload_completed)
2028 {
2029 temp = scratch_reg ? scratch_reg : operand0;
2030 /* TEMP will hold an address and maybe the actual
2031 data. We want it in WORD_MODE regardless of what mode it
2032 was originally given to us. */
2033 temp = force_mode (word_mode, temp);
2034 }
2035 else
2036 temp = gen_reg_rtx (Pmode);
2037
2038 /* (const (plus (symbol) (const_int))) must be forced to
2039 memory during/after reload if the const_int will not fit
2040 in 14 bits. */
2041 if (GET_CODE (operand1) == CONST
2042 && GET_CODE (XEXP (operand1, 0)) == PLUS
2043 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2044 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2045 && (reload_completed || reload_in_progress)
2046 && flag_pic)
2047 {
2048 operands[1] = force_const_mem (mode, operand1);
2049 operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
2050 mode, temp);
2051 operands[1] = gen_rtx_MEM (mode, operands[1]);
2052 emit_move_sequence (operands, mode, temp);
2053 }
2054 else
2055 {
2056 operands[1] = legitimize_pic_address (operand1, mode, temp);
2057 if (REG_P (operand0) && REG_P (operands[1]))
2058 copy_reg_pointer (operand0, operands[1]);
2059 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2060 }
2061 }
2062 /* On the HPPA, references to data space are supposed to use dp,
2063 register 27, but showing it in the RTL inhibits various cse
2064 and loop optimizations. */
2065 else
2066 {
2067 rtx temp, set;
2068
2069 if (reload_in_progress || reload_completed)
2070 {
2071 temp = scratch_reg ? scratch_reg : operand0;
2072 /* TEMP will hold an address and maybe the actual
2073 data. We want it in WORD_MODE regardless of what mode it
2074 was originally given to us. */
2075 temp = force_mode (word_mode, temp);
2076 }
2077 else
2078 temp = gen_reg_rtx (mode);
2079
2080 /* Loading a SYMBOL_REF into a register makes that register
2081 safe to be used as the base in an indexed address.
2082
2083 Don't mark hard registers though. That loses. */
2084 if (GET_CODE (operand0) == REG
2085 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2086 mark_reg_pointer (operand0, BITS_PER_UNIT);
2087 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2088 mark_reg_pointer (temp, BITS_PER_UNIT);
2089
2090 if (ishighonly)
2091 set = gen_rtx_SET (mode, operand0, temp);
2092 else
2093 set = gen_rtx_SET (VOIDmode,
2094 operand0,
2095 gen_rtx_LO_SUM (mode, temp, operand1));
2096
2097 emit_insn (gen_rtx_SET (VOIDmode,
2098 temp,
2099 gen_rtx_HIGH (mode, operand1)));
2100 emit_insn (set);
2101
2102 }
2103 return 1;
2104 }
2105 else if (GET_CODE (operand1) != CONST_INT
2106 || !cint_ok_for_move (INTVAL (operand1)))
2107 {
2108 rtx insn, temp;
2109 rtx op1 = operand1;
2110 HOST_WIDE_INT value = 0;
2111 HOST_WIDE_INT insv = 0;
2112 int insert = 0;
2113
2114 if (GET_CODE (operand1) == CONST_INT)
2115 value = INTVAL (operand1);
2116
2117 if (TARGET_64BIT
2118 && GET_CODE (operand1) == CONST_INT
2119 && HOST_BITS_PER_WIDE_INT > 32
2120 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2121 {
2122 HOST_WIDE_INT nval;
2123
2124 /* Extract the low order 32 bits of the value and sign extend.
2125 If the new value is the same as the original value, we can
2126 can use the original value as-is. If the new value is
2127 different, we use it and insert the most-significant 32-bits
2128 of the original value into the final result. */
2129 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2130 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2131 if (value != nval)
2132 {
2133 #if HOST_BITS_PER_WIDE_INT > 32
2134 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2135 #endif
2136 insert = 1;
2137 value = nval;
2138 operand1 = GEN_INT (nval);
2139 }
2140 }
2141
2142 if (reload_in_progress || reload_completed)
2143 temp = scratch_reg ? scratch_reg : operand0;
2144 else
2145 temp = gen_reg_rtx (mode);
2146
2147 /* We don't directly split DImode constants on 32-bit targets
2148 because PLUS uses an 11-bit immediate and the insn sequence
2149 generated is not as efficient as the one using HIGH/LO_SUM. */
2150 if (GET_CODE (operand1) == CONST_INT
2151 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2152 && !insert)
2153 {
2154 /* Directly break constant into high and low parts. This
2155 provides better optimization opportunities because various
2156 passes recognize constants split with PLUS but not LO_SUM.
2157 We use a 14-bit signed low part except when the addition
2158 of 0x4000 to the high part might change the sign of the
2159 high part. */
2160 HOST_WIDE_INT low = value & 0x3fff;
2161 HOST_WIDE_INT high = value & ~ 0x3fff;
2162
2163 if (low >= 0x2000)
2164 {
2165 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2166 high += 0x2000;
2167 else
2168 high += 0x4000;
2169 }
2170
2171 low = value - high;
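/* Worked example (illustrative, not in the original source): for
   value = 0x12343abc, low = 0x3abc >= 0x2000, so high becomes
   (value & ~0x3fff) + 0x4000 = 0x12344000 and the final low is
   0x12343abc - 0x12344000 = -0x544, which fits in the signed 14-bit
   immediate field of the PLUS. */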
2172
2173 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2174 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2175 }
2176 else
2177 {
2178 emit_insn (gen_rtx_SET (VOIDmode, temp,
2179 gen_rtx_HIGH (mode, operand1)));
2180 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2181 }
2182
2183 insn = emit_move_insn (operands[0], operands[1]);
2184
2185 /* Now insert the most significant 32 bits of the value
2186 into the register. When we don't have a second register
2187 available, it could take up to nine instructions to load
2188 a 64-bit integer constant. Prior to reload, we force
2189 constants that would take more than three instructions
2190 to load to the constant pool. During and after reload,
2191 we have to handle all possible values. */
2192 if (insert)
2193 {
2194 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2195 register and the value to be inserted is outside the
2196 range that can be loaded with three depdi instructions. */
2197 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2198 {
2199 operand1 = GEN_INT (insv);
2200
2201 emit_insn (gen_rtx_SET (VOIDmode, temp,
2202 gen_rtx_HIGH (mode, operand1)));
2203 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2204 emit_insn (gen_insv (operand0, GEN_INT (32),
2205 const0_rtx, temp));
2206 }
2207 else
2208 {
2209 int len = 5, pos = 27;
2210
2211 /* Insert the bits using the depdi instruction. */
2212 while (pos >= 0)
2213 {
2214 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2215 HOST_WIDE_INT sign = v5 < 0;
2216
2217 /* Left extend the insertion. */
2218 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2219 while (pos > 0 && (insv & 1) == sign)
2220 {
2221 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2222 len += 1;
2223 pos -= 1;
2224 }
2225
2226 emit_insn (gen_insv (operand0, GEN_INT (len),
2227 GEN_INT (pos), GEN_INT (v5)));
2228
2229 len = pos > 0 && pos < 5 ? pos : 5;
2230 pos -= len;
2231 }
2232 }
2233 }
2234
2235 REG_NOTES (insn)
2236 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
2237
2238 return 1;
2239 }
2240 }
2241 /* Now have insn-emit do whatever it normally does. */
2242 return 0;
2243 }
2244
2245 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2246 it will need a link/runtime reloc). */
2247
2248 int
2249 reloc_needed (tree exp)
2250 {
2251 int reloc = 0;
2252
2253 switch (TREE_CODE (exp))
2254 {
2255 case ADDR_EXPR:
2256 return 1;
2257
2258 case PLUS_EXPR:
2259 case MINUS_EXPR:
2260 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2261 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2262 break;
2263
2264 case NOP_EXPR:
2265 case CONVERT_EXPR:
2266 case NON_LVALUE_EXPR:
2267 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2268 break;
2269
2270 case CONSTRUCTOR:
2271 {
2272 register tree link;
2273 for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
2274 if (TREE_VALUE (link) != 0)
2275 reloc |= reloc_needed (TREE_VALUE (link));
2276 }
2277 break;
2278
2279 case ERROR_MARK:
2280 break;
2281
2282 default:
2283 break;
2284 }
2285 return reloc;
2286 }
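/* Example (illustrative, not part of the original source): for a
   static initializer such as "int *p = &x + 1;" the value is a
   PLUS_EXPR containing an ADDR_EXPR, so reloc_needed returns 1;
   for "int n = 42;" there is no ADDR_EXPR and it returns 0. */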
2287
2288 /* Does operand (which is a symbolic_operand) live in text space?
2289 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2290 will be true. */
2291
2292 int
2293 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2294 {
2295 if (GET_CODE (operand) == CONST)
2296 operand = XEXP (XEXP (operand, 0), 0);
2297 if (flag_pic)
2298 {
2299 if (GET_CODE (operand) == SYMBOL_REF)
2300 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2301 }
2302 else
2303 {
2304 if (GET_CODE (operand) == SYMBOL_REF)
2305 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2306 }
2307 return 1;
2308 }
2309
2310 \f
2311 /* Return the best assembler insn template
2312 for moving operands[1] into operands[0] as a fullword. */
2313 const char *
2314 singlemove_string (rtx *operands)
2315 {
2316 HOST_WIDE_INT intval;
2317
2318 if (GET_CODE (operands[0]) == MEM)
2319 return "stw %r1,%0";
2320 if (GET_CODE (operands[1]) == MEM)
2321 return "ldw %1,%0";
2322 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2323 {
2324 long i;
2325 REAL_VALUE_TYPE d;
2326
2327 if (GET_MODE (operands[1]) != SFmode)
2328 abort ();
2329
2330 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2331 bit pattern. */
2332 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2333 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2334
2335 operands[1] = GEN_INT (i);
2336 /* Fall through to CONST_INT case. */
2337 }
2338 if (GET_CODE (operands[1]) == CONST_INT)
2339 {
2340 intval = INTVAL (operands[1]);
2341
2342 if (VAL_14_BITS_P (intval))
2343 return "ldi %1,%0";
2344 else if ((intval & 0x7ff) == 0)
2345 return "ldil L'%1,%0";
2346 else if (zdepi_cint_p (intval))
2347 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2348 else
2349 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2350 }
2351 return "copy %1,%0";
2352 }
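/* Template selection examples (illustrative, not in the original
   source): 42 fits in 14 bits and yields "ldi 42,%0"; 0x2000 has its
   low 11 bits clear and yields "ldil L'8192,%0"; 0x3c00 is a single
   contiguous bit string and takes the zdepi/depwi,z form; a general
   constant such as 0x12345678 needs the two-insn ldil/ldo sequence. */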
2353 \f
2354
2355 /* Compute position (in OP[1]) and width (in OP[2])
2356 useful for copying IMM to a register using the zdepi
2357 instructions. Store the immediate value to insert in OP[0]. */
2358 static void
2359 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2360 {
2361 int lsb, len;
2362
2363 /* Find the least significant set bit in IMM. */
2364 for (lsb = 0; lsb < 32; lsb++)
2365 {
2366 if ((imm & 1) != 0)
2367 break;
2368 imm >>= 1;
2369 }
2370
2371 /* Choose variants based on *sign* of the 5-bit field. */
2372 if ((imm & 0x10) == 0)
2373 len = (lsb <= 28) ? 4 : 32 - lsb;
2374 else
2375 {
2376 /* Find the width of the bitstring in IMM. */
2377 for (len = 5; len < 32; len++)
2378 {
2379 if ((imm & (1 << len)) == 0)
2380 break;
2381 }
2382
2383 /* Sign extend IMM as a 5-bit value. */
2384 imm = (imm & 0xf) - 0x10;
2385 }
2386
2387 op[0] = imm;
2388 op[1] = 31 - lsb;
2389 op[2] = len;
2390 }
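/* Worked example (illustrative, not in the original source): for
   IMM = 0x3c00 the least significant set bit is bit 10 and the bit
   string shifts down to 0xf, a positive 5-bit value, so we store
   op[0] = 15, op[1] = 31 - 10 = 21 and op[2] = 4, i.e. deposit the
   value 15 into a 4-bit field ending at bit position 21. */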
2391
2392 /* Compute position (in OP[1]) and width (in OP[2])
2393 useful for copying IMM to a register using the depdi,z
2394 instructions. Store the immediate value to insert in OP[0]. */
2395 void
2396 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2397 {
2398 HOST_WIDE_INT lsb, len;
2399
2400 /* Find the least significant set bit in IMM. */
2401 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2402 {
2403 if ((imm & 1) != 0)
2404 break;
2405 imm >>= 1;
2406 }
2407
2408 /* Choose variants based on *sign* of the 5-bit field. */
2409 if ((imm & 0x10) == 0)
2410 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2411 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2412 else
2413 {
2414 /* Find the width of the bitstring in IMM. */
2415 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2416 {
2417 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2418 break;
2419 }
2420
2421 /* Sign extend IMM as a 5-bit value. */
2422 imm = (imm & 0xf) - 0x10;
2423 }
2424
2425 op[0] = imm;
2426 op[1] = 63 - lsb;
2427 op[2] = len;
2428 }
2429
2430 /* Output assembler code to perform a doubleword move insn
2431 with operands OPERANDS. */
2432
2433 const char *
2434 output_move_double (rtx *operands)
2435 {
2436 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2437 rtx latehalf[2];
2438 rtx addreg0 = 0, addreg1 = 0;
2439
2440 /* First classify both operands. */
2441
2442 if (REG_P (operands[0]))
2443 optype0 = REGOP;
2444 else if (offsettable_memref_p (operands[0]))
2445 optype0 = OFFSOP;
2446 else if (GET_CODE (operands[0]) == MEM)
2447 optype0 = MEMOP;
2448 else
2449 optype0 = RNDOP;
2450
2451 if (REG_P (operands[1]))
2452 optype1 = REGOP;
2453 else if (CONSTANT_P (operands[1]))
2454 optype1 = CNSTOP;
2455 else if (offsettable_memref_p (operands[1]))
2456 optype1 = OFFSOP;
2457 else if (GET_CODE (operands[1]) == MEM)
2458 optype1 = MEMOP;
2459 else
2460 optype1 = RNDOP;
2461
2462 /* Check for the cases that the operand constraints are not
2463 supposed to allow. Abort if we get one,
2464 because generating code for these cases is painful. */
2465
2466 if (optype0 != REGOP && optype1 != REGOP)
2467 abort ();
2468
2469 /* Handle auto decrementing and incrementing loads and stores
2470 specifically, since the structure of the function doesn't work
2471 for them without major modification. Do it better when we teach
2472 this port about the general inc/dec addressing of PA.
2473 (This was written by tege. Chide him if it doesn't work.) */
2474
2475 if (optype0 == MEMOP)
2476 {
2477 /* We have to output the address syntax ourselves, since print_operand
2478 doesn't deal with the addresses we want to use. Fix this later. */
2479
2480 rtx addr = XEXP (operands[0], 0);
2481 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2482 {
2483 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2484
2485 operands[0] = XEXP (addr, 0);
2486 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2487 abort ();
2488
2489 if (!reg_overlap_mentioned_p (high_reg, addr))
2490 {
2491 /* No overlap between high target register and address
2492 register. (We do this in a non-obvious way to
2493 save a register file writeback) */
2494 if (GET_CODE (addr) == POST_INC)
2495 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2496 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2497 }
2498 else
2499 abort ();
2500 }
2501 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2502 {
2503 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2504
2505 operands[0] = XEXP (addr, 0);
2506 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2507 abort ();
2508
2509 if (!reg_overlap_mentioned_p (high_reg, addr))
2510 {
2511 /* No overlap between high target register and address
2512 register. (We do this in a non-obvious way to
2513 save a register file writeback) */
2514 if (GET_CODE (addr) == PRE_INC)
2515 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2516 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2517 }
2518 else
2519 abort ();
2520 }
2521 }
2522 if (optype1 == MEMOP)
2523 {
2524 /* We have to output the address syntax ourselves, since print_operand
2525 doesn't deal with the addresses we want to use. Fix this later. */
2526
2527 rtx addr = XEXP (operands[1], 0);
2528 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2529 {
2530 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2531
2532 operands[1] = XEXP (addr, 0);
2533 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2534 abort ();
2535
2536 if (!reg_overlap_mentioned_p (high_reg, addr))
2537 {
2538 /* No overlap between high target register and address
2539 register. (We do this in a non-obvious way to
2540 save a register file writeback) */
2541 if (GET_CODE (addr) == POST_INC)
2542 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2543 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2544 }
2545 else
2546 {
2547 /* This is an undefined situation. We should load into the
2548 address register *and* update that register. Probably
2549 we don't need to handle this at all. */
2550 if (GET_CODE (addr) == POST_INC)
2551 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2552 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2553 }
2554 }
2555 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2556 {
2557 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2558
2559 operands[1] = XEXP (addr, 0);
2560 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2561 abort ();
2562
2563 if (!reg_overlap_mentioned_p (high_reg, addr))
2564 {
2565 /* No overlap between high target register and address
2566 register. (We do this in a non-obvious way to
2567 save a register file writeback) */
2568 if (GET_CODE (addr) == PRE_INC)
2569 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2570 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2571 }
2572 else
2573 {
2574 /* This is an undefined situation. We should load into the
2575 address register *and* update that register. Probably
2576 we don't need to handle this at all. */
2577 if (GET_CODE (addr) == PRE_INC)
2578 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2579 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2580 }
2581 }
2582 else if (GET_CODE (addr) == PLUS
2583 && GET_CODE (XEXP (addr, 0)) == MULT)
2584 {
2585 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2586
2587 if (!reg_overlap_mentioned_p (high_reg, addr))
2588 {
2589 rtx xoperands[4];
2590
2591 xoperands[0] = high_reg;
2592 xoperands[1] = XEXP (addr, 1);
2593 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2594 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2595 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2596 xoperands);
2597 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2598 }
2599 else
2600 {
2601 rtx xoperands[4];
2602
2603 xoperands[0] = high_reg;
2604 xoperands[1] = XEXP (addr, 1);
2605 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2606 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2607 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2608 xoperands);
2609 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2610 }
2611 }
2612 }
2613
2614 /* If an operand is an unoffsettable memory ref, find a register
2615 we can increment temporarily to make it refer to the second word. */
2616
2617 if (optype0 == MEMOP)
2618 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2619
2620 if (optype1 == MEMOP)
2621 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2622
2623 /* Ok, we can do one word at a time.
2624 Normally we do the low-numbered word first.
2625
2626 In either case, set up in LATEHALF the operands to use
2627 for the high-numbered word and in some cases alter the
2628 operands in OPERANDS to be suitable for the low-numbered word. */
2629
2630 if (optype0 == REGOP)
2631 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2632 else if (optype0 == OFFSOP)
2633 latehalf[0] = adjust_address (operands[0], SImode, 4);
2634 else
2635 latehalf[0] = operands[0];
2636
2637 if (optype1 == REGOP)
2638 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2639 else if (optype1 == OFFSOP)
2640 latehalf[1] = adjust_address (operands[1], SImode, 4);
2641 else if (optype1 == CNSTOP)
2642 split_double (operands[1], &operands[1], &latehalf[1]);
2643 else
2644 latehalf[1] = operands[1];
2645
2646 /* If the first move would clobber the source of the second one,
2647 do them in the other order.
2648
2649 This can happen in two cases:
2650
2651 mem -> register where the first half of the destination register
2652 is the same register used in the memory's address. Reload
2653 can create such insns.
2654
2655 mem in this case will be either register indirect or register
2656 indirect plus a valid offset.
2657
2658 register -> register move where REGNO(dst) == REGNO(src + 1)
2659 someone (Tim/Tege?) claimed this can happen for parameter loads.
2660
2661 Handle mem -> register case first. */
2662 if (optype0 == REGOP
2663 && (optype1 == MEMOP || optype1 == OFFSOP)
2664 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2665 operands[1], 0))
2666 {
2667 /* Do the late half first. */
2668 if (addreg1)
2669 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2670 output_asm_insn (singlemove_string (latehalf), latehalf);
2671
2672 /* Then clobber. */
2673 if (addreg1)
2674 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2675 return singlemove_string (operands);
2676 }
2677
2678 /* Now handle register -> register case. */
2679 if (optype0 == REGOP && optype1 == REGOP
2680 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2681 {
2682 output_asm_insn (singlemove_string (latehalf), latehalf);
2683 return singlemove_string (operands);
2684 }
2685
2686 /* Normal case: do the two words, low-numbered first. */
2687
2688 output_asm_insn (singlemove_string (operands), operands);
2689
2690 /* Make any unoffsettable addresses point at high-numbered word. */
2691 if (addreg0)
2692 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2693 if (addreg1)
2694 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2695
2696 /* Do that word. */
2697 output_asm_insn (singlemove_string (latehalf), latehalf);
2698
2699 /* Undo the adds we just did. */
2700 if (addreg0)
2701 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2702 if (addreg1)
2703 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2704
2705 return "";
2706 }
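/* Overlap example (illustrative, not part of the original source):
   loading a DImode value from 0(%r4) into %r4/%r5 would clobber the
   address register if the low word were moved first, so the code
   above emits "ldw 4(%r4),%r5" for the late half before
   "ldw 0(%r4),%r4". */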
2707 \f
2708 const char *
2709 output_fp_move_double (rtx *operands)
2710 {
2711 if (FP_REG_P (operands[0]))
2712 {
2713 if (FP_REG_P (operands[1])
2714 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2715 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2716 else
2717 output_asm_insn ("fldd%F1 %1,%0", operands);
2718 }
2719 else if (FP_REG_P (operands[1]))
2720 {
2721 output_asm_insn ("fstd%F0 %1,%0", operands);
2722 }
2723 else if (operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2724 {
2725 if (GET_CODE (operands[0]) == REG)
2726 {
2727 rtx xoperands[2];
2728 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2729 xoperands[0] = operands[0];
2730 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2731 }
2732 /* This is a pain. You have to be prepared to deal with an
2733 arbitrary address here including pre/post increment/decrement.
2734
2735 So avoid this in the MD. */
2736 else
2737 abort ();
2738 }
2739 else abort ();
2740 return "";
2741 }
2742 \f
2743 /* Return a REG that occurs in ADDR with coefficient 1.
2744 ADDR can be effectively incremented by incrementing REG. */
2745
2746 static rtx
2747 find_addr_reg (rtx addr)
2748 {
2749 while (GET_CODE (addr) == PLUS)
2750 {
2751 if (GET_CODE (XEXP (addr, 0)) == REG)
2752 addr = XEXP (addr, 0);
2753 else if (GET_CODE (XEXP (addr, 1)) == REG)
2754 addr = XEXP (addr, 1);
2755 else if (CONSTANT_P (XEXP (addr, 0)))
2756 addr = XEXP (addr, 1);
2757 else if (CONSTANT_P (XEXP (addr, 1)))
2758 addr = XEXP (addr, 0);
2759 else
2760 abort ();
2761 }
2762 if (GET_CODE (addr) == REG)
2763 return addr;
2764 abort ();
2765 }
2766
2767 /* Emit code to perform a block move.
2768
2769 OPERANDS[0] is the destination pointer as a REG, clobbered.
2770 OPERANDS[1] is the source pointer as a REG, clobbered.
2771 OPERANDS[2] is a register for temporary storage.
2772 OPERANDS[3] is a register for temporary storage.
2773 OPERANDS[4] is the size as a CONST_INT
2774 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2775 OPERANDS[6] is another temporary register. */
2776
2777 const char *
2778 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2779 {
2780 int align = INTVAL (operands[5]);
2781 unsigned long n_bytes = INTVAL (operands[4]);
2782
2783 /* We can't move more than a word at a time because the PA
2784 has no integer move insns longer than a word. (Could use fp mem ops?) */
2785 if (align > (TARGET_64BIT ? 8 : 4))
2786 align = (TARGET_64BIT ? 8 : 4);
2787
2788 /* Note that we know each loop below will execute at least twice
2789 (else we would have open-coded the copy). */
2790 switch (align)
2791 {
2792 case 8:
2793 /* Pre-adjust the loop counter. */
2794 operands[4] = GEN_INT (n_bytes - 16);
2795 output_asm_insn ("ldi %4,%2", operands);
2796
2797 /* Copying loop. */
2798 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2799 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2800 output_asm_insn ("std,ma %3,8(%0)", operands);
2801 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2802 output_asm_insn ("std,ma %6,8(%0)", operands);
2803
2804 /* Handle the residual. There could be up to 15 bytes of
2805 residual to copy! */
2806 if (n_bytes % 16 != 0)
2807 {
2808 operands[4] = GEN_INT (n_bytes % 8);
2809 if (n_bytes % 16 >= 8)
2810 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2811 if (n_bytes % 8 != 0)
2812 output_asm_insn ("ldd 0(%1),%6", operands);
2813 if (n_bytes % 16 >= 8)
2814 output_asm_insn ("std,ma %3,8(%0)", operands);
2815 if (n_bytes % 8 != 0)
2816 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2817 }
2818 return "";
2819
2820 case 4:
2821 /* Pre-adjust the loop counter. */
2822 operands[4] = GEN_INT (n_bytes - 8);
2823 output_asm_insn ("ldi %4,%2", operands);
2824
2825 /* Copying loop. */
2826 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2827 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2828 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2829 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2830 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2831
2832 /* Handle the residual. There could be up to 7 bytes of
2833 residual to copy! */
2834 if (n_bytes % 8 != 0)
2835 {
2836 operands[4] = GEN_INT (n_bytes % 4);
2837 if (n_bytes % 8 >= 4)
2838 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2839 if (n_bytes % 4 != 0)
2840 output_asm_insn ("ldw 0(%1),%6", operands);
2841 if (n_bytes % 8 >= 4)
2842 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2843 if (n_bytes % 4 != 0)
2844 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2845 }
2846 return "";
2847
2848 case 2:
2849 /* Pre-adjust the loop counter. */
2850 operands[4] = GEN_INT (n_bytes - 4);
2851 output_asm_insn ("ldi %4,%2", operands);
2852
2853 /* Copying loop. */
2854 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2855 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2856 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2857 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2858 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2859
2860 /* Handle the residual. */
2861 if (n_bytes % 4 != 0)
2862 {
2863 if (n_bytes % 4 >= 2)
2864 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2865 if (n_bytes % 2 != 0)
2866 output_asm_insn ("ldb 0(%1),%6", operands);
2867 if (n_bytes % 4 >= 2)
2868 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2869 if (n_bytes % 2 != 0)
2870 output_asm_insn ("stb %6,0(%0)", operands);
2871 }
2872 return "";
2873
2874 case 1:
2875 /* Pre-adjust the loop counter. */
2876 operands[4] = GEN_INT (n_bytes - 2);
2877 output_asm_insn ("ldi %4,%2", operands);
2878
2879 /* Copying loop. */
2880 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2881 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2882 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2883 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2884 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2885
2886 /* Handle the residual. */
2887 if (n_bytes % 2 != 0)
2888 {
2889 output_asm_insn ("ldb 0(%1),%3", operands);
2890 output_asm_insn ("stb %3,0(%0)", operands);
2891 }
2892 return "";
2893
2894 default:
2895 abort ();
2896 }
2897 }
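/* Residual example (illustrative, not in the original source): with
   align = 4 and n_bytes = 23, the counter is preset to 15 and the
   loop moves 8 bytes per iteration; the residual of 23 % 8 = 7 bytes
   is handled by one extra ldw/stw pair for the whole word and a
   final "{stbys|stby},e" with %4 = 3 for the trailing bytes. */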
2898
2899 /* Count the number of insns necessary to handle this block move.
2900
2901 Basic structure is the same as output_block_move, except that we
2902 count insns rather than emit them. */
2903
2904 static int
2905 compute_movmem_length (rtx insn)
2906 {
2907 rtx pat = PATTERN (insn);
2908 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2909 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2910 unsigned int n_insns = 0;
2911
2912 /* We can't move more than a word at a time because the PA
2913 has no integer move insns longer than a word. (Could use fp mem ops?) */
2914 if (align > (TARGET_64BIT ? 8 : 4))
2915 align = (TARGET_64BIT ? 8 : 4);
2916
2917 /* The basic copying loop. */
2918 n_insns = 6;
2919
2920 /* Residuals. */
2921 if (n_bytes % (2 * align) != 0)
2922 {
2923 if ((n_bytes % (2 * align)) >= align)
2924 n_insns += 2;
2925
2926 if ((n_bytes % align) != 0)
2927 n_insns += 2;
2928 }
2929
2930 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2931 return n_insns * 4;
2932 }
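/* Worked count (illustrative, not part of the original source): for
   align = 4 and n_bytes = 23, the loop costs 6 insns, the residual
   word adds 2 (23 % 8 >= 4) and the trailing bytes add 2 more
   (23 % 4 != 0), giving 10 insns, i.e. a length of 40 bytes. */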
2933
2934 /* Emit code to perform a block clear.
2935
2936 OPERANDS[0] is the destination pointer as a REG, clobbered.
2937 OPERANDS[1] is a register for temporary storage.
2938 OPERANDS[2] is the size as a CONST_INT
2939 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2940
2941 const char *
2942 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2943 {
2944 int align = INTVAL (operands[3]);
2945 unsigned long n_bytes = INTVAL (operands[2]);
2946
2947 /* We can't clear more than a word at a time because the PA
2948 has no integer move insns longer than a word. */
2949 if (align > (TARGET_64BIT ? 8 : 4))
2950 align = (TARGET_64BIT ? 8 : 4);
2951
2952 /* Note that we know each loop below will execute at least twice
2953 (else we would have open-coded the copy). */
2954 switch (align)
2955 {
2956 case 8:
2957 /* Pre-adjust the loop counter. */
2958 operands[2] = GEN_INT (n_bytes - 16);
2959 output_asm_insn ("ldi %2,%1", operands);
2960
2961 /* Loop. */
2962 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2963 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2964 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2965
2966 /* Handle the residual. There could be up to 15 bytes of
2967 residual to clear! */
2968 if (n_bytes % 16 != 0)
2969 {
2970 operands[2] = GEN_INT (n_bytes % 8);
2971 if (n_bytes % 16 >= 8)
2972 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2973 if (n_bytes % 8 != 0)
2974 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2975 }
2976 return "";
2977
2978 case 4:
2979 /* Pre-adjust the loop counter. */
2980 operands[2] = GEN_INT (n_bytes - 8);
2981 output_asm_insn ("ldi %2,%1", operands);
2982
2983 /* Loop. */
2984 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2985 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2986 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2987
2988 /* Handle the residual. There could be up to 7 bytes of
2989 residual to clear! */
2990 if (n_bytes % 8 != 0)
2991 {
2992 operands[2] = GEN_INT (n_bytes % 4);
2993 if (n_bytes % 8 >= 4)
2994 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2995 if (n_bytes % 4 != 0)
2996 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2997 }
2998 return "";
2999
3000 case 2:
3001 /* Pre-adjust the loop counter. */
3002 operands[2] = GEN_INT (n_bytes - 4);
3003 output_asm_insn ("ldi %2,%1", operands);
3004
3005 /* Loop. */
3006 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3007 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3008 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3009
3010 /* Handle the residual. */
3011 if (n_bytes % 4 != 0)
3012 {
3013 if (n_bytes % 4 >= 2)
3014 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3015 if (n_bytes % 2 != 0)
3016 output_asm_insn ("stb %%r0,0(%0)", operands);
3017 }
3018 return "";
3019
3020 case 1:
3021 /* Pre-adjust the loop counter. */
3022 operands[2] = GEN_INT (n_bytes - 2);
3023 output_asm_insn ("ldi %2,%1", operands);
3024
3025 /* Loop. */
3026 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3027 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3028 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3029
3030 /* Handle the residual. */
3031 if (n_bytes % 2 != 0)
3032 output_asm_insn ("stb %%r0,0(%0)", operands);
3033
3034 return "";
3035
3036 default:
3037 abort ();
3038 }
3039 }
3040
3041 /* Count the number of insns necessary to handle this block clear.
3042
3043 Basic structure is the same as output_block_clear, except that we
3044 count insns rather than emit them. */
3045
3046 static int
3047 compute_clrmem_length (rtx insn)
3048 {
3049 rtx pat = PATTERN (insn);
3050 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3051 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3052 unsigned int n_insns = 0;
3053
3054 /* We can't clear more than a word at a time because the PA
3055 has no integer move insns longer than a word. */
3056 if (align > (TARGET_64BIT ? 8 : 4))
3057 align = (TARGET_64BIT ? 8 : 4);
3058
3059 /* The basic loop. */
3060 n_insns = 4;
3061
3062 /* Residuals. */
3063 if (n_bytes % (2 * align) != 0)
3064 {
3065 if ((n_bytes % (2 * align)) >= align)
3066 n_insns++;
3067
3068 if ((n_bytes % align) != 0)
3069 n_insns++;
3070 }
3071
3072 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3073 return n_insns * 4;
3074 }
3075 \f
3076
3077 const char *
3078 output_and (rtx *operands)
3079 {
3080 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3081 {
3082 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3083 int ls0, ls1, ms0, p, len;
3084
3085 for (ls0 = 0; ls0 < 32; ls0++)
3086 if ((mask & (1 << ls0)) == 0)
3087 break;
3088
3089 for (ls1 = ls0; ls1 < 32; ls1++)
3090 if ((mask & (1 << ls1)) != 0)
3091 break;
3092
3093 for (ms0 = ls1; ms0 < 32; ms0++)
3094 if ((mask & (1 << ms0)) == 0)
3095 break;
3096
3097 if (ms0 != 32)
3098 abort ();
3099
3100 if (ls1 == 32)
3101 {
3102 len = ls0;
3103
3104 if (len == 0)
3105 abort ();
3106
3107 operands[2] = GEN_INT (len);
3108 return "{extru|extrw,u} %1,31,%2,%0";
3109 }
3110 else
3111 {
3112 /* We could use this `depi' for the case above as well, but `depi'
3113 requires one more register file access than an `extru'. */
3114
3115 p = 31 - ls0;
3116 len = ls1 - ls0;
3117
3118 operands[2] = GEN_INT (p);
3119 operands[3] = GEN_INT (len);
3120 return "{depi|depwi} 0,%2,%3,%0";
3121 }
3122 }
3123 else
3124 return "and %1,%2,%0";
3125 }
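/* Mask examples (illustrative, not in the original source): a block
   of low-order ones such as 0x3f has ls1 == 32 and becomes
   "extrw,u %1,31,6,%0"; a mask such as 0xffffff00, with one run of
   zeros at the bottom, becomes "depwi 0,31,8,%0". A mask with more
   than one run of ones, e.g. 0x00ffff00, would hit the abort above;
   the insn's operand predicates are expected to exclude it. */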
3126
3127 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3128 storing the result in operands[0]. */
3129 const char *
3130 output_64bit_and (rtx *operands)
3131 {
3132 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3133 {
3134 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3135 int ls0, ls1, ms0, p, len;
3136
3137 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3138 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3139 break;
3140
3141 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3142 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3143 break;
3144
3145 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3146 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3147 break;
3148
3149 if (ms0 != HOST_BITS_PER_WIDE_INT)
3150 abort ();
3151
3152 if (ls1 == HOST_BITS_PER_WIDE_INT)
3153 {
3154 len = ls0;
3155
3156 if (len == 0)
3157 abort ();
3158
3159 operands[2] = GEN_INT (len);
3160 return "extrd,u %1,63,%2,%0";
3161 }
3162 else
3163 {
3164 /* We could use this `depdi' for the case above as well, but `depdi'
3165 requires one more register file access than an `extrd,u'. */
3166
3167 p = 63 - ls0;
3168 len = ls1 - ls0;
3169
3170 operands[2] = GEN_INT (p);
3171 operands[3] = GEN_INT (len);
3172 return "depdi 0,%2,%3,%0";
3173 }
3174 }
3175 else
3176 return "and %1,%2,%0";
3177 }
3178
3179 const char *
3180 output_ior (rtx *operands)
3181 {
3182 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3183 int bs0, bs1, p, len;
3184
3185 if (INTVAL (operands[2]) == 0)
3186 return "copy %1,%0";
3187
3188 for (bs0 = 0; bs0 < 32; bs0++)
3189 if ((mask & (1 << bs0)) != 0)
3190 break;
3191
3192 for (bs1 = bs0; bs1 < 32; bs1++)
3193 if ((mask & (1 << bs1)) == 0)
3194 break;
3195
3196 if (bs1 != 32 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3197 abort ();
3198
3199 p = 31 - bs0;
3200 len = bs1 - bs0;
3201
3202 operands[2] = GEN_INT (p);
3203 operands[3] = GEN_INT (len);
3204 return "{depi|depwi} -1,%2,%3,%0";
3205 }
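/* Worked example (illustrative, not part of the original source):
   for mask = 0x0ff0 the run of ones starts at bs0 = 4 and ends
   before bs1 = 12, so p = 27 and len = 8, giving
   "depwi -1,27,8,%0", which sets bits 4..11 of the destination.
   The abort above fires only for a mask that is not one contiguous
   run of ones. */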
3206
3207 /* Return a string to perform a bitwise inclusive-or of operands[1] with
3208 operands[2], storing the result in operands[0]. */
3209 const char *
3210 output_64bit_ior (rtx *operands)
3211 {
3212 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3213 int bs0, bs1, p, len;
3214
3215 if (INTVAL (operands[2]) == 0)
3216 return "copy %1,%0";
3217
3218 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3219 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3220 break;
3221
3222 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3223 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3224 break;
3225
3226 if (bs1 != HOST_BITS_PER_WIDE_INT
3227 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3228 abort ();
3229
3230 p = 63 - bs0;
3231 len = bs1 - bs0;
3232
3233 operands[2] = GEN_INT (p);
3234 operands[3] = GEN_INT (len);
3235 return "depdi -1,%2,%3,%0";
3236 }
3237 \f
3238 /* Target hook for assembling integer objects. This code handles
3239 aligned SI and DI integers specially, since function references must
3240 be preceded by P%. */
3241
3242 static bool
3243 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3244 {
3245 if (size == UNITS_PER_WORD && aligned_p
3246 && function_label_operand (x, VOIDmode))
3247 {
3248 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3249 output_addr_const (asm_out_file, x);
3250 fputc ('\n', asm_out_file);
3251 return true;
3252 }
3253 return default_assemble_integer (x, size, aligned_p);
3254 }
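/* Illustrative output (not part of the original source): an aligned
   word-size reference to function foo is emitted as "\t.word\tP%foo"
   (".dword" when size is 8); the P% prefix requests a plabel rather
   than a raw code address. Everything else falls through to
   default_assemble_integer. */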
3255 \f
3256 /* Output an ascii string. */
3257 void
3258 output_ascii (FILE *file, const char *p, int size)
3259 {
3260 int i;
3261 int chars_output;
3262 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3263
3264 /* The HP assembler can only take strings of 256 characters at one
3265 time. This is a limitation on input line length, *not* the
3266 length of the string. Sigh. Even worse, it seems that the
3267 restriction is in number of input characters (see \xnn &
3268 \whatever). So we have to do this very carefully. */
3269
3270 fputs ("\t.STRING \"", file);
3271
3272 chars_output = 0;
3273 for (i = 0; i < size; i += 4)
3274 {
3275 int co = 0;
3276 int io = 0;
3277 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3278 {
3279 register unsigned int c = (unsigned char) p[i + io];
3280
3281 if (c == '\"' || c == '\\')
3282 partial_output[co++] = '\\';
3283 if (c >= ' ' && c < 0177)
3284 partial_output[co++] = c;
3285 else
3286 {
3287 unsigned int hexd;
3288 partial_output[co++] = '\\';
3289 partial_output[co++] = 'x';
3290 hexd = c / 16 - 0 + '0';
3291 if (hexd > '9')
3292 hexd -= '9' - 'a' + 1;
3293 partial_output[co++] = hexd;
3294 hexd = c % 16 - 0 + '0';
3295 if (hexd > '9')
3296 hexd -= '9' - 'a' + 1;
3297 partial_output[co++] = hexd;
3298 }
3299 }
3300 if (chars_output + co > 243)
3301 {
3302 fputs ("\"\n\t.STRING \"", file);
3303 chars_output = 0;
3304 }
3305 fwrite (partial_output, 1, (size_t) co, file);
3306 chars_output += co;
3307 co = 0;
3308 }
3309 fputs ("\"\n", file);
3310 }
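/* Example (illustrative, not in the original source): the bytes
   'a', '"' and 0x07 are emitted as a\"\x07 inside the .STRING
   directive; once about 243 characters have been written the string
   is closed and a fresh "\t.STRING" line is started to stay within
   the assembler's input line limit. */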
3311
3312 /* Try to rewrite floating point comparisons & branches to avoid
3313 useless add,tr insns.
3314
3315 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3316 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3317 first attempt to remove useless add,tr insns. It is zero
3318 for the second pass as reorg sometimes leaves bogus REG_DEAD
3319 notes lying around.
3320
3321 When CHECK_NOTES is zero we can only eliminate add,tr insns
3322 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3323 instructions. */
3324 static void
3325 remove_useless_addtr_insns (int check_notes)
3326 {
3327 rtx insn;
3328 static int pass = 0;
3329
3330 /* This is fairly cheap, so always run it when optimizing. */
3331 if (optimize > 0)
3332 {
3333 int fcmp_count = 0;
3334 int fbranch_count = 0;
3335
3336 /* Walk all the insns in this function looking for fcmp & fbranch
3337 instructions. Keep track of how many of each we find. */
3338 for (insn = get_insns (); insn; insn = next_insn (insn))
3339 {
3340 rtx tmp;
3341
3342 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3343 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3344 continue;
3345
3346 tmp = PATTERN (insn);
3347
3348 /* It must be a set. */
3349 if (GET_CODE (tmp) != SET)
3350 continue;
3351
3352 /* If the destination is CCFP, then we've found an fcmp insn. */
3353 tmp = SET_DEST (tmp);
3354 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3355 {
3356 fcmp_count++;
3357 continue;
3358 }
3359
3360 tmp = PATTERN (insn);
3361 /* If this is an fbranch instruction, bump the fbranch counter. */
3362 if (GET_CODE (tmp) == SET
3363 && SET_DEST (tmp) == pc_rtx
3364 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3365 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3366 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3367 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3368 {
3369 fbranch_count++;
3370 continue;
3371 }
3372 }
3373
3374
3375 /* Find all floating point compare + branch insns. If possible,
3376 reverse the comparison & the branch to avoid add,tr insns. */
3377 for (insn = get_insns (); insn; insn = next_insn (insn))
3378 {
3379 rtx tmp, next;
3380
3381 /* Ignore anything that isn't an INSN. */
3382 if (GET_CODE (insn) != INSN)
3383 continue;
3384
3385 tmp = PATTERN (insn);
3386
3387 /* It must be a set. */
3388 if (GET_CODE (tmp) != SET)
3389 continue;
3390
3391 /* The destination must be CCFP, which is register zero. */
3392 tmp = SET_DEST (tmp);
3393 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3394 continue;
3395
3396 /* INSN should be a set of CCFP.
3397
3398 See if the result of this insn is used in a reversed FP
3399 conditional branch. If so, reverse our condition and
3400 the branch. Doing so avoids useless add,tr insns. */
3401 next = next_insn (insn);
3402 while (next)
3403 {
3404 /* Jumps, calls and labels stop our search. */
3405 if (GET_CODE (next) == JUMP_INSN
3406 || GET_CODE (next) == CALL_INSN
3407 || GET_CODE (next) == CODE_LABEL)
3408 break;
3409
3410 /* As does another fcmp insn. */
3411 if (GET_CODE (next) == INSN
3412 && GET_CODE (PATTERN (next)) == SET
3413 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3414 && REGNO (SET_DEST (PATTERN (next))) == 0)
3415 break;
3416
3417 next = next_insn (next);
3418 }
3419
3420 /* Is NEXT a branch insn? */
3421 if (next
3422 && GET_CODE (next) == JUMP_INSN)
3423 {
3424 rtx pattern = PATTERN (next);
3425
3426 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3427 and CCFP dies, then reverse our conditional and the branch
3428 to avoid the add,tr. */
3429 if (GET_CODE (pattern) == SET
3430 && SET_DEST (pattern) == pc_rtx
3431 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3432 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3433 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3434 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3435 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3436 && (fcmp_count == fbranch_count
3437 || (check_notes
3438 && find_regno_note (next, REG_DEAD, 0))))
3439 {
3440 /* Reverse the branch. */
3441 tmp = XEXP (SET_SRC (pattern), 1);
3442 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3443 XEXP (SET_SRC (pattern), 2) = tmp;
3444 INSN_CODE (next) = -1;
3445
3446 /* Reverse our condition. */
3447 tmp = PATTERN (insn);
3448 PUT_CODE (XEXP (tmp, 1),
3449 (reverse_condition_maybe_unordered
3450 (GET_CODE (XEXP (tmp, 1)))));
3451 }
3452 }
3453 }
3454 }
3455
3456 pass = !pass;
3457
3458 }
3459 \f
3460 /* You may have trouble believing this, but this is the 32 bit HP-PA
3461 stack layout. Wow.
3462
3463 Offset Contents
3464
3465 Variable arguments (optional; any number may be allocated)
3466
3467 SP-(4*(N+9)) arg word N
3468 : :
3469 SP-56 arg word 5
3470 SP-52 arg word 4
3471
3472 Fixed arguments (must be allocated; may remain unused)
3473
3474 SP-48 arg word 3
3475 SP-44 arg word 2
3476 SP-40 arg word 1
3477 SP-36 arg word 0
3478
3479 Frame Marker
3480
3481 SP-32 External Data Pointer (DP)
3482 SP-28 External sr4
3483 SP-24 External/stub RP (RP')
3484 SP-20 Current RP
3485 SP-16 Static Link
3486 SP-12 Clean up
3487 SP-8 Calling Stub RP (RP'')
3488 SP-4 Previous SP
3489
3490 Top of Frame
3491
3492 SP-0 Stack Pointer (points to next available address)
3493
3494 */
3495
3496 /* This function saves registers as follows. Registers marked with ' are
3497 this function's registers (as opposed to the previous function's).
3498 If a frame_pointer isn't needed, r4 is saved as a general register;
3499 the space for the frame pointer is still allocated, though, to keep
3500 things simple.
3501
3502
3503 Top of Frame
3504
3505 SP (FP') Previous FP
3506 SP + 4 Alignment filler (sigh)
3507 SP + 8 Space for locals reserved here.
3508 .
3509 .
3510 .
3511 SP + n All call saved registers used.
3512 .
3513 .
3514 .
3515 SP + o All call saved fp registers used.
3516 .
3517 .
3518 .
3519 SP + p (SP') points to next available address.
3520
3521 */
3522
3523 /* Global variables set by output_function_prologue(). */
3524 /* Size of frame. Need to know this to emit return insns from
3525 leaf procedures. */
3526 static HOST_WIDE_INT actual_fsize, local_fsize;
3527 static int save_fregs;
3528
3529 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3530 Handle case where DISP > 8k by using the add_high_const patterns.
3531
3532 Note in DISP > 8k case, we will leave the high part of the address
3533 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3534
3535 static void
3536 store_reg (int reg, HOST_WIDE_INT disp, int base)
3537 {
3538 rtx insn, dest, src, basereg;
3539
3540 src = gen_rtx_REG (word_mode, reg);
3541 basereg = gen_rtx_REG (Pmode, base);
3542 if (VAL_14_BITS_P (disp))
3543 {
3544 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3545 insn = emit_move_insn (dest, src);
3546 }
3547 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3548 {
3549 rtx delta = GEN_INT (disp);
3550 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3551
3552 emit_move_insn (tmpreg, delta);
3553 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3554 dest = gen_rtx_MEM (word_mode, tmpreg);
3555 insn = emit_move_insn (dest, src);
3556 if (DO_FRAME_NOTES)
3557 {
3558 REG_NOTES (insn)
3559 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3560 gen_rtx_SET (VOIDmode,
3561 gen_rtx_MEM (word_mode,
3562 gen_rtx_PLUS (word_mode, basereg,
3563 delta)),
3564 src),
3565 REG_NOTES (insn));
3566 }
3567 }
3568 else
3569 {
3570 rtx delta = GEN_INT (disp);
3571 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3572 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3573
3574 emit_move_insn (tmpreg, high);
3575 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3576 insn = emit_move_insn (dest, src);
3577 if (DO_FRAME_NOTES)
3578 {
3579 REG_NOTES (insn)
3580 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3581 gen_rtx_SET (VOIDmode,
3582 gen_rtx_MEM (word_mode,
3583 gen_rtx_PLUS (word_mode, basereg,
3584 delta)),
3585 src),
3586 REG_NOTES (insn));
3587 }
3588 }
3589
3590 if (DO_FRAME_NOTES)
3591 RTX_FRAME_RELATED_P (insn) = 1;
3592 }
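/* Illustrative case (not part of the original source): on 32-bit,
   store_reg (3, 12288, 30) has a displacement too big for 14 bits,
   so the final arm above sets %r1 to %r30 + HIGH(12288) and stores
   %r3 through the LO_SUM address, roughly "addil L'12288,%r30"
   followed by "stw %r3,R'12288(%r1)", leaving the high part of the
   address in %r1 as noted. */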
3593
3594 /* Emit RTL to store REG at the memory location specified by BASE and then
3595 add MOD to BASE. MOD must be <= 8k. */
3596
3597 static void
3598 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3599 {
3600 rtx insn, basereg, srcreg, delta;
3601
3602 if (!VAL_14_BITS_P (mod))
3603 abort ();
3604
3605 basereg = gen_rtx_REG (Pmode, base);
3606 srcreg = gen_rtx_REG (word_mode, reg);
3607 delta = GEN_INT (mod);
3608
3609 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3610 if (DO_FRAME_NOTES)
3611 {
3612 RTX_FRAME_RELATED_P (insn) = 1;
3613
3614 /* RTX_FRAME_RELATED_P must be set on each frame related set
3615 in a parallel with more than one element. Don't set
3616 RTX_FRAME_RELATED_P in the first set if reg is temporary
3617 register 1. The effect of this operation is recorded in
3618 the initial copy. */
3619 if (reg != 1)
3620 {
3621 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3622 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3623 }
3624 else
3625 {
3626 /* The first element of a PARALLEL is always processed if it is
3627 a SET. Thus, we need an expression list for this case. */
3628 REG_NOTES (insn)
3629 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3630 gen_rtx_SET (VOIDmode, basereg,
3631 gen_rtx_PLUS (word_mode, basereg, delta)),
3632 REG_NOTES (insn));
3633 }
3634 }
3635 }
3636
3637 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3638 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3639 whether to add a frame note or not.
3640
3641 In the DISP > 8k case, we leave the high part of the address in %r1.
3642 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3643
3644 static void
3645 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3646 {
3647 rtx insn;
3648
3649 if (VAL_14_BITS_P (disp))
3650 {
3651 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3652 plus_constant (gen_rtx_REG (Pmode, base), disp));
3653 }
3654 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3655 {
3656 rtx basereg = gen_rtx_REG (Pmode, base);
3657 rtx delta = GEN_INT (disp);
3658 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3659
3660 emit_move_insn (tmpreg, delta);
3661 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3662 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3663 }
3664 else
3665 {
3666 rtx basereg = gen_rtx_REG (Pmode, base);
3667 rtx delta = GEN_INT (disp);
3668 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3669
3670 emit_move_insn (tmpreg,
3671 gen_rtx_PLUS (Pmode, basereg,
3672 gen_rtx_HIGH (Pmode, delta)));
3673 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3674 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3675 }
3676
3677 if (DO_FRAME_NOTES && note)
3678 RTX_FRAME_RELATED_P (insn) = 1;
3679 }
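
/* Illustrative sketch of the HIGH/LO_SUM pairing used above.  On the
   PA, a 32-bit displacement is split into a left part materialized by
   addil/ldil and a right part added by ldo; assuming the conventional
   21/11 field split, the two parts recombine exactly.  Hypothetical
   helper for exposition only.  */

static void
split_hi_lo (unsigned int disp, unsigned int *hi, unsigned int *lo)
{
  *hi = disp & ~0x7ffU;		/* left part: upper 21 bits */
  *lo = disp & 0x7ffU;		/* right part: lower 11 bits */
  /* (*hi + *lo) == disp by construction.  */
}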
3680
3681 HOST_WIDE_INT
3682 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3683 {
3684 int freg_saved = 0;
3685 int i, j;
3686
3687 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3688 be consistent with the rounding and size calculation done here.
3689 Change them at the same time. */
3690
3691 /* We do our own stack alignment. First, round the size of the
3692 stack locals up to a word boundary. */
3693 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3694
3695 /* Space for previous frame pointer + filler. If any frame is
3696 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3697 waste some space here for the sake of HP compatibility. The
3698 first slot is only used when the frame pointer is needed. */
3699 if (size || frame_pointer_needed)
3700 size += STARTING_FRAME_OFFSET;
3701
3702 /* If the current function calls __builtin_eh_return, then we need
3703 to allocate stack space for registers that will hold data for
3704 the exception handler. */
3705 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3706 {
3707 unsigned int i;
3708
3709 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3710 continue;
3711 size += i * UNITS_PER_WORD;
3712 }
3713
3714 /* Account for space used by the callee general register saves. */
3715 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3716 if (regs_ever_live[i])
3717 size += UNITS_PER_WORD;
3718
3719 /* Account for space used by the callee floating point register saves. */
3720 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3721 if (regs_ever_live[i]
3722 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3723 {
3724 freg_saved = 1;
3725
3726 /* We always save both halves of the FP register, so always
3727 increment the frame size by 8 bytes. */
3728 size += 8;
3729 }
3730
3731 /* If any of the floating registers are saved, account for the
3732 alignment needed for the floating point register save block. */
3733 if (freg_saved)
3734 {
3735 size = (size + 7) & ~7;
3736 if (fregs_live)
3737 *fregs_live = 1;
3738 }
3739
3740 /* The various ABIs include space for the outgoing parameters in the
3741 size of the current function's stack frame. We don't need to align
3742 for the outgoing arguments as their alignment is set by the final
3743 rounding for the frame as a whole. */
3744 size += current_function_outgoing_args_size;
3745
3746 /* Allocate space for the fixed frame marker. This space must be
3747 allocated for any function that makes calls or allocates
3748 stack space. */
3749 if (!current_function_is_leaf || size)
3750 size += TARGET_64BIT ? 48 : 32;
3751
3752 /* Finally, round to the preferred stack boundary. */
3753 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3754 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3755 }
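
/* A worked example of the round-up idiom used repeatedly in
   compute_frame_size: adding ALIGN - 1 and masking rounds SIZE up to
   the next multiple of ALIGN when ALIGN is a power of two.
   Illustrative helper only.  */

static HOST_WIDE_INT
round_up_pow2 (HOST_WIDE_INT size, HOST_WIDE_INT align)
{
  /* E.g. round_up_pow2 (13, 8) == 16 and round_up_pow2 (16, 8) == 16.  */
  return (size + align - 1) & ~(align - 1);
}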
3756
3757 /* Generate the assembly code for function entry. FILE is a stdio
3758 stream to output the code to. SIZE is an int: how many units of
3759 temporary storage to allocate.
3760
3761 Refer to the array `regs_ever_live' to determine which registers to
3762 save; `regs_ever_live[I]' is nonzero if register number I is ever
3763 used in the function. This function is responsible for knowing
3764 which registers should not be saved even if used. */
3765
3766 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3767 of memory. If any fpu reg is used in the function, we allocate
3768 such a block here, at the bottom of the frame, just in case it's needed.
3769
3770 If this function is a leaf procedure, then we may choose not
3771 to do a "save" insn. The decision about whether or not
3772 to do this is made in regclass.c. */
3773
3774 static void
3775 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3776 {
3777 /* The function's label and associated .PROC must never be
3778 separated and must be output *after* any profiling declarations
3779 to avoid changing spaces/subspaces within a procedure. */
3780 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3781 fputs ("\t.PROC\n", file);
3782
3783 /* hppa_expand_prologue does the dirty work now. We just need
3784 to output the assembler directives which denote the start
3785 of a function. */
3786 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3787 if (regs_ever_live[2])
3788 fputs (",CALLS,SAVE_RP", file);
3789 else
3790 fputs (",NO_CALLS", file);
3791
3792 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3793 at the beginning of the frame and that it is used as the frame
3794 pointer for the frame. We do this because our current frame
3795 layout doesn't conform to that specified in the HP runtime
3796 documentation and we need a way to indicate to programs such as
3797 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3798 isn't used by HP compilers but is supported by the assembler.
3799 However, SAVE_SP is supposed to indicate that the previous stack
3800 pointer has been saved in the frame marker. */
3801 if (frame_pointer_needed)
3802 fputs (",SAVE_SP", file);
3803
3804 /* Pass on information about the number of callee register saves
3805 performed in the prologue.
3806
3807 The compiler is supposed to pass the highest register number
3808 saved, the assembler then has to adjust that number before
3809 entering it into the unwind descriptor (to account for any
3810 caller saved registers with lower register numbers than the
3811 first callee saved register). */
3812 if (gr_saved)
3813 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3814
3815 if (fr_saved)
3816 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3817
3818 fputs ("\n\t.ENTRY\n", file);
3819
3820 remove_useless_addtr_insns (0);
3821 }
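
/* For reference, a hypothetical function with a 128-byte frame that
   makes calls and saves one callee register would get directives
   along these lines from the code above (the exact flags depend on
   regs_ever_live, frame_pointer_needed, gr_saved and fr_saved; this
   is an illustration, not verbatim output):

	.PROC
	.CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=3
	.ENTRY
*/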
3822
3823 void
3824 hppa_expand_prologue (void)
3825 {
3826 int merge_sp_adjust_with_store = 0;
3827 HOST_WIDE_INT size = get_frame_size ();
3828 HOST_WIDE_INT offset;
3829 int i;
3830 rtx insn, tmpreg;
3831
3832 gr_saved = 0;
3833 fr_saved = 0;
3834 save_fregs = 0;
3835
3836 /* Compute total size for frame pointer, filler, locals and rounding to
3837 the next word boundary. Similar code appears in compute_frame_size
3838 and must be changed in tandem with this code. */
3839 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3840 if (local_fsize || frame_pointer_needed)
3841 local_fsize += STARTING_FRAME_OFFSET;
3842
3843 actual_fsize = compute_frame_size (size, &save_fregs);
3844
3845 /* Compute a few things we will use often. */
3846 tmpreg = gen_rtx_REG (word_mode, 1);
3847
3848 /* Save RP first. The calling conventions manual states RP will
3849 always be stored into the caller's frame at sp - 20 or sp - 16
3850 depending on which ABI is in use. */
3851 if (regs_ever_live[2] || current_function_calls_eh_return)
3852 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3853
3854 /* Allocate the local frame and set up the frame pointer if needed. */
3855 if (actual_fsize != 0)
3856 {
3857 if (frame_pointer_needed)
3858 {
3859 /* Copy the old frame pointer temporarily into %r1. Set up the
3860 new stack pointer, then store away the saved old frame pointer
3861 into the stack at sp and at the same time update the stack
3862 pointer by actual_fsize bytes. Two versions, first
3863 handles small (<8k) frames. The second handles large (>=8k)
3864 frames. */
3865 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3866 if (DO_FRAME_NOTES)
3867 {
3868 /* We need to record the frame pointer save here since the
3869 new frame pointer is set in the following insn. */
3870 RTX_FRAME_RELATED_P (insn) = 1;
3871 REG_NOTES (insn)
3872 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3873 gen_rtx_SET (VOIDmode,
3874 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3875 frame_pointer_rtx),
3876 REG_NOTES (insn));
3877 }
3878
3879 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3880 if (DO_FRAME_NOTES)
3881 RTX_FRAME_RELATED_P (insn) = 1;
3882
3883 if (VAL_14_BITS_P (actual_fsize))
3884 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3885 else
3886 {
3887 /* It is incorrect to store the saved frame pointer at *sp,
3888 then increment sp (writes beyond the current stack boundary).
3889
3890 So instead use stwm to store at *sp and post-increment the
3891 stack pointer as an atomic operation. Then increment sp to
3892 finish allocating the new frame. */
3893 HOST_WIDE_INT adjust1 = 8192 - 64;
3894 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3895
3896 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3897 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3898 adjust2, 1);
3899 }
3900
3901 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3902 we need to store the previous stack pointer (frame pointer)
3903 into the frame marker on targets that use the HP unwind
3904 library. This allows the HP unwind library to be used to
3905 unwind GCC frames. However, we are not fully compatible
3906 with the HP library because our frame layout differs from
3907 that specified in the HP runtime specification.
3908
3909 We don't want a frame note on this instruction as the frame
3910 marker moves during dynamic stack allocation.
3911
3912 This instruction also serves as a blockage to prevent
3913 register spills from being scheduled before the stack
3914 pointer is raised. This is necessary as we store
3915 registers using the frame pointer as a base register,
3916 and the frame pointer is set before sp is raised. */
3917 if (TARGET_HPUX_UNWIND_LIBRARY)
3918 {
3919 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3920 GEN_INT (TARGET_64BIT ? -8 : -4));
3921
3922 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3923 frame_pointer_rtx);
3924 }
3925 else
3926 emit_insn (gen_blockage ());
3927 }
3928 /* No frame pointer needed. */
3929 else
3930 {
3931 /* In some cases we can perform the first callee register save
3932 and allocate the stack frame at the same time. If so, just
3933 make a note of it and defer allocating the frame until saving
3934 the callee registers. */
3935 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3936 merge_sp_adjust_with_store = 1;
3937 /* Cannot optimize. Adjust the stack frame by actual_fsize
3938 bytes. */
3939 else
3940 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3941 actual_fsize, 1);
3942 }
3943 }
3944
3945 /* Normal register save.
3946
3947 Do not save the frame pointer in the frame_pointer_needed case. It
3948 was done earlier. */
3949 if (frame_pointer_needed)
3950 {
3951 offset = local_fsize;
3952
3953 /* Saving the EH return data registers in the frame is the simplest
3954 way to get the frame unwind information emitted. We put them
3955 just before the general registers. */
3956 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3957 {
3958 unsigned int i, regno;
3959
3960 for (i = 0; ; ++i)
3961 {
3962 regno = EH_RETURN_DATA_REGNO (i);
3963 if (regno == INVALID_REGNUM)
3964 break;
3965
3966 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3967 offset += UNITS_PER_WORD;
3968 }
3969 }
3970
3971 for (i = 18; i >= 4; i--)
3972 if (regs_ever_live[i] && ! call_used_regs[i])
3973 {
3974 store_reg (i, offset, FRAME_POINTER_REGNUM);
3975 offset += UNITS_PER_WORD;
3976 gr_saved++;
3977 }
3978 /* Account for %r3 which is saved in a special place. */
3979 gr_saved++;
3980 }
3981 /* No frame pointer needed. */
3982 else
3983 {
3984 offset = local_fsize - actual_fsize;
3985
3986 /* Saving the EH return data registers in the frame is the simplest
3987 way to get the frame unwind information emitted. */
3988 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3989 {
3990 unsigned int i, regno;
3991
3992 for (i = 0; ; ++i)
3993 {
3994 regno = EH_RETURN_DATA_REGNO (i);
3995 if (regno == INVALID_REGNUM)
3996 break;
3997
3998 /* If merge_sp_adjust_with_store is nonzero, then we can
3999 optimize the first save. */
4000 if (merge_sp_adjust_with_store)
4001 {
4002 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
4003 merge_sp_adjust_with_store = 0;
4004 }
4005 else
4006 store_reg (regno, offset, STACK_POINTER_REGNUM);
4007 offset += UNITS_PER_WORD;
4008 }
4009 }
4010
4011 for (i = 18; i >= 3; i--)
4012 if (regs_ever_live[i] && ! call_used_regs[i])
4013 {
4014 /* If merge_sp_adjust_with_store is nonzero, then we can
4015 optimize the first GR save. */
4016 if (merge_sp_adjust_with_store)
4017 {
4018 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
4019 merge_sp_adjust_with_store = 0;
4020 }
4021 else
4022 store_reg (i, offset, STACK_POINTER_REGNUM);
4023 offset += UNITS_PER_WORD;
4024 gr_saved++;
4025 }
4026
4027 /* If we wanted to merge the SP adjustment with a GR save, but we never
4028 did any GR saves, then just emit the adjustment here. */
4029 if (merge_sp_adjust_with_store)
4030 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4031 actual_fsize, 1);
4032 }
4033
4034 /* The hppa calling conventions say that %r19, the pic offset
4035 register, is saved at sp - 32 (in this function's frame)
4036 when generating PIC code. FIXME: What is the correct thing
4037 to do for functions which make no calls and allocate no
4038 frame? Do we need to allocate a frame, or can we just omit
4039 the save? For now we'll just omit the save.
4040
4041 We don't want a note on this insn as the frame marker can
4042 move if there is a dynamic stack allocation. */
4043 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4044 {
4045 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4046
4047 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4048
4049 }
4050
4051 /* Align pointer properly (doubleword boundary). */
4052 offset = (offset + 7) & ~7;
4053
4054 /* Floating point register store. */
4055 if (save_fregs)
4056 {
4057 rtx base;
4058
4059 /* First get the frame or stack pointer to the start of the FP register
4060 save area. */
4061 if (frame_pointer_needed)
4062 {
4063 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4064 base = frame_pointer_rtx;
4065 }
4066 else
4067 {
4068 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4069 base = stack_pointer_rtx;
4070 }
4071
4072 /* Now actually save the FP registers. */
4073 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4074 {
4075 if (regs_ever_live[i]
4076 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4077 {
4078 rtx addr, insn, reg;
4079 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4080 reg = gen_rtx_REG (DFmode, i);
4081 insn = emit_move_insn (addr, reg);
4082 if (DO_FRAME_NOTES)
4083 {
4084 RTX_FRAME_RELATED_P (insn) = 1;
4085 if (TARGET_64BIT)
4086 {
4087 rtx mem = gen_rtx_MEM (DFmode,
4088 plus_constant (base, offset));
4089 REG_NOTES (insn)
4090 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4091 gen_rtx_SET (VOIDmode, mem, reg),
4092 REG_NOTES (insn));
4093 }
4094 else
4095 {
4096 rtx meml = gen_rtx_MEM (SFmode,
4097 plus_constant (base, offset));
4098 rtx memr = gen_rtx_MEM (SFmode,
4099 plus_constant (base, offset + 4));
4100 rtx regl = gen_rtx_REG (SFmode, i);
4101 rtx regr = gen_rtx_REG (SFmode, i + 1);
4102 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4103 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4104 rtvec vec;
4105
4106 RTX_FRAME_RELATED_P (setl) = 1;
4107 RTX_FRAME_RELATED_P (setr) = 1;
4108 vec = gen_rtvec (2, setl, setr);
4109 REG_NOTES (insn)
4110 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4111 gen_rtx_SEQUENCE (VOIDmode, vec),
4112 REG_NOTES (insn));
4113 }
4114 }
4115 offset += GET_MODE_SIZE (DFmode);
4116 fr_saved++;
4117 }
4118 }
4119 }
4120 }
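
/* Illustrative arithmetic for the large-frame path above: when
   actual_fsize does not fit the 14-bit immediate, the allocation is
   done in two steps, a post-modify store of 8192 - 64 == 8128 bytes
   followed by a separate adjustment for the remainder.  Hypothetical
   helper for exposition only.  */

static void
split_large_frame (HOST_WIDE_INT actual_fsize,
		   HOST_WIDE_INT *adjust1, HOST_WIDE_INT *adjust2)
{
  *adjust1 = 8192 - 64;			/* fits the 14-bit immediate */
  *adjust2 = actual_fsize - *adjust1;	/* allocated by set_reg_plus_d */
}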
4121
4122 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4123 Handle case where DISP > 8k by using the add_high_const patterns. */
4124
4125 static void
4126 load_reg (int reg, HOST_WIDE_INT disp, int base)
4127 {
4128 rtx dest = gen_rtx_REG (word_mode, reg);
4129 rtx basereg = gen_rtx_REG (Pmode, base);
4130 rtx src;
4131
4132 if (VAL_14_BITS_P (disp))
4133 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
4134 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4135 {
4136 rtx delta = GEN_INT (disp);
4137 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4138
4139 emit_move_insn (tmpreg, delta);
4140 if (TARGET_DISABLE_INDEXING)
4141 {
4142 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4143 src = gen_rtx_MEM (word_mode, tmpreg);
4144 }
4145 else
4146 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4147 }
4148 else
4149 {
4150 rtx delta = GEN_INT (disp);
4151 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4152 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4153
4154 emit_move_insn (tmpreg, high);
4155 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4156 }
4157
4158 emit_move_insn (dest, src);
4159 }
4160
4161 /* Update the total code bytes output to the text section. */
4162
4163 static void
4164 update_total_code_bytes (int nbytes)
4165 {
4166 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4167 && !IN_NAMED_SECTION_P (cfun->decl))
4168 {
4169 if (INSN_ADDRESSES_SET_P ())
4170 {
4171 unsigned long old_total = total_code_bytes;
4172
4173 total_code_bytes += nbytes;
4174
4175 /* Be prepared to handle overflows. */
4176 if (old_total > total_code_bytes)
4177 total_code_bytes = -1;
4178 }
4179 else
4180 total_code_bytes = -1;
4181 }
4182 }
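
/* The overflow check above relies on unsigned wraparound: if adding
   NBYTES makes the running total smaller than it was, the counter
   overflowed.  A minimal stand-alone sketch of the idiom:  */

static int
add_overflows_p (unsigned long total, unsigned long nbytes)
{
  return total + nbytes < total;	/* nonzero iff the sum wrapped */
}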
4183
4184 /* This function generates the assembly code for function exit.
4185 Args are as for output_function_prologue ().
4186
4187 The function epilogue should not depend on the current stack
4188 pointer! It should use the frame pointer only. This is mandatory
4189 because of alloca; we also take advantage of it to omit stack
4190 adjustments before returning. */
4191
4192 static void
4193 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4194 {
4195 rtx insn = get_last_insn ();
4196
4197 last_address = 0;
4198
4199 /* hppa_expand_epilogue does the dirty work now. We just need
4200 to output the assembler directives which denote the end
4201 of a function.
4202
4203 To make debuggers happy, emit a nop if the epilogue was completely
4204 eliminated due to a volatile call as the last insn in the
4205 current function. That way the return address (in %r2) will
4206 always point to a valid instruction in the current function. */
4207
4208 /* Get the last real insn. */
4209 if (GET_CODE (insn) == NOTE)
4210 insn = prev_real_insn (insn);
4211
4212 /* If it is a sequence, then look inside. */
4213 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4214 insn = XVECEXP (PATTERN (insn), 0, 0);
4215
4216 /* If insn is a CALL_INSN, then it must be a call to a volatile
4217 function (otherwise there would be epilogue insns). */
4218 if (insn && GET_CODE (insn) == CALL_INSN)
4219 {
4220 fputs ("\tnop\n", file);
4221 last_address += 4;
4222 }
4223
4224 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4225
4226 if (TARGET_SOM && TARGET_GAS)
4227 {
4228 /* We're done with this subspace except possibly for some additional
4229 debug information. Forget that we are in this subspace to ensure
4230 that the next function is output in its own subspace. */
4231 forget_section ();
4232 }
4233
4234 if (INSN_ADDRESSES_SET_P ())
4235 {
4236 insn = get_last_nonnote_insn ();
4237 last_address += INSN_ADDRESSES (INSN_UID (insn));
4238 if (INSN_P (insn))
4239 last_address += insn_default_length (insn);
4240 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4241 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4242 }
4243
4244 /* Finally, update the total number of code bytes output so far. */
4245 update_total_code_bytes (last_address);
4246 }
4247
4248 void
4249 hppa_expand_epilogue (void)
4250 {
4251 rtx tmpreg;
4252 HOST_WIDE_INT offset;
4253 HOST_WIDE_INT ret_off = 0;
4254 int i;
4255 int merge_sp_adjust_with_load = 0;
4256
4257 /* We will use this often. */
4258 tmpreg = gen_rtx_REG (word_mode, 1);
4259
4260 /* Try to restore RP early to avoid load/use interlocks when
4261 RP gets used in the return (bv) instruction. This appears to still
4262 be necessary even when we schedule the prologue and epilogue. */
4263 if (regs_ever_live [2] || current_function_calls_eh_return)
4264 {
4265 ret_off = TARGET_64BIT ? -16 : -20;
4266 if (frame_pointer_needed)
4267 {
4268 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4269 ret_off = 0;
4270 }
4271 else
4272 {
4273 /* No frame pointer, and stack is smaller than 8k. */
4274 if (VAL_14_BITS_P (ret_off - actual_fsize))
4275 {
4276 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4277 ret_off = 0;
4278 }
4279 }
4280 }
4281
4282 /* General register restores. */
4283 if (frame_pointer_needed)
4284 {
4285 offset = local_fsize;
4286
4287 /* If the current function calls __builtin_eh_return, then we need
4288 to restore the saved EH data registers. */
4289 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4290 {
4291 unsigned int i, regno;
4292
4293 for (i = 0; ; ++i)
4294 {
4295 regno = EH_RETURN_DATA_REGNO (i);
4296 if (regno == INVALID_REGNUM)
4297 break;
4298
4299 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4300 offset += UNITS_PER_WORD;
4301 }
4302 }
4303
4304 for (i = 18; i >= 4; i--)
4305 if (regs_ever_live[i] && ! call_used_regs[i])
4306 {
4307 load_reg (i, offset, FRAME_POINTER_REGNUM);
4308 offset += UNITS_PER_WORD;
4309 }
4310 }
4311 else
4312 {
4313 offset = local_fsize - actual_fsize;
4314
4315 /* If the current function calls __builtin_eh_return, then we need
4316 to restore the saved EH data registers. */
4317 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4318 {
4319 unsigned int i, regno;
4320
4321 for (i = 0; ; ++i)
4322 {
4323 regno = EH_RETURN_DATA_REGNO (i);
4324 if (regno == INVALID_REGNUM)
4325 break;
4326
4327 /* Only for the first load.
4328 merge_sp_adjust_with_load holds the register load
4329 with which we will merge the sp adjustment. */
4330 if (merge_sp_adjust_with_load == 0
4331 && local_fsize == 0
4332 && VAL_14_BITS_P (-actual_fsize))
4333 merge_sp_adjust_with_load = regno;
4334 else
4335 load_reg (regno, offset, STACK_POINTER_REGNUM);
4336 offset += UNITS_PER_WORD;
4337 }
4338 }
4339
4340 for (i = 18; i >= 3; i--)
4341 {
4342 if (regs_ever_live[i] && ! call_used_regs[i])
4343 {
4344 /* Only for the first load.
4345 merge_sp_adjust_with_load holds the register load
4346 with which we will merge the sp adjustment. */
4347 if (merge_sp_adjust_with_load == 0
4348 && local_fsize == 0
4349 && VAL_14_BITS_P (-actual_fsize))
4350 merge_sp_adjust_with_load = i;
4351 else
4352 load_reg (i, offset, STACK_POINTER_REGNUM);
4353 offset += UNITS_PER_WORD;
4354 }
4355 }
4356 }
4357
4358 /* Align pointer properly (doubleword boundary). */
4359 offset = (offset + 7) & ~7;
4360
4361 /* FP register restores. */
4362 if (save_fregs)
4363 {
4364 /* Adjust the register to index off of. */
4365 if (frame_pointer_needed)
4366 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4367 else
4368 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4369
4370 /* Actually do the restores now. */
4371 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4372 if (regs_ever_live[i]
4373 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4374 {
4375 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4376 rtx dest = gen_rtx_REG (DFmode, i);
4377 emit_move_insn (dest, src);
4378 }
4379 }
4380
4381 /* Emit a blockage insn here to keep these insns from being moved to
4382 an earlier spot in the epilogue, or into the main instruction stream.
4383
4384 This is necessary as we must not cut the stack back before all the
4385 restores are finished. */
4386 emit_insn (gen_blockage ());
4387
4388 /* Reset stack pointer (and possibly frame pointer). The stack
4389 pointer is initially set to fp + 64 to avoid a race condition. */
4390 if (frame_pointer_needed)
4391 {
4392 rtx delta = GEN_INT (-64);
4393
4394 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4395 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4396 }
4397 /* If we were deferring a callee register restore, do it now. */
4398 else if (merge_sp_adjust_with_load)
4399 {
4400 rtx delta = GEN_INT (-actual_fsize);
4401 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4402
4403 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4404 }
4405 else if (actual_fsize != 0)
4406 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4407 - actual_fsize, 0);
4408
4409 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4410 frame greater than 8k), do so now. */
4411 if (ret_off != 0)
4412 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4413
4414 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4415 {
4416 rtx sa = EH_RETURN_STACKADJ_RTX;
4417
4418 emit_insn (gen_blockage ());
4419 emit_insn (TARGET_64BIT
4420 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4421 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4422 }
4423 }
4424
4425 rtx
4426 hppa_pic_save_rtx (void)
4427 {
4428 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4429 }
4430
4431 void
4432 hppa_profile_hook (int label_no)
4433 {
4434 /* We use SImode for the address of the function in both 32 and
4435 64-bit code to avoid having to provide DImode versions of the
4436 lcla2 and load_offset_label_address insn patterns. */
4437 rtx reg = gen_reg_rtx (SImode);
4438 rtx label_rtx = gen_label_rtx ();
4439 rtx begin_label_rtx, call_insn;
4440 char begin_label_name[16];
4441
4442 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4443 label_no);
4444 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4445
4446 if (TARGET_64BIT)
4447 emit_move_insn (arg_pointer_rtx,
4448 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4449 GEN_INT (64)));
4450
4451 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4452
4453 /* The address of the function is loaded into %r25 with an instruction-
4454 relative sequence that avoids the use of relocations. The sequence
4455 is split so that the load_offset_label_address instruction can
4456 occupy the delay slot of the call to _mcount. */
4457 if (TARGET_PA_20)
4458 emit_insn (gen_lcla2 (reg, label_rtx));
4459 else
4460 emit_insn (gen_lcla1 (reg, label_rtx));
4461
4462 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4463 reg, begin_label_rtx, label_rtx));
4464
4465 #ifndef NO_PROFILE_COUNTERS
4466 {
4467 rtx count_label_rtx, addr, r24;
4468 char count_label_name[16];
4469
4470 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4471 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4472
4473 addr = force_reg (Pmode, count_label_rtx);
4474 r24 = gen_rtx_REG (Pmode, 24);
4475 emit_move_insn (r24, addr);
4476
4477 call_insn =
4478 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4479 gen_rtx_SYMBOL_REF (Pmode,
4480 "_mcount")),
4481 GEN_INT (TARGET_64BIT ? 24 : 12)));
4482
4483 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4484 }
4485 #else
4486
4487 call_insn =
4488 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4489 gen_rtx_SYMBOL_REF (Pmode,
4490 "_mcount")),
4491 GEN_INT (TARGET_64BIT ? 16 : 8)));
4492
4493 #endif
4494
4495 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4496 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4497
4498 /* Indicate the _mcount call cannot throw, nor will it execute a
4499 non-local goto. */
4500 REG_NOTES (call_insn)
4501 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4502 }
4503
4504 /* Fetch the return address for the frame COUNT steps up from
4505 the current frame, after the prologue. FRAMEADDR is the
4506 frame pointer of the COUNT frame.
4507
4508 We want to ignore any export stub remnants here. To handle this,
4509 we examine the code at the return address, and if it is an export
4510 stub, we return a memory rtx for the stub return address stored
4511 at frame-24.
4512
4513 The value returned is used in two different ways:
4514
4515 1. To find a function's caller.
4516
4517 2. To change the return address for a function.
4518
4519 This function handles most instances of case 1; however, it will
4520 fail if there are two levels of stubs to execute on the return
4521 path. The only way I believe that can happen is if the return value
4522 needs a parameter relocation, which never happens for C code.
4523
4524 This function handles most instances of case 2; however, it will
4525 fail if we did not originally have stub code on the return path
4526 but will need stub code on the new return path. This can happen if
4527 the caller & callee are both in the main program, but the new
4528 return location is in a shared library. */
4529
4530 rtx
4531 return_addr_rtx (int count, rtx frameaddr)
4532 {
4533 rtx label;
4534 rtx rp;
4535 rtx saved_rp;
4536 rtx ins;
4537
4538 if (count != 0)
4539 return NULL_RTX;
4540
4541 rp = get_hard_reg_initial_val (Pmode, 2);
4542
4543 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4544 return rp;
4545
4546 saved_rp = gen_reg_rtx (Pmode);
4547 emit_move_insn (saved_rp, rp);
4548
4549 /* Get pointer to the instruction stream. We have to mask out the
4550 privilege level from the two low order bits of the return address
4551 pointer here so that ins will point to the start of the first
4552 instruction that would have been executed if we returned. */
4553 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4554 label = gen_label_rtx ();
4555
4556 /* Check the instruction stream at the normal return address for the
4557 export stub:
4558
4559 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4560 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4561 0x00011820 | stub+16: mtsp r1,sr0
4562 0xe0400002 | stub+20: be,n 0(sr0,rp)
4563
4564 If it is an export stub, then our return address is really in
4565 -24[frameaddr]. */
4566
4567 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4568 NULL_RTX, SImode, 1);
4569 emit_jump_insn (gen_bne (label));
4570
4571 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4572 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4573 emit_jump_insn (gen_bne (label));
4574
4575 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4576 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4577 emit_jump_insn (gen_bne (label));
4578
4579 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4580 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4581
4582 /* If there is no export stub then just use the value saved from
4583 the return pointer register. */
4584
4585 emit_jump_insn (gen_bne (label));
4586
4587 /* Here we know that our return address points to an export
4588 stub. We don't want to return the address of the export stub,
4589 but rather the return address of the export stub. That return
4590 address is stored at -24[frameaddr]. */
4591
4592 emit_move_insn (saved_rp,
4593 gen_rtx_MEM (Pmode,
4594 memory_address (Pmode,
4595 plus_constant (frameaddr,
4596 -24))));
4597
4598 emit_label (label);
4599 return saved_rp;
4600 }
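
/* A compile-time analogue (hypothetical helper, not part of the
   original file) of the run-time comparison sequence emitted above:
   the masked return address points at an export stub exactly when the
   four instruction words match the stub template.  */

static int
export_stub_words_p (const unsigned int *ins)
{
  return (ins[0] == 0x4bc23fd1		/* ldw -18(sr0,sp),rp */
	  && ins[1] == 0x004010a1	/* ldsid (sr0,rp),r1 */
	  && ins[2] == 0x00011820	/* mtsp r1,sr0 */
	  && ins[3] == 0xe0400002);	/* be,n 0(sr0,rp) */
}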
4601
4602 /* This is only valid once reload has completed because it depends on
4603 knowing exactly how much (if any) frame there is and...
4604
4605 It's only valid if there is no frame marker to de-allocate and...
4606
4607 It's only valid if %r2 hasn't been saved into the caller's frame
4608 (we're not profiling and %r2 isn't live anywhere). */
4609 int
4610 hppa_can_use_return_insn_p (void)
4611 {
4612 return (reload_completed
4613 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4614 && ! regs_ever_live[2]
4615 && ! frame_pointer_needed);
4616 }
4617
4618 void
4619 emit_bcond_fp (enum rtx_code code, rtx operand0)
4620 {
4621 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4622 gen_rtx_IF_THEN_ELSE (VOIDmode,
4623 gen_rtx_fmt_ee (code,
4624 VOIDmode,
4625 gen_rtx_REG (CCFPmode, 0),
4626 const0_rtx),
4627 gen_rtx_LABEL_REF (VOIDmode, operand0),
4628 pc_rtx)));
4629
4630 }
4631
4632 rtx
4633 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4634 {
4635 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4636 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4637 }
4638
4639 /* Adjust the cost of a scheduling dependency. Return the new cost of
4640 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4641
4642 static int
4643 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4644 {
4645 enum attr_type attr_type;
4646
4647 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4648 true dependencies as they are described with bypasses now. */
4649 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4650 return cost;
4651
4652 if (! recog_memoized (insn))
4653 return 0;
4654
4655 attr_type = get_attr_type (insn);
4656
4657 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
4658 {
4659 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4660 cycles later. */
4661
4662 if (attr_type == TYPE_FPLOAD)
4663 {
4664 rtx pat = PATTERN (insn);
4665 rtx dep_pat = PATTERN (dep_insn);
4666 if (GET_CODE (pat) == PARALLEL)
4667 {
4668 /* This happens for the fldXs,mb patterns. */
4669 pat = XVECEXP (pat, 0, 0);
4670 }
4671 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4672 /* If this happens, we have to extend this to schedule
4673 optimally. Return 0 for now. */
4674 return 0;
4675
4676 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4677 {
4678 if (! recog_memoized (dep_insn))
4679 return 0;
4680 switch (get_attr_type (dep_insn))
4681 {
4682 case TYPE_FPALU:
4683 case TYPE_FPMULSGL:
4684 case TYPE_FPMULDBL:
4685 case TYPE_FPDIVSGL:
4686 case TYPE_FPDIVDBL:
4687 case TYPE_FPSQRTSGL:
4688 case TYPE_FPSQRTDBL:
4689 /* A fpload can't be issued until one cycle before a
4690 preceding arithmetic operation has finished if
4691 the target of the fpload is any of the sources
4692 (or destination) of the arithmetic operation. */
4693 return insn_default_latency (dep_insn) - 1;
4694
4695 default:
4696 return 0;
4697 }
4698 }
4699 }
4700 else if (attr_type == TYPE_FPALU)
4701 {
4702 rtx pat = PATTERN (insn);
4703 rtx dep_pat = PATTERN (dep_insn);
4704 if (GET_CODE (pat) == PARALLEL)
4705 {
4706 /* This happens for the fldXs,mb patterns. */
4707 pat = XVECEXP (pat, 0, 0);
4708 }
4709 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4710 /* If this happens, we have to extend this to schedule
4711 optimally. Return 0 for now. */
4712 return 0;
4713
4714 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4715 {
4716 if (! recog_memoized (dep_insn))
4717 return 0;
4718 switch (get_attr_type (dep_insn))
4719 {
4720 case TYPE_FPDIVSGL:
4721 case TYPE_FPDIVDBL:
4722 case TYPE_FPSQRTSGL:
4723 case TYPE_FPSQRTDBL:
4724 /* An ALU flop can't be issued until two cycles before a
4725 preceding divide or sqrt operation has finished if
4726 the target of the ALU flop is any of the sources
4727 (or destination) of the divide or sqrt operation. */
4728 return insn_default_latency (dep_insn) - 2;
4729
4730 default:
4731 return 0;
4732 }
4733 }
4734 }
4735
4736 /* For other anti dependencies, the cost is 0. */
4737 return 0;
4738 }
4739 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4740 {
4741 /* Output dependency; DEP_INSN writes a register that INSN writes some
4742 cycles later. */
4743 if (attr_type == TYPE_FPLOAD)
4744 {
4745 rtx pat = PATTERN (insn);
4746 rtx dep_pat = PATTERN (dep_insn);
4747 if (GET_CODE (pat) == PARALLEL)
4748 {
4749 /* This happens for the fldXs,mb patterns. */
4750 pat = XVECEXP (pat, 0, 0);
4751 }
4752 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4753 /* If this happens, we have to extend this to schedule
4754 optimally. Return 0 for now. */
4755 return 0;
4756
4757 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4758 {
4759 if (! recog_memoized (dep_insn))
4760 return 0;
4761 switch (get_attr_type (dep_insn))
4762 {
4763 case TYPE_FPALU:
4764 case TYPE_FPMULSGL:
4765 case TYPE_FPMULDBL:
4766 case TYPE_FPDIVSGL:
4767 case TYPE_FPDIVDBL:
4768 case TYPE_FPSQRTSGL:
4769 case TYPE_FPSQRTDBL:
4770 /* A fpload can't be issued until one cycle before a
4771 preceding arithmetic operation has finished if
4772 the target of the fpload is the destination of the
4773 arithmetic operation.
4774
4775 Exception: For PA7100LC, PA7200 and PA7300, the cost
4776 is 3 cycles, unless they bundle together. We also
4777 pay the penalty if the second insn is a fpload. */
4778 return insn_default_latency (dep_insn) - 1;
4779
4780 default:
4781 return 0;
4782 }
4783 }
4784 }
4785 else if (attr_type == TYPE_FPALU)
4786 {
4787 rtx pat = PATTERN (insn);
4788 rtx dep_pat = PATTERN (dep_insn);
4789 if (GET_CODE (pat) == PARALLEL)
4790 {
4791 /* This happens for the fldXs,mb patterns. */
4792 pat = XVECEXP (pat, 0, 0);
4793 }
4794 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4795 /* If this happens, we have to extend this to schedule
4796 optimally. Return 0 for now. */
4797 return 0;
4798
4799 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4800 {
4801 if (! recog_memoized (dep_insn))
4802 return 0;
4803 switch (get_attr_type (dep_insn))
4804 {
4805 case TYPE_FPDIVSGL:
4806 case TYPE_FPDIVDBL:
4807 case TYPE_FPSQRTSGL:
4808 case TYPE_FPSQRTDBL:
4809 /* An ALU flop can't be issued until two cycles before a
4810 preceding divide or sqrt operation has finished if
4811 the target of the ALU flop is also the target of
4812 the divide or sqrt operation. */
4813 return insn_default_latency (dep_insn) - 2;
4814
4815 default:
4816 return 0;
4817 }
4818 }
4819 }
4820
4821 /* For other output dependencies, the cost is 0. */
4822 return 0;
4823 }
4824 else
4825 abort ();
4826 }
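
/* Worked example of the adjustment above: if an fpload writes a
   register that a preceding FP multiply (default latency three
   cycles) is still reading, the anti-dependency cost becomes
   3 - 1 == 2, i.e. the fpload may issue one cycle before the multiply
   finishes.  (Illustrative numbers; the actual latencies come from
   the scheduling descriptions in pa.md.)  */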
4827
4828 /* Adjust scheduling priorities. We use this to try and keep addil
4829 and the next use of %r1 close together. */
4830 static int
4831 pa_adjust_priority (rtx insn, int priority)
4832 {
4833 rtx set = single_set (insn);
4834 rtx src, dest;
4835 if (set)
4836 {
4837 src = SET_SRC (set);
4838 dest = SET_DEST (set);
4839 if (GET_CODE (src) == LO_SUM
4840 && symbolic_operand (XEXP (src, 1), VOIDmode)
4841 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4842 priority >>= 3;
4843
4844 else if (GET_CODE (src) == MEM
4845 && GET_CODE (XEXP (src, 0)) == LO_SUM
4846 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4847 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4848 priority >>= 1;
4849
4850 else if (GET_CODE (dest) == MEM
4851 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4852 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4853 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4854 priority >>= 3;
4855 }
4856 return priority;
4857 }
4858
4859 /* The 700 can only issue a single insn at a time.
4860 The 7XXX processors can issue two insns at a time.
4861 The 8000 can issue 4 insns at a time. */
4862 static int
4863 pa_issue_rate (void)
4864 {
4865 switch (pa_cpu)
4866 {
4867 case PROCESSOR_700: return 1;
4868 case PROCESSOR_7100: return 2;
4869 case PROCESSOR_7100LC: return 2;
4870 case PROCESSOR_7200: return 2;
4871 case PROCESSOR_7300: return 2;
4872 case PROCESSOR_8000: return 4;
4873
4874 default:
4875 abort ();
4876 }
4877 }
4878
4879
4880
4881 /* Return any length adjustment needed by INSN which already has its length
4882 computed as LENGTH. Return zero if no adjustment is necessary.
4883
4884 For the PA: function calls, millicode calls, and backwards short
4885 conditional branches with unfilled delay slots need an adjustment by +1
4886 (to account for the NOP which will be inserted into the instruction stream).
4887
4888 Also compute the length of an inline block move here as it is too
4889 complicated to express as a length attribute in pa.md. */
4890 int
4891 pa_adjust_insn_length (rtx insn, int length)
4892 {
4893 rtx pat = PATTERN (insn);
4894
4895 /* Jumps inside switch tables which have unfilled delay slots need
4896 adjustment. */
4897 if (GET_CODE (insn) == JUMP_INSN
4898 && GET_CODE (pat) == PARALLEL
4899 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4900 return 4;
4901 /* Millicode insn with an unfilled delay slot. */
4902 else if (GET_CODE (insn) == INSN
4903 && GET_CODE (pat) != SEQUENCE
4904 && GET_CODE (pat) != USE
4905 && GET_CODE (pat) != CLOBBER
4906 && get_attr_type (insn) == TYPE_MILLI)
4907 return 4;
4908 /* Block move pattern. */
4909 else if (GET_CODE (insn) == INSN
4910 && GET_CODE (pat) == PARALLEL
4911 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4912 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4913 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4914 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4915 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4916 return compute_movmem_length (insn) - 4;
4917 /* Block clear pattern. */
4918 else if (GET_CODE (insn) == INSN
4919 && GET_CODE (pat) == PARALLEL
4920 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4921 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4922 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4923 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4924 return compute_clrmem_length (insn) - 4;
4925 /* Conditional branch with an unfilled delay slot. */
4926 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4927 {
4928 /* Adjust a short backwards conditional with an unfilled delay slot. */
4929 if (GET_CODE (pat) == SET
4930 && length == 4
4931 && ! forward_branch_p (insn))
4932 return 4;
4933 else if (GET_CODE (pat) == PARALLEL
4934 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4935 && length == 4)
4936 return 4;
4937 /* Adjust dbra insn with short backwards conditional branch with
4938 unfilled delay slot -- only for the case where the counter is
4939 in a general register. */
4940 else if (GET_CODE (pat) == PARALLEL
4941 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4942 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4943 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4944 && length == 4
4945 && ! forward_branch_p (insn))
4946 return 4;
4947 else
4948 return 0;
4949 }
4950 return 0;
4951 }
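
/* Example of the adjustment above: a short backwards conditional
   branch whose delay slot could not be filled reports length 4, and
   pa_adjust_insn_length adds another 4 bytes for the nop that will be
   inserted into the slot.  */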
4952
4953 /* Print operand X (an rtx) in assembler syntax to file FILE.
4954 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4955 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4956
4957 void
4958 print_operand (FILE *file, rtx x, int code)
4959 {
4960 switch (code)
4961 {
4962 case '#':
4963 /* Output a 'nop' if there's nothing for the delay slot. */
4964 if (dbr_sequence_length () == 0)
4965 fputs ("\n\tnop", file);
4966 return;
4967 case '*':
4968 /* Output a nullification completer if there's nothing for the
4969 delay slot or nullification is requested. */
4970 if (dbr_sequence_length () == 0 ||
4971 (final_sequence &&
4972 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4973 fputs (",n", file);
4974 return;
4975 case 'R':
4976 /* Print out the second register name of a register pair.
4977 I.e., R (6) => 7. */
4978 fputs (reg_names[REGNO (x) + 1], file);
4979 return;
4980 case 'r':
4981 /* A register or zero. */
4982 if (x == const0_rtx
4983 || (x == CONST0_RTX (DFmode))
4984 || (x == CONST0_RTX (SFmode)))
4985 {
4986 fputs ("%r0", file);
4987 return;
4988 }
4989 else
4990 break;
4991 case 'f':
4992 /* A register or zero (floating point). */
4993 if (x == const0_rtx
4994 || (x == CONST0_RTX (DFmode))
4995 || (x == CONST0_RTX (SFmode)))
4996 {
4997 fputs ("%fr0", file);
4998 return;
4999 }
5000 else
5001 break;
5002 case 'A':
5003 {
5004 rtx xoperands[2];
5005
5006 xoperands[0] = XEXP (XEXP (x, 0), 0);
5007 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5008 output_global_address (file, xoperands[1], 0);
5009 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5010 return;
5011 }
5012
5013 case 'C': /* Plain (C)ondition */
5014 case 'X':
5015 switch (GET_CODE (x))
5016 {
5017 case EQ:
5018 fputs ("=", file); break;
5019 case NE:
5020 fputs ("<>", file); break;
5021 case GT:
5022 fputs (">", file); break;
5023 case GE:
5024 fputs (">=", file); break;
5025 case GEU:
5026 fputs (">>=", file); break;
5027 case GTU:
5028 fputs (">>", file); break;
5029 case LT:
5030 fputs ("<", file); break;
5031 case LE:
5032 fputs ("<=", file); break;
5033 case LEU:
5034 fputs ("<<=", file); break;
5035 case LTU:
5036 fputs ("<<", file); break;
5037 default:
5038 abort ();
5039 }
5040 return;
5041 case 'N': /* Condition, (N)egated */
5042 switch (GET_CODE (x))
5043 {
5044 case EQ:
5045 fputs ("<>", file); break;
5046 case NE:
5047 fputs ("=", file); break;
5048 case GT:
5049 fputs ("<=", file); break;
5050 case GE:
5051 fputs ("<", file); break;
5052 case GEU:
5053 fputs ("<<", file); break;
5054 case GTU:
5055 fputs ("<<=", file); break;
5056 case LT:
5057 fputs (">=", file); break;
5058 case LE:
5059 fputs (">", file); break;
5060 case LEU:
5061 fputs (">>", file); break;
5062 case LTU:
5063 fputs (">>=", file); break;
5064 default:
5065 abort ();
5066 }
5067 return;
5068 /* For floating point comparisons. Note that the output
5069 predicates are the complement of the desired mode. */
5070 case 'Y':
5071 switch (GET_CODE (x))
5072 {
5073 case EQ:
5074 fputs ("!=", file); break;
5075 case NE:
5076 fputs ("=", file); break;
5077 case GT:
5078 fputs ("!>", file); break;
5079 case GE:
5080 fputs ("!>=", file); break;
5081 case LT:
5082 fputs ("!<", file); break;
5083 case LE:
5084 fputs ("!<=", file); break;
5085 case LTGT:
5086 fputs ("!<>", file); break;
5087 case UNLE:
5088 fputs (">", file); break;
5089 case UNLT:
5090 fputs (">=", file); break;
5091 case UNGE:
5092 fputs ("<", file); break;
5093 case UNGT:
5094 fputs ("<=", file); break;
5095 case UNEQ:
5096 fputs ("<>", file); break;
5097 case UNORDERED:
5098 fputs ("<=>", file); break;
5099 case ORDERED:
5100 fputs ("!<=>", file); break;
5101 default:
5102 abort ();
5103 }
5104 return;
5105 case 'S': /* Condition, operands are (S)wapped. */
5106 switch (GET_CODE (x))
5107 {
5108 case EQ:
5109 fputs ("=", file); break;
5110 case NE:
5111 fputs ("<>", file); break;
5112 case GT:
5113 fputs ("<", file); break;
5114 case GE:
5115 fputs ("<=", file); break;
5116 case GEU:
5117 fputs ("<<=", file); break;
5118 case GTU:
5119 fputs ("<<", file); break;
5120 case LT:
5121 fputs (">", file); break;
5122 case LE:
5123 fputs (">=", file); break;
5124 case LEU:
5125 fputs (">>=", file); break;
5126 case LTU:
5127 fputs (">>", file); break;
5128 default:
5129 abort ();
5130 }
5131 return;
5132 case 'B': /* Condition, (B)oth swapped and negate. */
5133 switch (GET_CODE (x))
5134 {
5135 case EQ:
5136 fputs ("<>", file); break;
5137 case NE:
5138 fputs ("=", file); break;
5139 case GT:
5140 fputs (">=", file); break;
5141 case GE:
5142 fputs (">", file); break;
5143 case GEU:
5144 fputs (">>", file); break;
5145 case GTU:
5146 fputs (">>=", file); break;
5147 case LT:
5148 fputs ("<=", file); break;
5149 case LE:
5150 fputs ("<", file); break;
5151 case LEU:
5152 fputs ("<<", file); break;
5153 case LTU:
5154 fputs ("<<=", file); break;
5155 default:
5156 abort ();
5157 }
5158 return;
5159 case 'k':
5160 if (GET_CODE (x) == CONST_INT)
5161 {
5162 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5163 return;
5164 }
5165 abort ();
5166 case 'Q':
5167 if (GET_CODE (x) == CONST_INT)
5168 {
5169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5170 return;
5171 }
5172 abort ();
5173 case 'L':
5174 if (GET_CODE (x) == CONST_INT)
5175 {
5176 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5177 return;
5178 }
5179 abort ();
5180 case 'O':
5181 if (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0)
5182 {
5183 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5184 return;
5185 }
5186 abort ();
5187 case 'p':
5188 if (GET_CODE (x) == CONST_INT)
5189 {
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5191 return;
5192 }
5193 abort ();
5194 case 'P':
5195 if (GET_CODE (x) == CONST_INT)
5196 {
5197 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5198 return;
5199 }
5200 abort ();
5201 case 'I':
5202 if (GET_CODE (x) == CONST_INT)
5203 fputs ("i", file);
5204 return;
5205 case 'M':
5206 case 'F':
5207 switch (GET_CODE (XEXP (x, 0)))
5208 {
5209 case PRE_DEC:
5210 case PRE_INC:
5211 if (ASSEMBLER_DIALECT == 0)
5212 fputs ("s,mb", file);
5213 else
5214 fputs (",mb", file);
5215 break;
5216 case POST_DEC:
5217 case POST_INC:
5218 if (ASSEMBLER_DIALECT == 0)
5219 fputs ("s,ma", file);
5220 else
5221 fputs (",ma", file);
5222 break;
5223 case PLUS:
5224 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5225 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5226 {
5227 if (ASSEMBLER_DIALECT == 0)
5228 fputs ("x", file);
5229 }
5230 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5231 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5232 {
5233 if (ASSEMBLER_DIALECT == 0)
5234 fputs ("x,s", file);
5235 else
5236 fputs (",s", file);
5237 }
5238 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5239 fputs ("s", file);
5240 break;
5241 default:
5242 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5243 fputs ("s", file);
5244 break;
5245 }
5246 return;
5247 case 'G':
5248 output_global_address (file, x, 0);
5249 return;
5250 case 'H':
5251 output_global_address (file, x, 1);
5252 return;
5253 case 0: /* Don't do anything special */
5254 break;
5255 case 'Z':
5256 {
5257 unsigned op[3];
5258 compute_zdepwi_operands (INTVAL (x), op);
5259 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5260 return;
5261 }
5262 case 'z':
5263 {
5264 unsigned op[3];
5265 compute_zdepdi_operands (INTVAL (x), op);
5266 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5267 return;
5268 }
5269 case 'c':
5270 /* We can get here from a .vtable_inherit due to our
5271 CONSTANT_ADDRESS_P rejecting perfectly good constant
5272 addresses. */
5273 break;
5274 default:
5275 abort ();
5276 }
5277 if (GET_CODE (x) == REG)
5278 {
5279 fputs (reg_names [REGNO (x)], file);
5280 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5281 {
5282 fputs ("R", file);
5283 return;
5284 }
5285 if (FP_REG_P (x)
5286 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5287 && (REGNO (x) & 1) == 0)
5288 fputs ("L", file);
5289 }
5290 else if (GET_CODE (x) == MEM)
5291 {
5292 int size = GET_MODE_SIZE (GET_MODE (x));
5293 rtx base = NULL_RTX;
5294 switch (GET_CODE (XEXP (x, 0)))
5295 {
5296 case PRE_DEC:
5297 case POST_DEC:
5298 base = XEXP (XEXP (x, 0), 0);
5299 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5300 break;
5301 case PRE_INC:
5302 case POST_INC:
5303 base = XEXP (XEXP (x, 0), 0);
5304 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5305 break;
5306 case PLUS:
5307 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5308 fprintf (file, "%s(%s)",
5309 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5310 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5311 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5312 fprintf (file, "%s(%s)",
5313 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5314 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5315 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5316 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5317 {
5318 /* Because the REG_POINTER flag can get lost during reload,
5319 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5320 index and base registers in the combined move patterns. */
5321 rtx base = XEXP (XEXP (x, 0), 1);
5322 rtx index = XEXP (XEXP (x, 0), 0);
5323
5324 fprintf (file, "%s(%s)",
5325 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5326 }
5327 else
5328 output_address (XEXP (x, 0));
5329 break;
5330 default:
5331 output_address (XEXP (x, 0));
5332 break;
5333 }
5334 }
5335 else
5336 output_addr_const (file, x);
5337 }
5338
5339 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5340
5341 void
5342 output_global_address (FILE *file, rtx x, int round_constant)
5343 {
5344
5345 /* Imagine (high (const (plus ...))). */
5346 if (GET_CODE (x) == HIGH)
5347 x = XEXP (x, 0);
5348
5349 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5350 assemble_name (file, XSTR (x, 0));
5351 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5352 {
5353 assemble_name (file, XSTR (x, 0));
5354 fputs ("-$global$", file);
5355 }
5356 else if (GET_CODE (x) == CONST)
5357 {
5358 const char *sep = "";
5359 int offset = 0; /* assembler wants -$global$ at end */
5360 rtx base = NULL_RTX;
5361
5362 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
5363 {
5364 base = XEXP (XEXP (x, 0), 0);
5365 output_addr_const (file, base);
5366 }
5367 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == CONST_INT)
5368 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5369 else abort ();
5370
5371 if (GET_CODE (XEXP (XEXP (x, 0), 1)) == SYMBOL_REF)
5372 {
5373 base = XEXP (XEXP (x, 0), 1);
5374 output_addr_const (file, base);
5375 }
5376 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5377 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5378 else abort ();
5379
5380 /* How bogus. The compiler is apparently responsible for
5381 rounding the constant if it uses an LR field selector.
5382
5383 The linker and/or assembler seem a better place since
5384 they have to do this kind of thing already.
5385
5386 If we fail to do this, HP's optimizing linker may eliminate
5387 an addil, but not update the ldw/stw/ldo instruction that
5388 uses the result of the addil. */
5389 if (round_constant)
5390 offset = ((offset + 0x1000) & ~0x1fff);
5391
5392 if (GET_CODE (XEXP (x, 0)) == PLUS)
5393 {
5394 if (offset < 0)
5395 {
5396 offset = -offset;
5397 sep = "-";
5398 }
5399 else
5400 sep = "+";
5401 }
5402 else if (GET_CODE (XEXP (x, 0)) == MINUS
5403 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
5404 sep = "-";
5405 else abort ();
5406
5407 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5408 fputs ("-$global$", file);
5409 if (offset)
5410 fprintf (file, "%s%d", sep, offset);
5411 }
5412 else
5413 output_addr_const (file, x);
5414 }
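
/* Worked example of the LR field-selector rounding above:
   (offset + 0x1000) & ~0x1fff rounds OFFSET to the nearest multiple
   of 0x2000.  E.g. an offset of 0x0fff rounds down to 0x0000, while
   0x1000 rounds up to 0x2000.  */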
5415
5416 /* Output boilerplate text to appear at the beginning of the file.
5417 There are several possible versions. */
5418 #define aputs(x) fputs(x, asm_out_file)
5419 static inline void
5420 pa_file_start_level (void)
5421 {
5422 if (TARGET_64BIT)
5423 aputs ("\t.LEVEL 2.0w\n");
5424 else if (TARGET_PA_20)
5425 aputs ("\t.LEVEL 2.0\n");
5426 else if (TARGET_PA_11)
5427 aputs ("\t.LEVEL 1.1\n");
5428 else
5429 aputs ("\t.LEVEL 1.0\n");
5430 }
5431
5432 static inline void
5433 pa_file_start_space (int sortspace)
5434 {
5435 aputs ("\t.SPACE $PRIVATE$");
5436 if (sortspace)
5437 aputs (",SORT=16");
5438 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5439 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5440 "\n\t.SPACE $TEXT$");
5441 if (sortspace)
5442 aputs (",SORT=8");
5443 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5444 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5445 }
5446
5447 static inline void
5448 pa_file_start_file (int want_version)
5449 {
5450 if (write_symbols != NO_DEBUG)
5451 {
5452 output_file_directive (asm_out_file, main_input_filename);
5453 if (want_version)
5454 aputs ("\t.version\t\"01.01\"\n");
5455 }
5456 }
5457
5458 static inline void
5459 pa_file_start_mcount (const char *aswhat)
5460 {
5461 if (profile_flag)
5462 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5463 }
5464
5465 static void
5466 pa_elf_file_start (void)
5467 {
5468 pa_file_start_level ();
5469 pa_file_start_mcount ("ENTRY");
5470 pa_file_start_file (0);
5471 }
5472
5473 static void
5474 pa_som_file_start (void)
5475 {
5476 pa_file_start_level ();
5477 pa_file_start_space (0);
5478 aputs ("\t.IMPORT $global$,DATA\n"
5479 "\t.IMPORT $$dyncall,MILLICODE\n");
5480 pa_file_start_mcount ("CODE");
5481 pa_file_start_file (0);
5482 }
5483
5484 static void
5485 pa_linux_file_start (void)
5486 {
5487 pa_file_start_file (1);
5488 pa_file_start_level ();
5489 pa_file_start_mcount ("CODE");
5490 }
5491
5492 static void
5493 pa_hpux64_gas_file_start (void)
5494 {
5495 pa_file_start_level ();
5496 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5497 if (profile_flag)
5498 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5499 #endif
5500 pa_file_start_file (1);
5501 }
5502
5503 static void
5504 pa_hpux64_hpas_file_start (void)
5505 {
5506 pa_file_start_level ();
5507 pa_file_start_space (1);
5508 pa_file_start_mcount ("CODE");
5509 pa_file_start_file (0);
5510 }
5511 #undef aputs
5512
5513 static struct deferred_plabel *
5514 get_plabel (const char *fname)
5515 {
5516 size_t i;
5517
5518 /* See if we have already put this function on the list of deferred
5519 plabels. This list is generally small, so a linear search is not
5520 too ugly. If it proves too slow, replace it with something faster. */
5521 for (i = 0; i < n_deferred_plabels; i++)
5522 if (strcmp (fname, deferred_plabels[i].name) == 0)
5523 break;
5524
5525 /* If the deferred plabel list is empty, or this entry was not found
5526 on the list, create a new entry on the list. */
5527 if (deferred_plabels == NULL || i == n_deferred_plabels)
5528 {
5529 const char *real_name;
5530
5531 if (deferred_plabels == 0)
5532 deferred_plabels = (struct deferred_plabel *)
5533 ggc_alloc (sizeof (struct deferred_plabel));
5534 else
5535 deferred_plabels = (struct deferred_plabel *)
5536 ggc_realloc (deferred_plabels,
5537 ((n_deferred_plabels + 1)
5538 * sizeof (struct deferred_plabel)));
5539
5540 i = n_deferred_plabels++;
5541 deferred_plabels[i].internal_label = gen_label_rtx ();
5542 deferred_plabels[i].name = ggc_strdup (fname);
5543
5544 /* Gross. We have just implicitly taken the address of this function;
5545 mark it as such. */
5546 real_name = (*targetm.strip_name_encoding) (fname);
5547 TREE_SYMBOL_REFERENCED (get_identifier (real_name)) = 1;
5548 }
5549
5550 return &deferred_plabels[i];
5551 }
5552
5553 static void
5554 output_deferred_plabels (void)
5555 {
5556 size_t i;
5557 /* If we have deferred plabels, then we need to switch into the data
5558 section and align it to a word boundary (8 bytes for the 64-bit
5559 runtime, 4 bytes otherwise) before we output the deferred plabels. */
5560 if (n_deferred_plabels)
5561 {
5562 data_section ();
5563 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5564 }
5565
5566 /* Now output the deferred plabels. */
5567 for (i = 0; i < n_deferred_plabels; i++)
5568 {
5569 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5570 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5571 assemble_integer (gen_rtx_SYMBOL_REF (Pmode, deferred_plabels[i].name),
5572 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5573 }
5574 }
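/* Each deferred plabel thus becomes a pointer-sized slot in the data
   section, roughly (label number invented for illustration):

   L$0042:
   .word funcname

   The internal label returned by get_plabel lets code reference this
   slot wherever the function's address is needed.  */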
5575
5576 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5577 /* Initialize optabs to point to HPUX long double emulation routines. */
5578 static void
5579 pa_hpux_init_libfuncs (void)
5580 {
5581 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5582 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5583 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5584 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5585 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5586 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5587 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5588 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5589 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5590
5591 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5592 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5593 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5594 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5595 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5596 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5597 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5598
5599 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5600 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5601 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5602 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5603
5604 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5605 ? "__U_Qfcnvfxt_quad_to_sgl"
5606 : "_U_Qfcnvfxt_quad_to_sgl");
5607 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5608 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5609 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5610
5611 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5612 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5613 }
5614 #endif
5615
5616 /* HP's millicode routines mean something special to the assembler.
5617 Keep track of which ones we have used. */
5618
5619 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5620 static void import_milli (enum millicodes);
5621 static char imported[(int) end1000];
5622 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5623 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5624 #define MILLI_START 10
5625
5626 static void
5627 import_milli (enum millicodes code)
5628 {
5629 char str[sizeof (import_string)];
5630
5631 if (!imported[(int) code])
5632 {
5633 imported[(int) code] = 1;
5634 strcpy (str, import_string);
5635 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5636 output_asm_insn (str, 0);
5637 }
5638 }
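/* For example, import_milli (mulI) copies IMPORT_STRING into a local
   buffer and overwrites the "...." placeholder at offset MILLI_START
   with the routine name, emitting ".IMPORT $$mulI,MILLICODE" at most
   once per translation unit.  */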
5639
5640 /* The register constraints have put the operands and return value in
5641 the proper registers. */
5642
5643 const char *
5644 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5645 {
5646 import_milli (mulI);
5647 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5648 }
5649
5650 /* Emit the rtl for doing a division by a constant. */
5651
5652 /* Do magic division millicodes exist for this value? */
5653 static const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,
5654 1, 1};
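/* A nonzero entry at index N means HP supplies dedicated millicode for
   division by N; e.g. magic_milli[3] is 1, so $$divI_3 and $$divU_3
   exist, while magic_milli[11] is 0 and division by 11 falls back to
   the generic $$divI/$$divU routines.  */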
5655
5656 /* We'll use an array to keep track of the magic millicodes and
5657 whether or not we've used them already. [n][0] is signed, [n][1] is
5658 unsigned. */
5659
5660 static int div_milli[16][2];
5661
5662 int
5663 div_operand (rtx op, enum machine_mode mode)
5664 {
5665 return (mode == SImode
5666 && ((GET_CODE (op) == REG && REGNO (op) == 25)
5667 || (GET_CODE (op) == CONST_INT && INTVAL (op) > 0
5668 && INTVAL (op) < 16 && magic_milli[INTVAL (op)])));
5669 }
5670
5671 int
5672 emit_hpdiv_const (rtx *operands, int unsignedp)
5673 {
5674 if (GET_CODE (operands[2]) == CONST_INT
5675 && INTVAL (operands[2]) > 0
5676 && INTVAL (operands[2]) < 16
5677 && magic_milli[INTVAL (operands[2])])
5678 {
5679 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5680
5681 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5682 emit
5683 (gen_rtx_PARALLEL
5684 (VOIDmode,
5685 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5686 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5687 SImode,
5688 gen_rtx_REG (SImode, 26),
5689 operands[2])),
5690 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5691 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5692 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5693 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5694 gen_rtx_CLOBBER (VOIDmode, ret))));
5695 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5696 return 1;
5697 }
5698 return 0;
5699 }
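/* The PARALLEL above encodes the millicode calling convention: the
   dividend is passed in %r26, the quotient comes back in %r29, and
   the return pointer uses %r31 (%r2 in the 64-bit runtime), so those
   registers are all marked as set or clobbered for the optimizers.  */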
5700
5701 const char *
5702 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5703 {
5704 int divisor;
5705
5706 /* If the divisor is a constant, try to use one of the special
5707 opcodes. */
5708 if (GET_CODE (operands[0]) == CONST_INT)
5709 {
5710 static char buf[100];
5711 divisor = INTVAL (operands[0]);
5712 if (!div_milli[divisor][unsignedp])
5713 {
5714 div_milli[divisor][unsignedp] = 1;
5715 if (unsignedp)
5716 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5717 else
5718 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5719 }
5720 if (unsignedp)
5721 {
5722 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5723 INTVAL (operands[0]));
5724 return output_millicode_call (insn,
5725 gen_rtx_SYMBOL_REF (SImode, buf));
5726 }
5727 else
5728 {
5729 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5730 INTVAL (operands[0]));
5731 return output_millicode_call (insn,
5732 gen_rtx_SYMBOL_REF (SImode, buf));
5733 }
5734 }
5735 /* Divisor isn't a special constant. */
5736 else
5737 {
5738 if (unsignedp)
5739 {
5740 import_milli (divU);
5741 return output_millicode_call (insn,
5742 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5743 }
5744 else
5745 {
5746 import_milli (divI);
5747 return output_millicode_call (insn,
5748 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5749 }
5750 }
5751 }
5752
5753 /* Output a $$rem millicode to do mod. */
5754
5755 const char *
5756 output_mod_insn (int unsignedp, rtx insn)
5757 {
5758 if (unsignedp)
5759 {
5760 import_milli (remU);
5761 return output_millicode_call (insn,
5762 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5763 }
5764 else
5765 {
5766 import_milli (remI);
5767 return output_millicode_call (insn,
5768 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5769 }
5770 }
5771
5772 void
5773 output_arg_descriptor (rtx call_insn)
5774 {
5775 const char *arg_regs[4];
5776 enum machine_mode arg_mode;
5777 rtx link;
5778 int i, output_flag = 0;
5779 int regno;
5780
5781 /* We neither need nor want argument location descriptors for the
5782 64-bit runtime environment or the ELF32 environment. */
5783 if (TARGET_64BIT || TARGET_ELF32)
5784 return;
5785
5786 for (i = 0; i < 4; i++)
5787 arg_regs[i] = 0;
5788
5789 /* Specify explicitly that no argument relocations should take place
5790 if using the portable runtime calling conventions. */
5791 if (TARGET_PORTABLE_RUNTIME)
5792 {
5793 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5794 asm_out_file);
5795 return;
5796 }
5797
5798 if (GET_CODE (call_insn) != CALL_INSN)
5799 abort ();
5800 for (link = CALL_INSN_FUNCTION_USAGE (call_insn); link; link = XEXP (link, 1))
5801 {
5802 rtx use = XEXP (link, 0);
5803
5804 if (! (GET_CODE (use) == USE
5805 && GET_CODE (XEXP (use, 0)) == REG
5806 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5807 continue;
5808
5809 arg_mode = GET_MODE (XEXP (use, 0));
5810 regno = REGNO (XEXP (use, 0));
5811 if (regno >= 23 && regno <= 26)
5812 {
5813 arg_regs[26 - regno] = "GR";
5814 if (arg_mode == DImode)
5815 arg_regs[25 - regno] = "GR";
5816 }
5817 else if (regno >= 32 && regno <= 39)
5818 {
5819 if (arg_mode == SFmode)
5820 arg_regs[(regno - 32) / 2] = "FR";
5821 else
5822 {
5823 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5824 arg_regs[(regno - 34) / 2] = "FR";
5825 arg_regs[(regno - 34) / 2 + 1] = "FU";
5826 #else
5827 arg_regs[(regno - 34) / 2] = "FU";
5828 arg_regs[(regno - 34) / 2 + 1] = "FR";
5829 #endif
5830 }
5831 }
5832 }
5833 fputs ("\t.CALL ", asm_out_file);
5834 for (i = 0; i < 4; i++)
5835 {
5836 if (arg_regs[i])
5837 {
5838 if (output_flag++)
5839 fputc (',', asm_out_file);
5840 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5841 }
5842 }
5843 fputc ('\n', asm_out_file);
5844 }
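/* For example, a call passing an int and a double might produce
   ".CALL ARGW0=GR,ARGW1=FR,ARGW2=FU" (with the FR/FU order flipped
   when HP_FP_ARG_DESCRIPTOR_REVERSED is defined), telling the linker
   which argument words live in general vs. floating registers.  */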
5845 \f
5846 /* Return the class of any secondary reload register that is needed to
5847 move IN into a register in class CLASS using mode MODE.
5848
5849 Profiling has shown that this routine and its descendants account for
5850 a significant amount of compile time (~7%). So it has been
5851 optimized to reduce redundant computations and eliminate useless
5852 function calls.
5853
5854 It might be worthwhile to try to make this a leaf function too. */
5855
5856 enum reg_class
5857 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5858 {
5859 int regno, is_symbolic;
5860
5861 /* Trying to load a constant into a FP register during PIC code
5862 generation will require %r1 as a scratch register. */
5863 if (flag_pic
5864 && GET_MODE_CLASS (mode) == MODE_INT
5865 && FP_REG_CLASS_P (class)
5866 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5867 return R1_REGS;
5868
5869 /* Profiling showed the PA port spends about 1.3% of its compilation
5870 time in true_regnum from calls inside secondary_reload_class. */
5871
5872 if (GET_CODE (in) == REG)
5873 {
5874 regno = REGNO (in);
5875 if (regno >= FIRST_PSEUDO_REGISTER)
5876 regno = true_regnum (in);
5877 }
5878 else if (GET_CODE (in) == SUBREG)
5879 regno = true_regnum (in);
5880 else
5881 regno = -1;
5882
5883 /* If we have something like (mem (mem (...))), we can safely assume the
5884 inner MEM will end up in a general register after reloading, so there's
5885 no need for a secondary reload. */
5886 if (GET_CODE (in) == MEM
5887 && GET_CODE (XEXP (in, 0)) == MEM)
5888 return NO_REGS;
5889
5890 /* Handle out of range displacement for integer mode loads/stores of
5891 FP registers. */
5892 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5893 && GET_MODE_CLASS (mode) == MODE_INT
5894 && FP_REG_CLASS_P (class))
5895 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5896 return GENERAL_REGS;
5897
5898 /* A SAR<->FP register copy requires a secondary register (GPR) as
5899 well as secondary memory. */
5900 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5901 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5902 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5903 return GENERAL_REGS;
5904
5905 if (GET_CODE (in) == HIGH)
5906 in = XEXP (in, 0);
5907
5908 /* Profiling has shown that GCC spends about 2.6% of its compilation
5909 time in symbolic_operand from calls inside secondary_reload_class.
5910
5911 We use an inline copy and only compute its return value once to avoid
5912 useless work. */
5913 switch (GET_CODE (in))
5914 {
5915 rtx tmp;
5916
5917 case SYMBOL_REF:
5918 case LABEL_REF:
5919 is_symbolic = 1;
5920 break;
5921 case CONST:
5922 tmp = XEXP (in, 0);
5923 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5924 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5925 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5926 break;
5927
5928 default:
5929 is_symbolic = 0;
5930 break;
5931 }
5932
5933 if (!flag_pic
5934 && is_symbolic
5935 && read_only_operand (in, VOIDmode))
5936 return NO_REGS;
5937
5938 if (class != R1_REGS && is_symbolic)
5939 return R1_REGS;
5940
5941 return NO_REGS;
5942 }
5943
5944 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5945 by invisible reference. As a GCC extension, we also pass anything
5946 with a zero or variable size by reference.
5947
5948 The 64-bit runtime does not describe passing any types by invisible
5949 reference. The internals of GCC can't currently handle passing
5950 empty structures, and zero or variable length arrays when they are
5951 not passed entirely on the stack or by reference. Thus, as a GCC
5952 extension, we pass these types by reference. The HP compiler doesn't
5953 support these types, so hopefully there shouldn't be any compatibility
5954 issues. This may have to be revisited when HP releases a C99 compiler
5955 or updates the ABI. */
5956
5957 static bool
5958 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5959 enum machine_mode mode, tree type,
5960 bool named ATTRIBUTE_UNUSED)
5961 {
5962 HOST_WIDE_INT size;
5963
5964 if (type)
5965 size = int_size_in_bytes (type);
5966 else
5967 size = GET_MODE_SIZE (mode);
5968
5969 if (TARGET_64BIT)
5970 return size <= 0;
5971 else
5972 return size <= 0 || size > 8;
5973 }
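/* For example, an argument of type struct { char c[12]; } is passed
   by reference in the 32-bit runtime (size > 8) but by value in the
   64-bit runtime, where only zero-sized and variable-sized objects
   take the reference path.  */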
5974
5975 enum direction
5976 function_arg_padding (enum machine_mode mode, tree type)
5977 {
5978 if (mode == BLKmode
5979 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5980 {
5981 /* Return none if justification is not required. */
5982 if (type
5983 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5984 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5985 return none;
5986
5987 /* The directions set here are ignored when a BLKmode argument larger
5988 than a word is placed in a register. Different code is used for
5989 the stack and registers. This makes it difficult to have a
5990 consistent data representation for both the stack and registers.
5991 For both runtimes, the justification and padding for arguments on
5992 the stack and in registers should be identical. */
5993 if (TARGET_64BIT)
5994 /* The 64-bit runtime specifies left justification for aggregates. */
5995 return upward;
5996 else
5997 /* The 32-bit runtime architecture specifies right justification.
5998 When the argument is passed on the stack, the argument is padded
5999 with garbage on the left. The HP compiler pads with zeros. */
6000 return downward;
6001 }
6002
6003 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6004 return downward;
6005 else
6006 return none;
6007 }
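/* E.g. a 3-byte BLKmode argument gets "downward" padding in the
   32-bit runtime: the garbage precedes the data, leaving the value
   right justified in its word.  The 64-bit runtime instead left
   justifies such aggregates ("upward" padding).  */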
6008
6009 \f
6010 /* Do what is necessary for `va_start'. We look at the current function
6011 to determine if stdargs or varargs is used and fill in an initial
6012 va_list. A pointer to this constructor is returned. */
6013
6014 static rtx
6015 hppa_builtin_saveregs (void)
6016 {
6017 rtx offset, dest;
6018 tree fntype = TREE_TYPE (current_function_decl);
6019 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
6020 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
6021 != void_type_node)))
6022 ? UNITS_PER_WORD : 0);
6023
6024 if (argadj)
6025 offset = plus_constant (current_function_arg_offset_rtx, argadj);
6026 else
6027 offset = current_function_arg_offset_rtx;
6028
6029 if (TARGET_64BIT)
6030 {
6031 int i, off;
6032
6033 /* Adjust for varargs/stdarg differences. */
6034 if (argadj)
6035 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
6036 else
6037 offset = current_function_arg_offset_rtx;
6038
6039 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6040 from the incoming arg pointer and growing to larger addresses. */
6041 for (i = 26, off = -64; i >= 19; i--, off += 8)
6042 emit_move_insn (gen_rtx_MEM (word_mode,
6043 plus_constant (arg_pointer_rtx, off)),
6044 gen_rtx_REG (word_mode, i));
6045
6046 /* The incoming args pointer points just beyond the flushback area;
6047 normally this is not a serious concern. However, when we are doing
6048 varargs/stdargs we want to make the arg pointer point to the start
6049 of the incoming argument area. */
6050 emit_move_insn (virtual_incoming_args_rtx,
6051 plus_constant (arg_pointer_rtx, -64));
6052
6053 /* Now return a pointer to the first anonymous argument. */
6054 return copy_to_reg (expand_binop (Pmode, add_optab,
6055 virtual_incoming_args_rtx,
6056 offset, 0, 0, OPTAB_LIB_WIDEN));
6057 }
6058
6059 /* Store general registers on the stack. */
6060 dest = gen_rtx_MEM (BLKmode,
6061 plus_constant (current_function_internal_arg_pointer,
6062 -16));
6063 set_mem_alias_set (dest, get_varargs_alias_set ());
6064 set_mem_align (dest, BITS_PER_WORD);
6065 move_block_from_reg (23, dest, 4);
6066
6067 /* move_block_from_reg will emit code to store the argument registers
6068 individually as scalar stores.
6069
6070 However, other insns may later load from the same addresses for
6071 a structure load (passing a struct to a varargs routine).
6072
6073 The alias code assumes that such aliasing can never happen, so we
6074 have to keep memory referencing insns from moving up beyond the
6075 last argument register store. So we emit a blockage insn here. */
6076 emit_insn (gen_blockage ());
6077
6078 return copy_to_reg (expand_binop (Pmode, add_optab,
6079 current_function_internal_arg_pointer,
6080 offset, 0, 0, OPTAB_LIB_WIDEN));
6081 }
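/* In the 32-bit path above, move_block_from_reg spills the four
   argument registers %r23..%r26 to the caller-allocated slots at
   offsets -16..-4 from the internal arg pointer, so every argument
   can then be walked in memory as if it had been passed on the
   stack.  */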
6082
6083 void
6084 hppa_va_start (tree valist, rtx nextarg)
6085 {
6086 nextarg = expand_builtin_saveregs ();
6087 std_expand_builtin_va_start (valist, nextarg);
6088 }
6089
6090 static tree
6091 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
6092 {
6093 if (TARGET_64BIT)
6094 {
6095 /* Args grow upward. We can use the generic routines. */
6096 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6097 }
6098 else /* !TARGET_64BIT */
6099 {
6100 tree ptr = build_pointer_type (type);
6101 tree valist_type;
6102 tree t, u;
6103 unsigned int size, ofs;
6104 bool indirect;
6105
6106 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6107 if (indirect)
6108 {
6109 type = ptr;
6110 ptr = build_pointer_type (type);
6111 }
6112 size = int_size_in_bytes (type);
6113 valist_type = TREE_TYPE (valist);
6114
6115 /* Args grow down. Not handled by generic routines. */
6116
6117 u = fold_convert (valist_type, size_in_bytes (type));
6118 t = build (MINUS_EXPR, valist_type, valist, u);
6119
6120 /* Copied from va-pa.h, but we probably don't need to align to
6121 word size, since we generate and preserve that invariant. */
6122 u = build_int_cst (valist_type, (size > 4 ? -8 : -4), -1);
6123 t = build (BIT_AND_EXPR, valist_type, t, u);
6124
6125 t = build (MODIFY_EXPR, valist_type, valist, t);
6126
6127 ofs = (8 - size) % 4;
6128 if (ofs != 0)
6129 {
6130 u = fold_convert (valist_type, size_int (ofs));
6131 t = build (PLUS_EXPR, valist_type, t, u);
6132 }
6133
6134 t = fold_convert (ptr, t);
6135 t = build_fold_indirect_ref (t);
6136
6137 if (indirect)
6138 t = build_fold_indirect_ref (t);
6139
6140 return t;
6141 }
6142 }
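/* A worked example of the 32-bit arithmetic above: for an 8-byte
   double, valist is decremented by 8 and masked with -8, giving an
   8-byte aligned slot with ofs = (8 - 8) % 4 = 0; for a 2-byte short,
   valist - 2 is masked with -4 and then bumped by ofs = (8 - 2) % 4
   = 2, so the value is read right justified from its slot.  */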
6143
6144 /* This routine handles all the normal conditional branch sequences we
6145 might need to generate. It handles compare immediate vs compare
6146 register, nullification of delay slots, varying length branches,
6147 negated branches, and all combinations of the above. It returns the
6148 output appropriate to emit the branch corresponding to all given
6149 parameters. */
6150
6151 const char *
6152 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
6153 {
6154 static char buf[100];
6155 int useskip = 0;
6156 rtx xoperands[5];
6157
6158 /* A conditional branch to the following instruction (e.g. the delay slot)
6159 is asking for a disaster. This can happen when not optimizing and
6160 when jump optimization fails.
6161
6162 While it is usually safe to emit nothing, this can fail if the
6163 preceding instruction is a nullified branch with an empty delay
6164 slot and the same branch target as this branch. We could check
6165 for this but jump optimization should eliminate nop jumps. It
6166 is always safe to emit a nop. */
6167 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6168 return "nop";
6169
6170 /* The doubleword form of the cmpib instruction doesn't have the LEU
6171 and GTU conditions while the cmpb instruction does. Since we accept
6172 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6173 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6174 operands[2] = gen_rtx_REG (DImode, 0);
6175
6176 /* If this is a long branch with its delay slot unfilled, set `nullify'
6177 as it can nullify the delay slot and save a nop. */
6178 if (length == 8 && dbr_sequence_length () == 0)
6179 nullify = 1;
6180
6181 /* If this is a short forward conditional branch which did not get
6182 its delay slot filled, the delay slot can still be nullified. */
6183 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6184 nullify = forward_branch_p (insn);
6185
6186 /* A forward branch over a single nullified insn can be done with a
6187 comclr instruction. This avoids a single cycle penalty due to
6188 mis-predicted branch if we fall through (branch not taken). */
6189 if (length == 4
6190 && next_real_insn (insn) != 0
6191 && get_attr_length (next_real_insn (insn)) == 4
6192 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6193 && nullify)
6194 useskip = 1;
6195
6196 switch (length)
6197 {
6198 /* All short conditional branches except backwards with an unfilled
6199 delay slot. */
6200 case 4:
6201 if (useskip)
6202 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6203 else
6204 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6205 if (GET_MODE (operands[1]) == DImode)
6206 strcat (buf, "*");
6207 if (negated)
6208 strcat (buf, "%B3");
6209 else
6210 strcat (buf, "%S3");
6211 if (useskip)
6212 strcat (buf, " %2,%r1,%%r0");
6213 else if (nullify)
6214 strcat (buf, ",n %2,%r1,%0");
6215 else
6216 strcat (buf, " %2,%r1,%0");
6217 break;
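/* At this point buf holds a template such as
   "{com%I2b,|cmp%I2b,}%S3 %2,%r1,%0"; output_asm_insn later picks
   one of the brace alternatives according to the assembler dialect
   (old vs. PA 2.0 mnemonics) and expands the %-codes into the real
   operands.  */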
6218
6219 /* All long conditionals. Note a short backward branch with an
6220 unfilled delay slot is treated just like a long backward branch
6221 with an unfilled delay slot. */
6222 case 8:
6223 /* Handle weird backwards branch with a filled delay slot
6224 which is nullified. */
6225 if (dbr_sequence_length () != 0
6226 && ! forward_branch_p (insn)
6227 && nullify)
6228 {
6229 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6230 if (GET_MODE (operands[1]) == DImode)
6231 strcat (buf, "*");
6232 if (negated)
6233 strcat (buf, "%S3");
6234 else
6235 strcat (buf, "%B3");
6236 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6237 }
6238 /* Handle short backwards branch with an unfilled delay slot.
6239 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6240 taken and untaken branches. */
6241 else if (dbr_sequence_length () == 0
6242 && ! forward_branch_p (insn)
6243 && INSN_ADDRESSES_SET_P ()
6244 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6245 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6246 {
6247 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6248 if (GET_MODE (operands[1]) == DImode)
6249 strcat (buf, "*");
6250 if (negated)
6251 strcat (buf, "%B3 %2,%r1,%0%#");
6252 else
6253 strcat (buf, "%S3 %2,%r1,%0%#");
6254 }
6255 else
6256 {
6257 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6258 if (GET_MODE (operands[1]) == DImode)
6259 strcat (buf, "*");
6260 if (negated)
6261 strcat (buf, "%S3");
6262 else
6263 strcat (buf, "%B3");
6264 if (nullify)
6265 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6266 else
6267 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6268 }
6269 break;
6270
6271 case 20:
6272 case 28:
6273 xoperands[0] = operands[0];
6274 xoperands[1] = operands[1];
6275 xoperands[2] = operands[2];
6276 xoperands[3] = operands[3];
6277
6278 /* The reversed conditional branch must branch over one additional
6279 instruction if the delay slot is filled. If the delay slot
6280 is empty, the instruction after the reversed condition branch
6281 must be nullified. */
6282 nullify = dbr_sequence_length () == 0;
6283 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6284
6285 /* Create a reversed conditional branch which branches around
6286 the following insns. */
6287 if (GET_MODE (operands[1]) != DImode)
6288 {
6289 if (nullify)
6290 {
6291 if (negated)
6292 strcpy (buf,
6293 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6294 else
6295 strcpy (buf,
6296 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6297 }
6298 else
6299 {
6300 if (negated)
6301 strcpy (buf,
6302 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6303 else
6304 strcpy (buf,
6305 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6306 }
6307 }
6308 else
6309 {
6310 if (nullify)
6311 {
6312 if (negated)
6313 strcpy (buf,
6314 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6315 else
6316 strcpy (buf,
6317 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6318 }
6319 else
6320 {
6321 if (negated)
6322 strcpy (buf,
6323 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6324 else
6325 strcpy (buf,
6326 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6327 }
6328 }
6329
6330 output_asm_insn (buf, xoperands);
6331 return output_lbranch (operands[0], insn);
6332
6333 default:
6334 abort ();
6335 }
6336 return buf;
6337 }
6338
6339 /* This routine handles long unconditional branches that exceed the
6340 maximum range of a simple branch instruction. */
6341
6342 const char *
6343 output_lbranch (rtx dest, rtx insn)
6344 {
6345 rtx xoperands[2];
6346
6347 xoperands[0] = dest;
6348
6349 /* First, free up the delay slot. */
6350 if (dbr_sequence_length () != 0)
6351 {
6352 /* We can't handle a jump in the delay slot. */
6353 if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
6354 abort ();
6355
6356 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6357 optimize, 0, 0, NULL);
6358
6359 /* Now delete the delay insn. */
6360 PUT_CODE (NEXT_INSN (insn), NOTE);
6361 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6362 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6363 }
6364
6365 /* Output an insn to save %r1. The runtime documentation doesn't
6366 specify whether the "Clean Up" slot in the caller's frame can
6367 be clobbered by the callee. It isn't copied by HP's builtin
6368 alloca, so this suggests that it can be clobbered if necessary.
6369 The "Static Link" location is copied by HP builtin alloca, so
6370 we avoid using it. Using the cleanup slot might be a problem
6371 if we have to interoperate with languages that pass cleanup
6372 information. However, it should be possible to handle these
6373 situations with GCC's asm feature.
6374
6375 The "Current RP" slot is reserved for the called procedure, so
6376 we try to use it when we don't have a frame of our own. It's
6377 rather unlikely that we won't have a frame when we need to emit
6378 a very long branch.
6379
6380 Really, the way to go long term is a register scavenger: go to
6381 the target of the jump and find a register which we can use
6382 as a scratch to hold the value in %r1. Then, we wouldn't have
6383 to free up the delay slot or clobber a slot that may be needed
6384 for other purposes. */
6385 if (TARGET_64BIT)
6386 {
6387 if (actual_fsize == 0 && !regs_ever_live[2])
6388 /* Use the return pointer slot in the frame marker. */
6389 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6390 else
6391 /* Use the slot at -40 in the frame marker since HP builtin
6392 alloca doesn't copy it. */
6393 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6394 }
6395 else
6396 {
6397 if (actual_fsize == 0 && !regs_ever_live[2])
6398 /* Use the return pointer slot in the frame marker. */
6399 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6400 else
6401 /* Use the "Clean Up" slot in the frame marker. In GCC,
6402 the only other use of this location is for copying a
6403 floating point double argument from a floating-point
6404 register to two general registers. The copy is done
6405 as an "atomic" operation when outputting a call, so it
6406 won't interfere with our using the location here. */
6407 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6408 }
6409
6410 if (TARGET_PORTABLE_RUNTIME)
6411 {
6412 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6413 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6414 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6415 }
6416 else if (flag_pic)
6417 {
6418 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6419 if (TARGET_SOM || !TARGET_GAS)
6420 {
6421 xoperands[1] = gen_label_rtx ();
6422 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6423 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6424 CODE_LABEL_NUMBER (xoperands[1]));
6425 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6426 }
6427 else
6428 {
6429 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6430 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6431 }
6432 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6433 }
6434 else
6435 /* Now output a very long branch to the original target. */
6436 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6437
6438 /* Now restore the value of %r1 in the delay slot. */
6439 if (TARGET_64BIT)
6440 {
6441 if (actual_fsize == 0 && !regs_ever_live[2])
6442 return "ldd -16(%%r30),%%r1";
6443 else
6444 return "ldd -40(%%r30),%%r1";
6445 }
6446 else
6447 {
6448 if (actual_fsize == 0 && !regs_ever_live[2])
6449 return "ldw -20(%%r30),%%r1";
6450 else
6451 return "ldw -12(%%r30),%%r1";
6452 }
6453 }
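/* For a non-PIC, non-portable-runtime 32-bit target with a frame,
   the complete long-branch sequence is therefore roughly:

   stw %r1,-12(%r30) ; spill %r1 to the Clean Up slot
   ldil L'target,%r1 ; high part of the target address
   be R'target(%sr4,%r1) ; interspace branch to the target
   ldw -12(%r30),%r1 ; delay slot: restore %r1

   reaching any 32-bit address at the cost of one frame-marker slot.  */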
6454
6455 /* This routine handles all the branch-on-bit conditional branch sequences we
6456 might need to generate. It handles nullification of delay slots,
6457 varying length branches, negated branches and all combinations of the
6458 above. It returns the appropriate output template to emit the branch. */
6459
6460 const char *
6461 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6462 int negated, rtx insn, int which)
6463 {
6464 static char buf[100];
6465 int useskip = 0;
6466
6467 /* A conditional branch to the following instruction (e.g. the delay slot) is
6468 asking for a disaster. I do not think this can happen as this pattern
6469 is only used when optimizing; jump optimization should eliminate the
6470 jump. But be prepared just in case. */
6471
6472 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6473 return "nop";
6474
6475 /* If this is a long branch with its delay slot unfilled, set `nullify'
6476 as it can nullify the delay slot and save a nop. */
6477 if (length == 8 && dbr_sequence_length () == 0)
6478 nullify = 1;
6479
6480 /* If this is a short forward conditional branch which did not get
6481 its delay slot filled, the delay slot can still be nullified. */
6482 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6483 nullify = forward_branch_p (insn);
6484
6485 /* A forward branch over a single nullified insn can be done with an
6486 extrs instruction. This avoids a single cycle penalty due to
6487 mis-predicted branch if we fall through (branch not taken). */
6488
6489 if (length == 4
6490 && next_real_insn (insn) != 0
6491 && get_attr_length (next_real_insn (insn)) == 4
6492 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6493 && nullify)
6494 useskip = 1;
6495
6496 switch (length)
6497 {
6498
6499 /* All short conditional branches except backwards with an unfilled
6500 delay slot. */
6501 case 4:
6502 if (useskip)
6503 strcpy (buf, "{extrs,|extrw,s,}");
6504 else
6505 strcpy (buf, "bb,");
6506 if (useskip && GET_MODE (operands[0]) == DImode)
6507 strcpy (buf, "extrd,s,*");
6508 else if (GET_MODE (operands[0]) == DImode)
6509 strcpy (buf, "bb,*");
6510 if ((which == 0 && negated)
6511 || (which == 1 && ! negated))
6512 strcat (buf, ">=");
6513 else
6514 strcat (buf, "<");
6515 if (useskip)
6516 strcat (buf, " %0,%1,1,%%r0");
6517 else if (nullify && negated)
6518 strcat (buf, ",n %0,%1,%3");
6519 else if (nullify && ! negated)
6520 strcat (buf, ",n %0,%1,%2");
6521 else if (! nullify && negated)
6522 strcat (buf, "%0,%1,%3");
6523 else if (! nullify && ! negated)
6524 strcat (buf, " %0,%1,%2");
6525 break;
6526
6527 /* All long conditionals. Note a short backward branch with an
6528 unfilled delay slot is treated just like a long backward branch
6529 with an unfilled delay slot. */
6530 case 8:
6531 /* Handle weird backwards branch with a filled delay slot
6532 which is nullified. */
6533 if (dbr_sequence_length () != 0
6534 && ! forward_branch_p (insn)
6535 && nullify)
6536 {
6537 strcpy (buf, "bb,");
6538 if (GET_MODE (operands[0]) == DImode)
6539 strcat (buf, "*");
6540 if ((which == 0 && negated)
6541 || (which == 1 && ! negated))
6542 strcat (buf, "<");
6543 else
6544 strcat (buf, ">=");
6545 if (negated)
6546 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6547 else
6548 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6549 }
6550 /* Handle short backwards branch with an unfilled delay slot.
6551 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6552 taken and untaken branches. */
6553 else if (dbr_sequence_length () == 0
6554 && ! forward_branch_p (insn)
6555 && INSN_ADDRESSES_SET_P ()
6556 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6557 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6558 {
6559 strcpy (buf, "bb,");
6560 if (GET_MODE (operands[0]) == DImode)
6561 strcat (buf, "*");
6562 if ((which == 0 && negated)
6563 || (which == 1 && ! negated))
6564 strcat (buf, ">=");
6565 else
6566 strcat (buf, "<");
6567 if (negated)
6568 strcat (buf, " %0,%1,%3%#");
6569 else
6570 strcat (buf, " %0,%1,%2%#");
6571 }
6572 else
6573 {
6574 strcpy (buf, "{extrs,|extrw,s,}");
6575 if (GET_MODE (operands[0]) == DImode)
6576 strcpy (buf, "extrd,s,*");
6577 if ((which == 0 && negated)
6578 || (which == 1 && ! negated))
6579 strcat (buf, "<");
6580 else
6581 strcat (buf, ">=");
6582 if (nullify && negated)
6583 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6584 else if (nullify && ! negated)
6585 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6586 else if (negated)
6587 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6588 else
6589 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6590 }
6591 break;
6592
6593 default:
6594 abort ();
6595 }
6596 return buf;
6597 }
6598
6599 /* This routine handles all the branch-on-variable-bit conditional branch
6600 sequences we might need to generate. It handles nullification of delay
6601 slots, varying length branches, negated branches and all combinations
6602 of the above. It returns the appropriate output template to emit the
6603 branch. */
6604
6605 const char *
6606 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6607 int negated, rtx insn, int which)
6608 {
6609 static char buf[100];
6610 int useskip = 0;
6611
6612 /* A conditional branch to the following instruction (e.g. the delay slot) is
6613 asking for a disaster. I do not think this can happen as this pattern
6614 is only used when optimizing; jump optimization should eliminate the
6615 jump. But be prepared just in case. */
6616
6617 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6618 return "nop";
6619
6620 /* If this is a long branch with its delay slot unfilled, set `nullify'
6621 as it can nullify the delay slot and save a nop. */
6622 if (length == 8 && dbr_sequence_length () == 0)
6623 nullify = 1;
6624
6625 /* If this is a short forward conditional branch which did not get
6626 its delay slot filled, the delay slot can still be nullified. */
6627 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6628 nullify = forward_branch_p (insn);
6629
6630 /* A forward branch over a single nullified insn can be done with an
6631 extrs instruction. This avoids a single cycle penalty due to
6632 mis-predicted branch if we fall through (branch not taken). */
6633
6634 if (length == 4
6635 && next_real_insn (insn) != 0
6636 && get_attr_length (next_real_insn (insn)) == 4
6637 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6638 && nullify)
6639 useskip = 1;
6640
6641 switch (length)
6642 {
6643
6644 /* All short conditional branches except backwards with an unfilled
6645 delay slot. */
6646 case 4:
6647 if (useskip)
6648 strcpy (buf, "{vextrs,|extrw,s,}");
6649 else
6650 strcpy (buf, "{bvb,|bb,}");
6651 if (useskip && GET_MODE (operands[0]) == DImode)
6652 strcpy (buf, "extrd,s,*");
6653 else if (GET_MODE (operands[0]) == DImode)
6654 strcpy (buf, "bb,*");
6655 if ((which == 0 && negated)
6656 || (which == 1 && ! negated))
6657 strcat (buf, ">=");
6658 else
6659 strcat (buf, "<");
6660 if (useskip)
6661 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6662 else if (nullify && negated)
6663 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6664 else if (nullify && ! negated)
6665 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6666 else if (! nullify && negated)
6667 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6668 else if (! nullify && ! negated)
6669 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6670 break;
6671
6672 /* All long conditionals. Note a short backward branch with an
6673 unfilled delay slot is treated just like a long backward branch
6674 with an unfilled delay slot. */
6675 case 8:
6676 /* Handle weird backwards branch with a filled delay slot
6677 which is nullified. */
6678 if (dbr_sequence_length () != 0
6679 && ! forward_branch_p (insn)
6680 && nullify)
6681 {
6682 strcpy (buf, "{bvb,|bb,}");
6683 if (GET_MODE (operands[0]) == DImode)
6684 strcat (buf, "*");
6685 if ((which == 0 && negated)
6686 || (which == 1 && ! negated))
6687 strcat (buf, "<");
6688 else
6689 strcat (buf, ">=");
6690 if (negated)
6691 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6692 else
6693 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6694 }
6695 /* Handle short backwards branch with an unfilled delay slot.
6696 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6697 taken and untaken branches. */
6698 else if (dbr_sequence_length () == 0
6699 && ! forward_branch_p (insn)
6700 && INSN_ADDRESSES_SET_P ()
6701 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6702 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6703 {
6704 strcpy (buf, "{bvb,|bb,}");
6705 if (GET_MODE (operands[0]) == DImode)
6706 strcat (buf, "*");
6707 if ((which == 0 && negated)
6708 || (which == 1 && ! negated))
6709 strcat (buf, ">=");
6710 else
6711 strcat (buf, "<");
6712 if (negated)
6713 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6714 else
6715 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6716 }
6717 else
6718 {
6719 strcpy (buf, "{vextrs,|extrw,s,}");
6720 if (GET_MODE (operands[0]) == DImode)
6721 strcpy (buf, "extrd,s,*");
6722 if ((which == 0 && negated)
6723 || (which == 1 && ! negated))
6724 strcat (buf, "<");
6725 else
6726 strcat (buf, ">=");
6727 if (nullify && negated)
6728 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6729 else if (nullify && ! negated)
6730 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6731 else if (negated)
6732 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6733 else
6734 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6735 }
6736 break;
6737
6738 default:
6739 abort ();
6740 }
6741 return buf;
6742 }
6743
6744 /* Return the output template for emitting a dbra type insn.
6745
6746 Note it may perform some output operations on its own before
6747 returning the final output string. */
6748 const char *
6749 output_dbra (rtx *operands, rtx insn, int which_alternative)
6750 {
6751
6752 /* A conditional branch to the following instruction (e.g. the delay slot) is
6753 asking for a disaster. Be prepared! */
6754
6755 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6756 {
6757 if (which_alternative == 0)
6758 return "ldo %1(%0),%0";
6759 else if (which_alternative == 1)
6760 {
6761 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6762 output_asm_insn ("ldw -16(%%r30),%4", operands);
6763 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6764 return "{fldws|fldw} -16(%%r30),%0";
6765 }
6766 else
6767 {
6768 output_asm_insn ("ldw %0,%4", operands);
6769 return "ldo %1(%4),%4\n\tstw %4,%0";
6770 }
6771 }
6772
6773 if (which_alternative == 0)
6774 {
6775 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6776 int length = get_attr_length (insn);
6777
6778 /* If this is a long branch with its delay slot unfilled, set `nullify'
6779 as it can nullify the delay slot and save a nop. */
6780 if (length == 8 && dbr_sequence_length () == 0)
6781 nullify = 1;
6782
6783 /* If this is a short forward conditional branch which did not get
6784 its delay slot filled, the delay slot can still be nullified. */
6785 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6786 nullify = forward_branch_p (insn);
6787
6788 /* Handle short versions first. */
6789 if (length == 4 && nullify)
6790 return "addib,%C2,n %1,%0,%3";
6791 else if (length == 4 && ! nullify)
6792 return "addib,%C2 %1,%0,%3";
6793 else if (length == 8)
6794 {
6795 /* Handle weird backwards branch with a filled delay slot
6796 which is nullified. */
6797 if (dbr_sequence_length () != 0
6798 && ! forward_branch_p (insn)
6799 && nullify)
6800 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6801 /* Handle short backwards branch with an unfilled delay slot.
6802 Using a addb;nop rather than addi;bl saves 1 cycle for both
6803 taken and untaken branches. */
6804 else if (dbr_sequence_length () == 0
6805 && ! forward_branch_p (insn)
6806 && INSN_ADDRESSES_SET_P ()
6807 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6808 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6809 return "addib,%C2 %1,%0,%3%#";
6810
6811 /* Handle normal cases. */
6812 if (nullify)
6813 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6814 else
6815 return "addi,%N2 %1,%0,%0\n\tb %3";
6816 }
6817 else
6818 abort ();
6819 }
6820 /* Deal with gross reload from FP register case. */
6821 else if (which_alternative == 1)
6822 {
6823 /* Move loop counter from FP register to MEM then into a GR,
6824 increment the GR, store the GR into MEM, and finally reload
6825 the FP register from MEM from within the branch's delay slot. */
6826 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6827 operands);
6828 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6829 if (get_attr_length (insn) == 24)
6830 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6831 else
6832 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6833 }
6834 /* Deal with gross reload from memory case. */
6835 else
6836 {
6837 /* Reload loop counter from memory, the store back to memory
6838 happens in the branch's delay slot. */
6839 output_asm_insn ("ldw %0,%4", operands);
6840 if (get_attr_length (insn) == 12)
6841 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6842 else
6843 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6844 }
6845 }
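/* In the common register alternative this reduces to a single
   instruction such as "addib,> -1,%r3,L$0021" (operands and label
   invented for illustration), which adjusts the loop counter and
   branches in one go.  */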
6846
6847 /* Return the output template for emitting a movb type insn.
6848
6849 Note it may perform some output operations on its own before
6850 returning the final output string. */
6851 const char *
6852 output_movb (rtx *operands, rtx insn, int which_alternative,
6853 int reverse_comparison)
6854 {
6855
6856 /* A conditional branch to the following instruction (e.g. the delay slot) is
6857 asking for a disaster. Be prepared! */
6858
6859 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6860 {
6861 if (which_alternative == 0)
6862 return "copy %1,%0";
6863 else if (which_alternative == 1)
6864 {
6865 output_asm_insn ("stw %1,-16(%%r30)", operands);
6866 return "{fldws|fldw} -16(%%r30),%0";
6867 }
6868 else if (which_alternative == 2)
6869 return "stw %1,%0";
6870 else
6871 return "mtsar %r1";
6872 }
6873
6874 /* Support the second variant. */
6875 if (reverse_comparison)
6876 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6877
6878 if (which_alternative == 0)
6879 {
6880 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6881 int length = get_attr_length (insn);
6882
6883 /* If this is a long branch with its delay slot unfilled, set `nullify'
6884 as it can nullify the delay slot and save a nop. */
6885 if (length == 8 && dbr_sequence_length () == 0)
6886 nullify = 1;
6887
6888 /* If this is a short forward conditional branch which did not get
6889 its delay slot filled, the delay slot can still be nullified. */
6890 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6891 nullify = forward_branch_p (insn);
6892
6893 /* Handle short versions first. */
6894 if (length == 4 && nullify)
6895 return "movb,%C2,n %1,%0,%3";
6896 else if (length == 4 && ! nullify)
6897 return "movb,%C2 %1,%0,%3";
6898 else if (length == 8)
6899 {
6900 /* Handle weird backwards branch with a filled delay slot
6901 which is nullified. */
6902 if (dbr_sequence_length () != 0
6903 && ! forward_branch_p (insn)
6904 && nullify)
6905 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6906
6907 /* Handle short backwards branch with an unfilled delay slot.
6908 Using a movb;nop rather than or;bl saves 1 cycle for both
6909 taken and untaken branches. */
6910 else if (dbr_sequence_length () == 0
6911 && ! forward_branch_p (insn)
6912 && INSN_ADDRESSES_SET_P ()
6913 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6914 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6915 return "movb,%C2 %1,%0,%3%#";
6916 /* Handle normal cases. */
6917 if (nullify)
6918 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6919 else
6920 return "or,%N2 %1,%%r0,%0\n\tb %3";
6921 }
6922 else
6923 abort ();
6924 }
6925 /* Deal with gross reload from FP register case. */
6926 else if (which_alternative == 1)
6927 {
6928 /* Move loop counter from FP register to MEM then into a GR,
6929 increment the GR, store the GR into MEM, and finally reload
6930 the FP register from MEM from within the branch's delay slot. */
6931 output_asm_insn ("stw %1,-16(%%r30)", operands);
6932 if (get_attr_length (insn) == 12)
6933 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6934 else
6935 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6936 }
6937 /* Deal with gross reload from memory case. */
6938 else if (which_alternative == 2)
6939 {
6940 /* Reload loop counter from memory, the store back to memory
6941 happens in the branch's delay slot. */
6942 if (get_attr_length (insn) == 8)
6943 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6944 else
6945 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6946 }
6947 /* Handle SAR as a destination. */
6948 else
6949 {
6950 if (get_attr_length (insn) == 8)
6951 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6952 else
6953 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6954 }
6955 }
6956
6957 /* Copy any FP arguments in INSN into integer registers. */
6958 static void
6959 copy_fp_args (rtx insn)
6960 {
6961 rtx link;
6962 rtx xoperands[2];
6963
6964 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6965 {
6966 int arg_mode, regno;
6967 rtx use = XEXP (link, 0);
6968
6969 if (! (GET_CODE (use) == USE
6970 && GET_CODE (XEXP (use, 0)) == REG
6971 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6972 continue;
6973
6974 arg_mode = GET_MODE (XEXP (use, 0));
6975 regno = REGNO (XEXP (use, 0));
6976
6977 /* Is it a floating point register? */
6978 if (regno >= 32 && regno <= 39)
6979 {
6980 /* Copy the FP register into an integer register via memory. */
6981 if (arg_mode == SFmode)
6982 {
6983 xoperands[0] = XEXP (use, 0);
6984 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6985 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6986 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6987 }
6988 else
6989 {
6990 xoperands[0] = XEXP (use, 0);
6991 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6992 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6993 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6994 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6995 }
6996 }
6997 }
6998 }
6999
7000 /* Compute length of the FP argument copy sequence for INSN. */
7001 static int
7002 length_fp_args (rtx insn)
7003 {
7004 int length = 0;
7005 rtx link;
7006
7007 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7008 {
7009 int arg_mode, regno;
7010 rtx use = XEXP (link, 0);
7011
7012 if (! (GET_CODE (use) == USE
7013 && GET_CODE (XEXP (use, 0)) == REG
7014 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7015 continue;
7016
7017 arg_mode = GET_MODE (XEXP (use, 0));
7018 regno = REGNO (XEXP (use, 0));
7019
7020 /* Is it a floating point register? */
7021 if (regno >= 32 && regno <= 39)
7022 {
7023 if (arg_mode == SFmode)
7024 length += 8;
7025 else
7026 length += 12;
7027 }
7028 }
7029
7030 return length;
7031 }
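/* E.g. a SOM call passing two doubles in floating-point registers
   adds 12 bytes apiece, 24 in total, matching the fstd/ldw/ldw
   triple that copy_fp_args emits for each DFmode argument.  */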
7032
7033 /* Return the attribute length for the millicode call instruction INSN.
7034 The length must match the code generated by output_millicode_call.
7035 We include the delay slot in the returned length as it is better to
7036 overestimate the length than to underestimate it. */
7037
7038 int
7039 attr_length_millicode_call (rtx insn)
7040 {
7041 unsigned long distance = -1;
7042 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7043
7044 if (INSN_ADDRESSES_SET_P ())
7045 {
7046 distance = (total + insn_current_reference_address (insn));
7047 if (distance < total)
7048 distance = -1;
7049 }
7050
7051 if (TARGET_64BIT)
7052 {
7053 if (!TARGET_LONG_CALLS && distance < 7600000)
7054 return 8;
7055
7056 return 20;
7057 }
7058 else if (TARGET_PORTABLE_RUNTIME)
7059 return 24;
7060 else
7061 {
7062 if (!TARGET_LONG_CALLS && distance < 240000)
7063 return 8;
7064
7065 if (TARGET_LONG_ABS_CALL && !flag_pic)
7066 return 12;
7067
7068 return 24;
7069 }
7070 }
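/* These lengths mirror the sequences emitted by output_millicode_call
   below: 8 bytes for a reachable {bl|b,l} plus its delay slot, 12 for
   the non-PIC ldil/ble pair with delay slot, and 20 or 24 bytes for
   the long pc-relative and portable-runtime forms.  */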
7071
7072 /* INSN is a function call. It may have an unconditional jump
7073 in its delay slot.
7074
7075 CALL_DEST is the routine we are calling. */
7076
7077 const char *
7078 output_millicode_call (rtx insn, rtx call_dest)
7079 {
7080 int attr_length = get_attr_length (insn);
7081 int seq_length = dbr_sequence_length ();
7082 int distance;
7083 rtx seq_insn;
7084 rtx xoperands[3];
7085
7086 xoperands[0] = call_dest;
7087 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7088
7089 /* Handle the common case where we are sure that the branch will
7090 reach the beginning of the $CODE$ subspace. The within-reach
7091 form of the $$sh_func_adrs call has a length of 28. Because
7092 it has an attribute type of multi, it never has a nonzero
7093 sequence length. The length of the $$sh_func_adrs is the same
7094 as certain out of reach PIC calls to other routines. */
7095 if (!TARGET_LONG_CALLS
7096 && ((seq_length == 0
7097 && (attr_length == 12
7098 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7099 || (seq_length != 0 && attr_length == 8)))
7100 {
7101 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7102 }
7103 else
7104 {
7105 if (TARGET_64BIT)
7106 {
7107 /* It might seem that one insn could be saved by accessing
7108 the millicode function using the linkage table. However,
7109 this doesn't work in shared libraries and other dynamically
7110 loaded objects. Using a pc-relative sequence also avoids
7111 problems related to the implicit use of the gp register. */
7112 output_asm_insn ("b,l .+8,%%r1", xoperands);
7113
7114 if (TARGET_GAS)
7115 {
7116 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7117 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7118 }
7119 else
7120 {
7121 xoperands[1] = gen_label_rtx ();
7122 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7123 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7124 CODE_LABEL_NUMBER (xoperands[1]));
7125 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7126 }
7127
7128 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7129 }
7130 else if (TARGET_PORTABLE_RUNTIME)
7131 {
7132 /* Pure portable runtime doesn't allow be/ble; we also don't
7133 have PIC support in the assembler/linker, so this sequence
7134 is needed. */
7135
7136 /* Get the address of our target into %r1. */
7137 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7138 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7139
7140 /* Get our return address into %r31. */
7141 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7142 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7143
7144 /* Jump to our target address in %r1. */
7145 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7146 }
7147 else if (!flag_pic)
7148 {
7149 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7150 if (TARGET_PA_20)
7151 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7152 else
7153 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7154 }
7155 else
7156 {
7157 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7158 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7159
7160 if (TARGET_SOM || !TARGET_GAS)
7161 {
7162 /* The HP assembler can generate relocations for the
7163 difference of two symbols. GAS can do this for a
7164 millicode symbol but not an arbitrary external
7165 symbol when generating SOM output. */
7166 xoperands[1] = gen_label_rtx ();
7167 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7168 CODE_LABEL_NUMBER (xoperands[1]));
7169 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7170 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7171 }
7172 else
7173 {
7174 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7175 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7176 xoperands);
7177 }
7178
7179 /* Jump to our target address in %r1. */
7180 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7181 }
7182 }
7183
7184 if (seq_length == 0)
7185 output_asm_insn ("nop", xoperands);
7186
7187 /* We are done if there isn't a jump in the delay slot. */
7188 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7189 return "";
7190
7191 /* This call has an unconditional jump in its delay slot. */
7192 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7193
7194 /* See if the return address can be adjusted. Use the containing
7195 sequence insn's address. */
7196 if (INSN_ADDRESSES_SET_P ())
7197 {
7198 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7199 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7200 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7201
7202 if (VAL_14_BITS_P (distance))
7203 {
7204 xoperands[1] = gen_label_rtx ();
7205 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7206 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7207 CODE_LABEL_NUMBER (xoperands[1]));
7208 }
7209 else
7210 /* ??? This branch may not reach its target. */
7211 output_asm_insn ("nop\n\tb,n %0", xoperands);
7212 }
7213 else
7214 /* ??? This branch may not reach its target. */
7215 output_asm_insn ("nop\n\tb,n %0", xoperands);
7216
7217 /* Delete the jump. */
7218 PUT_CODE (NEXT_INSN (insn), NOTE);
7219 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7220 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7221
7222 return "";
7223 }
7224
7225 /* Return the attribute length of the call instruction INSN. The SIBCALL
7226 flag indicates whether INSN is a regular call or a sibling call. The
7227 length returned must be longer than the code actually generated by
7228 output_call. Since branch shortening is done before delay branch
7229 sequencing, there is no way to determine whether or not the delay
7230 slot will be filled during branch shortening. Even when the delay
7231 slot is filled, we may have to add a nop if the delay slot contains
7232 a branch that can't reach its target. Thus, we always have to include
7233 the delay slot in the length estimate. This used to be done in
7234 pa_adjust_insn_length but we do it here now as some sequences always
7235 fill the delay slot and we can save four bytes in the estimate for
7236 these sequences. */
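/* A worked sketch of the arithmetic below (illustrative only): the
   short pc-relative case is "{bl|b,l} target,%r2" plus its delay
   slot, i.e., two words or 8 bytes.  The 32-bit plabel case starts
   at 32 bytes and may grow by 4 bytes for PIC, by 8 bytes for the
   %r2 adjustment on PA 1.x non-sibcalls, and by 8 more when space
   registers are in use.  */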
7237
7238 int
7239 attr_length_call (rtx insn, int sibcall)
7240 {
7241 int local_call;
7242 rtx call_dest;
7243 tree call_decl;
7244 int length = 0;
7245 rtx pat = PATTERN (insn);
7246 unsigned long distance = -1;
7247
7248 if (INSN_ADDRESSES_SET_P ())
7249 {
7250 unsigned long total;
7251
7252 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7253 distance = (total + insn_current_reference_address (insn));
7254 if (distance < total)
7255 distance = -1;
7256 }
7257
7258 /* Determine if this is a local call. */
7259 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7260 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7261 else
7262 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7263
7264 call_decl = SYMBOL_REF_DECL (call_dest);
7265 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7266
7267 /* pc-relative branch. */
7268 if (!TARGET_LONG_CALLS
7269 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7270 || distance < 240000))
7271 length += 8;
7272
7273 /* 64-bit plabel sequence. */
7274 else if (TARGET_64BIT && !local_call)
7275 length += sibcall ? 28 : 24;
7276
7277 /* non-pic long absolute branch sequence. */
7278 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7279 length += 12;
7280
7281 /* long pc-relative branch sequence. */
7282 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7283 || (TARGET_64BIT && !TARGET_GAS)
7284 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7285 {
7286 length += 20;
7287
7288 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7289 length += 8;
7290 }
7291
7292 /* 32-bit plabel sequence. */
7293 else
7294 {
7295 length += 32;
7296
7297 if (TARGET_SOM)
7298 length += length_fp_args (insn);
7299
7300 if (flag_pic)
7301 length += 4;
7302
7303 if (!TARGET_PA_20)
7304 {
7305 if (!sibcall)
7306 length += 8;
7307
7308 if (!TARGET_NO_SPACE_REGS)
7309 length += 8;
7310 }
7311 }
7312
7313 return length;
7314 }
7315
7316 /* INSN is a function call. It may have an unconditional jump
7317 in its delay slot.
7318
7319 CALL_DEST is the routine we are calling. */
7320
7321 const char *
7322 output_call (rtx insn, rtx call_dest, int sibcall)
7323 {
7324 int delay_insn_deleted = 0;
7325 int delay_slot_filled = 0;
7326 int seq_length = dbr_sequence_length ();
7327 tree call_decl = SYMBOL_REF_DECL (call_dest);
7328 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7329 rtx xoperands[2];
7330
7331 xoperands[0] = call_dest;
7332
7333 /* Handle the common case where we're sure that the branch will reach
7334 the beginning of the "$CODE$" subspace. This is the beginning of
7335 the current function if we are in a named section. */
7336 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7337 {
7338 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7339 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7340 }
7341 else
7342 {
7343 if (TARGET_64BIT && !local_call)
7344 {
7345 /* ??? As far as I can tell, the HP linker doesn't support the
7346 long pc-relative sequence described in the 64-bit runtime
7347 architecture. So, we use a slightly longer indirect call. */
7348 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7349
7350 xoperands[0] = p->internal_label;
7351 xoperands[1] = gen_label_rtx ();
7352
7353 /* If this isn't a sibcall, we put the load of %r27 into the
7354 delay slot. We can't do this in a sibcall as we don't
7355 have a second call-clobbered scratch register available. */
7356 if (seq_length != 0
7357 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7358 && !sibcall)
7359 {
7360 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7361 optimize, 0, 0, NULL);
7362
7363 /* Now delete the delay insn. */
7364 PUT_CODE (NEXT_INSN (insn), NOTE);
7365 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7366 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7367 delay_insn_deleted = 1;
7368 }
7369
7370 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7371 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7372 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7373
7374 if (sibcall)
7375 {
7376 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7377 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7378 output_asm_insn ("bve (%%r1)", xoperands);
7379 }
7380 else
7381 {
7382 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7383 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7384 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7385 delay_slot_filled = 1;
7386 }
7387 }
7388 else
7389 {
7390 int indirect_call = 0;
7391
7392 /* Emit a long call. There are several different sequences
7393 of increasing length and complexity. In most cases,
7394 they don't allow an instruction in the delay slot. */
7395 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7396 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7397 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7398 && !TARGET_64BIT)
7399 indirect_call = 1;
7400
7401 if (seq_length != 0
7402 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7403 && !sibcall
7404 && (!TARGET_PA_20 || indirect_call))
7405 {
7406 /* A non-jump insn in the delay slot. By definition we can
7407 emit this insn before the call (and, in fact, before argument
7408 relocation). */
7409 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0, 0,
7410 NULL);
7411
7412 /* Now delete the delay insn. */
7413 PUT_CODE (NEXT_INSN (insn), NOTE);
7414 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7415 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7416 delay_insn_deleted = 1;
7417 }
7418
7419 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7420 {
7421 /* This is the best sequence for making long calls in
7422 non-pic code. Unfortunately, GNU ld doesn't provide
7423 the stub needed for external calls, and GAS's support
7424 for this with the SOM linker is buggy. It is safe
7425 to use this for local calls. */
7426 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7427 if (sibcall)
7428 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7429 else
7430 {
7431 if (TARGET_PA_20)
7432 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7433 xoperands);
7434 else
7435 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7436
7437 output_asm_insn ("copy %%r31,%%r2", xoperands);
7438 delay_slot_filled = 1;
7439 }
7440 }
7441 else
7442 {
7443 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7444 || (TARGET_64BIT && !TARGET_GAS))
7445 {
7446 /* The HP assembler and linker can handle relocations
7447 for the difference of two symbols. GAS and the HP
7448 linker can't do this when one of the symbols is
7449 external. */
7450 xoperands[1] = gen_label_rtx ();
7451 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7452 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7453 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7454 CODE_LABEL_NUMBER (xoperands[1]));
7455 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7456 }
7457 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7458 {
7459 /* GAS currently can't generate the relocations that
7460 are needed for the SOM linker under HP-UX using this
7461 sequence. The GNU linker doesn't generate the stubs
7462 that are needed for external calls on TARGET_ELF32
7463 with this sequence. For now, we have to use a
7464 longer plabel sequence when using GAS. */
7465 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7466 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7467 xoperands);
7468 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7469 xoperands);
7470 }
7471 else
7472 {
7473 /* Emit a long plabel-based call sequence. This is
7474 essentially an inline implementation of $$dyncall.
7475 We don't actually try to call $$dyncall as this is
7476 as difficult as calling the function itself. */
7477 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7478
7479 xoperands[0] = p->internal_label;
7480 xoperands[1] = gen_label_rtx ();
7481
7482 /* Since the call is indirect, FP arguments in registers
7483 need to be copied to the general registers. Then, the
7484 argument relocation stub will copy them back. */
7485 if (TARGET_SOM)
7486 copy_fp_args (insn);
7487
7488 if (flag_pic)
7489 {
7490 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7491 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7492 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7493 }
7494 else
7495 {
7496 output_asm_insn ("addil LR'%0-$global$,%%r27",
7497 xoperands);
7498 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7499 xoperands);
7500 }
7501
7502 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7503 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7504 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7505 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7506
7507 if (!sibcall && !TARGET_PA_20)
7508 {
7509 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7510 if (TARGET_NO_SPACE_REGS)
7511 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7512 else
7513 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7514 }
7515 }
7516
7517 if (TARGET_PA_20)
7518 {
7519 if (sibcall)
7520 output_asm_insn ("bve (%%r1)", xoperands);
7521 else
7522 {
7523 if (indirect_call)
7524 {
7525 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7526 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7527 delay_slot_filled = 1;
7528 }
7529 else
7530 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7531 }
7532 }
7533 else
7534 {
7535 if (!TARGET_NO_SPACE_REGS)
7536 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7537 xoperands);
7538
7539 if (sibcall)
7540 {
7541 if (TARGET_NO_SPACE_REGS)
7542 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7543 else
7544 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7545 }
7546 else
7547 {
7548 if (TARGET_NO_SPACE_REGS)
7549 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7550 else
7551 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7552
7553 if (indirect_call)
7554 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7555 else
7556 output_asm_insn ("copy %%r31,%%r2", xoperands);
7557 delay_slot_filled = 1;
7558 }
7559 }
7560 }
7561 }
7562 }
7563
7564 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7565 output_asm_insn ("nop", xoperands);
7566
7567 /* We are done if there isn't a jump in the delay slot. */
7568 if (seq_length == 0
7569 || delay_insn_deleted
7570 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7571 return "";
7572
7573 /* A sibcall should never have a branch in the delay slot. */
7574 if (sibcall)
7575 abort ();
7576
7577 /* This call has an unconditional jump in its delay slot. */
7578 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7579
7580 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7581 {
7582 /* See if the return address can be adjusted. Use the containing
7583 sequence insn's address. */
7584 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7585 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7586 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7587
7588 if (VAL_14_BITS_P (distance))
7589 {
7590 xoperands[1] = gen_label_rtx ();
7591 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7592 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7593 CODE_LABEL_NUMBER (xoperands[1]));
7594 }
7595 else
7596 output_asm_insn ("nop\n\tb,n %0", xoperands);
7597 }
7598 else
7599 output_asm_insn ("b,n %0", xoperands);
7600
7601 /* Delete the jump. */
7602 PUT_CODE (NEXT_INSN (insn), NOTE);
7603 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7604 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7605
7606 return "";
7607 }
7608
7609 /* Return the attribute length of the indirect call instruction INSN.
7610 The length must match the code generated by output_indirect_call.
7611 The returned length includes the delay slot. Currently, the delay
7612 slot of an indirect call sequence is not exposed and it is used by
7613 the sequence itself. */
7614
7615 int
7616 attr_length_indirect_call (rtx insn)
7617 {
7618 unsigned long distance = -1;
7619 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7620
7621 if (INSN_ADDRESSES_SET_P ())
7622 {
7623 distance = (total + insn_current_reference_address (insn));
7624 if (distance < total)
7625 distance = -1;
7626 }
7627
7628 if (TARGET_64BIT)
7629 return 12;
7630
7631 if (TARGET_FAST_INDIRECT_CALLS
7632 || (!TARGET_PORTABLE_RUNTIME
7633 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7634 return 8;
7635
7636 if (flag_pic)
7637 return 24;
7638
7639 if (TARGET_PORTABLE_RUNTIME)
7640 return 20;
7641
7642 /* Out of reach, can use ble. */
7643 return 12;
7644 }
7645
7646 const char *
7647 output_indirect_call (rtx insn, rtx call_dest)
7648 {
7649 rtx xoperands[1];
7650
7651 if (TARGET_64BIT)
7652 {
7653 xoperands[0] = call_dest;
7654 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7655 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7656 return "";
7657 }
7658
7659 /* First the special case for kernels, level 0 systems, etc. */
7660 if (TARGET_FAST_INDIRECT_CALLS)
7661 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7662
7663 /* Now the normal case -- we can reach $$dyncall directly or
7664 we're sure that we can get there via a long-branch stub.
7665
7666 No need to check target flags as the length uniquely identifies
7667 the remaining cases. */
7668 if (attr_length_indirect_call (insn) == 8)
7669 {
7670 /* The HP linker substitutes a BLE for millicode calls using
7671 the short PIC PCREL form. Thus, we must use %r31 as the
7672 link register when generating PA 1.x code. */
7673 if (TARGET_PA_20)
7674 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7675 else
7676 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7677 }
7678
7679 /* Long millicode call, but we are not generating PIC or portable runtime
7680 code. */
7681 if (attr_length_indirect_call (insn) == 12)
7682 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7683
7684 /* Long millicode call for portable runtime. */
7685 if (attr_length_indirect_call (insn) == 20)
7686 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7687
7688 /* We need a long PIC call to $$dyncall. */
7689 xoperands[0] = NULL_RTX;
7690 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7691 if (TARGET_SOM || !TARGET_GAS)
7692 {
7693 xoperands[0] = gen_label_rtx ();
7694 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7695 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7696 CODE_LABEL_NUMBER (xoperands[0]));
7697 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7698 }
7699 else
7700 {
7701 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7702 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7703 xoperands);
7704 }
7705 output_asm_insn ("blr %%r0,%%r2", xoperands);
7706 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7707 return "";
7708 }
7709
7710 /* Return the total length of the save and restore instructions needed for
7711 the data linkage table pointer (i.e., the PIC register) across the call
7712 instruction INSN. No-return calls do not require a save and restore.
7713 In addition, we may be able to avoid the save and restore for calls
7714 within the same translation unit. */
7715
7716 int
7717 attr_length_save_restore_dltp (rtx insn)
7718 {
7719 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7720 return 0;
7721
7722 return 8;
7723 }
7724
7725 /* In HPUX 8.0's shared library scheme, special relocations are needed
7726 for function labels if they might be passed to a function
7727 in a shared library (because shared libraries don't live in code
7728 space), and special magic is needed to construct their address. */
7729
7730 void
7731 hppa_encode_label (rtx sym)
7732 {
7733 const char *str = XSTR (sym, 0);
7734 int len = strlen (str) + 1;
7735 char *newstr, *p;
7736
7737 p = newstr = alloca (len + 1);
7738 *p++ = '@';
7739 strcpy (p, str);
7740
7741 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7742 }
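/* A minimal sketch of the encoding, using a hypothetical symbol: the
   function above rewrites the SYMBOL_REF string "foo" to "@foo".
   pa_strip_name_encoding below reverses this, skipping a leading '@'
   (and a leading '*', the generic user-label prefix).  */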
7743
7744 static void
7745 pa_encode_section_info (tree decl, rtx rtl, int first)
7746 {
7747 if (first && TEXT_SPACE_P (decl))
7748 {
7749 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7750 if (TREE_CODE (decl) == FUNCTION_DECL)
7751 hppa_encode_label (XEXP (rtl, 0));
7752 }
7753 }
7754
7755 /* This is, more or less, the inverse of pa_encode_section_info. */
7756
7757 static const char *
7758 pa_strip_name_encoding (const char *str)
7759 {
7760 str += (*str == '@');
7761 str += (*str == '*');
7762 return str;
7763 }
7764
7765 int
7766 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7767 {
7768 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7769 }
7770
7771 /* Returns 1 if OP is a function label involved in a simple addition
7772 with a constant. Used to keep certain patterns from matching
7773 during instruction combination. */
7774 int
7775 is_function_label_plus_const (rtx op)
7776 {
7777 /* Strip off any CONST. */
7778 if (GET_CODE (op) == CONST)
7779 op = XEXP (op, 0);
7780
7781 return (GET_CODE (op) == PLUS
7782 && function_label_operand (XEXP (op, 0), Pmode)
7783 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7784 }
7785
7786 /* Output assembly code for a thunk to FUNCTION. */
7787
7788 static void
7789 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7790 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7791 tree function)
7792 {
7793 const char *fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
7794 const char *tname = XSTR (XEXP (DECL_RTL (thunk_fndecl), 0), 0);
7795 int val_14 = VAL_14_BITS_P (delta);
7796 int nbytes = 0;
7797 static unsigned int current_thunk_number;
7798 char label[16];
7799
7800 ASM_OUTPUT_LABEL (file, tname);
7801 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7802
7803 fname = (*targetm.strip_name_encoding) (fname);
7804 tname = (*targetm.strip_name_encoding) (tname);
7805
7806 /* Output the thunk. We know that the function is in the same
7807 translation unit (i.e., the same space) as the thunk, and that
7808 thunks are output after their method. Thus, we don't need an
7809 external branch to reach the function. With SOM and GAS,
7810 functions and thunks are effectively in different sections.
7811 Thus, we can always use an IA-relative branch and the linker
7812 will add a long branch stub if necessary.
7813
7814 However, we have to be careful when generating PIC code on the
7815 SOM port to ensure that the sequence does not transfer to an
7816 import stub for the target function as this could clobber the
7817 return value saved at SP-24. This would also apply to the
7818 32-bit linux port if the multi-space model is implemented. */
7819 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7820 && !(flag_pic && TREE_PUBLIC (function))
7821 && (TARGET_GAS || last_address < 262132))
7822 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7823 && ((targetm.have_named_sections
7824 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7825 /* The GNU 64-bit linker has rather poor stub management.
7826 So, we use a long branch from thunks that aren't in
7827 the same section as the target function. */
7828 && ((!TARGET_64BIT
7829 && (DECL_SECTION_NAME (thunk_fndecl)
7830 != DECL_SECTION_NAME (function)))
7831 || ((DECL_SECTION_NAME (thunk_fndecl)
7832 == DECL_SECTION_NAME (function))
7833 && last_address < 262132)))
7834 || (!targetm.have_named_sections && last_address < 262132))))
7835 {
7836 if (val_14)
7837 {
7838 fprintf (file, "\tb %s\n\tldo " HOST_WIDE_INT_PRINT_DEC
7839 "(%%r26),%%r26\n", fname, delta);
7840 nbytes += 8;
7841 }
7842 else
7843 {
7844 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7845 ",%%r26\n", delta);
7846 fprintf (file, "\tb %s\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7847 "(%%r1),%%r26\n", fname, delta);
7848 nbytes += 12;
7849 }
7850 }
7851 else if (TARGET_64BIT)
7852 {
7853 /* We only have one call-clobbered scratch register, so we can't
7854 make use of the delay slot if delta doesn't fit in 14 bits. */
7855 if (!val_14)
7856 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7857 ",%%r26\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7858 "(%%r1),%%r26\n", delta, delta);
7859
7860 fprintf (file, "\tb,l .+8,%%r1\n");
7861
7862 if (TARGET_GAS)
7863 {
7864 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7865 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r1\n", fname);
7866 }
7867 else
7868 {
7869 int off = val_14 ? 8 : 16;
7870 fprintf (file, "\taddil L'%s-%s-%d,%%r1\n", fname, tname, off);
7871 fprintf (file, "\tldo R'%s-%s-%d(%%r1),%%r1\n", fname, tname, off);
7872 }
7873
7874 if (val_14)
7875 {
7876 fprintf (file, "\tbv %%r0(%%r1)\n\tldo ");
7877 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7878 nbytes += 20;
7879 }
7880 else
7881 {
7882 fprintf (file, "\tbv,n %%r0(%%r1)\n");
7883 nbytes += 24;
7884 }
7885 }
7886 else if (TARGET_PORTABLE_RUNTIME)
7887 {
7888 fprintf (file, "\tldil L'%s,%%r1\n", fname);
7889 fprintf (file, "\tldo R'%s(%%r1),%%r22\n", fname);
7890
7891 if (val_14)
7892 {
7893 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7894 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7895 nbytes += 16;
7896 }
7897 else
7898 {
7899 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7900 ",%%r26\n", delta);
7901 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7902 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7903 nbytes += 20;
7904 }
7905 }
7906 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7907 {
7908 /* The function is accessible from outside this module. The only
7909 way to avoid an import stub between the thunk and function is to
7910 call the function directly with an indirect sequence similar to
7911 that used by $$dyncall. This is possible because $$dyncall acts
7912 as the import stub in an indirect call. */
7913 const char *lab;
7914
7915 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7916 lab = (*targetm.strip_name_encoding) (label);
7917
7918 fprintf (file, "\taddil LT'%s,%%r19\n", lab);
7919 fprintf (file, "\tldw RT'%s(%%r1),%%r22\n", lab);
7920 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7921 fprintf (file, "\tbb,>=,n %%r22,30,.+16\n");
7922 fprintf (file, "\tdepi 0,31,2,%%r22\n");
7923 fprintf (file, "\tldw 4(%%sr0,%%r22),%%r19\n");
7924 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7925 if (!val_14)
7926 {
7927 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7928 ",%%r26\n", delta);
7929 nbytes += 4;
7930 }
7931 if (TARGET_PA_20)
7932 {
7933 fprintf (file, "\tbve (%%r22)\n\tldo ");
7934 nbytes += 36;
7935 }
7936 else
7937 {
7938 if (TARGET_NO_SPACE_REGS)
7939 {
7940 fprintf (file, "\tbe 0(%%sr4,%%r22)\n\tldo ");
7941 nbytes += 36;
7942 }
7943 else
7944 {
7945 fprintf (file, "\tldsid (%%sr0,%%r22),%%r21\n");
7946 fprintf (file, "\tmtsp %%r21,%%sr0\n");
7947 fprintf (file, "\tbe 0(%%sr0,%%r22)\n\tldo ");
7948 nbytes += 44;
7949 }
7950 }
7951
7952 if (val_14)
7953 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7954 else
7955 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7956 }
7957 else if (flag_pic)
7958 {
7959 if (TARGET_PA_20)
7960 fprintf (file, "\tb,l .+8,%%r1\n");
7961 else
7962 fprintf (file, "\tbl .+8,%%r1\n");
7963
7964 if (TARGET_SOM || !TARGET_GAS)
7965 {
7966 fprintf (file, "\taddil L'%s-%s-8,%%r1\n", fname, tname);
7967 fprintf (file, "\tldo R'%s-%s-8(%%r1),%%r22\n", fname, tname);
7968 }
7969 else
7970 {
7971 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7972 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r22\n", fname);
7973 }
7974
7975 if (val_14)
7976 {
7977 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7978 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7979 nbytes += 20;
7980 }
7981 else
7982 {
7983 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7984 ",%%r26\n", delta);
7985 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7986 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7987 nbytes += 24;
7988 }
7989 }
7990 else
7991 {
7992 if (!val_14)
7993 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC ",%%r26\n", delta);
7994
7995 fprintf (file, "\tldil L'%s,%%r22\n", fname);
7996 fprintf (file, "\tbe R'%s(%%sr4,%%r22)\n\tldo ", fname);
7997
7998 if (val_14)
7999 {
8000 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
8001 nbytes += 12;
8002 }
8003 else
8004 {
8005 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
8006 nbytes += 16;
8007 }
8008 }
8009
8010 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8011
8012 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8013 {
8014 data_section ();
8015 fprintf (file, "\t.align 4\n");
8016 ASM_OUTPUT_LABEL (file, label);
8017 fprintf (file, "\t.word P'%s\n", fname);
8018 }
8019 else if (TARGET_SOM && TARGET_GAS)
8020 forget_section ();
8021
8022 current_thunk_number++;
8023 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8024 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8025 last_address += nbytes;
8026 update_total_code_bytes (nbytes);
8027 }
8028
8029 /* Only direct calls to static functions are allowed to be sibling (tail)
8030 call optimized.
8031
8032 This restriction is necessary because some linker-generated stubs will
8033 store return pointers into rp' in some cases, which might clobber a
8034 live value already in rp'.
8035
8036 In a sibcall the current function and the target function share stack
8037 space. Thus if the path to the current function and the path to the
8038 target function save a value in rp', they save the value into the
8039 same stack slot, which has undesirable consequences.
8040
8041 Because of the deferred binding nature of shared libraries any function
8042 with external scope could be in a different load module and thus require
8043 rp' to be saved when calling that function. So sibcall optimizations
8044 can only be safe for static functions.
8045
8046 Note that GCC never needs return value relocations, so we don't have to
8047 worry about static calls with return value relocations (which require
8048 saving rp').
8049
8050 It is safe to perform a sibcall optimization when the target function
8051 will never return. */
8052 static bool
8053 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8054 {
8055 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8056 single subspace mode and the call is not indirect. As far as I know,
8057 there is no operating system support for the multiple subspace mode.
8058 It might be possible to support indirect calls if we didn't use
8059 $$dyncall (see the indirect sequence generated in output_call). */
8060 if (TARGET_ELF32)
8061 return (decl != NULL_TREE);
8062
8063 /* Sibcalls are not ok because the arg pointer register is not a fixed
8064 register. This prevents the sibcall optimization from occurring. In
8065 addition, there are problems with stub placement using GNU ld. This
8066 is because a normal sibcall branch uses a 17-bit relocation while
8067 a regular call branch uses a 22-bit relocation. As a result, more
8068 care needs to be taken in the placement of long-branch stubs. */
8069 if (TARGET_64BIT)
8070 return false;
8071
8072 return (decl
8073 && !TARGET_PORTABLE_RUNTIME
8074 && !TREE_PUBLIC (decl));
8075 }
8076
8077 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8078 use in fmpyadd instructions. */
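/* As the checks below imply, operands[0..2] are the multiply's
   destination and sources, and operands[3..5] are the add's
   destination and sources; operands[3] doubles as an input to the
   add.  */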
8079 int
8080 fmpyaddoperands (rtx *operands)
8081 {
8082 enum machine_mode mode = GET_MODE (operands[0]);
8083
8084 /* Must be a floating point mode. */
8085 if (mode != SFmode && mode != DFmode)
8086 return 0;
8087
8088 /* All modes must be the same. */
8089 if (! (mode == GET_MODE (operands[1])
8090 && mode == GET_MODE (operands[2])
8091 && mode == GET_MODE (operands[3])
8092 && mode == GET_MODE (operands[4])
8093 && mode == GET_MODE (operands[5])))
8094 return 0;
8095
8096 /* All operands must be registers. */
8097 if (! (GET_CODE (operands[1]) == REG
8098 && GET_CODE (operands[2]) == REG
8099 && GET_CODE (operands[3]) == REG
8100 && GET_CODE (operands[4]) == REG
8101 && GET_CODE (operands[5]) == REG))
8102 return 0;
8103
8104 /* Only 2 real operands to the addition. One of the input operands must
8105 be the same as the output operand. */
8106 if (! rtx_equal_p (operands[3], operands[4])
8107 && ! rtx_equal_p (operands[3], operands[5]))
8108 return 0;
8109
8110 /* Inout operand of add cannot conflict with any operands from multiply. */
8111 if (rtx_equal_p (operands[3], operands[0])
8112 || rtx_equal_p (operands[3], operands[1])
8113 || rtx_equal_p (operands[3], operands[2]))
8114 return 0;
8115
8116 /* The multiply cannot feed into the addition operands. */
8117 if (rtx_equal_p (operands[4], operands[0])
8118 || rtx_equal_p (operands[5], operands[0]))
8119 return 0;
8120
8121 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8122 if (mode == SFmode
8123 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8124 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8125 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8126 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8127 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8128 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8129 return 0;
8130
8131 /* Passed. Operands are suitable for fmpyadd. */
8132 return 1;
8133 }
8134
8135 #if !defined(USE_COLLECT2)
8136 static void
8137 pa_asm_out_constructor (rtx symbol, int priority)
8138 {
8139 if (!function_label_operand (symbol, VOIDmode))
8140 hppa_encode_label (symbol);
8141
8142 #ifdef CTORS_SECTION_ASM_OP
8143 default_ctor_section_asm_out_constructor (symbol, priority);
8144 #else
8145 # ifdef TARGET_ASM_NAMED_SECTION
8146 default_named_section_asm_out_constructor (symbol, priority);
8147 # else
8148 default_stabs_asm_out_constructor (symbol, priority);
8149 # endif
8150 #endif
8151 }
8152
8153 static void
8154 pa_asm_out_destructor (rtx symbol, int priority)
8155 {
8156 if (!function_label_operand (symbol, VOIDmode))
8157 hppa_encode_label (symbol);
8158
8159 #ifdef DTORS_SECTION_ASM_OP
8160 default_dtor_section_asm_out_destructor (symbol, priority);
8161 #else
8162 # ifdef TARGET_ASM_NAMED_SECTION
8163 default_named_section_asm_out_destructor (symbol, priority);
8164 # else
8165 default_stabs_asm_out_destructor (symbol, priority);
8166 # endif
8167 #endif
8168 }
8169 #endif
8170
8171 /* This function places uninitialized global data in the bss section.
8172 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8173 function on the SOM port to prevent uninitialized global data from
8174 being placed in the data section. */
8175
8176 void
8177 pa_asm_output_aligned_bss (FILE *stream,
8178 const char *name,
8179 unsigned HOST_WIDE_INT size,
8180 unsigned int align)
8181 {
8182 bss_section ();
8183 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8184
8185 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8186 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8187 #endif
8188
8189 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8190 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8191 #endif
8192
8193 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8194 ASM_OUTPUT_LABEL (stream, name);
8195 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8196 }
8197
8198 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8199 that doesn't allow the alignment of global common storage to be directly
8200 specified. The SOM linker aligns common storage based on the rounded
8201 value of the NUM_BYTES parameter in the .comm directive. It's not
8202 possible to use the .align directive as it doesn't affect the alignment
8203 of the label associated with a .comm directive. */
8204
8205 void
8206 pa_asm_output_aligned_common (FILE *stream,
8207 const char *name,
8208 unsigned HOST_WIDE_INT size,
8209 unsigned int align)
8210 {
8211 bss_section ();
8212
8213 assemble_name (stream, name);
8214 fprintf (stream, "\t.comm " HOST_WIDE_INT_PRINT_UNSIGNED "\n",
8215 MAX (size, align / BITS_PER_UNIT));
8216 }
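/* For example (hypothetical values): a 4-byte common symbol "x" that
   requests 8-byte alignment emits

	x	.comm 8

   since the size is rounded up to the alignment and the SOM linker
   derives the alignment from the rounded size.  */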
8217
8218 /* We can't use .comm for local common storage as the SOM linker effectively
8219 treats the symbol as universal and uses the same storage for local symbols
8220 with the same name in different object files. The .block directive
8221 reserves an uninitialized block of storage. However, it's not common
8222 storage. Fortunately, GCC never requests common storage with the same
8223 name in any given translation unit. */
8224
8225 void
8226 pa_asm_output_aligned_local (FILE *stream,
8227 const char *name,
8228 unsigned HOST_WIDE_INT size,
8229 unsigned int align)
8230 {
8231 bss_section ();
8232 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8233
8234 #ifdef LOCAL_ASM_OP
8235 fprintf (stream, "%s", LOCAL_ASM_OP);
8236 assemble_name (stream, name);
8237 fprintf (stream, "\n");
8238 #endif
8239
8240 ASM_OUTPUT_LABEL (stream, name);
8241 fprintf (stream, "\t.block " HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8242 }
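/* For example (hypothetical values): a 16-byte local "buf" with
   8-byte alignment emits roughly

	.align 8
   buf
	.block 16

   plus a LOCAL_ASM_OP line when the target defines one.  */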
8243
8244 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8245 use in fmpysub instructions. */
8246 int
8247 fmpysuboperands (rtx *operands)
8248 {
8249 enum machine_mode mode = GET_MODE (operands[0]);
8250
8251 /* Must be a floating point mode. */
8252 if (mode != SFmode && mode != DFmode)
8253 return 0;
8254
8255 /* All modes must be the same. */
8256 if (! (mode == GET_MODE (operands[1])
8257 && mode == GET_MODE (operands[2])
8258 && mode == GET_MODE (operands[3])
8259 && mode == GET_MODE (operands[4])
8260 && mode == GET_MODE (operands[5])))
8261 return 0;
8262
8263 /* All operands must be registers. */
8264 if (! (GET_CODE (operands[1]) == REG
8265 && GET_CODE (operands[2]) == REG
8266 && GET_CODE (operands[3]) == REG
8267 && GET_CODE (operands[4]) == REG
8268 && GET_CODE (operands[5]) == REG))
8269 return 0;
8270
8271 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8272 operation, so operands[4] must be the same as operands[3]. */
8273 if (! rtx_equal_p (operands[3], operands[4]))
8274 return 0;
8275
8276 /* The multiply cannot feed into the subtraction. */
8277 if (rtx_equal_p (operands[5], operands[0]))
8278 return 0;
8279
8280 /* Inout operand of sub cannot conflict with any operands from multiply. */
8281 if (rtx_equal_p (operands[3], operands[0])
8282 || rtx_equal_p (operands[3], operands[1])
8283 || rtx_equal_p (operands[3], operands[2]))
8284 return 0;
8285
8286 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8287 if (mode == SFmode
8288 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8289 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8290 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8291 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8292 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8293 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8294 return 0;
8295
8296 /* Passed. Operands are suitable for fmpysub. */
8297 return 1;
8298 }
8299
8300 int
8301 plus_xor_ior_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8302 {
8303 return (GET_CODE (op) == PLUS || GET_CODE (op) == XOR
8304 || GET_CODE (op) == IOR);
8305 }
8306
8307 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8308 constants for shadd instructions. */
8309 static int
8310 shadd_constant_p (int val)
8311 {
8312 if (val == 2 || val == 4 || val == 8)
8313 return 1;
8314 else
8315 return 0;
8316 }
8317
8318 /* Return 1 if OP is a CONST_INT with the value 2, 4, or 8. These are
8319 the valid constant for shadd instructions. */
8320 int
8321 shadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8322 {
8323 return (GET_CODE (op) == CONST_INT && shadd_constant_p (INTVAL (op)));
8324 }
8325
8326 /* Return 1 if OP is valid as a base or index register in a
8327 REG+REG address. */
8328
8329 int
8330 borx_reg_operand (rtx op, enum machine_mode mode)
8331 {
8332 if (GET_CODE (op) != REG)
8333 return 0;
8334
8335 /* We must reject virtual registers as the only expressions that
8336 can be instantiated are REG and REG+CONST. */
8337 if (op == virtual_incoming_args_rtx
8338 || op == virtual_stack_vars_rtx
8339 || op == virtual_stack_dynamic_rtx
8340 || op == virtual_outgoing_args_rtx
8341 || op == virtual_cfa_rtx)
8342 return 0;
8343
8344 /* While it's always safe to index off the frame pointer, it's not
8345 profitable to do so when the frame pointer is being eliminated. */
8346 if (!reload_completed
8347 && flag_omit_frame_pointer
8348 && !current_function_calls_alloca
8349 && op == frame_pointer_rtx)
8350 return 0;
8351
8352 return register_operand (op, mode);
8353 }
8354
8355 /* Return 1 if this operand is anything other than a hard register. */
8356
8357 int
8358 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8359 {
8360 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8361 }
8362
8363 /* Return 1 if INSN branches forward. This should use INSN_ADDRESSES
8364 to avoid walking through all the insns. */
8365 static int
8366 forward_branch_p (rtx insn)
8367 {
8368 rtx label = JUMP_LABEL (insn);
8369
8370 while (insn)
8371 {
8372 if (insn == label)
8373 break;
8374 else
8375 insn = NEXT_INSN (insn);
8376 }
8377
8378 return (insn == label);
8379 }
8380
8381 /* Return 1 if OP is an equality comparison, else return 0. */
8382 int
8383 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8384 {
8385 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8386 }
8387
8388 /* Return 1 if OP is an operator suitable for use in a movb instruction. */
8389 int
8390 movb_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8391 {
8392 return (GET_CODE (op) == EQ || GET_CODE (op) == NE
8393 || GET_CODE (op) == LT || GET_CODE (op) == GE);
8394 }
8395
8396 /* Return 1 if INSN is in the delay slot of a call instruction. */
8397 int
8398 jump_in_call_delay (rtx insn)
8399 {
8400
8401 if (GET_CODE (insn) != JUMP_INSN)
8402 return 0;
8403
8404 if (PREV_INSN (insn)
8405 && PREV_INSN (PREV_INSN (insn))
8406 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8407 {
8408 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8409
8410 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8411 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8412
8413 }
8414 else
8415 return 0;
8416 }
8417
8418 /* Output an unconditional move and branch insn. */
8419
8420 const char *
8421 output_parallel_movb (rtx *operands, int length)
8422 {
8423 /* These are the cases in which we win. */
8424 if (length == 4)
8425 return "mov%I1b,tr %1,%0,%2";
8426
8427 /* None of these cases wins, but they don't lose either. */
8428 if (dbr_sequence_length () == 0)
8429 {
8430 /* Nothing in the delay slot, fake it by putting the combined
8431 insn (the copy or add) in the delay slot of a bl. */
8432 if (GET_CODE (operands[1]) == CONST_INT)
8433 return "b %2\n\tldi %1,%0";
8434 else
8435 return "b %2\n\tcopy %1,%0";
8436 }
8437 else
8438 {
8439 /* Something in the delay slot, but we've got a long branch. */
8440 if (GET_CODE (operands[1]) == CONST_INT)
8441 return "ldi %1,%0\n\tb %2";
8442 else
8443 return "copy %1,%0\n\tb %2";
8444 }
8445 }
8446
8447 /* Output an unconditional add and branch insn. */
8448
8449 const char *
8450 output_parallel_addb (rtx *operands, int length)
8451 {
8452 /* To make life easy we want operand0 to be the shared input/output
8453 operand and operand1 to be the readonly operand. */
8454 if (operands[0] == operands[1])
8455 operands[1] = operands[2];
8456
8457 /* These are the cases in which we win. */
8458 if (length == 4)
8459 return "add%I1b,tr %1,%0,%3";
8460
8461 /* None of these cases wins, but they don't lose either. */
8462 if (dbr_sequence_length () == 0)
8463 {
8464 /* Nothing in the delay slot, fake it by putting the combined
8465 insn (the copy or add) in the delay slot of a bl. */
8466 return "b %3\n\tadd%I1 %1,%0,%0";
8467 }
8468 else
8469 {
8470 /* Something in the delay slot, but we've got a long branch. */
8471 return "add%I1 %1,%0,%0\n\tb %3";
8472 }
8473 }
8474
8475 /* Return nonzero if INSN (a jump insn) immediately follows a call
8476 to a named function. This is used to avoid filling the delay slot
8477 of the jump since it can usually be eliminated by modifying RP in
8478 the delay slot of the call. */
8479
8480 int
8481 following_call (rtx insn)
8482 {
8483 if (! TARGET_JUMP_IN_DELAY)
8484 return 0;
8485
8486 /* Find the previous real insn, skipping NOTEs. */
8487 insn = PREV_INSN (insn);
8488 while (insn && GET_CODE (insn) == NOTE)
8489 insn = PREV_INSN (insn);
8490
8491 /* Check for CALL_INSNs and millicode calls. */
8492 if (insn
8493 && ((GET_CODE (insn) == CALL_INSN
8494 && get_attr_type (insn) != TYPE_DYNCALL)
8495 || (GET_CODE (insn) == INSN
8496 && GET_CODE (PATTERN (insn)) != SEQUENCE
8497 && GET_CODE (PATTERN (insn)) != USE
8498 && GET_CODE (PATTERN (insn)) != CLOBBER
8499 && get_attr_type (insn) == TYPE_MILLI)))
8500 return 1;
8501
8502 return 0;
8503 }
8504
8505 /* We use this hook to perform a PA specific optimization which is difficult
8506 to do in earlier passes.
8507
8508 We want the delay slots of branches within jump tables to be filled.
8509 None of the compiler passes at the moment even has the notion that a
8510 PA jump table doesn't contain addresses, but instead contains actual
8511 instructions!
8512
8513 Because we actually jump into the table, the addresses of each entry
8514 must stay constant in relation to the beginning of the table (which
8515 itself must stay constant relative to the instruction to jump into
8516 it). I don't believe we can guarantee earlier passes of the compiler
8517 will adhere to those rules.
8518
8519 So, late in the compilation process we find all the jump tables, and
8520 expand them into real code -- e.g., each entry in the jump table vector
8521 will get an appropriate label followed by a jump to the final target.
8522
8523 Reorg and the final jump pass can then optimize these branches and
8524 fill their delay slots. We end up with smaller, more efficient code.
8525
8526 The jump instructions within the table are special; we must be able
8527 to identify them during assembly output (if the jumps don't get filled
8528 we need to emit a nop rather than nullifying the delay slot). We
8529 identify jumps in switch tables by using insns with the attribute
8530 type TYPE_BTABLE_BRANCH.
8531
8532 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8533 insns. This serves two purposes, first it prevents jump.c from
8534 noticing that the last N entries in the table jump to the instruction
8535 immediately after the table and deleting the jumps. Second, those
8536 insns mark where we should emit .begin_brtab and .end_brtab directives
8537 when using GAS (allows for better link time optimizations). */
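/* A sketch of the expansion (labels hypothetical): a table such as

	(addr_vec [L10 L20])

   becomes, bracketed by the begin_brtab and end_brtab markers,

	L$1:	b L10
		nop
	L$2:	b L20
		nop

   where reorg may later replace each nop by filling the delay slot;
   every entry keeps the same size, so the computed jump into the
   table remains valid.  */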
8538
8539 static void
8540 pa_reorg (void)
8541 {
8542 rtx insn;
8543
8544 remove_useless_addtr_insns (1);
8545
8546 if (pa_cpu < PROCESSOR_8000)
8547 pa_combine_instructions ();
8548
8549
8550 /* This is fairly cheap, so always run it if optimizing. */
8551 if (optimize > 0 && !TARGET_BIG_SWITCH)
8552 {
8553 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8554 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8555 {
8556 rtx pattern, tmp, location, label;
8557 unsigned int length, i;
8558
8559 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8560 if (GET_CODE (insn) != JUMP_INSN
8561 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8562 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8563 continue;
8564
8565 /* Emit marker for the beginning of the branch table. */
8566 emit_insn_before (gen_begin_brtab (), insn);
8567
8568 pattern = PATTERN (insn);
8569 location = PREV_INSN (insn);
8570 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8571
8572 for (i = 0; i < length; i++)
8573 {
8574 /* Emit a label before each jump to keep jump.c from
8575 removing this code. */
8576 tmp = gen_label_rtx ();
8577 LABEL_NUSES (tmp) = 1;
8578 emit_label_after (tmp, location);
8579 location = NEXT_INSN (location);
8580
8581 if (GET_CODE (pattern) == ADDR_VEC)
8582 label = XEXP (XVECEXP (pattern, 0, i), 0);
8583 else
8584 label = XEXP (XVECEXP (pattern, 1, i), 0);
8585
8586 tmp = gen_short_jump (label);
8587
8588 /* Emit the jump itself. */
8589 tmp = emit_jump_insn_after (tmp, location);
8590 JUMP_LABEL (tmp) = label;
8591 LABEL_NUSES (label)++;
8592 location = NEXT_INSN (location);
8593
8594 /* Emit a BARRIER after the jump. */
8595 emit_barrier_after (location);
8596 location = NEXT_INSN (location);
8597 }
8598
8599 /* Emit marker for the end of the branch table. */
8600 emit_insn_before (gen_end_brtab (), location);
8601 location = NEXT_INSN (location);
8602 emit_barrier_after (location);
8603
8604 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8605 delete_insn (insn);
8606 }
8607 }
8608 else
8609 {
8610 /* Still need brtab marker insns. FIXME: the presence of these
8611 markers disables output of the branch table to readonly memory,
8612 and any alignment directives that might be needed. Possibly,
8613 the begin_brtab insn should be output before the label for the
8614 table. This doesn't matter at the moment since the tables are
8615 always output in the text section. */
8616 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8617 {
8618 /* Find an ADDR_VEC insn. */
8619 if (GET_CODE (insn) != JUMP_INSN
8620 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8621 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8622 continue;
8623
8624 /* Now generate markers for the beginning and end of the
8625 branch table. */
8626 emit_insn_before (gen_begin_brtab (), insn);
8627 emit_insn_after (gen_end_brtab (), insn);
8628 }
8629 }
8630 }
8631
8632 /* The PA has a number of odd instructions which can perform multiple
8633 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8634 it may be profitable to combine two instructions into one instruction
8635 with two outputs. It's not profitable on PA2.0 machines because the
8636 two outputs would take two slots in the reorder buffers.
8637
8638 This routine finds instructions which can be combined and combines
8639 them. We only support some of the potential combinations, and we
8640 only try common ways to find suitable instructions.
8641
8642 * addb can add two registers or a register and a small integer
8643 and jump to a nearby (+-8k) location. Normally the jump to the
8644 nearby location is conditional on the result of the add, but by
8645 using the "true" condition we can make the jump unconditional.
8646 Thus addb can perform two independent operations in one insn.
8647
8648 * movb is similar to addb in that it can perform a reg->reg
8649 or small immediate->reg copy and jump to a nearby (+-8k) location.
8650
8651 * fmpyadd and fmpysub can perform a FP multiply and either an
8652 FP add or FP sub if the operands of the multiply and add/sub are
8653 independent (there are other minor restrictions). Note both
8654 the fmpy and fadd/fsub can in theory move to better spots according
8655 to data dependencies, but for now we require the fmpy stay at a
8656 fixed location.
8657
8658 * Many of the memory operations can perform pre & post updates
8659 of index registers. GCC's pre/post increment/decrement addressing
8660 is far too simple to take advantage of all the possibilities. This
8661 pass may not be suitable since those insns may not be independent.
8662
8663 * comclr can compare two ints or an int and a register, nullify
8664 the following instruction and zero some other register. This
8665 is more difficult to use as it's harder to find an insn which
8666 will generate a comclr than finding something like an unconditional
8667 branch. (conditional moves & long branches create comclr insns).
8668
8669 * Most arithmetic operations can conditionally skip the next
8670 instruction. They can be viewed as "perform this operation
8671 and conditionally jump to this nearby location" (where nearby
8672 is one insn away). These are difficult to use due to the
8673 branch length restrictions. */
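/* An illustrative sketch (register choices hypothetical): an
   independent pair such as

	fmpy,dbl %fr4,%fr5,%fr6
	fadd,dbl %fr7,%fr8,%fr8

   can be wrapped in a single PARALLEL, matched against the machine
   description, and emitted as one two-output fmpyadd insn.  The
   legality tests live in pa_can_combine_p below.  */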
8674
8675 static void
8676 pa_combine_instructions (void)
8677 {
8678 rtx anchor, new;
8679
8680 /* This can get expensive since the basic algorithm is on the
8681 order of O(n^2) (or worse). Only do it for -O2 or higher
8682 levels of optimization. */
8683 if (optimize < 2)
8684 return;
8685
8686 /* Walk down the list of insns looking for "anchor" insns which
8687 may be combined with "floating" insns. As the name implies,
8688 "anchor" instructions don't move, while "floating" insns may
8689 move around. */
8690 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8691 new = make_insn_raw (new);
8692
8693 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8694 {
8695 enum attr_pa_combine_type anchor_attr;
8696 enum attr_pa_combine_type floater_attr;
8697
8698 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8699 Also ignore any special USE insns. */
8700 if ((GET_CODE (anchor) != INSN
8701 && GET_CODE (anchor) != JUMP_INSN
8702 && GET_CODE (anchor) != CALL_INSN)
8703 || GET_CODE (PATTERN (anchor)) == USE
8704 || GET_CODE (PATTERN (anchor)) == CLOBBER
8705 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8706 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8707 continue;
8708
8709 anchor_attr = get_attr_pa_combine_type (anchor);
8710 /* See if anchor is an insn suitable for combination. */
8711 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8712 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8713 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8714 && ! forward_branch_p (anchor)))
8715 {
8716 rtx floater;
8717
8718 for (floater = PREV_INSN (anchor);
8719 floater;
8720 floater = PREV_INSN (floater))
8721 {
8722 if (GET_CODE (floater) == NOTE
8723 || (GET_CODE (floater) == INSN
8724 && (GET_CODE (PATTERN (floater)) == USE
8725 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8726 continue;
8727
8728 /* Anything except a regular INSN will stop our search. */
8729 if (GET_CODE (floater) != INSN
8730 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8731 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8732 {
8733 floater = NULL_RTX;
8734 break;
8735 }
8736
8737 /* See if FLOATER is suitable for combination with the
8738 anchor. */
8739 floater_attr = get_attr_pa_combine_type (floater);
8740 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8741 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8742 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8743 && floater_attr == PA_COMBINE_TYPE_FMPY))
8744 {
8745 /* If ANCHOR and FLOATER can be combined, then we're
8746 done with this pass. */
8747 if (pa_can_combine_p (new, anchor, floater, 0,
8748 SET_DEST (PATTERN (floater)),
8749 XEXP (SET_SRC (PATTERN (floater)), 0),
8750 XEXP (SET_SRC (PATTERN (floater)), 1)))
8751 break;
8752 }
8753
8754 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8755 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8756 {
8757 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8758 {
8759 if (pa_can_combine_p (new, anchor, floater, 0,
8760 SET_DEST (PATTERN (floater)),
8761 XEXP (SET_SRC (PATTERN (floater)), 0),
8762 XEXP (SET_SRC (PATTERN (floater)), 1)))
8763 break;
8764 }
8765 else
8766 {
8767 if (pa_can_combine_p (new, anchor, floater, 0,
8768 SET_DEST (PATTERN (floater)),
8769 SET_SRC (PATTERN (floater)),
8770 SET_SRC (PATTERN (floater))))
8771 break;
8772 }
8773 }
8774 }
8775
8776 /* If we didn't find anything on the backwards scan try forwards. */
8777 if (!floater
8778 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8779 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8780 {
8781 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8782 {
8783 if (GET_CODE (floater) == NOTE
8784 || (GET_CODE (floater) == INSN
8785 && (GET_CODE (PATTERN (floater)) == USE
8786 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8788 continue;
8789
8790 /* Anything except a regular INSN will stop our search. */
8791 if (GET_CODE (floater) != INSN
8792 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8793 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8794 {
8795 floater = NULL_RTX;
8796 break;
8797 }
8798
8799 /* See if FLOATER is suitable for combination with the
8800 anchor. */
8801 floater_attr = get_attr_pa_combine_type (floater);
8802 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8803 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8804 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8805 && floater_attr == PA_COMBINE_TYPE_FMPY))
8806 {
8807 /* If ANCHOR and FLOATER can be combined, then we're
8808 done with this pass. */
8809 if (pa_can_combine_p (new, anchor, floater, 1,
8810 SET_DEST (PATTERN (floater)),
8811 XEXP (SET_SRC (PATTERN (floater)),
8812 0),
8813 XEXP (SET_SRC (PATTERN (floater)),
8814 1)))
8815 break;
8816 }
8817 }
8818 }
8819
8820 /* FLOATER will be nonzero if we found a suitable floating
8821 insn for combination with ANCHOR. */
8822 if (floater
8823 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8824 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8825 {
8826 /* Emit the new instruction and delete the old anchor. */
8827 emit_insn_before (gen_rtx_PARALLEL
8828 (VOIDmode,
8829 gen_rtvec (2, PATTERN (anchor),
8830 PATTERN (floater))),
8831 anchor);
8832
8833 PUT_CODE (anchor, NOTE);
8834 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8835 NOTE_SOURCE_FILE (anchor) = 0;
8836
8837 /* Emit a special USE insn for FLOATER, then delete
8838 the floating insn. */
8839 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8840 delete_insn (floater);
8841
8842 continue;
8843 }
8844 else if (floater
8845 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8846 {
8847 rtx temp;
8848 /* Emit the new_jump instruction and delete the old anchor. */
8849 temp
8850 = emit_jump_insn_before (gen_rtx_PARALLEL
8851 (VOIDmode,
8852 gen_rtvec (2, PATTERN (anchor),
8853 PATTERN (floater))),
8854 anchor);
8855
8856 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8857 PUT_CODE (anchor, NOTE);
8858 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8859 NOTE_SOURCE_FILE (anchor) = 0;
8860
8861 /* Emit a special USE insn for FLOATER, then delete
8862 the floating insn. */
8863 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8864 delete_insn (floater);
8865 continue;
8866 }
8867 }
8868 }
8869 }
8870
8871 static int
8872 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8873 rtx src1, rtx src2)
8874 {
8875 int insn_code_number;
8876 rtx start, end;
8877
8878 /* Create a PARALLEL with the patterns of ANCHOR and
8879 FLOATER, try to recognize it, then test constraints
8880 for the resulting pattern.
8881
8882 If the pattern doesn't match or the constraints
8883 aren't met keep searching for a suitable floater
8884 insn. */
8885 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8886 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8887 INSN_CODE (new) = -1;
8888 insn_code_number = recog_memoized (new);
8889 if (insn_code_number < 0
8890 || (extract_insn (new), ! constrain_operands (1)))
8891 return 0;
8892
8893 if (reversed)
8894 {
8895 start = anchor;
8896 end = floater;
8897 }
8898 else
8899 {
8900 start = floater;
8901 end = anchor;
8902 }
8903
8904 /* There are up to three operands to consider: one
8905 output and two inputs.
8906
8907 The output must not be used between FLOATER & ANCHOR
8908 exclusive. The inputs must not be set between
8909 FLOATER and ANCHOR exclusive. */
8910
8911 if (reg_used_between_p (dest, start, end))
8912 return 0;
8913
8914 if (reg_set_between_p (src1, start, end))
8915 return 0;
8916
8917 if (reg_set_between_p (src2, start, end))
8918 return 0;
8919
8920 /* If we get here, then everything is good. */
8921 return 1;
8922 }
8923
8924 /* Return nonzero if references for INSN are delayed.
8925
8926 Millicode insns are actually function calls with some special
8927 constraints on arguments and register usage.
8928
8929 Millicode calls always expect their arguments in the integer argument
8930 registers, and always return their result in %r29 (ret1). They
8931 are expected to clobber their arguments, %r1, %r29, and the return
8932 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8933
8934 This function tells reorg that the references to arguments by
8935 millicode calls do not appear to happen until after the millicode call.
8936 This allows reorg to put insns which set the argument registers into the
8937 delay slot of the millicode call -- thus they act more like traditional
8938 CALL_INSNs.
8939
8940 Note we cannot consider side effects of the insn to be delayed because
8941 the branch and link insn will clobber the return pointer. If we happened
8942 to use the return pointer in the delay slot of the call, then we lose.
8943
8944 get_attr_type will try to recognize the given insn, so make sure to
8945 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8946 in particular. */
8947 int
8948 insn_refs_are_delayed (rtx insn)
8949 {
8950 return ((GET_CODE (insn) == INSN
8951 && GET_CODE (PATTERN (insn)) != SEQUENCE
8952 && GET_CODE (PATTERN (insn)) != USE
8953 && GET_CODE (PATTERN (insn)) != CLOBBER
8954 && get_attr_type (insn) == TYPE_MILLI));
8955 }
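/* For illustration only (a hypothetical sketch of a 32-bit millicode
   multiply; the exact register choices and code will vary):

       copy %r4,%r26        ; first argument in %r26
       bl $$mulI,%r31       ; branch and link; %r31 is the return pointer
        copy %r5,%r25       ; delay slot can set up the second argument
       copy %r29,%r3        ; result comes back in %r29 (ret1)

   Reporting the argument references as delayed is what allows reorg to
   place the %r25 setup in the delay slot as shown.  */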
8956
8957 /* On the HP-PA the value is found in register 28 (and 29), unless the
8958 mode is SF or DF; then the value is returned in fr4 (32).
8959
8960 This must perform the same promotions as PROMOTE_MODE, else
8961 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8962
8963 Small structures must be returned in a PARALLEL on PA64 in order
8964 to match the HP Compiler ABI. */
8965
8966 rtx
8967 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8968 {
8969 enum machine_mode valmode;
8970
8971 /* Aggregates with a size less than or equal to 128 bits are returned
8972 in GR 28(-29). They are left justified. The pad bits are undefined.
8973 Larger aggregates are returned in memory. */
8974 if (TARGET_64BIT && AGGREGATE_TYPE_P (valtype))
8975 {
8976 rtx loc[2];
8977 int i, offset = 0;
8978 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8979
8980 for (i = 0; i < ub; i++)
8981 {
8982 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8983 gen_rtx_REG (DImode, 28 + i),
8984 GEN_INT (offset));
8985 offset += 8;
8986 }
8987
8988 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8989 }
8990
8991 if ((INTEGRAL_TYPE_P (valtype)
8992 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8993 || POINTER_TYPE_P (valtype))
8994 valmode = word_mode;
8995 else
8996 valmode = TYPE_MODE (valtype);
8997
8998 if (TREE_CODE (valtype) == REAL_TYPE
8999 && TYPE_MODE (valtype) != TFmode
9000 && !TARGET_SOFT_FLOAT)
9001 return gen_rtx_REG (valmode, 32);
9002
9003 return gen_rtx_REG (valmode, 28);
9004 }
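/* A sketch of the cases above: on PA64 a 12-byte aggregate is returned
   left justified in GR 28 and GR 29, i.e. the loop builds

     (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                    (expr_list (reg:DI 29) (const_int 8))])

   while a 'double' (with hardware FP) comes back in (reg:DF 32), fr4,
   and an 'int' is promoted to word_mode in (reg 28).  */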
9005
9006 /* Return the location of a parameter that is passed in a register or NULL
9007 if the parameter has any component that is passed in memory.
9008
9009 This is new code and will be pushed into the net sources after
9010 further testing.
9011
9012 ??? We might want to restructure this so that it looks more like other
9013 ports. */
9014 rtx
9015 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9016 int named ATTRIBUTE_UNUSED)
9017 {
9018 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9019 int alignment = 0;
9020 int arg_size;
9021 int fpr_reg_base;
9022 int gpr_reg_base;
9023 rtx retval;
9024
9025 if (mode == VOIDmode)
9026 return NULL_RTX;
9027
9028 arg_size = FUNCTION_ARG_SIZE (mode, type);
9029
9030 /* If this arg would be passed partially or totally on the stack, then
9031 this routine should return zero. FUNCTION_ARG_PARTIAL_NREGS will
9032 handle arguments which are split between regs and stack slots if
9033 the ABI mandates split arguments. */
9034 if (! TARGET_64BIT)
9035 {
9036 /* The 32-bit ABI does not split arguments. */
9037 if (cum->words + arg_size > max_arg_words)
9038 return NULL_RTX;
9039 }
9040 else
9041 {
9042 if (arg_size > 1)
9043 alignment = cum->words & 1;
9044 if (cum->words + alignment >= max_arg_words)
9045 return NULL_RTX;
9046 }
9047
9048 /* The 32-bit and 64-bit ABIs are rather different,
9049 particularly in their handling of FP registers. We might
9050 be able to cleverly share code between them, but I'm not
9051 going to bother in the hope that splitting them up results
9052 in code that is more easily understood. */
9053
9054 if (TARGET_64BIT)
9055 {
9056 /* Advance the base registers to their current locations.
9057
9058 Remember, gprs grow towards smaller register numbers while
9059 fprs grow towards higher register numbers. Also remember that
9060 although FP regs are 32-bit addressable, we pretend that
9061 the registers are 64 bits wide.
9062 gpr_reg_base = 26 - cum->words;
9063 fpr_reg_base = 32 + cum->words;
9064
9065 /* Arguments wider than one word and small aggregates need special
9066 treatment. */
9067 if (arg_size > 1
9068 || mode == BLKmode
9069 || (type && AGGREGATE_TYPE_P (type)))
9070 {
9071 /* Double-extended precision (80-bit), quad-precision (128-bit)
9072 and aggregates including complex numbers are aligned on
9073 128-bit boundaries. The first eight 64-bit argument slots
9074 are associated one-to-one with general registers r26
9075 through r19, and also with floating-point registers fr4
9076 through fr11. Arguments larger than one word are always
9077 passed in general registers.
9078
9079 Using a PARALLEL with a word mode register results in left
9080 justified data on a big-endian target. */
9081
9082 rtx loc[8];
9083 int i, offset = 0, ub = arg_size;
9084
9085 /* Align the base register. */
9086 gpr_reg_base -= alignment;
9087
9088 ub = MIN (ub, max_arg_words - cum->words - alignment);
9089 for (i = 0; i < ub; i++)
9090 {
9091 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9092 gen_rtx_REG (DImode, gpr_reg_base),
9093 GEN_INT (offset));
9094 gpr_reg_base -= 1;
9095 offset += 8;
9096 }
9097
9098 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9099 }
9100 }
9101 else
9102 {
9103 /* If the argument is larger than a word, then we know precisely
9104 which registers we must use. */
9105 if (arg_size > 1)
9106 {
9107 if (cum->words)
9108 {
9109 gpr_reg_base = 23;
9110 fpr_reg_base = 38;
9111 }
9112 else
9113 {
9114 gpr_reg_base = 25;
9115 fpr_reg_base = 34;
9116 }
9117
9118 /* Structures 5 to 8 bytes in size are passed in the general
9119 registers in the same manner as other non-floating-point
9120 objects. The data is right-justified and zero-extended
9121 to 64 bits. This is the opposite of the normal justification
9122 used on big-endian targets and requires special treatment.
9123 We now define BLOCK_REG_PADDING to pad these objects. */
9124 if (mode == BLKmode)
9125 {
9126 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9127 gen_rtx_REG (DImode, gpr_reg_base),
9128 const0_rtx);
9129 return gen_rtx_PARALLEL (mode, gen_rtvec (1, loc));
9130 }
9131 }
9132 else
9133 {
9134 /* We have a single word (32 bits). A simple computation
9135 will get us the register numbers we need. */
9136 gpr_reg_base = 26 - cum->words;
9137 fpr_reg_base = 32 + 2 * cum->words;
9138 }
9139 }
9140
9141 /* Determine if the argument needs to be passed in both general and
9142 floating point registers. */
9143 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9144 /* If we are doing soft-float with portable runtime, then there
9145 is no need to worry about FP regs. */
9146 && !TARGET_SOFT_FLOAT
9147 /* The parameter must be some kind of float, else we can just
9148 pass it in integer registers. */
9149 && FLOAT_MODE_P (mode)
9150 /* The target function must not have a prototype. */
9151 && cum->nargs_prototype <= 0
9152 /* libcalls do not need to pass items in both FP and general
9153 registers. */
9154 && type != NULL_TREE
9155 /* All this hair applies to "outgoing" args only. This includes
9156 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9157 && !cum->incoming)
9158 /* Also pass outgoing floating arguments in both registers in indirect
9159 calls with the 32-bit ABI and the HP assembler, since there is no
9160 way to specify the argument locations in static functions. */
9161 || (!TARGET_64BIT
9162 && !TARGET_GAS
9163 && !cum->incoming
9164 && cum->indirect
9165 && FLOAT_MODE_P (mode)))
9166 {
9167 retval
9168 = gen_rtx_PARALLEL
9169 (mode,
9170 gen_rtvec (2,
9171 gen_rtx_EXPR_LIST (VOIDmode,
9172 gen_rtx_REG (mode, fpr_reg_base),
9173 const0_rtx),
9174 gen_rtx_EXPR_LIST (VOIDmode,
9175 gen_rtx_REG (mode, gpr_reg_base),
9176 const0_rtx)));
9177 }
9178 else
9179 {
9180 /* See if we should pass this parameter in a general register. */
9181 if (TARGET_SOFT_FLOAT
9182 /* Indirect calls in the normal 32-bit ABI require all arguments
9183 to be passed in general registers. */
9184 || (!TARGET_PORTABLE_RUNTIME
9185 && !TARGET_64BIT
9186 && !TARGET_ELF32
9187 && cum->indirect)
9188 /* If the parameter is not a floating point parameter, then
9189 it belongs in GPRs. */
9190 || !FLOAT_MODE_P (mode))
9191 retval = gen_rtx_REG (mode, gpr_reg_base);
9192 else
9193 retval = gen_rtx_REG (mode, fpr_reg_base);
9194 }
9195 return retval;
9196 }
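/* A sketch of the both-registers case above: a 'double' passed to an
   unprototyped function is described as

     (parallel:DF [(expr_list (reg:DF fpr_reg_base) (const_int 0))
                   (expr_list (reg:DF gpr_reg_base) (const_int 0))])

   so the caller stores the value in both a floating and a general
   register, and the callee, whose prototype we could not see, may load
   it from whichever register class it actually expects.  */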
9197
9198
9199 /* If this arg would be passed totally in registers or totally on the stack,
9200 then this routine should return zero. It is currently called only for
9201 the 64-bit target. */
9202 int
9203 function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9204 tree type, int named ATTRIBUTE_UNUSED)
9205 {
9206 unsigned int max_arg_words = 8;
9207 unsigned int offset = 0;
9208
9209 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9210 offset = 1;
9211
9212 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9213 /* Arg fits fully into registers. */
9214 return 0;
9215 else if (cum->words + offset >= max_arg_words)
9216 /* Arg fully on the stack. */
9217 return 0;
9218 else
9219 /* Arg is split. */
9220 return max_arg_words - cum->words - offset;
9221 }
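/* Worked example (a sketch for the 64-bit target): a 24-byte aggregate
   (three words) arriving when cum->words == 6 needs no alignment pad
   since 6 is even; 6 + 0 + 3 > 8 while 6 + 0 < 8, so the argument is
   split and we return 8 - 6 - 0 = 2, i.e. two words in r20 and r19 and
   the final word on the stack.  */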
9222
9223
9224 /* Return 1 if OP is a comparison operator valid in a cmpib insn (note
9225 that GEU and LTU are omitted). This allows the use of MATCH_OPERATOR to recognize all the branch insns. */
9226
9227 int
9228 cmpib_comparison_operator (rtx op, enum machine_mode mode)
9229 {
9230 return ((mode == VOIDmode || GET_MODE (op) == mode)
9231 && (GET_CODE (op) == EQ
9232 || GET_CODE (op) == NE
9233 || GET_CODE (op) == GT
9234 || GET_CODE (op) == GTU
9235 || GET_CODE (op) == GE
9236 || GET_CODE (op) == LT
9237 || GET_CODE (op) == LE
9238 || GET_CODE (op) == LEU));
9239 }
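/* A sketch of how the predicate is typically used in the machine
   description (not an exact pattern from pa.md):

     (match_operator 0 "cmpib_comparison_operator"
       [(match_operand:SI 1 "register_operand" "")
        (match_operand:SI 2 "register_operand" "")])  */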
9240
9241 #ifndef ONE_ONLY_TEXT_SECTION_ASM_OP
9242 #define ONE_ONLY_TEXT_SECTION_ASM_OP ""
9243 #endif
9244
9245 #ifndef NEW_TEXT_SECTION_ASM_OP
9246 #define NEW_TEXT_SECTION_ASM_OP ""
9247 #endif
9248
9249 #ifndef DEFAULT_TEXT_SECTION_ASM_OP
9250 #define DEFAULT_TEXT_SECTION_ASM_OP ""
9251 #endif
9252
9253 /* Select and return a TEXT_SECTION_ASM_OP for the current function.
9254
9255 This function is only used with SOM. Because we don't support
9256 named subspaces, we can only create a new subspace or switch back
9257 into the default text subspace. */
9258 const char *
9259 som_text_section_asm_op (void)
9260 {
9261 if (TARGET_SOM && TARGET_GAS)
9262 {
9263 if (cfun && !cfun->machine->in_nsubspa)
9264 {
9265 /* We only want to emit a .nsubspa directive once at the
9266 start of the function. */
9267 cfun->machine->in_nsubspa = 1;
9268
9269 /* Create a new subspace for the text. This provides
9270 better stub placement and one-only functions. */
9271 if (cfun->decl
9272 && DECL_ONE_ONLY (cfun->decl)
9273 && !DECL_WEAK (cfun->decl))
9274 return ONE_ONLY_TEXT_SECTION_ASM_OP;
9275
9276 return NEW_TEXT_SECTION_ASM_OP;
9277 }
9278 else
9279 {
9280 /* Either there isn't a current function, or the body of the
9281 current function has been completed. So, we are changing to
9282 the text section to output debugging information. Do this in
9283 the default text section. We need to forget that we are in
9284 the text section so that text_section will call us the next
9285 time around. */
9286 forget_section ();
9287 }
9288 }
9289
9290 return DEFAULT_TEXT_SECTION_ASM_OP;
9291 }
9292
9293 /* On hpux10, the linker will give an error if we have a reference
9294 in the read-only data section to a symbol defined in a shared
9295 library. Therefore, expressions that might require a reloc cannot
9296 be placed in the read-only data section. */
9297
9298 static void
9299 pa_select_section (tree exp, int reloc,
9300 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9301 {
9302 if (TREE_CODE (exp) == VAR_DECL
9303 && TREE_READONLY (exp)
9304 && !TREE_THIS_VOLATILE (exp)
9305 && DECL_INITIAL (exp)
9306 && (DECL_INITIAL (exp) == error_mark_node
9307 || TREE_CONSTANT (DECL_INITIAL (exp)))
9308 && !reloc)
9309 {
9310 if (TARGET_SOM
9311 && DECL_ONE_ONLY (exp)
9312 && !DECL_WEAK (exp))
9313 one_only_readonly_data_section ();
9314 else
9315 readonly_data_section ();
9316 }
9317 else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
9318 && !reloc)
9319 readonly_data_section ();
9320 else if (TARGET_SOM
9321 && TREE_CODE (exp) == VAR_DECL
9322 && DECL_ONE_ONLY (exp)
9323 && !DECL_WEAK (exp)
9324 && DECL_INITIAL (exp))
9325 one_only_data_section ();
9326 else
9327 data_section ();
9328 }
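/* For example (a hypothetical sketch, assuming no relocs are needed for
   the first object):

     const int answer = 42;     - constant initializer, no reloc, so it
                                  is placed in the read-only section
     extern int x;
     int *const p = &x;         - TREE_READONLY, but the initializer
                                  needs a reloc, so it goes to the
                                  writable data section instead  */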
9329
9330 static void
9331 pa_globalize_label (FILE *stream, const char *name)
9332 {
9333 /* We only handle DATA objects here; functions are globalized in
9334 ASM_DECLARE_FUNCTION_NAME. */
9335 if (! FUNCTION_NAME_P (name))
9336 {
9337 fputs ("\t.EXPORT ", stream);
9338 assemble_name (stream, name);
9339 fputs (",DATA\n", stream);
9340 }
9341 }
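/* For example, globalizing a data symbol 'foo' emits

       .EXPORT foo,DATA

   whereas functions get their .EXPORT from ASM_DECLARE_FUNCTION_NAME.  */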
9342
9343 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9344
9345 static rtx
9346 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9347 int incoming ATTRIBUTE_UNUSED)
9348 {
9349 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9350 }
9351
9352 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9353
9354 bool
9355 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9356 {
9357 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9358 PA64 ABI says that objects larger than 128 bits are returned in memory.
9359 Note that int_size_in_bytes can return -1 if the size of the object is
9360 variable or larger than the maximum value that can be expressed as
9361 a HOST_WIDE_INT. It can also return zero for an empty type. The
9362 simplest way to handle variable and empty types is to pass them in
9363 memory. This avoids problems in defining the boundaries of argument
9364 slots, allocating registers, etc. */
9365 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9366 || int_size_in_bytes (type) <= 0);
9367 }
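/* For example, a 12-byte structure is returned in memory on 32-bit
   targets (12 > 8) but in registers on PA64 (12 <= 16), while a
   variable-sized object (int_size_in_bytes == -1) or an empty type
   (size 0) is always returned in memory.  */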
9368
9369 #include "gt-pa.h"