/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hook_int_void_1

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}


#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static int shadd_constant_p (int);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (const char *)
     ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu;

/* String to hold which cpu we are scheduling for.  */
const char *pa_cpu_string;

/* Which architecture we are generating code for.  */
enum architecture_type pa_arch;

/* String to hold which architecture we are generating code for.  */
const char *pa_arch_string;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  const char *name;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

\f
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END output_deferred_plabels

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

struct gcc_target targetm = TARGET_INITIALIZER;
\f
void
override_options (void)
{
  if (pa_cpu_string == NULL)
    pa_cpu_string = TARGET_SCHED_DEFAULT;

  if (! strcmp (pa_cpu_string, "8000"))
    {
      pa_cpu_string = "8000";
      pa_cpu = PROCESSOR_8000;
    }
  else if (! strcmp (pa_cpu_string, "7100"))
    {
      pa_cpu_string = "7100";
      pa_cpu = PROCESSOR_7100;
    }
  else if (! strcmp (pa_cpu_string, "700"))
    {
      pa_cpu_string = "700";
      pa_cpu = PROCESSOR_700;
    }
  else if (! strcmp (pa_cpu_string, "7100LC"))
    {
      pa_cpu_string = "7100LC";
      pa_cpu = PROCESSOR_7100LC;
    }
  else if (! strcmp (pa_cpu_string, "7200"))
    {
      pa_cpu_string = "7200";
      pa_cpu = PROCESSOR_7200;
    }
  else if (! strcmp (pa_cpu_string, "7300"))
    {
      pa_cpu_string = "7300";
      pa_cpu = PROCESSOR_7300;
    }
  else
    {
      warning ("unknown -mschedule= option (%s).\nValid options are 700, 7100, 7100LC, 7200, 7300, and 8000\n", pa_cpu_string);
    }

  /* Set the instruction set architecture.  */
  if (pa_arch_string && ! strcmp (pa_arch_string, "1.0"))
    {
      pa_arch_string = "1.0";
      pa_arch = ARCHITECTURE_10;
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "1.1"))
    {
      pa_arch_string = "1.1";
      pa_arch = ARCHITECTURE_11;
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
    }
  else if (pa_arch_string && ! strcmp (pa_arch_string, "2.0"))
    {
      pa_arch_string = "2.0";
      pa_arch = ARCHITECTURE_20;
      target_flags |= MASK_PA_11 | MASK_PA_20;
    }
  else if (pa_arch_string)
    {
      warning ("unknown -march= option (%s).\nValid options are 1.0, 1.1, and 2.0\n", pa_arch_string);
    }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning ("PIC code generation is not supported in the portable runtime model\n");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning ("PIC code generation is not compatible with fast indirect calls\n");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning ("-g is only supported when using GAS on this processor,");
      warning ("-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64-bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
#endif
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return nonzero only if OP is a register of mode MODE,
   or CONST0_RTX.  */
int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  return (op == CONST0_RTX (mode) || register_operand (op, mode));
}

/* Return nonzero if OP is suitable for use in a call to a named
   function.

   For 2.5 try to eliminate either call_operand_address or
   function_label_operand; they perform very similar functions.  */
int
call_operand_address (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_MODE (op) == word_mode
          && CONSTANT_P (op) && ! TARGET_PORTABLE_RUNTIME);
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (op, 0)) == LABEL_REF)
              && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
          || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
}

/* Return 1 if the operand is either a register, zero, or a memory operand
   that is not symbolic.  */

int
reg_or_0_or_nonsymb_mem_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (op == CONST0_RTX (mode))
    return 1;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return (!symbolic_memory_operand (op, mode)
          && memory_address_p (mode, XEXP (op, 0)));
}

/* Return 1 if the operand is a register operand or a non-symbolic memory
   operand after reload.  This predicate is used for branch patterns that
   internally handle register reloading.  We need to accept non-symbolic
   memory operands after reload to ensure that the pattern is still valid
   if reload didn't find a hard register for the operand.  */

int
reg_before_reload_operand (rtx op, enum machine_mode mode)
{
  /* Don't accept a SUBREG since it will need a reload.  */
  if (GET_CODE (op) == SUBREG)
    return 0;

  if (register_operand (op, mode))
    return 1;

  if (reload_completed
      && memory_operand (op, mode)
      && !symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
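  /* 'J', 'N' and 'K' are PA constraint letters; as used in this port they
     are believed to select a signed 14-bit immediate (ldo), an ldil-style
     constant, and a zdepi operand respectively -- CONST_OK_FOR_LETTER_P
     in pa.h is the authoritative definition.  */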
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}

/* Return 1 iff OP is an indexed memory operand.  */
int
indexed_memory_operand (rtx op, enum machine_mode mode)
{
  if (GET_MODE (op) != mode)
    return 0;

  /* Before reload, a (SUBREG (MEM...)) forces reloading into a register.  */
  if (reload_completed && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op) && IS_INDEX_ADDR_P (op));
}

/* Accept anything that can be used as a destination operand for a
   move instruction.  We don't accept indexed memory operands since
   they are supported only for floating point stores.  */
int
move_dest_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM || symbolic_memory_operand (op, mode))
    return 0;

  op = XEXP (op, 0);

  return (memory_address_p (mode, op)
          && !IS_INDEX_ADDR_P (op)
          && !IS_LO_SUM_DLT_ADDR_P (op));
}

/* Accept anything that can be used as a source operand for a move
   instruction.  */
int
move_src_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (GET_CODE (op) == CONST_INT)
    return cint_ok_for_move (INTVAL (op));

  if (GET_MODE (op) != mode)
    return 0;

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating move insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return memory_address_p (mode, XEXP (op, 0));
}

/* Accept anything that can be used as the source operand for a prefetch
   instruction.  */
int
prefetch_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != MEM)
    return 0;

  /* Until problems with management of the REG_POINTER flag are resolved,
     we need to delay creating prefetch insns with unscaled indexed addresses
     until CSE is not expected.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (XEXP (op, 0)) == PLUS
      && REG_P (XEXP (XEXP (op, 0), 0))
      && REG_P (XEXP (XEXP (op, 0), 1)))
    return 0;

  return memory_address_p (mode, XEXP (op, 0));
}

/* Accept REG and any CONST_INT that can be moved in one instruction into a
   general register.  */
int
reg_or_cint_move_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  return (GET_CODE (op) == CONST_INT && cint_ok_for_move (INTVAL (op)));
}

int
pic_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (!flag_pic)
    return 0;

  switch (GET_CODE (op))
    {
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return (GET_CODE (XEXP (op, 0)) == LABEL_REF
              && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}

\f

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 14-bit signed integers.  */
int
arith_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && INT_14_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in a
   three operand arithmetic insn that accepts registers of mode MODE
   or 11-bit signed integers.  */
int
arith11_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && INT_11_BITS (op)));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* A constant integer suitable for use in a PRE_MODIFY memory
   reference.  */
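/* That is, -0x2000 <= D <= 0xf: the full 14-bit negative range, but only
   a small positive one.  */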
int
pre_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) >= -0x2000 && INTVAL (op) < 0x10);
}

/* A constant integer suitable for use in a POST_MODIFY memory
   reference.  */
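/* The mirror image of the PRE_MODIFY range: -0x10 <= D <= 0x1fff.  */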
int
post_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) < 0x2000 && INTVAL (op) >= -0x10);
}

int
arith_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_DOUBLE
              && GET_MODE (op) == mode
              && VAL_14_BITS_P (CONST_DOUBLE_LOW (op))
              && ((CONST_DOUBLE_HIGH (op) >= 0)
                  == ((CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns, or
   is an integer register.  */

int
ireg_or_int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT && INT_5_BITS (op))
          || (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32));
}

/* Return nonzero if OP is an integer register, else return zero.  */
int
ireg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == REG && REGNO (op) > 0 && REGNO (op) < 32);
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in three-address insns.  */

int
int5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_5_BITS (op));
}

int
uint5_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_U5_BITS (op));
}

int
int11_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && INT_11_BITS (op));
}

int
uint32_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
          && (INTVAL (op) >= 0 && INTVAL (op) < (HOST_WIDE_INT) 1 << 32));
#else
  return (GET_CODE (op) == CONST_INT
          || (GET_CODE (op) == CONST_DOUBLE
              && CONST_DOUBLE_HIGH (op) == 0));
#endif
}

int
arith5_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || int5_operand (op, mode);
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
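/* For example, 0xf0 (1111 0000) is accepted: lsb_mask is 0x10, so
   t = ((0x0f + 0x10) & ~0x0f) = 0x10, a power of two; 0xf0 is -16
   sign-extended to an 8-bit field.  0x21 (10 0001) is rejected:
   there t = 0x03, which is not a power of two.  */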
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
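/* For example, 0xffff00ff (1..10..01..1) is accepted: ~mask is
   0x0000ff00, and adding its low bit (0x100) yields 0x10000, a power of
   two.  A mask with two separate zero runs, such as 0xff00ff00, is
   rejected.  */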
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & OP).  */
int
and_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && and_mask_p (INTVAL (op))));
}

/* True iff depi can be used to compute (reg | MASK).  */
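/* Accepts masks that are a single contiguous run of 1 bits, e.g.
   0x0000ffe0 (0xffe0 + 0x20 = 0x10000, a power of two) but not 0x5.  */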
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | OP).  */
int
ior_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && ior_mask_p (INTVAL (op)));
}

int
lhs_lshift_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || lhs_lshift_cint_operand (op, mode);
}

/* True iff OP is a CONST_INT of the forms 0...0xxxx or 0...01...1xxxx.
   Such values can be the left hand side x in (x << r), using the zvdepi
   instruction.  */
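/* For example, 0x3f7 qualifies: 0x3f7 >> 4 is 0x3f, and 0x3f & 0x40 is 0.
   0x57 does not: 0x5 & 0x6 is nonzero.  Only the low four bits may be
   arbitrary.  */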
int
lhs_lshift_cint_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT x;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  x = INTVAL (op) >> 4;
  return (x & (x + 1)) == 0;
}

int
arith32_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || GET_CODE (op) == CONST_INT;
}

int
pc_or_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == PC || GET_CODE (op) == LABEL_REF);
}
\f
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */
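/* For a SYMBOL_REF, the sequence emitted below amounts, roughly, to

	addil	LT'sym,%r19		; tmp = %r19 + high part of DLT slot
	ldw	RT'sym(%r1),reg		; reg = the slot, i.e. &sym

   on 32-bit targets: the symbol's address is loaded from its linkage
   table entry, addressed relative to the PIC register.  (Sketch only;
   the exact mnemonics depend on the object format and assembler.)  */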

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      if (reg == 0)
        abort ();

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
                      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
        = gen_rtx_MEM (Pmode,
                       gen_rtx_LO_SUM (Pmode, tmp_reg,
                                       gen_rtx_UNSPEC (Pmode,
                                                       gen_rtvec (1, orig),
                                                       UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      MEM_NOTRAP_P (pic_ref) = 1;
      RTX_UNCHANGING_P (pic_ref) = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      if (reg == 0)
        abort ();

      if (GET_CODE (XEXP (orig, 0)) == PLUS)
        {
          base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
          orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                         base == reg ? 0 : reg);
        }
      else
        abort ();

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
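/* A worked instance of the transform above, for a word reference where
   the mask is 0x3fff: for memory(X + 0x12345), offset & mask is 0x2345,
   which is >= 0x2000, so we round up to Y = 0x14000, compute Z = X + Y
   once, and address the slot as memory(Z + (0x12345 - 0x14000)), i.e.
   Z - 0x1cbb, whose displacement fits in 14 bits.  Other references
   near X + 0x12345 can then share Z.  */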

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine cannot
         handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try to figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          orig_base = XEXP (XEXP (x, 0), 1);
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          orig_base = XEXP (x, 1);
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try to rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const,
             then divide it and add the quotient to (reg).  This allows more
             scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
    abort ();

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      if (no_new_pseudos)
        abort ();

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
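  /* For instance, an SFmode access at (reg) + 40000 fits neither
     displacement format, so under this scheme the address is first
     computed into SCRATCH_REG and the access is then rewritten as
     (mem (scratch_reg)).  */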
1585 if (scratch_reg
1586 && fp_reg_operand (operand0, mode)
1587 && ((GET_CODE (operand1) == MEM
1588 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1589 XEXP (operand1, 0)))
1590 || ((GET_CODE (operand1) == SUBREG
1591 && GET_CODE (XEXP (operand1, 0)) == MEM
1592 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1593 ? SFmode : DFmode),
1594 XEXP (XEXP (operand1, 0), 0))))))
1595 {
1596 if (GET_CODE (operand1) == SUBREG)
1597 operand1 = XEXP (operand1, 0);
1598
1599 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1600 it in WORD_MODE regardless of what mode it was originally given
1601 to us. */
1602 scratch_reg = force_mode (word_mode, scratch_reg);
1603
1604 /* D might not fit in 14 bits either; for such cases load D into
1605 scratch reg. */
1606 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1607 {
1608 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1609 emit_move_insn (scratch_reg,
1610 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1611 Pmode,
1612 XEXP (XEXP (operand1, 0), 0),
1613 scratch_reg));
1614 }
1615 else
1616 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1617 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1618 gen_rtx_MEM (mode, scratch_reg)));
1619 return 1;
1620 }
1621 else if (scratch_reg
1622 && fp_reg_operand (operand1, mode)
1623 && ((GET_CODE (operand0) == MEM
1624 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1625 ? SFmode : DFmode),
1626 XEXP (operand0, 0)))
1627 || ((GET_CODE (operand0) == SUBREG)
1628 && GET_CODE (XEXP (operand0, 0)) == MEM
1629 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1630 ? SFmode : DFmode),
1631 XEXP (XEXP (operand0, 0), 0)))))
1632 {
1633 if (GET_CODE (operand0) == SUBREG)
1634 operand0 = XEXP (operand0, 0);
1635
1636 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1637 it in WORD_MODE regardless of what mode it was originally given
1638 to us. */
1639 scratch_reg = force_mode (word_mode, scratch_reg);
1640
1641 /* D might not fit in 14 bits either; for such cases load D into
1642 scratch reg. */
1643 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1644 {
1645 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1646 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1647 0)),
1648 Pmode,
1649 XEXP (XEXP (operand0, 0),
1650 0),
1651 scratch_reg));
1652 }
1653 else
1654 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1655 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
1656 operand1));
1657 return 1;
1658 }
1659 /* Handle secondary reloads for loads of FP registers from constant
1660 expressions by forcing the constant into memory.
1661
1662 Use scratch_reg to hold the address of the memory location.
1663
1664 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1665 NO_REGS when presented with a const_int and a register class
1666 containing only FP registers. Doing so unfortunately creates
1667 more problems than it solves. Fix this for 2.5. */
1668 else if (scratch_reg
1669 && CONSTANT_P (operand1)
1670 && fp_reg_operand (operand0, mode))
1671 {
1672 rtx xoperands[2];
1673
1674 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1675 it in WORD_MODE regardless of what mode it was originally given
1676 to us. */
1677 scratch_reg = force_mode (word_mode, scratch_reg);
1678
1679 /* Force the constant into memory and put the address of the
1680 memory location into scratch_reg. */
1681 xoperands[0] = scratch_reg;
1682 xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
1683 emit_move_sequence (xoperands, Pmode, 0);
1684
1685 /* Now load the destination register. */
1686 emit_insn (gen_rtx_SET (mode, operand0,
1687 gen_rtx_MEM (mode, scratch_reg)));
1688 return 1;
1689 }
1690 /* Handle secondary reloads for SAR. These occur when trying to load
1691 the SAR from memory, FP register, or with a constant. */
1692 else if (scratch_reg
1693 && GET_CODE (operand0) == REG
1694 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1695 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1696 && (GET_CODE (operand1) == MEM
1697 || GET_CODE (operand1) == CONST_INT
1698 || (GET_CODE (operand1) == REG
1699 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1700 {
1701 /* D might not fit in 14 bits either; for such cases load D into
1702 scratch reg. */
1703 if (GET_CODE (operand1) == MEM
1704 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1705 {
1706 /* We are reloading the address into the scratch register, so we
1707 want to make sure the scratch register is a full register. */
1708 scratch_reg = force_mode (word_mode, scratch_reg);
1709
1710 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1711 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1712 0)),
1713 Pmode,
1714 XEXP (XEXP (operand1, 0),
1715 0),
1716 scratch_reg));
1717
1718 /* Now we are going to load the scratch register from memory,
1719 we want to load it in the same width as the original MEM,
1720 which must be the same as the width of the ultimate destination,
1721 OPERAND0. */
1722 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1723
1724 emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),
1725 scratch_reg));
1726 }
1727 else
1728 {
1729 /* We want to load the scratch register using the same mode as
1730 the ultimate destination. */
1731 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1732
1733 emit_move_insn (scratch_reg, operand1);
1734 }
1735
1736 /* And emit the insn to set the ultimate destination. We know that
1737 the scratch register has the same mode as the destination at this
1738 point. */
1739 emit_move_insn (operand0, scratch_reg);
1740 return 1;
1741 }
1742 /* Handle the most common case: storing into a register. */
1743 else if (register_operand (operand0, mode))
1744 {
1745 if (register_operand (operand1, mode)
1746 || (GET_CODE (operand1) == CONST_INT
1747 && cint_ok_for_move (INTVAL (operand1)))
1748 || (operand1 == CONST0_RTX (mode))
1749 || (GET_CODE (operand1) == HIGH
1750 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1751 /* Only `general_operands' can come here, so MEM is ok. */
1752 || GET_CODE (operand1) == MEM)
1753 {
1754 /* Various sets are created during RTL generation which don't
1755 have the REG_POINTER flag correctly set. After the CSE pass,
1756 instruction recognition can fail if we don't consistently
1757 set this flag when performing register copies. This should
1758 also improve the opportunities for creating insns that use
1759 unscaled indexing. */
1760 if (REG_P (operand0) && REG_P (operand1))
1761 {
1762 if (REG_POINTER (operand1)
1763 && !REG_POINTER (operand0)
1764 && !HARD_REGISTER_P (operand0))
1765 copy_reg_pointer (operand0, operand1);
1766 else if (REG_POINTER (operand0)
1767 && !REG_POINTER (operand1)
1768 && !HARD_REGISTER_P (operand1))
1769 copy_reg_pointer (operand1, operand0);
1770 }
1771
1772 /* When MEMs are broken out, the REG_POINTER flag doesn't
1773 get set. In some cases, we can set the REG_POINTER flag
1774 from the declaration for the MEM. */
1775 if (REG_P (operand0)
1776 && GET_CODE (operand1) == MEM
1777 && !REG_POINTER (operand0))
1778 {
1779 tree decl = MEM_EXPR (operand1);
1780
1781 /* Set the register pointer flag and register alignment
1782 if the declaration for this memory reference is a
1783 pointer type. Fortran indirect argument references
1784 are ignored. */
1785 if (decl
1786 && !(flag_argument_noalias > 1
1787 && TREE_CODE (decl) == INDIRECT_REF
1788 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1789 {
1790 tree type;
1791
1792 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1793 tree operand 1. */
1794 if (TREE_CODE (decl) == COMPONENT_REF)
1795 decl = TREE_OPERAND (decl, 1);
1796
1797 type = TREE_TYPE (decl);
1798 if (TREE_CODE (type) == ARRAY_TYPE)
1799 type = get_inner_array_type (type);
1800
1801 if (POINTER_TYPE_P (type))
1802 {
1803 int align;
1804
1805 type = TREE_TYPE (type);
1806 /* Using TYPE_ALIGN_OK is rather conservative as
1807 only the Ada front end actually sets it. */
1808 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1809 : BITS_PER_UNIT);
1810 mark_reg_pointer (operand0, align);
1811 }
1812 }
1813 }
1814
1815 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1816 return 1;
1817 }
1818 }
1819 else if (GET_CODE (operand0) == MEM)
1820 {
1821 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1822 && !(reload_in_progress || reload_completed))
1823 {
1824 rtx temp = gen_reg_rtx (DFmode);
1825
1826 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1827 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1828 return 1;
1829 }
1830 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1831 {
1832 /* Run this case quickly. */
1833 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1834 return 1;
1835 }
1836 if (! (reload_in_progress || reload_completed))
1837 {
1838 operands[0] = validize_mem (operand0);
1839 operands[1] = operand1 = force_reg (mode, operand1);
1840 }
1841 }
1842
1843 /* Simplify the source if we need to.
1844 Note we do have to handle function labels here, even though we do
1845 not consider them legitimate constants. Loop optimizations can
1846 call emit_move_xxx with one as a source. */
1847 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1848 || function_label_operand (operand1, mode)
1849 || (GET_CODE (operand1) == HIGH
1850 && symbolic_operand (XEXP (operand1, 0), mode)))
1851 {
1852 int ishighonly = 0;
1853
1854 if (GET_CODE (operand1) == HIGH)
1855 {
1856 ishighonly = 1;
1857 operand1 = XEXP (operand1, 0);
1858 }
1859 if (symbolic_operand (operand1, mode))
1860 {
1861 /* Argh. The assembler and linker can't handle arithmetic
1862 involving plabels.
1863
1864 So we force the plabel into memory, load operand0 from
1865 the memory location, then add in the constant part. */
1866 if ((GET_CODE (operand1) == CONST
1867 && GET_CODE (XEXP (operand1, 0)) == PLUS
1868 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1869 || function_label_operand (operand1, mode))
1870 {
1871 rtx temp, const_part;
1872
1873 /* Figure out what (if any) scratch register to use. */
1874 if (reload_in_progress || reload_completed)
1875 {
1876 scratch_reg = scratch_reg ? scratch_reg : operand0;
1877 /* SCRATCH_REG will hold an address and maybe the actual
1878 data. We want it in WORD_MODE regardless of what mode it
1879 was originally given to us. */
1880 scratch_reg = force_mode (word_mode, scratch_reg);
1881 }
1882 else if (flag_pic)
1883 scratch_reg = gen_reg_rtx (Pmode);
1884
1885 if (GET_CODE (operand1) == CONST)
1886 {
1887 /* Save away the constant part of the expression. */
1888 const_part = XEXP (XEXP (operand1, 0), 1);
1889 if (GET_CODE (const_part) != CONST_INT)
1890 abort ();
1891
1892 /* Force the function label into memory. */
1893 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1894 }
1895 else
1896 {
1897 /* No constant part. */
1898 const_part = NULL_RTX;
1899
1900 /* Force the function label into memory. */
1901 temp = force_const_mem (mode, operand1);
1902 }
1903
1904
1905 /* Get the address of the memory location. PIC-ify it if
1906 necessary. */
1907 temp = XEXP (temp, 0);
1908 if (flag_pic)
1909 temp = legitimize_pic_address (temp, mode, scratch_reg);
1910
1911 /* Put the address of the memory location into our destination
1912 register. */
1913 operands[1] = temp;
1914 emit_move_sequence (operands, mode, scratch_reg);
1915
1916 /* Now load from the memory location into our destination
1917 register. */
1918 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1919 emit_move_sequence (operands, mode, scratch_reg);
1920
1921 /* And add back in the constant part. */
1922 if (const_part != NULL_RTX)
1923 expand_inc (operand0, const_part);
1924
1925 return 1;
1926 }
1927
1928 if (flag_pic)
1929 {
1930 rtx temp;
1931
1932 if (reload_in_progress || reload_completed)
1933 {
1934 temp = scratch_reg ? scratch_reg : operand0;
1935 /* TEMP will hold an address and maybe the actual
1936 data. We want it in WORD_MODE regardless of what mode it
1937 was originally given to us. */
1938 temp = force_mode (word_mode, temp);
1939 }
1940 else
1941 temp = gen_reg_rtx (Pmode);
1942
1943 /* (const (plus (symbol) (const_int))) must be forced to
1944 memory during/after reload if the const_int will not fit
1945 in 14 bits. */
1946 if (GET_CODE (operand1) == CONST
1947 && GET_CODE (XEXP (operand1, 0)) == PLUS
1948 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1949 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1950 && (reload_completed || reload_in_progress)
1951 && flag_pic)
1952 {
1953 operands[1] = force_const_mem (mode, operand1);
1954 operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
1955 mode, temp);
1956 operands[1] = gen_rtx_MEM (mode, operands[1]);
1957 emit_move_sequence (operands, mode, temp);
1958 }
1959 else
1960 {
1961 operands[1] = legitimize_pic_address (operand1, mode, temp);
1962 if (REG_P (operand0) && REG_P (operands[1]))
1963 copy_reg_pointer (operand0, operands[1]);
1964 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1965 }
1966 }
1967 /* On the HPPA, references to data space are supposed to use dp,
1968 register 27, but showing it in the RTL inhibits various cse
1969 and loop optimizations. */
1970 else
1971 {
1972 rtx temp, set;
1973
1974 if (reload_in_progress || reload_completed)
1975 {
1976 temp = scratch_reg ? scratch_reg : operand0;
1977 /* TEMP will hold an address and maybe the actual
1978 data. We want it in WORD_MODE regardless of what mode it
1979 was originally given to us. */
1980 temp = force_mode (word_mode, temp);
1981 }
1982 else
1983 temp = gen_reg_rtx (mode);
1984
1985 /* Loading a SYMBOL_REF into a register makes that register
1986 safe to be used as the base in an indexed address.
1987
1988 Don't mark hard registers though. That loses. */
1989 if (GET_CODE (operand0) == REG
1990 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1991 mark_reg_pointer (operand0, BITS_PER_UNIT);
1992 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1993 mark_reg_pointer (temp, BITS_PER_UNIT);
1994
1995 if (ishighonly)
1996 set = gen_rtx_SET (mode, operand0, temp);
1997 else
1998 set = gen_rtx_SET (VOIDmode,
1999 operand0,
2000 gen_rtx_LO_SUM (mode, temp, operand1));
2001
2002 emit_insn (gen_rtx_SET (VOIDmode,
2003 temp,
2004 gen_rtx_HIGH (mode, operand1)));
2005 emit_insn (set);
2006
2007 }
2008 return 1;
2009 }
2010 else if (GET_CODE (operand1) != CONST_INT
2011 || !cint_ok_for_move (INTVAL (operand1)))
2012 {
2013 rtx insn, temp;
2014 rtx op1 = operand1;
2015 HOST_WIDE_INT value = 0;
2016 HOST_WIDE_INT insv = 0;
2017 int insert = 0;
2018
2019 if (GET_CODE (operand1) == CONST_INT)
2020 value = INTVAL (operand1);
2021
2022 if (TARGET_64BIT
2023 && GET_CODE (operand1) == CONST_INT
2024 && HOST_BITS_PER_WIDE_INT > 32
2025 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2026 {
2027 HOST_WIDE_INT nval;
2028
2029 /* Extract the low order 32 bits of the value and sign extend.
2030 If the new value is the same as the original value, we can
2031 use the original value as-is. If the new value is
2032 different, we use it and insert the most-significant 32-bits
2033 of the original value into the final result. */
2034 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2035 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2036 if (value != nval)
2037 {
2038 #if HOST_BITS_PER_WIDE_INT > 32
2039 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2040 #endif
2041 insert = 1;
2042 value = nval;
2043 operand1 = GEN_INT (nval);
2044 }
2045 }
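/* For illustration: the mask/XOR/subtract above computes the
   sign-extension of the low-order 32 bits of VALUE, i.e.

       nval = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000

   For value == 0x123456789, nval == 0x23456789; the two differ, so
   INSV receives the high-order 32 bits (0x1) and the insertion code
   further down is used to patch them in.  */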
2046
2047 if (reload_in_progress || reload_completed)
2048 temp = scratch_reg ? scratch_reg : operand0;
2049 else
2050 temp = gen_reg_rtx (mode);
2051
2052 /* We don't directly split DImode constants on 32-bit targets
2053 because PLUS uses an 11-bit immediate and the insn sequence
2054 generated is not as efficient as the one using HIGH/LO_SUM. */
2055 if (GET_CODE (operand1) == CONST_INT
2056 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2057 && !insert)
2058 {
2059 /* Directly break constant into high and low parts. This
2060 provides better optimization opportunities because various
2061 passes recognize constants split with PLUS but not LO_SUM.
2062 We use a 14-bit signed low part except when the addition
2063 of 0x4000 to the high part might change the sign of the
2064 high part. */
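/* Worked example: for value == 0x12345678 the split is simply
   high == 0x12344000, low == 0x1678 (ldil the high part, then
   ldo the low part).  For value == 0x12347fff the 14-bit field
   is 0x3fff >= 0x2000, so high is bumped to 0x12348000 and low
   becomes -1, keeping low within the signed 14-bit range of ldo.  */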
2065 HOST_WIDE_INT low = value & 0x3fff;
2066 HOST_WIDE_INT high = value & ~ 0x3fff;
2067
2068 if (low >= 0x2000)
2069 {
2070 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2071 high += 0x2000;
2072 else
2073 high += 0x4000;
2074 }
2075
2076 low = value - high;
2077
2078 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2079 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2080 }
2081 else
2082 {
2083 emit_insn (gen_rtx_SET (VOIDmode, temp,
2084 gen_rtx_HIGH (mode, operand1)));
2085 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2086 }
2087
2088 insn = emit_move_insn (operands[0], operands[1]);
2089
2090 /* Now insert the most significant 32 bits of the value
2091 into the register. When we don't have a second register
2092 available, it could take up to nine instructions to load
2093 a 64-bit integer constant. Prior to reload, we force
2094 constants that would take more than three instructions
2095 to load to the constant pool. During and after reload,
2096 we have to handle all possible values. */
2097 if (insert)
2098 {
2099 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2100 register and the value to be inserted is outside the
2101 range that can be loaded with three depdi instructions. */
2102 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2103 {
2104 operand1 = GEN_INT (insv);
2105
2106 emit_insn (gen_rtx_SET (VOIDmode, temp,
2107 gen_rtx_HIGH (mode, operand1)));
2108 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2109 emit_insn (gen_insv (operand0, GEN_INT (32),
2110 const0_rtx, temp));
2111 }
2112 else
2113 {
2114 int len = 5, pos = 27;
2115
2116 /* Insert the bits using the depdi instruction. */
2117 while (pos >= 0)
2118 {
2119 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2120 HOST_WIDE_INT sign = v5 < 0;
2121
2122 /* Left extend the insertion. */
2123 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2124 while (pos > 0 && (insv & 1) == sign)
2125 {
2126 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2127 len += 1;
2128 pos -= 1;
2129 }
2130
2131 emit_insn (gen_insv (operand0, GEN_INT (len),
2132 GEN_INT (pos), GEN_INT (v5)));
2133
2134 len = pos > 0 && pos < 5 ? pos : 5;
2135 pos -= len;
2136 }
2137 }
2138 }
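/* For illustration: each gen_insv above deposits a 5-bit (or wider)
   sign-extended chunk; the inner loop widens a chunk for free while
   the following bits merely replicate its sign.  In the extreme case
   insv == -1, the first chunk (v5 == -1) widens from len 5/pos 27 to
   len 32/pos 0, so a single depdi covers all 32 high bits instead of
   the seven insns a plain 5-bits-per-insn walk would need.  */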
2139
2140 REG_NOTES (insn)
2141 = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
2142
2143 return 1;
2144 }
2145 }
2146 /* Now have insn-emit do whatever it normally does. */
2147 return 0;
2148 }
2149
2150 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2151 it will need a link/runtime reloc). */
2152
2153 int
2154 reloc_needed (tree exp)
2155 {
2156 int reloc = 0;
2157
2158 switch (TREE_CODE (exp))
2159 {
2160 case ADDR_EXPR:
2161 return 1;
2162
2163 case PLUS_EXPR:
2164 case MINUS_EXPR:
2165 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2166 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2167 break;
2168
2169 case NOP_EXPR:
2170 case CONVERT_EXPR:
2171 case NON_LVALUE_EXPR:
2172 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2173 break;
2174
2175 case CONSTRUCTOR:
2176 {
2177 register tree link;
2178 for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
2179 if (TREE_VALUE (link) != 0)
2180 reloc |= reloc_needed (TREE_VALUE (link));
2181 }
2182 break;
2183
2184 case ERROR_MARK:
2185 break;
2186
2187 default:
2188 break;
2189 }
2190 return reloc;
2191 }
2192
2193 /* Does operand (which is a symbolic_operand) live in text space?
2194 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
2195 will be true. */
2196
2197 int
2198 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2199 {
2200 if (GET_CODE (operand) == CONST)
2201 operand = XEXP (XEXP (operand, 0), 0);
2202 if (flag_pic)
2203 {
2204 if (GET_CODE (operand) == SYMBOL_REF)
2205 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2206 }
2207 else
2208 {
2209 if (GET_CODE (operand) == SYMBOL_REF)
2210 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2211 }
2212 return 1;
2213 }
2214
2215 \f
2216 /* Return the best assembler insn template
2217 for moving operands[1] into operands[0] as a fullword. */
2218 const char *
2219 singlemove_string (rtx *operands)
2220 {
2221 HOST_WIDE_INT intval;
2222
2223 if (GET_CODE (operands[0]) == MEM)
2224 return "stw %r1,%0";
2225 if (GET_CODE (operands[1]) == MEM)
2226 return "ldw %1,%0";
2227 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2228 {
2229 long i;
2230 REAL_VALUE_TYPE d;
2231
2232 if (GET_MODE (operands[1]) != SFmode)
2233 abort ();
2234
2235 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2236 bit pattern. */
2237 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2238 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2239
2240 operands[1] = GEN_INT (i);
2241 /* Fall through to CONST_INT case. */
2242 }
2243 if (GET_CODE (operands[1]) == CONST_INT)
2244 {
2245 intval = INTVAL (operands[1]);
2246
2247 if (VAL_14_BITS_P (intval))
2248 return "ldi %1,%0";
2249 else if ((intval & 0x7ff) == 0)
2250 return "ldil L'%1,%0";
2251 else if (zdepi_cint_p (intval))
2252 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2253 else
2254 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2255 }
2256 return "copy %1,%0";
2257 }
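/* Template selection examples (illustrative values): 42 fits in 14
   bits, so "ldi 42,%r19"; 0x12345800 has its low 11 bits clear, so
   a single "ldil L'0x12345800,%r19" suffices; 0x00ff0000 is a
   contiguous bit string, so one zdepi/depwi,z works; an arbitrary
   value such as 0x12345678 needs the two-insn ldil/ldo sequence.
   The register choices here are arbitrary.  */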
2258 \f
2259
2260 /* Compute position (in OP[1]) and width (in OP[2])
2261 useful for copying IMM to a register using the zdepi
2262 instruction. Store the immediate value to insert in OP[0]. */
2263 static void
2264 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2265 {
2266 int lsb, len;
2267
2268 /* Find the least significant set bit in IMM. */
2269 for (lsb = 0; lsb < 32; lsb++)
2270 {
2271 if ((imm & 1) != 0)
2272 break;
2273 imm >>= 1;
2274 }
2275
2276 /* Choose variants based on *sign* of the 5-bit field. */
2277 if ((imm & 0x10) == 0)
2278 len = (lsb <= 28) ? 4 : 32 - lsb;
2279 else
2280 {
2281 /* Find the width of the bitstring in IMM. */
2282 for (len = 5; len < 32; len++)
2283 {
2284 if ((imm & (1 << len)) == 0)
2285 break;
2286 }
2287
2288 /* Sign extend IMM as a 5-bit value. */
2289 imm = (imm & 0xf) - 0x10;
2290 }
2291
2292 op[0] = imm;
2293 op[1] = 31 - lsb;
2294 op[2] = len;
2295 }
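/* Worked example: imm == 0x00ff0000 gives lsb == 16 and an eight-bit
   string whose top field bit is set, so OP becomes {-1, 15, 8}:
   "zdepi -1,15,8" deposits eight sign-extension ones ending at bit
   position 15 (PA bit numbering), reconstructing 0x00ff0000 while
   zeroing the rest of the word.  For imm == 0x5000 the field value
   is positive and OP is {5, 19, 4}.  */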
2296
2297 /* Compute position (in OP[1]) and width (in OP[2])
2298 useful for copying IMM to a register using the depdi,z
2299 instruction. Store the immediate value to insert in OP[0]. */
2300 void
2301 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2302 {
2303 HOST_WIDE_INT lsb, len;
2304
2305 /* Find the least significant set bit in IMM. */
2306 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2307 {
2308 if ((imm & 1) != 0)
2309 break;
2310 imm >>= 1;
2311 }
2312
2313 /* Choose variants based on *sign* of the 5-bit field. */
2314 if ((imm & 0x10) == 0)
2315 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2316 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2317 else
2318 {
2319 /* Find the width of the bitstring in IMM. */
2320 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2321 {
2322 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2323 break;
2324 }
2325
2326 /* Sign extend IMM as a 5-bit value. */
2327 imm = (imm & 0xf) - 0x10;
2328 }
2329
2330 op[0] = imm;
2331 op[1] = 63 - lsb;
2332 op[2] = len;
2333 }
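/* Worked example for the doubleword variant: imm == 0xff00000000
   (0xff << 32) gives lsb == 32, a field value of -1, and
   OP == {-1, 31, 8}, i.e. "depdi,z -1,31,8".  */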
2334
2335 /* Output assembler code to perform a doubleword move insn
2336 with operands OPERANDS. */
2337
2338 const char *
2339 output_move_double (rtx *operands)
2340 {
2341 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2342 rtx latehalf[2];
2343 rtx addreg0 = 0, addreg1 = 0;
2344
2345 /* First classify both operands. */
2346
2347 if (REG_P (operands[0]))
2348 optype0 = REGOP;
2349 else if (offsettable_memref_p (operands[0]))
2350 optype0 = OFFSOP;
2351 else if (GET_CODE (operands[0]) == MEM)
2352 optype0 = MEMOP;
2353 else
2354 optype0 = RNDOP;
2355
2356 if (REG_P (operands[1]))
2357 optype1 = REGOP;
2358 else if (CONSTANT_P (operands[1]))
2359 optype1 = CNSTOP;
2360 else if (offsettable_memref_p (operands[1]))
2361 optype1 = OFFSOP;
2362 else if (GET_CODE (operands[1]) == MEM)
2363 optype1 = MEMOP;
2364 else
2365 optype1 = RNDOP;
2366
2367 /* Check for the cases that the operand constraints are not
2368 supposed to allow. Abort if we get one,
2369 because generating code for these cases is painful. */
2370
2371 if (optype0 != REGOP && optype1 != REGOP)
2372 abort ();
2373
2374 /* Handle auto decrementing and incrementing loads and stores
2375 specifically, since the structure of the function doesn't work
2376 for them without major modification. Do it better when we teach
2377 this port about the general inc/dec addressing of PA.
2378 (This was written by tege. Chide him if it doesn't work.) */
2379
2380 if (optype0 == MEMOP)
2381 {
2382 /* We have to output the address syntax ourselves, since print_operand
2383 doesn't deal with the addresses we want to use. Fix this later. */
2384
2385 rtx addr = XEXP (operands[0], 0);
2386 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2387 {
2388 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2389
2390 operands[0] = XEXP (addr, 0);
2391 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2392 abort ();
2393
2394 if (!reg_overlap_mentioned_p (high_reg, addr))
2395 {
2396 /* No overlap between high target register and address
2397 register. (We do this in a non-obvious way to
2398 save a register file writeback) */
2399 if (GET_CODE (addr) == POST_INC)
2400 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2401 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2402 }
2403 else
2404 abort ();
2405 }
2406 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2407 {
2408 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2409
2410 operands[0] = XEXP (addr, 0);
2411 if (GET_CODE (operands[1]) != REG || GET_CODE (operands[0]) != REG)
2412 abort ();
2413
2414 if (!reg_overlap_mentioned_p (high_reg, addr))
2415 {
2416 /* No overlap between high target register and address
2417 register. (We do this in a non-obvious way to
2418 save a register file writeback) */
2419 if (GET_CODE (addr) == PRE_INC)
2420 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2421 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2422 }
2423 else
2424 abort ();
2425 }
2426 }
2427 if (optype1 == MEMOP)
2428 {
2429 /* We have to output the address syntax ourselves, since print_operand
2430 doesn't deal with the addresses we want to use. Fix this later. */
2431
2432 rtx addr = XEXP (operands[1], 0);
2433 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2434 {
2435 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2436
2437 operands[1] = XEXP (addr, 0);
2438 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2439 abort ();
2440
2441 if (!reg_overlap_mentioned_p (high_reg, addr))
2442 {
2443 /* No overlap between high target register and address
2444 register. (We do this in a non-obvious way to
2445 save a register file writeback) */
2446 if (GET_CODE (addr) == POST_INC)
2447 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2448 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2449 }
2450 else
2451 {
2452 /* This is an undefined situation. We should load into the
2453 address register *and* update that register. Probably
2454 we don't need to handle this at all. */
2455 if (GET_CODE (addr) == POST_INC)
2456 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2457 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2458 }
2459 }
2460 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2461 {
2462 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2463
2464 operands[1] = XEXP (addr, 0);
2465 if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG)
2466 abort ();
2467
2468 if (!reg_overlap_mentioned_p (high_reg, addr))
2469 {
2470 /* No overlap between high target register and address
2471 register. (We do this in a non-obvious way to
2472 save a register file writeback) */
2473 if (GET_CODE (addr) == PRE_INC)
2474 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2475 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2476 }
2477 else
2478 {
2479 /* This is an undefined situation. We should load into the
2480 address register *and* update that register. Probably
2481 we don't need to handle this at all. */
2482 if (GET_CODE (addr) == PRE_INC)
2483 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2484 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2485 }
2486 }
2487 else if (GET_CODE (addr) == PLUS
2488 && GET_CODE (XEXP (addr, 0)) == MULT)
2489 {
2490 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2491
2492 if (!reg_overlap_mentioned_p (high_reg, addr))
2493 {
2494 rtx xoperands[4];
2495
2496 xoperands[0] = high_reg;
2497 xoperands[1] = XEXP (addr, 1);
2498 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2499 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2500 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2501 xoperands);
2502 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2503 }
2504 else
2505 {
2506 rtx xoperands[4];
2507
2508 xoperands[0] = high_reg;
2509 xoperands[1] = XEXP (addr, 1);
2510 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2511 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2512 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2513 xoperands);
2514 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2515 }
2516 }
2517 }
2518
2519 /* If an operand is an unoffsettable memory ref, find a register
2520 we can increment temporarily to make it refer to the second word. */
2521
2522 if (optype0 == MEMOP)
2523 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2524
2525 if (optype1 == MEMOP)
2526 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2527
2528 /* Ok, we can do one word at a time. Normally we do the
2529 low-numbered word first, but we swap to the high-numbered word
2530 below when the first move would clobber the source of the second.
2531 In either case, set up in LATEHALF the operands to use
2532 for the high-numbered word and in some cases alter the
2533 operands in OPERANDS to be suitable for the low-numbered word. */
2534
2535 if (optype0 == REGOP)
2536 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2537 else if (optype0 == OFFSOP)
2538 latehalf[0] = adjust_address (operands[0], SImode, 4);
2539 else
2540 latehalf[0] = operands[0];
2541
2542 if (optype1 == REGOP)
2543 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2544 else if (optype1 == OFFSOP)
2545 latehalf[1] = adjust_address (operands[1], SImode, 4);
2546 else if (optype1 == CNSTOP)
2547 split_double (operands[1], &operands[1], &latehalf[1]);
2548 else
2549 latehalf[1] = operands[1];
2550
2551 /* If the first move would clobber the source of the second one,
2552 do them in the other order.
2553
2554 This can happen in two cases:
2555
2556 mem -> register where the first half of the destination register
2557 is the same register used in the memory's address. Reload
2558 can create such insns.
2559
2560 mem in this case will be either register indirect or register
2561 indirect plus a valid offset.
2562
2563 register -> register move where REGNO(dst) == REGNO(src + 1)
2564 someone (Tim/Tege?) claimed this can happen for parameter loads.
2565
2566 Handle mem -> register case first. */
2567 if (optype0 == REGOP
2568 && (optype1 == MEMOP || optype1 == OFFSOP)
2569 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2570 operands[1], 0))
2571 {
2572 /* Do the late half first. */
2573 if (addreg1)
2574 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2575 output_asm_insn (singlemove_string (latehalf), latehalf);
2576
2577 /* Then clobber. */
2578 if (addreg1)
2579 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2580 return singlemove_string (operands);
2581 }
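/* Example of the overlap just handled (hypothetical RTL): reload may
   produce

       (set (reg:DI 4) (mem:DI (plus:SI (reg:SI 4) (const_int 8))))

   Loading the low-numbered word into %r4 first would clobber the
   address, so the high-numbered word (%r5 <- 12(%r4)) is moved
   first.  */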
2582
2583 /* Now handle register -> register case. */
2584 if (optype0 == REGOP && optype1 == REGOP
2585 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2586 {
2587 output_asm_insn (singlemove_string (latehalf), latehalf);
2588 return singlemove_string (operands);
2589 }
2590
2591 /* Normal case: do the two words, low-numbered first. */
2592
2593 output_asm_insn (singlemove_string (operands), operands);
2594
2595 /* Make any unoffsettable addresses point at high-numbered word. */
2596 if (addreg0)
2597 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2598 if (addreg1)
2599 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2600
2601 /* Do that word. */
2602 output_asm_insn (singlemove_string (latehalf), latehalf);
2603
2604 /* Undo the adds we just did. */
2605 if (addreg0)
2606 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2607 if (addreg1)
2608 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2609
2610 return "";
2611 }
2612 \f
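/* Output the assembler code for a doubleword move where at least one
   operand is a floating point register (or the source is zero). */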
2613 const char *
2614 output_fp_move_double (rtx *operands)
2615 {
2616 if (FP_REG_P (operands[0]))
2617 {
2618 if (FP_REG_P (operands[1])
2619 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2620 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2621 else
2622 output_asm_insn ("fldd%F1 %1,%0", operands);
2623 }
2624 else if (FP_REG_P (operands[1]))
2625 {
2626 output_asm_insn ("fstd%F0 %1,%0", operands);
2627 }
2628 else if (operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2629 {
2630 if (GET_CODE (operands[0]) == REG)
2631 {
2632 rtx xoperands[2];
2633 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2634 xoperands[0] = operands[0];
2635 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2636 }
2637 /* This is a pain. You have to be prepared to deal with an
2638 arbitrary address here including pre/post increment/decrement.
2639
2640 So avoid this in the MD. */
2641 else
2642 abort ();
2643 }
2644 else abort ();
2645 return "";
2646 }
2647 \f
2648 /* Return a REG that occurs in ADDR with coefficient 1.
2649 ADDR can be effectively incremented by incrementing REG. */
2650
2651 static rtx
2652 find_addr_reg (rtx addr)
2653 {
2654 while (GET_CODE (addr) == PLUS)
2655 {
2656 if (GET_CODE (XEXP (addr, 0)) == REG)
2657 addr = XEXP (addr, 0);
2658 else if (GET_CODE (XEXP (addr, 1)) == REG)
2659 addr = XEXP (addr, 1);
2660 else if (CONSTANT_P (XEXP (addr, 0)))
2661 addr = XEXP (addr, 1);
2662 else if (CONSTANT_P (XEXP (addr, 1)))
2663 addr = XEXP (addr, 0);
2664 else
2665 abort ();
2666 }
2667 if (GET_CODE (addr) == REG)
2668 return addr;
2669 abort ();
2670 }
2671
2672 /* Emit code to perform a block move.
2673
2674 OPERANDS[0] is the destination pointer as a REG, clobbered.
2675 OPERANDS[1] is the source pointer as a REG, clobbered.
2676 OPERANDS[2] is a register for temporary storage.
2677 OPERANDS[3] is a register for temporary storage.
2678 OPERANDS[4] is the size as a CONST_INT
2679 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2680 OPERANDS[6] is another temporary register. */
2681
2682 const char *
2683 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2684 {
2685 int align = INTVAL (operands[5]);
2686 unsigned long n_bytes = INTVAL (operands[4]);
2687
2688 /* We can't move more than a word at a time because the PA
2689 has no integer move insns longer than a word. (Could use fp mem ops?) */
2690 if (align > (TARGET_64BIT ? 8 : 4))
2691 align = (TARGET_64BIT ? 8 : 4);
2692
2693 /* Note that we know each loop below will execute at least twice
2694 (else we would have open-coded the copy). */
2695 switch (align)
2696 {
2697 case 8:
2698 /* Pre-adjust the loop counter. */
2699 operands[4] = GEN_INT (n_bytes - 16);
2700 output_asm_insn ("ldi %4,%2", operands);
2701
2702 /* Copying loop. */
2703 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2704 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2705 output_asm_insn ("std,ma %3,8(%0)", operands);
2706 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2707 output_asm_insn ("std,ma %6,8(%0)", operands);
2708
2709 /* Handle the residual. There could be up to 15 bytes of
2710 residual to copy! */
2711 if (n_bytes % 16 != 0)
2712 {
2713 operands[4] = GEN_INT (n_bytes % 8);
2714 if (n_bytes % 16 >= 8)
2715 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2716 if (n_bytes % 8 != 0)
2717 output_asm_insn ("ldd 0(%1),%6", operands);
2718 if (n_bytes % 16 >= 8)
2719 output_asm_insn ("std,ma %3,8(%0)", operands);
2720 if (n_bytes % 8 != 0)
2721 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2722 }
2723 return "";
2724
2725 case 4:
2726 /* Pre-adjust the loop counter. */
2727 operands[4] = GEN_INT (n_bytes - 8);
2728 output_asm_insn ("ldi %4,%2", operands);
2729
2730 /* Copying loop. */
2731 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2732 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2733 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2734 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2735 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2736
2737 /* Handle the residual. There could be up to 7 bytes of
2738 residual to copy! */
2739 if (n_bytes % 8 != 0)
2740 {
2741 operands[4] = GEN_INT (n_bytes % 4);
2742 if (n_bytes % 8 >= 4)
2743 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2744 if (n_bytes % 4 != 0)
2745 output_asm_insn ("ldw 0(%1),%6", operands);
2746 if (n_bytes % 8 >= 4)
2747 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2748 if (n_bytes % 4 != 0)
2749 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2750 }
2751 return "";
2752
2753 case 2:
2754 /* Pre-adjust the loop counter. */
2755 operands[4] = GEN_INT (n_bytes - 4);
2756 output_asm_insn ("ldi %4,%2", operands);
2757
2758 /* Copying loop. */
2759 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2760 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2761 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2762 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2763 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2764
2765 /* Handle the residual. */
2766 if (n_bytes % 4 != 0)
2767 {
2768 if (n_bytes % 4 >= 2)
2769 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2770 if (n_bytes % 2 != 0)
2771 output_asm_insn ("ldb 0(%1),%6", operands);
2772 if (n_bytes % 4 >= 2)
2773 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2774 if (n_bytes % 2 != 0)
2775 output_asm_insn ("stb %6,0(%0)", operands);
2776 }
2777 return "";
2778
2779 case 1:
2780 /* Pre-adjust the loop counter. */
2781 operands[4] = GEN_INT (n_bytes - 2);
2782 output_asm_insn ("ldi %4,%2", operands);
2783
2784 /* Copying loop. */
2785 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2786 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2787 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2788 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2789 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2790
2791 /* Handle the residual. */
2792 if (n_bytes % 2 != 0)
2793 {
2794 output_asm_insn ("ldb 0(%1),%3", operands);
2795 output_asm_insn ("stb %3,0(%0)", operands);
2796 }
2797 return "";
2798
2799 default:
2800 abort ();
2801 }
2802 }
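/* Worked example (align == 4, n_bytes == 23): the counter is preset
   to 15, each trip through the loop moves 8 bytes, and the loop
   exits with 16 bytes copied.  The residual of 7 bytes is handled by
   one more word load/store (23 % 8 >= 4) plus a final word load and
   an "stby,e" that stores only the remaining 23 % 4 == 3 bytes.  */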
2803
2804 /* Count the number of insns necessary to handle this block move.
2805
2806 Basic structure is the same as output_block_move, except that we
2807 count insns rather than emit them. */
2808
2809 static int
2810 compute_movmem_length (rtx insn)
2811 {
2812 rtx pat = PATTERN (insn);
2813 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2814 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2815 unsigned int n_insns = 0;
2816
2817 /* We can't move more than a word at a time because the PA
2818 has no integer move insns longer than a word. (Could use fp mem ops?) */
2819 if (align > (TARGET_64BIT ? 8 : 4))
2820 align = (TARGET_64BIT ? 8 : 4);
2821
2822 /* The basic copying loop. */
2823 n_insns = 6;
2824
2825 /* Residuals. */
2826 if (n_bytes % (2 * align) != 0)
2827 {
2828 if ((n_bytes % (2 * align)) >= align)
2829 n_insns += 2;
2830
2831 if ((n_bytes % align) != 0)
2832 n_insns += 2;
2833 }
2834
2835 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2836 return n_insns * 4;
2837 }
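/* Continuing the example above: align == 4 and n_bytes == 23 yields
   the 6 loop insns, +2 since 23 % 8 >= 4, +2 since 23 % 4 != 0; the
   insn is therefore 10 insns, i.e. 40 bytes, long.  */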
2838
2839 /* Emit code to perform a block clear.
2840
2841 OPERANDS[0] is the destination pointer as a REG, clobbered.
2842 OPERANDS[1] is a register for temporary storage.
2843 OPERANDS[2] is the size as a CONST_INT
2844 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2845
2846 const char *
2847 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2848 {
2849 int align = INTVAL (operands[3]);
2850 unsigned long n_bytes = INTVAL (operands[2]);
2851
2852 /* We can't clear more than a word at a time because the PA
2853 has no integer move insns longer than a word. */
2854 if (align > (TARGET_64BIT ? 8 : 4))
2855 align = (TARGET_64BIT ? 8 : 4);
2856
2857 /* Note that we know each loop below will execute at least twice
2858 (else we would have open-coded the copy). */
2859 switch (align)
2860 {
2861 case 8:
2862 /* Pre-adjust the loop counter. */
2863 operands[2] = GEN_INT (n_bytes - 16);
2864 output_asm_insn ("ldi %2,%1", operands);
2865
2866 /* Loop. */
2867 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2868 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2869 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2870
2871 /* Handle the residual. There could be up to 15 bytes of
2872 residual to clear! */
2873 if (n_bytes % 16 != 0)
2874 {
2875 operands[2] = GEN_INT (n_bytes % 8);
2876 if (n_bytes % 16 >= 8)
2877 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2878 if (n_bytes % 8 != 0)
2879 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2880 }
2881 return "";
2882
2883 case 4:
2884 /* Pre-adjust the loop counter. */
2885 operands[2] = GEN_INT (n_bytes - 8);
2886 output_asm_insn ("ldi %2,%1", operands);
2887
2888 /* Loop. */
2889 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2890 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2891 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2892
2893 /* Handle the residual. There could be up to 7 bytes of
2894 residual to clear! */
2895 if (n_bytes % 8 != 0)
2896 {
2897 operands[2] = GEN_INT (n_bytes % 4);
2898 if (n_bytes % 8 >= 4)
2899 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2900 if (n_bytes % 4 != 0)
2901 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2902 }
2903 return "";
2904
2905 case 2:
2906 /* Pre-adjust the loop counter. */
2907 operands[2] = GEN_INT (n_bytes - 4);
2908 output_asm_insn ("ldi %2,%1", operands);
2909
2910 /* Loop. */
2911 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2912 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2913 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2914
2915 /* Handle the residual. */
2916 if (n_bytes % 4 != 0)
2917 {
2918 if (n_bytes % 4 >= 2)
2919 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2920 if (n_bytes % 2 != 0)
2921 output_asm_insn ("stb %%r0,0(%0)", operands);
2922 }
2923 return "";
2924
2925 case 1:
2926 /* Pre-adjust the loop counter. */
2927 operands[2] = GEN_INT (n_bytes - 2);
2928 output_asm_insn ("ldi %2,%1", operands);
2929
2930 /* Loop. */
2931 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2932 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2933 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2934
2935 /* Handle the residual. */
2936 if (n_bytes % 2 != 0)
2937 output_asm_insn ("stb %%r0,0(%0)", operands);
2938
2939 return "";
2940
2941 default:
2942 abort ();
2943 }
2944 }
2945
2946 /* Count the number of insns necessary to handle this block clear.
2947
2948 Basic structure is the same as output_block_clear, except that we
2949 count insns rather than emit them. */
2950
2951 static int
2952 compute_clrmem_length (rtx insn)
2953 {
2954 rtx pat = PATTERN (insn);
2955 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2956 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2957 unsigned int n_insns = 0;
2958
2959 /* We can't clear more than a word at a time because the PA
2960 has no integer move insns longer than a word. */
2961 if (align > (TARGET_64BIT ? 8 : 4))
2962 align = (TARGET_64BIT ? 8 : 4);
2963
2964 /* The basic loop. */
2965 n_insns = 4;
2966
2967 /* Residuals. */
2968 if (n_bytes % (2 * align) != 0)
2969 {
2970 if ((n_bytes % (2 * align)) >= align)
2971 n_insns++;
2972
2973 if ((n_bytes % align) != 0)
2974 n_insns++;
2975 }
2976
2977 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2978 return n_insns * 4;
2979 }
2980 \f
2981
2982 const char *
2983 output_and (rtx *operands)
2984 {
2985 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2986 {
2987 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2988 int ls0, ls1, ms0, p, len;
2989
2990 for (ls0 = 0; ls0 < 32; ls0++)
2991 if ((mask & (1 << ls0)) == 0)
2992 break;
2993
2994 for (ls1 = ls0; ls1 < 32; ls1++)
2995 if ((mask & (1 << ls1)) != 0)
2996 break;
2997
2998 for (ms0 = ls1; ms0 < 32; ms0++)
2999 if ((mask & (1 << ms0)) == 0)
3000 break;
3001
3002 if (ms0 != 32)
3003 abort ();
3004
3005 if (ls1 == 32)
3006 {
3007 len = ls0;
3008
3009 if (len == 0)
3010 abort ();
3011
3012 operands[2] = GEN_INT (len);
3013 return "{extru|extrw,u} %1,31,%2,%0";
3014 }
3015 else
3016 {
3017 /* We could use this `depi' for the case above as well, but `depi'
3018 requires one more register file access than an `extru'. */
3019
3020 p = 31 - ls0;
3021 len = ls1 - ls0;
3022
3023 operands[2] = GEN_INT (p);
3024 operands[3] = GEN_INT (len);
3025 return "{depi|depwi} 0,%2,%3,%0";
3026 }
3027 }
3028 else
3029 return "and %1,%2,%0";
3030 }
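/* Mask analysis examples (illustrative): for mask == 0x0000ffff we
   get ls0 == 16 and ls1 == 32, so the template resolves to
   "extru %1,31,16,%0" (extract the low 16 bits).  For
   mask == 0xffff00ff the zeros form one interior run (ls0 == 8,
   ls1 == 16, ms0 == 32), giving "depwi 0,23,8,%0", which clears
   bits 8..11 and 12..15 counting from the LSB.  Any mask with more
   than one run of zeros trips the abort above.  */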
3031
3032 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3033 storing the result in operands[0]. */
3034 const char *
3035 output_64bit_and (rtx *operands)
3036 {
3037 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3038 {
3039 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3040 int ls0, ls1, ms0, p, len;
3041
3042 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3043 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3044 break;
3045
3046 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3047 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3048 break;
3049
3050 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3051 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3052 break;
3053
3054 if (ms0 != HOST_BITS_PER_WIDE_INT)
3055 abort ();
3056
3057 if (ls1 == HOST_BITS_PER_WIDE_INT)
3058 {
3059 len = ls0;
3060
3061 if (len == 0)
3062 abort ();
3063
3064 operands[2] = GEN_INT (len);
3065 return "extrd,u %1,63,%2,%0";
3066 }
3067 else
3068 {
3069 /* We could use this `depdi' for the case above as well, but `depdi'
3070 requires one more register file access than an `extrd,u'. */
3071
3072 p = 63 - ls0;
3073 len = ls1 - ls0;
3074
3075 operands[2] = GEN_INT (p);
3076 operands[3] = GEN_INT (len);
3077 return "depdi 0,%2,%3,%0";
3078 }
3079 }
3080 else
3081 return "and %1,%2,%0";
3082 }
3083
3084 const char *
3085 output_ior (rtx *operands)
3086 {
3087 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3088 int bs0, bs1, p, len;
3089
3090 if (INTVAL (operands[2]) == 0)
3091 return "copy %1,%0";
3092
3093 for (bs0 = 0; bs0 < 32; bs0++)
3094 if ((mask & (1 << bs0)) != 0)
3095 break;
3096
3097 for (bs1 = bs0; bs1 < 32; bs1++)
3098 if ((mask & (1 << bs1)) == 0)
3099 break;
3100
3101 if (bs1 != 32 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3102 abort ();
3103
3104 p = 31 - bs0;
3105 len = bs1 - bs0;
3106
3107 operands[2] = GEN_INT (p);
3108 operands[3] = GEN_INT (len);
3109 return "{depi|depwi} -1,%2,%3,%0";
3110 }
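/* Example: mask == 0x00000ff0 is the single bit string bs0 == 4,
   bs1 == 12, so the returned template resolves to "depwi -1,27,8,%0",
   setting bits 4..11 (counting from the LSB).  A mask with set bits
   above the string, e.g. 0x1000ff0, trips the abort above.  */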
3111
3112 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3113 storing the result in operands[0]. */
3114 const char *
3115 output_64bit_ior (rtx *operands)
3116 {
3117 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3118 int bs0, bs1, p, len;
3119
3120 if (INTVAL (operands[2]) == 0)
3121 return "copy %1,%0";
3122
3123 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3124 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3125 break;
3126
3127 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3128 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3129 break;
3130
3131 if (bs1 != HOST_BITS_PER_WIDE_INT
3132 && ((unsigned HOST_WIDE_INT) 1 << bs1) <= mask)
3133 abort ();
3134
3135 p = 63 - bs0;
3136 len = bs1 - bs0;
3137
3138 operands[2] = GEN_INT (p);
3139 operands[3] = GEN_INT (len);
3140 return "depdi -1,%2,%3,%0";
3141 }
3142 \f
3143 /* Target hook for assembling integer objects. This code handles
3144 aligned SI and DI integers specially, since function references must
3145 be preceded by P%. */
3146
3147 static bool
3148 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3149 {
3150 if (size == UNITS_PER_WORD && aligned_p
3151 && function_label_operand (x, VOIDmode))
3152 {
3153 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3154 output_addr_const (asm_out_file, x);
3155 fputc ('\n', asm_out_file);
3156 return true;
3157 }
3158 return default_assemble_integer (x, size, aligned_p);
3159 }
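/* For example, an aligned word-size reference to function foo is
   emitted as "\t.word\tP%foo"; the P% prefix tells the assembler to
   generate a function pointer (plabel) rather than a plain code
   address.  */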
3160 \f
3161 /* Output an ascii string. */
3162 void
3163 output_ascii (FILE *file, const char *p, int size)
3164 {
3165 int i;
3166 int chars_output;
3167 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3168
3169 /* The HP assembler can only take strings of 256 characters at one
3170 time. This is a limitation on input line length, *not* the
3171 length of the string. Sigh. Even worse, it seems that the
3172 restriction is in number of input characters (see \xnn &
3173 \whatever). So we have to do this very carefully. */
3174
3175 fputs ("\t.STRING \"", file);
3176
3177 chars_output = 0;
3178 for (i = 0; i < size; i += 4)
3179 {
3180 int co = 0;
3181 int io = 0;
3182 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3183 {
3184 register unsigned int c = (unsigned char) p[i + io];
3185
3186 if (c == '\"' || c == '\\')
3187 partial_output[co++] = '\\';
3188 if (c >= ' ' && c < 0177)
3189 partial_output[co++] = c;
3190 else
3191 {
3192 unsigned int hexd;
3193 partial_output[co++] = '\\';
3194 partial_output[co++] = 'x';
3195 hexd = c / 16 - 0 + '0';
3196 if (hexd > '9')
3197 hexd -= '9' - 'a' + 1;
3198 partial_output[co++] = hexd;
3199 hexd = c % 16 - 0 + '0';
3200 if (hexd > '9')
3201 hexd -= '9' - 'a' + 1;
3202 partial_output[co++] = hexd;
3203 }
3204 }
3205 if (chars_output + co > 243)
3206 {
3207 fputs ("\"\n\t.STRING \"", file);
3208 chars_output = 0;
3209 }
3210 fwrite (partial_output, 1, (size_t) co, file);
3211 chars_output += co;
3212 co = 0;
3213 }
3214 fputs ("\"\n", file);
3215 }
3216
3217 /* Try to rewrite floating point comparisons & branches to avoid
3218 useless add,tr insns.
3219
3220 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3221 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3222 first attempt to remove useless add,tr insns. It is zero
3223 for the second pass as reorg sometimes leaves bogus REG_DEAD
3224 notes lying around.
3225
3226 When CHECK_NOTES is zero we can only eliminate add,tr insns
3227 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3228 instructions. */
3229 static void
3230 remove_useless_addtr_insns (int check_notes)
3231 {
3232 rtx insn;
3233 static int pass = 0;
3234
3235 /* This is fairly cheap, so always run it when optimizing. */
3236 if (optimize > 0)
3237 {
3238 int fcmp_count = 0;
3239 int fbranch_count = 0;
3240
3241 /* Walk all the insns in this function looking for fcmp & fbranch
3242 instructions. Keep track of how many of each we find. */
3243 for (insn = get_insns (); insn; insn = next_insn (insn))
3244 {
3245 rtx tmp;
3246
3247 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3248 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3249 continue;
3250
3251 tmp = PATTERN (insn);
3252
3253 /* It must be a set. */
3254 if (GET_CODE (tmp) != SET)
3255 continue;
3256
3257 /* If the destination is CCFP, then we've found an fcmp insn. */
3258 tmp = SET_DEST (tmp);
3259 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3260 {
3261 fcmp_count++;
3262 continue;
3263 }
3264
3265 tmp = PATTERN (insn);
3266 /* If this is an fbranch instruction, bump the fbranch counter. */
3267 if (GET_CODE (tmp) == SET
3268 && SET_DEST (tmp) == pc_rtx
3269 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3270 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3271 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3272 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3273 {
3274 fbranch_count++;
3275 continue;
3276 }
3277 }
3278
3279
3280 /* Find all floating point compare + branch insns. If possible,
3281 reverse the comparison & the branch to avoid add,tr insns. */
3282 for (insn = get_insns (); insn; insn = next_insn (insn))
3283 {
3284 rtx tmp, next;
3285
3286 /* Ignore anything that isn't an INSN. */
3287 if (GET_CODE (insn) != INSN)
3288 continue;
3289
3290 tmp = PATTERN (insn);
3291
3292 /* It must be a set. */
3293 if (GET_CODE (tmp) != SET)
3294 continue;
3295
3296 /* The destination must be CCFP, which is register zero. */
3297 tmp = SET_DEST (tmp);
3298 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3299 continue;
3300
3301 /* INSN should be a set of CCFP.
3302
3303 See if the result of this insn is used in a reversed FP
3304 conditional branch. If so, reverse our condition and
3305 the branch. Doing so avoids useless add,tr insns. */
3306 next = next_insn (insn);
3307 while (next)
3308 {
3309 /* Jumps, calls and labels stop our search. */
3310 if (GET_CODE (next) == JUMP_INSN
3311 || GET_CODE (next) == CALL_INSN
3312 || GET_CODE (next) == CODE_LABEL)
3313 break;
3314
3315 /* As does another fcmp insn. */
3316 if (GET_CODE (next) == INSN
3317 && GET_CODE (PATTERN (next)) == SET
3318 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3319 && REGNO (SET_DEST (PATTERN (next))) == 0)
3320 break;
3321
3322 next = next_insn (next);
3323 }
3324
3325 /* Is NEXT a branch? */
3326 if (next
3327 && GET_CODE (next) == JUMP_INSN)
3328 {
3329 rtx pattern = PATTERN (next);
3330
3331 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3332 and CCFP dies, then reverse our conditional and the branch
3333 to avoid the add,tr. */
3334 if (GET_CODE (pattern) == SET
3335 && SET_DEST (pattern) == pc_rtx
3336 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3337 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3338 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3339 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3340 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3341 && (fcmp_count == fbranch_count
3342 || (check_notes
3343 && find_regno_note (next, REG_DEAD, 0))))
3344 {
3345 /* Reverse the branch. */
3346 tmp = XEXP (SET_SRC (pattern), 1);
3347 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3348 XEXP (SET_SRC (pattern), 2) = tmp;
3349 INSN_CODE (next) = -1;
3350
3351 /* Reverse our condition. */
3352 tmp = PATTERN (insn);
3353 PUT_CODE (XEXP (tmp, 1),
3354 (reverse_condition_maybe_unordered
3355 (GET_CODE (XEXP (tmp, 1)))));
3356 }
3357 }
3358 }
3359 }
3360
3361 pass = !pass;
3362
3363 }
3364 \f
3365 /* You may have trouble believing this, but this is the 32 bit HP-PA
3366 stack layout. Wow.
3367
3368 Offset Contents
3369
3370 Variable arguments (optional; any number may be allocated)
3371
3372 SP-(4*(N+9)) arg word N
3373 : :
3374 SP-56 arg word 5
3375 SP-52 arg word 4
3376
3377 Fixed arguments (must be allocated; may remain unused)
3378
3379 SP-48 arg word 3
3380 SP-44 arg word 2
3381 SP-40 arg word 1
3382 SP-36 arg word 0
3383
3384 Frame Marker
3385
3386 SP-32 External Data Pointer (DP)
3387 SP-28 External sr4
3388 SP-24 External/stub RP (RP')
3389 SP-20 Current RP
3390 SP-16 Static Link
3391 SP-12 Clean up
3392 SP-8 Calling Stub RP (RP'')
3393 SP-4 Previous SP
3394
3395 Top of Frame
3396
3397 SP-0 Stack Pointer (points to next available address)
3398
3399 */
3400
3401 /* This function saves registers as follows. Registers marked with ' are
3402 this function's registers (as opposed to the previous function's).
3403 If a frame_pointer isn't needed, r4 is saved as a general register;
3404 the space for the frame pointer is still allocated, though, to keep
3405 things simple.
3406
3407
3408 Top of Frame
3409
3410 SP (FP') Previous FP
3411 SP + 4 Alignment filler (sigh)
3412 SP + 8 Space for locals reserved here.
3413 .
3414 .
3415 .
3416 SP + n All call saved registers used.
3417 .
3418 .
3419 .
3420 SP + o All call saved fp registers used.
3421 .
3422 .
3423 .
3424 SP + p (SP') points to next available address.
3425
3426 */
3427
3428 /* Global variables set by output_function_prologue(). */
3429 /* Size of frame. Need to know this to emit return insns from
3430 leaf procedures. */
3431 static HOST_WIDE_INT actual_fsize, local_fsize;
3432 static int save_fregs;
3433
3434 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3435 Handle case where DISP > 8k by using the add_high_const patterns.
3436
3437 Note that in the DISP > 8k case, we will leave the high part of the
3438 address in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3439
3440 static void
3441 store_reg (int reg, HOST_WIDE_INT disp, int base)
3442 {
3443 rtx insn, dest, src, basereg;
3444
3445 src = gen_rtx_REG (word_mode, reg);
3446 basereg = gen_rtx_REG (Pmode, base);
3447 if (VAL_14_BITS_P (disp))
3448 {
3449 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3450 insn = emit_move_insn (dest, src);
3451 }
3452 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3453 {
3454 rtx delta = GEN_INT (disp);
3455 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3456
3457 emit_move_insn (tmpreg, delta);
3458 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3459 dest = gen_rtx_MEM (word_mode, tmpreg);
3460 insn = emit_move_insn (dest, src);
3461 if (DO_FRAME_NOTES)
3462 {
3463 REG_NOTES (insn)
3464 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3465 gen_rtx_SET (VOIDmode,
3466 gen_rtx_MEM (word_mode,
3467 gen_rtx_PLUS (word_mode, basereg,
3468 delta)),
3469 src),
3470 REG_NOTES (insn));
3471 }
3472 }
3473 else
3474 {
3475 rtx delta = GEN_INT (disp);
3476 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3477 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3478
3479 emit_move_insn (tmpreg, high);
3480 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3481 insn = emit_move_insn (dest, src);
3482 if (DO_FRAME_NOTES)
3483 {
3484 REG_NOTES (insn)
3485 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3486 gen_rtx_SET (VOIDmode,
3487 gen_rtx_MEM (word_mode,
3488 gen_rtx_PLUS (word_mode, basereg,
3489 delta)),
3490 src),
3491 REG_NOTES (insn));
3492 }
3493 }
3494
3495 if (DO_FRAME_NOTES)
3496 RTX_FRAME_RELATED_P (insn) = 1;
3497 }
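/* Illustrative expansion of the large-displacement case (assuming
   reg == %r3, base == %r30 and disp == 0x12345 on the 32-bit port):

       addil L'0x12345,%r30     ; %r1 = %r30 + high part of disp
       stw %r3,R'0x12345(%r1)   ; store at the low part offset

   which is why the high part is documented as surviving in %r1.  */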
3498
3499 /* Emit RTL to store REG at the memory location specified by BASE and then
3500 add MOD to BASE. MOD must be <= 8k. */
3501
3502 static void
3503 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3504 {
3505 rtx insn, basereg, srcreg, delta;
3506
3507 if (!VAL_14_BITS_P (mod))
3508 abort ();
3509
3510 basereg = gen_rtx_REG (Pmode, base);
3511 srcreg = gen_rtx_REG (word_mode, reg);
3512 delta = GEN_INT (mod);
3513
3514 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3515 if (DO_FRAME_NOTES)
3516 {
3517 RTX_FRAME_RELATED_P (insn) = 1;
3518
3519 /* RTX_FRAME_RELATED_P must be set on each frame related set
3520 in a parallel with more than one element. Don't set
3521 RTX_FRAME_RELATED_P in the first set if reg is temporary
3522 register 1. The effect of this operation is recorded in
3523 the initial copy. */
3524 if (reg != 1)
3525 {
3526 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3527 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3528 }
3529 else
3530 {
3531 /* The first element of a PARALLEL is always processed if it is
3532 a SET. Thus, we need an expression list for this case. */
3533 REG_NOTES (insn)
3534 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3535 gen_rtx_SET (VOIDmode, basereg,
3536 gen_rtx_PLUS (word_mode, basereg, delta)),
3537 REG_NOTES (insn));
3538 }
3539 }
3540 }
3541
3542 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3543 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3544 whether to add a frame note or not.
3545
3546 In the DISP > 8k case, we leave the high part of the address in %r1.
3547 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3548
3549 static void
3550 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3551 {
3552 rtx insn;
3553
3554 if (VAL_14_BITS_P (disp))
3555 {
3556 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3557 plus_constant (gen_rtx_REG (Pmode, base), disp));
3558 }
3559 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3560 {
3561 rtx basereg = gen_rtx_REG (Pmode, base);
3562 rtx delta = GEN_INT (disp);
3563 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3564
3565 emit_move_insn (tmpreg, delta);
3566 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3567 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3568 }
3569 else
3570 {
3571 rtx basereg = gen_rtx_REG (Pmode, base);
3572 rtx delta = GEN_INT (disp);
3573 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3574
3575 emit_move_insn (tmpreg,
3576 gen_rtx_PLUS (Pmode, basereg,
3577 gen_rtx_HIGH (Pmode, delta)));
3578 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3579 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3580 }
3581
3582 if (DO_FRAME_NOTES && note)
3583 RTX_FRAME_RELATED_P (insn) = 1;
3584 }
3585
3586 HOST_WIDE_INT
3587 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3588 {
3589 int freg_saved = 0;
3590 int i, j;
3591
3592 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3593 be consistent with the rounding and size calculation done here.
3594 Change them at the same time. */
3595
3596 /* We do our own stack alignment. First, round the size of the
3597 stack locals up to a word boundary. */
3598 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3599
3600 /* Space for previous frame pointer + filler. If any frame is
3601 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3602 waste some space here for the sake of HP compatibility. The
3603 first slot is only used when the frame pointer is needed. */
3604 if (size || frame_pointer_needed)
3605 size += STARTING_FRAME_OFFSET;
3606
3607 /* If the current function calls __builtin_eh_return, then we need
3608 to allocate stack space for registers that will hold data for
3609 the exception handler. */
3610 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3611 {
3612 unsigned int i;
3613
3614 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3615 continue;
3616 size += i * UNITS_PER_WORD;
3617 }
3618
3619 /* Account for space used by the callee general register saves. */
3620 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3621 if (regs_ever_live[i])
3622 size += UNITS_PER_WORD;
3623
3624 /* Account for space used by the callee floating point register saves. */
3625 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3626 if (regs_ever_live[i]
3627 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3628 {
3629 freg_saved = 1;
3630
3631 /* We always save both halves of the FP register, so always
3632 increment the frame size by 8 bytes. */
3633 size += 8;
3634 }
3635
3636 /* If any of the floating registers are saved, account for the
3637 alignment needed for the floating point register save block. */
3638 if (freg_saved)
3639 {
3640 size = (size + 7) & ~7;
3641 if (fregs_live)
3642 *fregs_live = 1;
3643 }
3644
3645 /* The various ABIs include space for the outgoing parameters in the
3646 size of the current function's stack frame. We don't need to align
3647 for the outgoing arguments as their alignment is set by the final
3648 rounding for the frame as a whole. */
3649 size += current_function_outgoing_args_size;
3650
3651 /* Allocate space for the fixed frame marker. This space must be
3652 allocated for any function that makes calls or allocates
3653 stack space. */
3654 if (!current_function_is_leaf || size)
3655 size += TARGET_64BIT ? 48 : 32;
3656
3657 /* Finally, round to the preferred stack boundary. */
3658 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3659 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3660 }
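
/* A worked example of the size computation above, assuming a 32-bit
   target where UNITS_PER_WORD is 4, STARTING_FRAME_OFFSET is 8 and
   PREFERRED_STACK_BOUNDARY is 512 bits (64 bytes) -- illustrative
   values; the target macros are authoritative:

     10 bytes of locals           -> rounded up to 12
     + STARTING_FRAME_OFFSET      -> 20
     + 2 callee GR saves          -> 28
     + 32-byte fixed frame marker -> 60
     final rounding to 64 bytes   -> frame size 64.  */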
3661
3662 /* Generate the assembly code for function entry. FILE is a stdio
3663 stream to output the code to. SIZE is an int: how many units of
3664 temporary storage to allocate.
3665
3666 Refer to the array `regs_ever_live' to determine which registers to
3667 save; `regs_ever_live[I]' is nonzero if register number I is ever
3668 used in the function. This function is responsible for knowing
3669 which registers should not be saved even if used. */
3670
3671 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3672 of memory. If any fpu reg is used in the function, we allocate
3673 such a block here, at the bottom of the frame, just in case it's needed.
3674
3675 If this function is a leaf procedure, then we may choose not
3676 to do a "save" insn. The decision about whether or not
3677 to do this is made in regclass.c. */
3678
3679 static void
3680 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3681 {
3682 /* The function's label and associated .PROC must never be
3683 separated and must be output *after* any profiling declarations
3684 to avoid changing spaces/subspaces within a procedure. */
3685 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3686 fputs ("\t.PROC\n", file);
3687
3688 /* hppa_expand_prologue does the dirty work now. We just need
3689 to output the assembler directives which denote the start
3690 of a function. */
3691 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3692 if (regs_ever_live[2])
3693 fputs (",CALLS,SAVE_RP", file);
3694 else
3695 fputs (",NO_CALLS", file);
3696
3697 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3698 at the beginning of the frame and that it is used as the frame
3699 pointer for the frame. We do this because our current frame
3700 layout doesn't conform to that specified in the HP runtime
3701 documentation and we need a way to indicate to programs such as
3702 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3703 isn't used by HP compilers but is supported by the assembler.
3704 However, SAVE_SP is supposed to indicate that the previous stack
3705 pointer has been saved in the frame marker. */
3706 if (frame_pointer_needed)
3707 fputs (",SAVE_SP", file);
3708
3709 /* Pass on information about the number of callee register saves
3710 performed in the prologue.
3711
3712 The compiler is supposed to pass the highest register number
3713 saved, the assembler then has to adjust that number before
3714 entering it into the unwind descriptor (to account for any
3715 caller saved registers with lower register numbers than the
3716 first callee saved register). */
3717 if (gr_saved)
3718 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3719
3720 if (fr_saved)
3721 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3722
3723 fputs ("\n\t.ENTRY\n", file);
3724
3725 remove_useless_addtr_insns (0);
3726 }
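
/* For reference, the directives emitted above for a function that
   makes calls and saves three callee GRs might read (the frame size
   is made up for illustration):

     foo
     	.PROC
     	.CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=5
     	.ENTRY

   ENTRY_GR is 5 because gr_saved of 3 is biased by 2, as the comment
   above explains.  */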
3727
3728 void
3729 hppa_expand_prologue (void)
3730 {
3731 int merge_sp_adjust_with_store = 0;
3732 HOST_WIDE_INT size = get_frame_size ();
3733 HOST_WIDE_INT offset;
3734 int i;
3735 rtx insn, tmpreg;
3736
3737 gr_saved = 0;
3738 fr_saved = 0;
3739 save_fregs = 0;
3740
3741 /* Compute total size for frame pointer, filler, locals and rounding to
3742 the next word boundary. Similar code appears in compute_frame_size
3743 and must be changed in tandem with this code. */
3744 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3745 if (local_fsize || frame_pointer_needed)
3746 local_fsize += STARTING_FRAME_OFFSET;
3747
3748 actual_fsize = compute_frame_size (size, &save_fregs);
3749
3750 /* Compute a few things we will use often. */
3751 tmpreg = gen_rtx_REG (word_mode, 1);
3752
3753 /* Save RP first. The calling conventions manual states RP will
3754 always be stored into the caller's frame at sp - 20 or sp - 16
3755 depending on which ABI is in use. */
3756 if (regs_ever_live[2] || current_function_calls_eh_return)
3757 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3758
3759 /* Allocate the local frame and set up the frame pointer if needed. */
3760 if (actual_fsize != 0)
3761 {
3762 if (frame_pointer_needed)
3763 {
3764 /* Copy the old frame pointer temporarily into %r1. Set up the
3765 new stack pointer, then store away the saved old frame pointer
3766 into the stack at sp and at the same time update the stack
3767 pointer by actual_fsize bytes. There are two versions: the
3768 first handles small (<8k) frames, the second handles large
3769 (>=8k) frames. */
3770 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3771 if (DO_FRAME_NOTES)
3772 {
3773 /* We need to record the frame pointer save here since the
3774 new frame pointer is set in the following insn. */
3775 RTX_FRAME_RELATED_P (insn) = 1;
3776 REG_NOTES (insn)
3777 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3778 gen_rtx_SET (VOIDmode,
3779 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3780 frame_pointer_rtx),
3781 REG_NOTES (insn));
3782 }
3783
3784 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3785 if (DO_FRAME_NOTES)
3786 RTX_FRAME_RELATED_P (insn) = 1;
3787
3788 if (VAL_14_BITS_P (actual_fsize))
3789 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3790 else
3791 {
3792 /* It is incorrect to store the saved frame pointer at *sp,
3793 then increment sp (writes beyond the current stack boundary).
3794
3795 So instead use stwm to store at *sp and post-increment the
3796 stack pointer as an atomic operation. Then increment sp to
3797 finish allocating the new frame. */
3798 HOST_WIDE_INT adjust1 = 8192 - 64;
3799 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3800
3801 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3802 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3803 adjust2, 1);
3804 }
3805
3806 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3807 we need to store the previous stack pointer (frame pointer)
3808 into the frame marker on targets that use the HP unwind
3809 library. This allows the HP unwind library to be used to
3810 unwind GCC frames. However, we are not fully compatible
3811 with the HP library because our frame layout differs from
3812 that specified in the HP runtime specification.
3813
3814 We don't want a frame note on this instruction as the frame
3815 marker moves during dynamic stack allocation.
3816
3817 This instruction also serves as a blockage to prevent
3818 register spills from being scheduled before the stack
3819 pointer is raised. This is necessary as we store
3820 registers using the frame pointer as a base register,
3821 and the frame pointer is set before sp is raised. */
3822 if (TARGET_HPUX_UNWIND_LIBRARY)
3823 {
3824 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3825 GEN_INT (TARGET_64BIT ? -8 : -4));
3826
3827 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3828 frame_pointer_rtx);
3829 }
3830 else
3831 emit_insn (gen_blockage ());
3832 }
3833 /* No frame pointer needed. */
3834 else
3835 {
3836 /* In some cases we can perform the first callee register save
3837 and allocating the stack frame at the same time. If so, just
3838 make a note of it and defer allocating the frame until saving
3839 the callee registers. */
3840 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3841 merge_sp_adjust_with_store = 1;
3842 /* Cannot optimize. Adjust the stack frame by actual_fsize
3843 bytes. */
3844 else
3845 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3846 actual_fsize, 1);
3847 }
3848 }
3849
3850 /* Normal register save.
3851
3852 Do not save the frame pointer in the frame_pointer_needed case. It
3853 was done earlier. */
3854 if (frame_pointer_needed)
3855 {
3856 offset = local_fsize;
3857
3858 /* Saving the EH return data registers in the frame is the simplest
3859 way to get the frame unwind information emitted. We put them
3860 just before the general registers. */
3861 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3862 {
3863 unsigned int i, regno;
3864
3865 for (i = 0; ; ++i)
3866 {
3867 regno = EH_RETURN_DATA_REGNO (i);
3868 if (regno == INVALID_REGNUM)
3869 break;
3870
3871 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3872 offset += UNITS_PER_WORD;
3873 }
3874 }
3875
3876 for (i = 18; i >= 4; i--)
3877 if (regs_ever_live[i] && ! call_used_regs[i])
3878 {
3879 store_reg (i, offset, FRAME_POINTER_REGNUM);
3880 offset += UNITS_PER_WORD;
3881 gr_saved++;
3882 }
3883 /* Account for %r3 which is saved in a special place. */
3884 gr_saved++;
3885 }
3886 /* No frame pointer needed. */
3887 else
3888 {
3889 offset = local_fsize - actual_fsize;
3890
3891 /* Saving the EH return data registers in the frame is the simplest
3892 way to get the frame unwind information emitted. */
3893 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3894 {
3895 unsigned int i, regno;
3896
3897 for (i = 0; ; ++i)
3898 {
3899 regno = EH_RETURN_DATA_REGNO (i);
3900 if (regno == INVALID_REGNUM)
3901 break;
3902
3903 /* If merge_sp_adjust_with_store is nonzero, then we can
3904 optimize the first save. */
3905 if (merge_sp_adjust_with_store)
3906 {
3907 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3908 merge_sp_adjust_with_store = 0;
3909 }
3910 else
3911 store_reg (regno, offset, STACK_POINTER_REGNUM);
3912 offset += UNITS_PER_WORD;
3913 }
3914 }
3915
3916 for (i = 18; i >= 3; i--)
3917 if (regs_ever_live[i] && ! call_used_regs[i])
3918 {
3919 /* If merge_sp_adjust_with_store is nonzero, then we can
3920 optimize the first GR save. */
3921 if (merge_sp_adjust_with_store)
3922 {
3923 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3924 merge_sp_adjust_with_store = 0;
3925 }
3926 else
3927 store_reg (i, offset, STACK_POINTER_REGNUM);
3928 offset += UNITS_PER_WORD;
3929 gr_saved++;
3930 }
3931
3932 /* If we wanted to merge the SP adjustment with a GR save, but we never
3933 did any GR saves, then just emit the adjustment here. */
3934 if (merge_sp_adjust_with_store)
3935 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3936 actual_fsize, 1);
3937 }
3938
3939 /* The hppa calling conventions say that %r19, the pic offset
3940 register, is saved at sp - 32 (in this function's frame)
3941 when generating PIC code. FIXME: What is the correct thing
3942 to do for functions which make no calls and allocate no
3943 frame? Do we need to allocate a frame, or can we just omit
3944 the save? For now we'll just omit the save.
3945
3946 We don't want a note on this insn as the frame marker can
3947 move if there is a dynamic stack allocation. */
3948 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3949 {
3950 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3951
3952 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3953
3954 }
3955
3956 /* Align pointer properly (doubleword boundary). */
3957 offset = (offset + 7) & ~7;
3958
3959 /* Floating point register store. */
3960 if (save_fregs)
3961 {
3962 rtx base;
3963
3964 /* First get the frame or stack pointer to the start of the FP register
3965 save area. */
3966 if (frame_pointer_needed)
3967 {
3968 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3969 base = frame_pointer_rtx;
3970 }
3971 else
3972 {
3973 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3974 base = stack_pointer_rtx;
3975 }
3976
3977 /* Now actually save the FP registers. */
3978 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3979 {
3980 if (regs_ever_live[i]
3981 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3982 {
3983 rtx addr, insn, reg;
3984 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3985 reg = gen_rtx_REG (DFmode, i);
3986 insn = emit_move_insn (addr, reg);
3987 if (DO_FRAME_NOTES)
3988 {
3989 RTX_FRAME_RELATED_P (insn) = 1;
3990 if (TARGET_64BIT)
3991 {
3992 rtx mem = gen_rtx_MEM (DFmode,
3993 plus_constant (base, offset));
3994 REG_NOTES (insn)
3995 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3996 gen_rtx_SET (VOIDmode, mem, reg),
3997 REG_NOTES (insn));
3998 }
3999 else
4000 {
4001 rtx meml = gen_rtx_MEM (SFmode,
4002 plus_constant (base, offset));
4003 rtx memr = gen_rtx_MEM (SFmode,
4004 plus_constant (base, offset + 4));
4005 rtx regl = gen_rtx_REG (SFmode, i);
4006 rtx regr = gen_rtx_REG (SFmode, i + 1);
4007 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4008 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4009 rtvec vec;
4010
4011 RTX_FRAME_RELATED_P (setl) = 1;
4012 RTX_FRAME_RELATED_P (setr) = 1;
4013 vec = gen_rtvec (2, setl, setr);
4014 REG_NOTES (insn)
4015 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4016 gen_rtx_SEQUENCE (VOIDmode, vec),
4017 REG_NOTES (insn));
4018 }
4019 }
4020 offset += GET_MODE_SIZE (DFmode);
4021 fr_saved++;
4022 }
4023 }
4024 }
4025 }
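
/* A sketch of the large-frame sequence constructed above when a frame
   pointer is needed (assembler shown for illustration; the real insns
   come from store_reg_modify and set_reg_plus_d):

     copy %r3,%r1          ; old frame pointer -> %r1
     copy %sp,%r3          ; establish the new frame pointer
     stwm %r1,8128(%sp)    ; save old fp at *sp, post-increment sp
     ldo rest(%sp),%sp     ; allocate the remainder (addil/ldo via %r1
                           ; if rest does not fit in 14 bits)

   where 8128 is 8192 - 64 and rest is actual_fsize - 8128.  */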
4026
4027 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4028 Handle case where DISP > 8k by using the add_high_const patterns. */
4029
4030 static void
4031 load_reg (int reg, HOST_WIDE_INT disp, int base)
4032 {
4033 rtx dest = gen_rtx_REG (word_mode, reg);
4034 rtx basereg = gen_rtx_REG (Pmode, base);
4035 rtx src;
4036
4037 if (VAL_14_BITS_P (disp))
4038 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
4039 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4040 {
4041 rtx delta = GEN_INT (disp);
4042 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4043
4044 emit_move_insn (tmpreg, delta);
4045 if (TARGET_DISABLE_INDEXING)
4046 {
4047 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4048 src = gen_rtx_MEM (word_mode, tmpreg);
4049 }
4050 else
4051 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4052 }
4053 else
4054 {
4055 rtx delta = GEN_INT (disp);
4056 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4057 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4058
4059 emit_move_insn (tmpreg, high);
4060 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4061 }
4062
4063 emit_move_insn (dest, src);
4064 }
4065
4066 /* Update the total code bytes output to the text section. */
4067
4068 static void
4069 update_total_code_bytes (int nbytes)
4070 {
4071 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4072 && !IN_NAMED_SECTION_P (cfun->decl))
4073 {
4074 if (INSN_ADDRESSES_SET_P ())
4075 {
4076 unsigned long old_total = total_code_bytes;
4077
4078 total_code_bytes += nbytes;
4079
4080 /* Be prepared to handle overflows. */
4081 if (old_total > total_code_bytes)
4082 total_code_bytes = -1;
4083 }
4084 else
4085 total_code_bytes = -1;
4086 }
4087 }
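
/* The overflow check above exploits unsigned wraparound: if adding
   nbytes wraps total_code_bytes, the new total compares less than the
   old one, and the count is pinned at -1 (all bits set) to mean
   "unknown".  */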
4088
4089 /* This function generates the assembly code for function exit.
4090 Args are as for output_function_prologue ().
4091
4092 The function epilogue should not depend on the current stack
4093 pointer! It should use the frame pointer only. This is mandatory
4094 because of alloca; we also take advantage of it to omit stack
4095 adjustments before returning. */
4096
4097 static void
4098 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4099 {
4100 rtx insn = get_last_insn ();
4101
4102 last_address = 0;
4103
4104 /* hppa_expand_epilogue does the dirty work now. We just need
4105 to output the assembler directives which denote the end
4106 of a function.
4107
4108 To make debuggers happy, emit a nop if the epilogue was completely
4109 eliminated due to a volatile call as the last insn in the
4110 current function. That way the return address (in %r2) will
4111 always point to a valid instruction in the current function. */
4112
4113 /* Get the last real insn. */
4114 if (GET_CODE (insn) == NOTE)
4115 insn = prev_real_insn (insn);
4116
4117 /* If it is a sequence, then look inside. */
4118 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4119 insn = XVECEXP (PATTERN (insn), 0, 0);
4120
4121 /* If insn is a CALL_INSN, then it must be a call to a volatile
4122 function (otherwise there would be epilogue insns). */
4123 if (insn && GET_CODE (insn) == CALL_INSN)
4124 {
4125 fputs ("\tnop\n", file);
4126 last_address += 4;
4127 }
4128
4129 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4130
4131 if (INSN_ADDRESSES_SET_P ())
4132 {
4133 insn = get_last_nonnote_insn ();
4134 last_address += INSN_ADDRESSES (INSN_UID (insn));
4135 if (INSN_P (insn))
4136 last_address += insn_default_length (insn);
4137 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4138 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4139 }
4140
4141 /* Finally, update the total number of code bytes output so far. */
4142 update_total_code_bytes (last_address);
4143 }
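
/* The directives emitted above, with the nop present only when the
   last real insn was a call to a volatile function (output shown as
   a sketch):

     	bl foo,%r2
     	nop
     	.EXIT
     	.PROCEND
*/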
4144
4145 void
4146 hppa_expand_epilogue (void)
4147 {
4148 rtx tmpreg;
4149 HOST_WIDE_INT offset;
4150 HOST_WIDE_INT ret_off = 0;
4151 int i;
4152 int merge_sp_adjust_with_load = 0;
4153
4154 /* We will use this often. */
4155 tmpreg = gen_rtx_REG (word_mode, 1);
4156
4157 /* Try to restore RP early to avoid load/use interlocks when
4158 RP gets used in the return (bv) instruction. This appears to still
4159 be necessary even when we schedule the prologue and epilogue. */
4160 if (regs_ever_live [2] || current_function_calls_eh_return)
4161 {
4162 ret_off = TARGET_64BIT ? -16 : -20;
4163 if (frame_pointer_needed)
4164 {
4165 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4166 ret_off = 0;
4167 }
4168 else
4169 {
4170 /* No frame pointer, and stack is smaller than 8k. */
4171 if (VAL_14_BITS_P (ret_off - actual_fsize))
4172 {
4173 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4174 ret_off = 0;
4175 }
4176 }
4177 }
4178
4179 /* General register restores. */
4180 if (frame_pointer_needed)
4181 {
4182 offset = local_fsize;
4183
4184 /* If the current function calls __builtin_eh_return, then we need
4185 to restore the saved EH data registers. */
4186 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4187 {
4188 unsigned int i, regno;
4189
4190 for (i = 0; ; ++i)
4191 {
4192 regno = EH_RETURN_DATA_REGNO (i);
4193 if (regno == INVALID_REGNUM)
4194 break;
4195
4196 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4197 offset += UNITS_PER_WORD;
4198 }
4199 }
4200
4201 for (i = 18; i >= 4; i--)
4202 if (regs_ever_live[i] && ! call_used_regs[i])
4203 {
4204 load_reg (i, offset, FRAME_POINTER_REGNUM);
4205 offset += UNITS_PER_WORD;
4206 }
4207 }
4208 else
4209 {
4210 offset = local_fsize - actual_fsize;
4211
4212 /* If the current function calls __builtin_eh_return, then we need
4213 to restore the saved EH data registers. */
4214 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4215 {
4216 unsigned int i, regno;
4217
4218 for (i = 0; ; ++i)
4219 {
4220 regno = EH_RETURN_DATA_REGNO (i);
4221 if (regno == INVALID_REGNUM)
4222 break;
4223
4224 /* Only for the first load.
4225 merge_sp_adjust_with_load holds the register load
4226 with which we will merge the sp adjustment. */
4227 if (merge_sp_adjust_with_load == 0
4228 && local_fsize == 0
4229 && VAL_14_BITS_P (-actual_fsize))
4230 merge_sp_adjust_with_load = regno;
4231 else
4232 load_reg (regno, offset, STACK_POINTER_REGNUM);
4233 offset += UNITS_PER_WORD;
4234 }
4235 }
4236
4237 for (i = 18; i >= 3; i--)
4238 {
4239 if (regs_ever_live[i] && ! call_used_regs[i])
4240 {
4241 /* Only for the first load.
4242 merge_sp_adjust_with_load holds the register load
4243 with which we will merge the sp adjustment. */
4244 if (merge_sp_adjust_with_load == 0
4245 && local_fsize == 0
4246 && VAL_14_BITS_P (-actual_fsize))
4247 merge_sp_adjust_with_load = i;
4248 else
4249 load_reg (i, offset, STACK_POINTER_REGNUM);
4250 offset += UNITS_PER_WORD;
4251 }
4252 }
4253 }
4254
4255 /* Align pointer properly (doubleword boundary). */
4256 offset = (offset + 7) & ~7;
4257
4258 /* FP register restores. */
4259 if (save_fregs)
4260 {
4261 /* Adjust the register to index off of. */
4262 if (frame_pointer_needed)
4263 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4264 else
4265 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4266
4267 /* Actually do the restores now. */
4268 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4269 if (regs_ever_live[i]
4270 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4271 {
4272 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4273 rtx dest = gen_rtx_REG (DFmode, i);
4274 emit_move_insn (dest, src);
4275 }
4276 }
4277
4278 /* Emit a blockage insn here to keep these insns from being moved to
4279 an earlier spot in the epilogue, or into the main instruction stream.
4280
4281 This is necessary as we must not cut the stack back before all the
4282 restores are finished. */
4283 emit_insn (gen_blockage ());
4284
4285 /* Reset stack pointer (and possibly frame pointer). The stack
4286 pointer is initially set to fp + 64 to avoid a race condition. */
4287 if (frame_pointer_needed)
4288 {
4289 rtx delta = GEN_INT (-64);
4290
4291 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4292 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4293 }
4294 /* If we were deferring a callee register restore, do it now. */
4295 else if (merge_sp_adjust_with_load)
4296 {
4297 rtx delta = GEN_INT (-actual_fsize);
4298 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4299
4300 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4301 }
4302 else if (actual_fsize != 0)
4303 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4304 - actual_fsize, 0);
4305
4306 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4307 frame greater than 8k), do so now. */
4308 if (ret_off != 0)
4309 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4310
4311 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4312 {
4313 rtx sa = EH_RETURN_STACKADJ_RTX;
4314
4315 emit_insn (gen_blockage ());
4316 emit_insn (TARGET_64BIT
4317 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4318 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4319 }
4320 }
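
/* A sketch of the merged restore set up above: with no frame pointer,
   no locals and a frame that fits in 14 bits, the final callee
   register load and the stack cut collapse into one load-and-modify
   (illustrative syntax):

     ldwm -fsize(%sp),%rN   ; sp -= fsize, then load saved %rN

   matching the stwm that combined the save with the allocation in
   hppa_expand_prologue.  */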
4321
4322 rtx
4323 hppa_pic_save_rtx (void)
4324 {
4325 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4326 }
4327
4328 void
4329 hppa_profile_hook (int label_no)
4330 {
4331 /* We use SImode for the address of the function in both 32 and
4332 64-bit code to avoid having to provide DImode versions of the
4333 lcla2 and load_offset_label_address insn patterns. */
4334 rtx reg = gen_reg_rtx (SImode);
4335 rtx label_rtx = gen_label_rtx ();
4336 rtx begin_label_rtx, call_insn;
4337 char begin_label_name[16];
4338
4339 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4340 label_no);
4341 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4342
4343 if (TARGET_64BIT)
4344 emit_move_insn (arg_pointer_rtx,
4345 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4346 GEN_INT (64)));
4347
4348 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4349
4350 /* The address of the function is loaded into %r25 with an instruction-
4351 relative sequence that avoids the use of relocations. The sequence
4352 is split so that the load_offset_label_address instruction can
4353 occupy the delay slot of the call to _mcount. */
4354 if (TARGET_PA_20)
4355 emit_insn (gen_lcla2 (reg, label_rtx));
4356 else
4357 emit_insn (gen_lcla1 (reg, label_rtx));
4358
4359 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4360 reg, begin_label_rtx, label_rtx));
4361
4362 #ifndef NO_PROFILE_COUNTERS
4363 {
4364 rtx count_label_rtx, addr, r24;
4365 char count_label_name[16];
4366
4367 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4368 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4369
4370 addr = force_reg (Pmode, count_label_rtx);
4371 r24 = gen_rtx_REG (Pmode, 24);
4372 emit_move_insn (r24, addr);
4373
4374 call_insn =
4375 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4376 gen_rtx_SYMBOL_REF (Pmode,
4377 "_mcount")),
4378 GEN_INT (TARGET_64BIT ? 24 : 12)));
4379
4380 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4381 }
4382 #else
4383
4384 call_insn =
4385 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4386 gen_rtx_SYMBOL_REF (Pmode,
4387 "_mcount")),
4388 GEN_INT (TARGET_64BIT ? 16 : 8)));
4389
4390 #endif
4391
4392 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4393 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4394
4395 /* Indicate the _mcount call cannot throw, nor will it execute a
4396 non-local goto. */
4397 REG_NOTES (call_insn)
4398 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4399 }
4400
4401 /* Fetch the return address for the frame COUNT steps up from
4402 the current frame, after the prologue. FRAMEADDR is the
4403 frame pointer of the COUNT frame.
4404
4405 We want to ignore any export stub remnants here. To handle this,
4406 we examine the code at the return address, and if it is an export
4407 stub, we return a memory rtx for the stub return address stored
4408 at frame-24.
4409
4410 The value returned is used in two different ways:
4411
4412 1. To find a function's caller.
4413
4414 2. To change the return address for a function.
4415
4416 This function handles most instances of case 1; however, it will
4417 fail if there are two levels of stubs to execute on the return
4418 path. The only way I believe that can happen is if the return value
4419 needs a parameter relocation, which never happens for C code.
4420
4421 This function handles most instances of case 2; however, it will
4422 fail if we did not originally have stub code on the return path
4423 but will need stub code on the new return path. This can happen if
4424 the caller & callee are both in the main program, but the new
4425 return location is in a shared library. */
4426
4427 rtx
4428 return_addr_rtx (int count, rtx frameaddr)
4429 {
4430 rtx label;
4431 rtx rp;
4432 rtx saved_rp;
4433 rtx ins;
4434
4435 if (count != 0)
4436 return NULL_RTX;
4437
4438 rp = get_hard_reg_initial_val (Pmode, 2);
4439
4440 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4441 return rp;
4442
4443 saved_rp = gen_reg_rtx (Pmode);
4444 emit_move_insn (saved_rp, rp);
4445
4446 /* Get pointer to the instruction stream. We have to mask out the
4447 privilege level from the two low order bits of the return address
4448 pointer here so that ins will point to the start of the first
4449 instruction that would have been executed if we returned. */
4450 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4451 label = gen_label_rtx ();
4452
4453 /* Check the instruction stream at the normal return address for the
4454 export stub:
4455
4456 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4457 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4458 0x00011820 | stub+16: mtsp r1,sr0
4459 0xe0400002 | stub+20: be,n 0(sr0,rp)
4460
4461 If it is an export stub, then our return address is really in
4462 -24[frameaddr]. */
4463
4464 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4465 NULL_RTX, SImode, 1);
4466 emit_jump_insn (gen_bne (label));
4467
4468 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4469 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4470 emit_jump_insn (gen_bne (label));
4471
4472 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4473 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4474 emit_jump_insn (gen_bne (label));
4475
4476 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4477 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4478
4479 /* If there is no export stub then just use the value saved from
4480 the return pointer register. */
4481
4482 emit_jump_insn (gen_bne (label));
4483
4484 /* Here we know that our return address points to an export
4485 stub. We don't want to return the address of the export stub,
4486 but rather the return address of the export stub. That return
4487 address is stored at -24[frameaddr]. */
4488
4489 emit_move_insn (saved_rp,
4490 gen_rtx_MEM (Pmode,
4491 memory_address (Pmode,
4492 plus_constant (frameaddr,
4493 -24))));
4494
4495 emit_label (label);
4496 return saved_rp;
4497 }
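
/* A minimal host-side sketch (hypothetical helper, not part of the
   port) of the test built in RTL above: compare the four words at the
   masked return address against the known export stub body.  */
#if 0
static int
return_addr_is_export_stub (const unsigned int *ins)
{
  return (ins[0] == 0x4bc23fd1       /* ldw -18(sr0,sp),rp */
          && ins[1] == 0x004010a1    /* ldsid (sr0,rp),r1 */
          && ins[2] == 0x00011820    /* mtsp r1,sr0 */
          && ins[3] == 0xe0400002);  /* be,n 0(sr0,rp) */
}
#endif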
4498
4499 /* This is only valid once reload has completed because it depends on
4500 knowing exactly how much (if any) frame there is and...
4501
4502 It's only valid if there is no frame marker to de-allocate and...
4503
4504 It's only valid if %r2 hasn't been saved into the caller's frame
4505 (we're not profiling and %r2 isn't live anywhere). */
4506 int
4507 hppa_can_use_return_insn_p (void)
4508 {
4509 return (reload_completed
4510 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4511 && ! regs_ever_live[2]
4512 && ! frame_pointer_needed);
4513 }
4514
4515 void
4516 emit_bcond_fp (enum rtx_code code, rtx operand0)
4517 {
4518 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4519 gen_rtx_IF_THEN_ELSE (VOIDmode,
4520 gen_rtx_fmt_ee (code,
4521 VOIDmode,
4522 gen_rtx_REG (CCFPmode, 0),
4523 const0_rtx),
4524 gen_rtx_LABEL_REF (VOIDmode, operand0),
4525 pc_rtx)));
4526
4527 }
4528
4529 rtx
4530 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4531 {
4532 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4533 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4534 }
4535
4536 /* Adjust the cost of a scheduling dependency. Return the new cost of
4537 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4538
4539 static int
4540 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4541 {
4542 enum attr_type attr_type;
4543
4544 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4545 true dependencies as they are described with bypasses now. */
4546 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4547 return cost;
4548
4549 if (! recog_memoized (insn))
4550 return 0;
4551
4552 attr_type = get_attr_type (insn);
4553
4554 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
4555 {
4556 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4557 cycles later. */
4558
4559 if (attr_type == TYPE_FPLOAD)
4560 {
4561 rtx pat = PATTERN (insn);
4562 rtx dep_pat = PATTERN (dep_insn);
4563 if (GET_CODE (pat) == PARALLEL)
4564 {
4565 /* This happens for the fldXs,mb patterns. */
4566 pat = XVECEXP (pat, 0, 0);
4567 }
4568 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4569 /* If this happens, we have to extend this to schedule
4570 optimally. Return 0 for now. */
4571 return 0;
4572
4573 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4574 {
4575 if (! recog_memoized (dep_insn))
4576 return 0;
4577 switch (get_attr_type (dep_insn))
4578 {
4579 case TYPE_FPALU:
4580 case TYPE_FPMULSGL:
4581 case TYPE_FPMULDBL:
4582 case TYPE_FPDIVSGL:
4583 case TYPE_FPDIVDBL:
4584 case TYPE_FPSQRTSGL:
4585 case TYPE_FPSQRTDBL:
4586 /* A fpload can't be issued until one cycle before a
4587 preceding arithmetic operation has finished if
4588 the target of the fpload is any of the sources
4589 (or destination) of the arithmetic operation. */
4590 return insn_default_latency (dep_insn) - 1;
4591
4592 default:
4593 return 0;
4594 }
4595 }
4596 }
4597 else if (attr_type == TYPE_FPALU)
4598 {
4599 rtx pat = PATTERN (insn);
4600 rtx dep_pat = PATTERN (dep_insn);
4601 if (GET_CODE (pat) == PARALLEL)
4602 {
4603 /* This happens for the fldXs,mb patterns. */
4604 pat = XVECEXP (pat, 0, 0);
4605 }
4606 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4607 /* If this happens, we have to extend this to schedule
4608 optimally. Return 0 for now. */
4609 return 0;
4610
4611 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4612 {
4613 if (! recog_memoized (dep_insn))
4614 return 0;
4615 switch (get_attr_type (dep_insn))
4616 {
4617 case TYPE_FPDIVSGL:
4618 case TYPE_FPDIVDBL:
4619 case TYPE_FPSQRTSGL:
4620 case TYPE_FPSQRTDBL:
4621 /* An ALU flop can't be issued until two cycles before a
4622 preceding divide or sqrt operation has finished if
4623 the target of the ALU flop is any of the sources
4624 (or destination) of the divide or sqrt operation. */
4625 return insn_default_latency (dep_insn) - 2;
4626
4627 default:
4628 return 0;
4629 }
4630 }
4631 }
4632
4633 /* For other anti dependencies, the cost is 0. */
4634 return 0;
4635 }
4636 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4637 {
4638 /* Output dependency; DEP_INSN writes a register that INSN writes some
4639 cycles later. */
4640 if (attr_type == TYPE_FPLOAD)
4641 {
4642 rtx pat = PATTERN (insn);
4643 rtx dep_pat = PATTERN (dep_insn);
4644 if (GET_CODE (pat) == PARALLEL)
4645 {
4646 /* This happens for the fldXs,mb patterns. */
4647 pat = XVECEXP (pat, 0, 0);
4648 }
4649 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4650 /* If this happens, we have to extend this to schedule
4651 optimally. Return 0 for now. */
4652 return 0;
4653
4654 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4655 {
4656 if (! recog_memoized (dep_insn))
4657 return 0;
4658 switch (get_attr_type (dep_insn))
4659 {
4660 case TYPE_FPALU:
4661 case TYPE_FPMULSGL:
4662 case TYPE_FPMULDBL:
4663 case TYPE_FPDIVSGL:
4664 case TYPE_FPDIVDBL:
4665 case TYPE_FPSQRTSGL:
4666 case TYPE_FPSQRTDBL:
4667 /* A fpload can't be issued until one cycle before a
4668 preceding arithmetic operation has finished if
4669 the target of the fpload is the destination of the
4670 arithmetic operation.
4671
4672 Exception: For PA7100LC, PA7200 and PA7300, the cost
4673 is 3 cycles, unless they bundle together. We also
4674 pay the penalty if the second insn is a fpload. */
4675 return insn_default_latency (dep_insn) - 1;
4676
4677 default:
4678 return 0;
4679 }
4680 }
4681 }
4682 else if (attr_type == TYPE_FPALU)
4683 {
4684 rtx pat = PATTERN (insn);
4685 rtx dep_pat = PATTERN (dep_insn);
4686 if (GET_CODE (pat) == PARALLEL)
4687 {
4688 /* This happens for the fldXs,mb patterns. */
4689 pat = XVECEXP (pat, 0, 0);
4690 }
4691 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4692 /* If this happens, we have to extend this to schedule
4693 optimally. Return 0 for now. */
4694 return 0;
4695
4696 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4697 {
4698 if (! recog_memoized (dep_insn))
4699 return 0;
4700 switch (get_attr_type (dep_insn))
4701 {
4702 case TYPE_FPDIVSGL:
4703 case TYPE_FPDIVDBL:
4704 case TYPE_FPSQRTSGL:
4705 case TYPE_FPSQRTDBL:
4706 /* An ALU flop can't be issued until two cycles before a
4707 preceding divide or sqrt operation has finished if
4708 the target of the ALU flop is also the target of
4709 the divide or sqrt operation. */
4710 return insn_default_latency (dep_insn) - 2;
4711
4712 default:
4713 return 0;
4714 }
4715 }
4716 }
4717
4718 /* For other output dependencies, the cost is 0. */
4719 return 0;
4720 }
4721 else
4722 abort ();
4723 }
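
/* A worked example of the anti-dependence case above: if a preceding
   fp multiply still reads a register that an fpload is about to
   overwrite, and insn_default_latency for the multiply is 3 (an
   illustrative value), the cost returned is 3 - 1 = 2, letting the
   scheduler issue the fpload one cycle before the multiply
   completes.  */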
4724
4725 /* Adjust scheduling priorities. We use this to try and keep addil
4726 and the next use of %r1 close together. */
4727 static int
4728 pa_adjust_priority (rtx insn, int priority)
4729 {
4730 rtx set = single_set (insn);
4731 rtx src, dest;
4732 if (set)
4733 {
4734 src = SET_SRC (set);
4735 dest = SET_DEST (set);
4736 if (GET_CODE (src) == LO_SUM
4737 && symbolic_operand (XEXP (src, 1), VOIDmode)
4738 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4739 priority >>= 3;
4740
4741 else if (GET_CODE (src) == MEM
4742 && GET_CODE (XEXP (src, 0)) == LO_SUM
4743 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4744 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4745 priority >>= 1;
4746
4747 else if (GET_CODE (dest) == MEM
4748 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4749 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4750 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4751 priority >>= 3;
4752 }
4753 return priority;
4754 }
4755
4756 /* The 700 can only issue a single insn at a time.
4757 The 7XXX processors can issue two insns at a time.
4758 The 8000 can issue 4 insns at a time. */
4759 static int
4760 pa_issue_rate (void)
4761 {
4762 switch (pa_cpu)
4763 {
4764 case PROCESSOR_700: return 1;
4765 case PROCESSOR_7100: return 2;
4766 case PROCESSOR_7100LC: return 2;
4767 case PROCESSOR_7200: return 2;
4768 case PROCESSOR_7300: return 2;
4769 case PROCESSOR_8000: return 4;
4770
4771 default:
4772 abort ();
4773 }
4774 }
4775
4776
4777
4778 /* Return any length adjustment needed by INSN which already has its length
4779 computed as LENGTH. Return zero if no adjustment is necessary.
4780
4781 For the PA: function calls, millicode calls, and backwards short
4782 conditional branches with unfilled delay slots need an adjustment by +1
4783 (to account for the NOP which will be inserted into the instruction stream).
4784
4785 Also compute the length of an inline block move here as it is too
4786 complicated to express as a length attribute in pa.md. */
4787 int
4788 pa_adjust_insn_length (rtx insn, int length)
4789 {
4790 rtx pat = PATTERN (insn);
4791
4792 /* Jumps inside switch tables which have unfilled delay slots need
4793 adjustment. */
4794 if (GET_CODE (insn) == JUMP_INSN
4795 && GET_CODE (pat) == PARALLEL
4796 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4797 return 4;
4798 /* Millicode insn with an unfilled delay slot. */
4799 else if (GET_CODE (insn) == INSN
4800 && GET_CODE (pat) != SEQUENCE
4801 && GET_CODE (pat) != USE
4802 && GET_CODE (pat) != CLOBBER
4803 && get_attr_type (insn) == TYPE_MILLI)
4804 return 4;
4805 /* Block move pattern. */
4806 else if (GET_CODE (insn) == INSN
4807 && GET_CODE (pat) == PARALLEL
4808 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4809 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4810 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4811 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4812 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4813 return compute_movmem_length (insn) - 4;
4814 /* Block clear pattern. */
4815 else if (GET_CODE (insn) == INSN
4816 && GET_CODE (pat) == PARALLEL
4817 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4818 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4819 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4820 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4821 return compute_clrmem_length (insn) - 4;
4822 /* Conditional branch with an unfilled delay slot. */
4823 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4824 {
4825 /* Adjust a short backwards conditional with an unfilled delay slot. */
4826 if (GET_CODE (pat) == SET
4827 && length == 4
4828 && ! forward_branch_p (insn))
4829 return 4;
4830 else if (GET_CODE (pat) == PARALLEL
4831 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4832 && length == 4)
4833 return 4;
4834 /* Adjust dbra insn with short backwards conditional branch with
4835 unfilled delay slot -- only for the case where the counter is in
4836 a general register. */
4837 else if (GET_CODE (pat) == PARALLEL
4838 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4839 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4840 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4841 && length == 4
4842 && ! forward_branch_p (insn))
4843 return 4;
4844 else
4845 return 0;
4846 }
4847 return 0;
4848 }
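
/* Example: a short backwards conditional branch with an unfilled
   delay slot has a computed length of 4 bytes; the adjustment of 4
   returned above accounts for the nop that will occupy the slot,
   for an effective length of 8 bytes.  */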
4849
4850 /* Print operand X (an rtx) in assembler syntax to file FILE.
4851 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4852 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4853
4854 void
4855 print_operand (FILE *file, rtx x, int code)
4856 {
4857 switch (code)
4858 {
4859 case '#':
4860 /* Output a 'nop' if there's nothing for the delay slot. */
4861 if (dbr_sequence_length () == 0)
4862 fputs ("\n\tnop", file);
4863 return;
4864 case '*':
4865 /* Output a nullification completer if there's nothing for the
4866 delay slot or nullification is requested. */
4867 if (dbr_sequence_length () == 0 ||
4868 (final_sequence &&
4869 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4870 fputs (",n", file);
4871 return;
4872 case 'R':
4873 /* Print out the second register name of a register pair.
4874 I.e., R (6) => 7. */
4875 fputs (reg_names[REGNO (x) + 1], file);
4876 return;
4877 case 'r':
4878 /* A register or zero. */
4879 if (x == const0_rtx
4880 || (x == CONST0_RTX (DFmode))
4881 || (x == CONST0_RTX (SFmode)))
4882 {
4883 fputs ("%r0", file);
4884 return;
4885 }
4886 else
4887 break;
4888 case 'f':
4889 /* A register or zero (floating point). */
4890 if (x == const0_rtx
4891 || (x == CONST0_RTX (DFmode))
4892 || (x == CONST0_RTX (SFmode)))
4893 {
4894 fputs ("%fr0", file);
4895 return;
4896 }
4897 else
4898 break;
4899 case 'A':
4900 {
4901 rtx xoperands[2];
4902
4903 xoperands[0] = XEXP (XEXP (x, 0), 0);
4904 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4905 output_global_address (file, xoperands[1], 0);
4906 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4907 return;
4908 }
4909
4910 case 'C': /* Plain (C)ondition */
4911 case 'X':
4912 switch (GET_CODE (x))
4913 {
4914 case EQ:
4915 fputs ("=", file); break;
4916 case NE:
4917 fputs ("<>", file); break;
4918 case GT:
4919 fputs (">", file); break;
4920 case GE:
4921 fputs (">=", file); break;
4922 case GEU:
4923 fputs (">>=", file); break;
4924 case GTU:
4925 fputs (">>", file); break;
4926 case LT:
4927 fputs ("<", file); break;
4928 case LE:
4929 fputs ("<=", file); break;
4930 case LEU:
4931 fputs ("<<=", file); break;
4932 case LTU:
4933 fputs ("<<", file); break;
4934 default:
4935 abort ();
4936 }
4937 return;
4938 case 'N': /* Condition, (N)egated */
4939 switch (GET_CODE (x))
4940 {
4941 case EQ:
4942 fputs ("<>", file); break;
4943 case NE:
4944 fputs ("=", file); break;
4945 case GT:
4946 fputs ("<=", file); break;
4947 case GE:
4948 fputs ("<", file); break;
4949 case GEU:
4950 fputs ("<<", file); break;
4951 case GTU:
4952 fputs ("<<=", file); break;
4953 case LT:
4954 fputs (">=", file); break;
4955 case LE:
4956 fputs (">", file); break;
4957 case LEU:
4958 fputs (">>", file); break;
4959 case LTU:
4960 fputs (">>=", file); break;
4961 default:
4962 abort ();
4963 }
4964 return;
4965 /* For floating point comparisons. Note that the output
4966 predicates are the complement of the desired mode. */
4967 case 'Y':
4968 switch (GET_CODE (x))
4969 {
4970 case EQ:
4971 fputs ("!=", file); break;
4972 case NE:
4973 fputs ("=", file); break;
4974 case GT:
4975 fputs ("!>", file); break;
4976 case GE:
4977 fputs ("!>=", file); break;
4978 case LT:
4979 fputs ("!<", file); break;
4980 case LE:
4981 fputs ("!<=", file); break;
4982 case LTGT:
4983 fputs ("!<>", file); break;
4984 case UNLE:
4985 fputs (">", file); break;
4986 case UNLT:
4987 fputs (">=", file); break;
4988 case UNGE:
4989 fputs ("<", file); break;
4990 case UNGT:
4991 fputs ("<=", file); break;
4992 case UNEQ:
4993 fputs ("<>", file); break;
4994 case UNORDERED:
4995 fputs ("<=>", file); break;
4996 case ORDERED:
4997 fputs ("!<=>", file); break;
4998 default:
4999 abort ();
5000 }
5001 return;
5002 case 'S': /* Condition, operands are (S)wapped. */
5003 switch (GET_CODE (x))
5004 {
5005 case EQ:
5006 fputs ("=", file); break;
5007 case NE:
5008 fputs ("<>", file); break;
5009 case GT:
5010 fputs ("<", file); break;
5011 case GE:
5012 fputs ("<=", file); break;
5013 case GEU:
5014 fputs ("<<=", file); break;
5015 case GTU:
5016 fputs ("<<", file); break;
5017 case LT:
5018 fputs (">", file); break;
5019 case LE:
5020 fputs (">=", file); break;
5021 case LEU:
5022 fputs (">>=", file); break;
5023 case LTU:
5024 fputs (">>", file); break;
5025 default:
5026 abort ();
5027 }
5028 return;
5029 case 'B': /* Condition, (B)oth swapped and negate. */
5030 switch (GET_CODE (x))
5031 {
5032 case EQ:
5033 fputs ("<>", file); break;
5034 case NE:
5035 fputs ("=", file); break;
5036 case GT:
5037 fputs (">=", file); break;
5038 case GE:
5039 fputs (">", file); break;
5040 case GEU:
5041 fputs (">>", file); break;
5042 case GTU:
5043 fputs (">>=", file); break;
5044 case LT:
5045 fputs ("<=", file); break;
5046 case LE:
5047 fputs ("<", file); break;
5048 case LEU:
5049 fputs ("<<", file); break;
5050 case LTU:
5051 fputs ("<<=", file); break;
5052 default:
5053 abort ();
5054 }
5055 return;
5056 case 'k':
5057 if (GET_CODE (x) == CONST_INT)
5058 {
5059 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5060 return;
5061 }
5062 abort ();
5063 case 'Q':
5064 if (GET_CODE (x) == CONST_INT)
5065 {
5066 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5067 return;
5068 }
5069 abort ();
5070 case 'L':
5071 if (GET_CODE (x) == CONST_INT)
5072 {
5073 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5074 return;
5075 }
5076 abort ();
5077 case 'O':
5078 if (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0)
5079 {
5080 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5081 return;
5082 }
5083 abort ();
5084 case 'p':
5085 if (GET_CODE (x) == CONST_INT)
5086 {
5087 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5088 return;
5089 }
5090 abort ();
5091 case 'P':
5092 if (GET_CODE (x) == CONST_INT)
5093 {
5094 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5095 return;
5096 }
5097 abort ();
5098 case 'I':
5099 if (GET_CODE (x) == CONST_INT)
5100 fputs ("i", file);
5101 return;
5102 case 'M':
5103 case 'F':
5104 switch (GET_CODE (XEXP (x, 0)))
5105 {
5106 case PRE_DEC:
5107 case PRE_INC:
5108 if (ASSEMBLER_DIALECT == 0)
5109 fputs ("s,mb", file);
5110 else
5111 fputs (",mb", file);
5112 break;
5113 case POST_DEC:
5114 case POST_INC:
5115 if (ASSEMBLER_DIALECT == 0)
5116 fputs ("s,ma", file);
5117 else
5118 fputs (",ma", file);
5119 break;
5120 case PLUS:
5121 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5122 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5123 {
5124 if (ASSEMBLER_DIALECT == 0)
5125 fputs ("x", file);
5126 }
5127 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5128 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5129 {
5130 if (ASSEMBLER_DIALECT == 0)
5131 fputs ("x,s", file);
5132 else
5133 fputs (",s", file);
5134 }
5135 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5136 fputs ("s", file);
5137 break;
5138 default:
5139 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5140 fputs ("s", file);
5141 break;
5142 }
5143 return;
5144 case 'G':
5145 output_global_address (file, x, 0);
5146 return;
5147 case 'H':
5148 output_global_address (file, x, 1);
5149 return;
5150 case 0: /* Don't do anything special */
5151 break;
5152 case 'Z':
5153 {
5154 unsigned op[3];
5155 compute_zdepwi_operands (INTVAL (x), op);
5156 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5157 return;
5158 }
5159 case 'z':
5160 {
5161 unsigned op[3];
5162 compute_zdepdi_operands (INTVAL (x), op);
5163 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5164 return;
5165 }
5166 case 'c':
5167 /* We can get here from a .vtable_inherit due to our
5168 CONSTANT_ADDRESS_P rejecting perfectly good constant
5169 addresses. */
5170 break;
5171 default:
5172 abort ();
5173 }
5174 if (GET_CODE (x) == REG)
5175 {
5176 fputs (reg_names [REGNO (x)], file);
5177 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5178 {
5179 fputs ("R", file);
5180 return;
5181 }
5182 if (FP_REG_P (x)
5183 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5184 && (REGNO (x) & 1) == 0)
5185 fputs ("L", file);
5186 }
5187 else if (GET_CODE (x) == MEM)
5188 {
5189 int size = GET_MODE_SIZE (GET_MODE (x));
5190 rtx base = NULL_RTX;
5191 switch (GET_CODE (XEXP (x, 0)))
5192 {
5193 case PRE_DEC:
5194 case POST_DEC:
5195 base = XEXP (XEXP (x, 0), 0);
5196 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5197 break;
5198 case PRE_INC:
5199 case POST_INC:
5200 base = XEXP (XEXP (x, 0), 0);
5201 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5202 break;
5203 case PLUS:
5204 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5205 fprintf (file, "%s(%s)",
5206 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5207 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5208 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5209 fprintf (file, "%s(%s)",
5210 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5211 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5212 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5213 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5214 {
5215 /* Because the REG_POINTER flag can get lost during reload,
5216 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5217 index and base registers in the combined move patterns. */
5218 rtx base = XEXP (XEXP (x, 0), 1);
5219 rtx index = XEXP (XEXP (x, 0), 0);
5220
5221 fprintf (file, "%s(%s)",
5222 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5223 }
5224 else
5225 output_address (XEXP (x, 0));
5226 break;
5227 default:
5228 output_address (XEXP (x, 0));
5229 break;
5230 }
5231 }
5232 else
5233 output_addr_const (file, x);
5234 }
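
/* Example uses of the codes above in an output template (the template
   text is illustrative, not quoted from pa.md): with operand 0 an EQ
   comparison, "%C0" prints "=", "%N0" prints "<>", "%S0" prints "="
   with the operands swapped, and "%B0" prints "<>"; "%#" appends
   "\n\tnop" when the delay slot is unfilled.  */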
5235
5236 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5237
5238 void
5239 output_global_address (FILE *file, rtx x, int round_constant)
5240 {
5241
5242 /* Imagine (high (const (plus ...))). */
5243 if (GET_CODE (x) == HIGH)
5244 x = XEXP (x, 0);
5245
5246 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5247 assemble_name (file, XSTR (x, 0));
5248 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5249 {
5250 assemble_name (file, XSTR (x, 0));
5251 fputs ("-$global$", file);
5252 }
5253 else if (GET_CODE (x) == CONST)
5254 {
5255 const char *sep = "";
5256 int offset = 0; /* assembler wants -$global$ at end */
5257 rtx base = NULL_RTX;
5258
5259 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
5260 {
5261 base = XEXP (XEXP (x, 0), 0);
5262 output_addr_const (file, base);
5263 }
5264 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == CONST_INT)
5265 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5266 else abort ();
5267
5268 if (GET_CODE (XEXP (XEXP (x, 0), 1)) == SYMBOL_REF)
5269 {
5270 base = XEXP (XEXP (x, 0), 1);
5271 output_addr_const (file, base);
5272 }
5273 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5274 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5275 else abort ();
5276
5277 /* How bogus. The compiler is apparently responsible for
5278 rounding the constant if it uses an LR field selector.
5279
5280 The linker and/or assembler seem a better place since
5281 they have to do this kind of thing already.
5282
5283 If we fail to do this, HP's optimizing linker may eliminate
5284 an addil, but not update the ldw/stw/ldo instruction that
5285 uses the result of the addil. */
5286 if (round_constant)
5287 offset = ((offset + 0x1000) & ~0x1fff);
5288
5289 if (GET_CODE (XEXP (x, 0)) == PLUS)
5290 {
5291 if (offset < 0)
5292 {
5293 offset = -offset;
5294 sep = "-";
5295 }
5296 else
5297 sep = "+";
5298 }
5299 else if (GET_CODE (XEXP (x, 0)) == MINUS
5300 && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
5301 sep = "-";
5302 else abort ();
5303
5304 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5305 fputs ("-$global$", file);
5306 if (offset)
5307 fprintf (file, "%s%d", sep, offset);
5308 }
5309 else
5310 output_addr_const (file, x);
5311 }
5312
5313 /* Output boilerplate text to appear at the beginning of the file.
5314 There are several possible versions. */
5315 #define aputs(x) fputs(x, asm_out_file)
5316 static inline void
5317 pa_file_start_level (void)
5318 {
5319 if (TARGET_64BIT)
5320 aputs ("\t.LEVEL 2.0w\n");
5321 else if (TARGET_PA_20)
5322 aputs ("\t.LEVEL 2.0\n");
5323 else if (TARGET_PA_11)
5324 aputs ("\t.LEVEL 1.1\n");
5325 else
5326 aputs ("\t.LEVEL 1.0\n");
5327 }
5328
5329 static inline void
5330 pa_file_start_space (int sortspace)
5331 {
5332 aputs ("\t.SPACE $PRIVATE$");
5333 if (sortspace)
5334 aputs (",SORT=16");
5335 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5336 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5337 "\n\t.SPACE $TEXT$");
5338 if (sortspace)
5339 aputs (",SORT=8");
5340 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5341 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5342 }
5343
5344 static inline void
5345 pa_file_start_file (int want_version)
5346 {
5347 if (write_symbols != NO_DEBUG)
5348 {
5349 output_file_directive (asm_out_file, main_input_filename);
5350 if (want_version)
5351 aputs ("\t.version\t\"01.01\"\n");
5352 }
5353 }
5354
5355 static inline void
5356 pa_file_start_mcount (const char *aswhat)
5357 {
5358 if (profile_flag)
5359 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5360 }
5361
5362 static void
5363 pa_elf_file_start (void)
5364 {
5365 pa_file_start_level ();
5366 pa_file_start_mcount ("ENTRY");
5367 pa_file_start_file (0);
5368 }
5369
5370 static void
5371 pa_som_file_start (void)
5372 {
5373 pa_file_start_level ();
5374 pa_file_start_space (0);
5375 aputs ("\t.IMPORT $global$,DATA\n"
5376 "\t.IMPORT $$dyncall,MILLICODE\n");
5377 pa_file_start_mcount ("CODE");
5378 pa_file_start_file (0);
5379 }
5380
5381 static void
5382 pa_linux_file_start (void)
5383 {
5384 pa_file_start_file (1);
5385 pa_file_start_level ();
5386 pa_file_start_mcount ("CODE");
5387 }
5388
5389 static void
5390 pa_hpux64_gas_file_start (void)
5391 {
5392 pa_file_start_level ();
5393 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5394 if (profile_flag)
5395 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5396 #endif
5397 pa_file_start_file (1);
5398 }
5399
5400 static void
5401 pa_hpux64_hpas_file_start (void)
5402 {
5403 pa_file_start_level ();
5404 pa_file_start_space (1);
5405 pa_file_start_mcount ("CODE");
5406 pa_file_start_file (0);
5407 }
5408 #undef aputs
5409
5410 static struct deferred_plabel *
5411 get_plabel (const char *fname)
5412 {
5413 size_t i;
5414
5415 /* See if we have already put this function on the list of deferred
5416 plabels. This list is generally small, so a linear search is not
5417 too ugly. If it proves too slow, replace it with something faster. */
5418 for (i = 0; i < n_deferred_plabels; i++)
5419 if (strcmp (fname, deferred_plabels[i].name) == 0)
5420 break;
5421
5422 /* If the deferred plabel list is empty, or this entry was not found
5423 on the list, create a new entry on the list. */
5424 if (deferred_plabels == NULL || i == n_deferred_plabels)
5425 {
5426 const char *real_name;
5427
5428 if (deferred_plabels == 0)
5429 deferred_plabels = (struct deferred_plabel *)
5430 ggc_alloc (sizeof (struct deferred_plabel));
5431 else
5432 deferred_plabels = (struct deferred_plabel *)
5433 ggc_realloc (deferred_plabels,
5434 ((n_deferred_plabels + 1)
5435 * sizeof (struct deferred_plabel)));
5436
5437 i = n_deferred_plabels++;
5438 deferred_plabels[i].internal_label = gen_label_rtx ();
5439 deferred_plabels[i].name = ggc_strdup (fname);
5440
5441 /* Gross. We have just implicitly taken the address of this function;
5442 mark it as such. */
5443 real_name = (*targetm.strip_name_encoding) (fname);
5444 TREE_SYMBOL_REFERENCED (get_identifier (real_name)) = 1;
5445 }
5446
5447 return &deferred_plabels[i];
5448 }
5449
5450 static void
5451 output_deferred_plabels (void)
5452 {
5453 size_t i;
5454 /* If we have deferred plabels, then we need to switch into the data
5455 section and align it to a 4 byte (8 byte when TARGET_64BIT)
5456 boundary before we output the deferred plabels. */
5457 if (n_deferred_plabels)
5458 {
5459 data_section ();
5460 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5461 }
5462
5463 /* Now output the deferred plabels. */
5464 for (i = 0; i < n_deferred_plabels; i++)
5465 {
5466 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5467 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5468 assemble_integer (gen_rtx_SYMBOL_REF (Pmode, deferred_plabels[i].name),
5469 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5470 }
5471 }
5472
5473 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5474 /* Initialize optabs to point to HPUX long double emulation routines. */
5475 static void
5476 pa_hpux_init_libfuncs (void)
5477 {
5478 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5479 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5480 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5481 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5482 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5483 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5484 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5485 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5486 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5487
5488 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5489 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5490 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5491 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5492 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5493 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5494 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5495
5496 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5497 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5498 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5499 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5500
5501 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5502 ? "__U_Qfcnvfxt_quad_to_sgl"
5503 : "_U_Qfcnvfxt_quad_to_sgl");
5504 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5505 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5506 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5507
5508 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5509 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5510 }
5511 #endif
5512
5513 /* HP's millicode routines mean something special to the assembler.
5514 Keep track of which ones we have used. */
5515
5516 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5517 static void import_milli (enum millicodes);
5518 static char imported[(int) end1000];
5519 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5520 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5521 #define MILLI_START 10
5522
5523 static void
5524 import_milli (enum millicodes code)
5525 {
5526 char str[sizeof (import_string)];
5527
5528 if (!imported[(int) code])
5529 {
5530 imported[(int) code] = 1;
5531 strcpy (str, import_string);
5532 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5533 output_asm_insn (str, 0);
5534 }
5535 }
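
/* For example, import_milli (mulI) patches the four placeholder dots
   at offset MILLI_START in import_string and emits

	.IMPORT $$mulI,MILLICODE

   This fixed-width patch is safe because every name in milli_names is
   exactly four characters long.  */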
5536
5537 /* The register constraints have put the operands and return value in
5538 the proper registers. */
5539
5540 const char *
5541 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5542 {
5543 import_milli (mulI);
5544 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5545 }
5546
5547 /* Emit the rtl for doing a division by a constant. */
5548
5549 /* Do magic division millicodes exist for this value? */
5550 static const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,
5551 1, 1};
5552
5553 /* We'll use an array to keep track of the magic millicodes and
5554 whether or not we've used them already. [n][0] is signed, [n][1] is
5555 unsigned. */
5556
5557 static int div_milli[16][2];
5558
5559 int
5560 div_operand (rtx op, enum machine_mode mode)
5561 {
5562 return (mode == SImode
5563 && ((GET_CODE (op) == REG && REGNO (op) == 25)
5564 || (GET_CODE (op) == CONST_INT && INTVAL (op) > 0
5565 && INTVAL (op) < 16 && magic_milli[INTVAL (op)])));
5566 }
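
/* magic_milli is indexed by divisor value: with the table above,
   divisors 3, 5, 6, 7, 9, 10, 12, 14 and 15 have dedicated millicode
   entry points (e.g. $$divI_6), while 2, 4, 8, 11 and 13 do not;
   divisions by powers of two need no millicode since they can be done
   with shifts.  */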
5567
5568 int
5569 emit_hpdiv_const (rtx *operands, int unsignedp)
5570 {
5571 if (GET_CODE (operands[2]) == CONST_INT
5572 && INTVAL (operands[2]) > 0
5573 && INTVAL (operands[2]) < 16
5574 && magic_milli[INTVAL (operands[2])])
5575 {
5576 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5577
5578 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5579 emit
5580 (gen_rtx_PARALLEL
5581 (VOIDmode,
5582 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5583 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5584 SImode,
5585 gen_rtx_REG (SImode, 26),
5586 operands[2])),
5587 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5588 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5589 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5590 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5591 gen_rtx_CLOBBER (VOIDmode, ret))));
5592 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5593 return 1;
5594 }
5595 return 0;
5596 }
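
/* Sketch of the expansion (illustrative): for x / 10 the code above
   moves x into %r26, emits the PARALLEL describing a $$divI_10 call
   whose result is set in %r29 (clobbering %r25, %r26 and the return
   pointer), and finally copies the quotient from %r29 into
   operands[0].  */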
5597
5598 const char *
5599 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5600 {
5601 int divisor;
5602
5603 /* If the divisor is a constant, try to use one of the special
5604      opcodes.  */
5605 if (GET_CODE (operands[0]) == CONST_INT)
5606 {
5607 static char buf[100];
5608 divisor = INTVAL (operands[0]);
5609 if (!div_milli[divisor][unsignedp])
5610 {
5611 div_milli[divisor][unsignedp] = 1;
5612 if (unsignedp)
5613 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5614 else
5615 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5616 }
5617 if (unsignedp)
5618 {
5619 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5620 INTVAL (operands[0]));
5621 return output_millicode_call (insn,
5622 gen_rtx_SYMBOL_REF (SImode, buf));
5623 }
5624 else
5625 {
5626 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5627 INTVAL (operands[0]));
5628 return output_millicode_call (insn,
5629 gen_rtx_SYMBOL_REF (SImode, buf));
5630 }
5631 }
5632 /* Divisor isn't a special constant. */
5633 else
5634 {
5635 if (unsignedp)
5636 {
5637 import_milli (divU);
5638 return output_millicode_call (insn,
5639 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5640 }
5641 else
5642 {
5643 import_milli (divI);
5644 return output_millicode_call (insn,
5645 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5646 }
5647 }
5648 }
5649
5650 /* Output a $$rem millicode to do mod. */
5651
5652 const char *
5653 output_mod_insn (int unsignedp, rtx insn)
5654 {
5655 if (unsignedp)
5656 {
5657 import_milli (remU);
5658 return output_millicode_call (insn,
5659 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5660 }
5661 else
5662 {
5663 import_milli (remI);
5664 return output_millicode_call (insn,
5665 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5666 }
5667 }
5668
5669 void
5670 output_arg_descriptor (rtx call_insn)
5671 {
5672 const char *arg_regs[4];
5673 enum machine_mode arg_mode;
5674 rtx link;
5675 int i, output_flag = 0;
5676 int regno;
5677
5678 /* We neither need nor want argument location descriptors for the
5679      64-bit runtime environment or the ELF32 environment.  */
5680 if (TARGET_64BIT || TARGET_ELF32)
5681 return;
5682
5683 for (i = 0; i < 4; i++)
5684 arg_regs[i] = 0;
5685
5686 /* Specify explicitly that no argument relocations should take place
5687 if using the portable runtime calling conventions. */
5688 if (TARGET_PORTABLE_RUNTIME)
5689 {
5690 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5691 asm_out_file);
5692 return;
5693 }
5694
5695 if (GET_CODE (call_insn) != CALL_INSN)
5696 abort ();
5697 for (link = CALL_INSN_FUNCTION_USAGE (call_insn); link; link = XEXP (link, 1))
5698 {
5699 rtx use = XEXP (link, 0);
5700
5701 if (! (GET_CODE (use) == USE
5702 && GET_CODE (XEXP (use, 0)) == REG
5703 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5704 continue;
5705
5706 arg_mode = GET_MODE (XEXP (use, 0));
5707 regno = REGNO (XEXP (use, 0));
5708 if (regno >= 23 && regno <= 26)
5709 {
5710 arg_regs[26 - regno] = "GR";
5711 if (arg_mode == DImode)
5712 arg_regs[25 - regno] = "GR";
5713 }
5714 else if (regno >= 32 && regno <= 39)
5715 {
5716 if (arg_mode == SFmode)
5717 arg_regs[(regno - 32) / 2] = "FR";
5718 else
5719 {
5720 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5721 arg_regs[(regno - 34) / 2] = "FR";
5722 arg_regs[(regno - 34) / 2 + 1] = "FU";
5723 #else
5724 arg_regs[(regno - 34) / 2] = "FU";
5725 arg_regs[(regno - 34) / 2 + 1] = "FR";
5726 #endif
5727 }
5728 }
5729 }
5730 fputs ("\t.CALL ", asm_out_file);
5731 for (i = 0; i < 4; i++)
5732 {
5733 if (arg_regs[i])
5734 {
5735 if (output_flag++)
5736 fputc (',', asm_out_file);
5737 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5738 }
5739 }
5740 fputc ('\n', asm_out_file);
5741 }
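
/* Example (illustrative): a 32-bit SOM call passing two ints uses
   %r26 and %r25, so the loop above sets arg_regs[0] and arg_regs[1]
   to "GR" and the emitted descriptor is

	.CALL ARGW0=GR,ARGW1=GR

   A double in the FP argument registers would instead contribute an
   FR/FU pair, ordered per HP_FP_ARG_DESCRIPTOR_REVERSED.  */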
5742 \f
5743 /* Return the class of any secondary reload register that is needed to
5744 move IN into a register in class CLASS using mode MODE.
5745
5746    Profiling has shown that this routine and its descendants account for
5747 a significant amount of compile time (~7%). So it has been
5748 optimized to reduce redundant computations and eliminate useless
5749 function calls.
5750
5751    It might be worthwhile to try to make this a leaf function too.  */
5752
5753 enum reg_class
5754 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5755 {
5756 int regno, is_symbolic;
5757
5758 /* Trying to load a constant into a FP register during PIC code
5759 generation will require %r1 as a scratch register. */
5760 if (flag_pic
5761 && GET_MODE_CLASS (mode) == MODE_INT
5762 && FP_REG_CLASS_P (class)
5763 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5764 return R1_REGS;
5765
5766 /* Profiling showed the PA port spends about 1.3% of its compilation
5767 time in true_regnum from calls inside secondary_reload_class. */
5768
5769 if (GET_CODE (in) == REG)
5770 {
5771 regno = REGNO (in);
5772 if (regno >= FIRST_PSEUDO_REGISTER)
5773 regno = true_regnum (in);
5774 }
5775 else if (GET_CODE (in) == SUBREG)
5776 regno = true_regnum (in);
5777 else
5778 regno = -1;
5779
5780   /* If we have something like (mem (mem (...))), we can safely assume the
5781 inner MEM will end up in a general register after reloading, so there's
5782 no need for a secondary reload. */
5783 if (GET_CODE (in) == MEM
5784 && GET_CODE (XEXP (in, 0)) == MEM)
5785 return NO_REGS;
5786
5787 /* Handle out of range displacement for integer mode loads/stores of
5788 FP registers. */
5789 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5790 && GET_MODE_CLASS (mode) == MODE_INT
5791 && FP_REG_CLASS_P (class))
5792 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5793 return GENERAL_REGS;
5794
5795 /* A SAR<->FP register copy requires a secondary register (GPR) as
5796 well as secondary memory. */
5797 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5798 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5799 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5800 return GENERAL_REGS;
5801
5802 if (GET_CODE (in) == HIGH)
5803 in = XEXP (in, 0);
5804
5805   /* Profiling has shown that GCC spends about 2.6% of its compilation
5806 time in symbolic_operand from calls inside secondary_reload_class.
5807
5808 We use an inline copy and only compute its return value once to avoid
5809 useless work. */
5810 switch (GET_CODE (in))
5811 {
5812 rtx tmp;
5813
5814 case SYMBOL_REF:
5815 case LABEL_REF:
5816 is_symbolic = 1;
5817 break;
5818 case CONST:
5819 tmp = XEXP (in, 0);
5820 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5821 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5822 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5823 break;
5824
5825 default:
5826 is_symbolic = 0;
5827 break;
5828 }
5829
5830 if (!flag_pic
5831 && is_symbolic
5832 && read_only_operand (in, VOIDmode))
5833 return NO_REGS;
5834
5835 if (class != R1_REGS && is_symbolic)
5836 return R1_REGS;
5837
5838 return NO_REGS;
5839 }
5840
5841 enum direction
5842 function_arg_padding (enum machine_mode mode, tree type)
5843 {
5844 if (mode == BLKmode
5845 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5846 {
5847 /* Return none if justification is not required. */
5848 if (type
5849 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5850 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5851 return none;
5852
5853 /* The directions set here are ignored when a BLKmode argument larger
5854 than a word is placed in a register. Different code is used for
5855 the stack and registers. This makes it difficult to have a
5856 consistent data representation for both the stack and registers.
5857 For both runtimes, the justification and padding for arguments on
5858 the stack and in registers should be identical. */
5859 if (TARGET_64BIT)
5860 /* The 64-bit runtime specifies left justification for aggregates. */
5861 return upward;
5862 else
5863 /* The 32-bit runtime architecture specifies right justification.
5864 When the argument is passed on the stack, the argument is padded
5865 with garbage on the left. The HP compiler pads with zeros. */
5866 return downward;
5867 }
5868
5869 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5870 return downward;
5871 else
5872 return none;
5873 }
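
/* Worked example (illustrative): a 3-byte BLKmode struct occupies 24
   bits, which is not a multiple of PARM_BOUNDARY, so the function
   above returns downward (right justified) for the 32-bit runtime and
   upward (left justified) for the 64-bit runtime.  */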
5874
5875 \f
5876 /* Do what is necessary for `va_start'. We look at the current function
5877 to determine if stdargs or varargs is used and fill in an initial
5878 va_list. A pointer to this constructor is returned. */
5879
5880 static rtx
5881 hppa_builtin_saveregs (void)
5882 {
5883 rtx offset, dest;
5884 tree fntype = TREE_TYPE (current_function_decl);
5885 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5886 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5887 != void_type_node)))
5888 ? UNITS_PER_WORD : 0);
5889
5890 if (argadj)
5891 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5892 else
5893 offset = current_function_arg_offset_rtx;
5894
5895 if (TARGET_64BIT)
5896 {
5897 int i, off;
5898
5899 /* Adjust for varargs/stdarg differences. */
5900 if (argadj)
5901 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5902 else
5903 offset = current_function_arg_offset_rtx;
5904
5905 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5906 from the incoming arg pointer and growing to larger addresses. */
5907 for (i = 26, off = -64; i >= 19; i--, off += 8)
5908 emit_move_insn (gen_rtx_MEM (word_mode,
5909 plus_constant (arg_pointer_rtx, off)),
5910 gen_rtx_REG (word_mode, i));
5911
5912 /* The incoming args pointer points just beyond the flushback area;
5913 normally this is not a serious concern. However, when we are doing
5914 varargs/stdargs we want to make the arg pointer point to the start
5915 of the incoming argument area. */
5916 emit_move_insn (virtual_incoming_args_rtx,
5917 plus_constant (arg_pointer_rtx, -64));
5918
5919 /* Now return a pointer to the first anonymous argument. */
5920 return copy_to_reg (expand_binop (Pmode, add_optab,
5921 virtual_incoming_args_rtx,
5922 offset, 0, 0, OPTAB_LIB_WIDEN));
5923 }
5924
5925 /* Store general registers on the stack. */
5926 dest = gen_rtx_MEM (BLKmode,
5927 plus_constant (current_function_internal_arg_pointer,
5928 -16));
5929 set_mem_alias_set (dest, get_varargs_alias_set ());
5930 set_mem_align (dest, BITS_PER_WORD);
5931 move_block_from_reg (23, dest, 4);
5932
5933 /* move_block_from_reg will emit code to store the argument registers
5934 individually as scalar stores.
5935
5936 However, other insns may later load from the same addresses for
5937 a structure load (passing a struct to a varargs routine).
5938
5939 The alias code assumes that such aliasing can never happen, so we
5940 have to keep memory referencing insns from moving up beyond the
5941 last argument register store. So we emit a blockage insn here. */
5942 emit_insn (gen_blockage ());
5943
5944 return copy_to_reg (expand_binop (Pmode, add_optab,
5945 current_function_internal_arg_pointer,
5946 offset, 0, 0, OPTAB_LIB_WIDEN));
5947 }
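
/* Illustration of the 32-bit path above: move_block_from_reg spills
   the argument registers %r23..%r26 into the 16 bytes just below the
   internal arg pointer (offsets -16, -12, -8 and -4), making the
   register arguments contiguous in memory with any arguments already
   passed on the stack.  */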
5948
5949 void
5950 hppa_va_start (tree valist, rtx nextarg)
5951 {
5952 nextarg = expand_builtin_saveregs ();
5953 std_expand_builtin_va_start (valist, nextarg);
5954 }
5955
5956 static tree
5957 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5958 {
5959 bool indirect;
5960
5961 indirect = FUNCTION_ARG_PASS_BY_REFERENCE (dummy, TYPE_MODE (type), type, 0);
5962
5963 if (TARGET_64BIT)
5964 {
5965 /* Args grow upward. We can use the generic routines. */
5966
5967 if (indirect)
5968 return ind_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5969 else
5970 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5971 }
5972 else /* !TARGET_64BIT */
5973 {
5974 tree ptr = build_pointer_type (type);
5975 tree valist_type;
5976 tree t, u;
5977 unsigned int size, ofs;
5978
5979 if (indirect)
5980 {
5981 type = ptr;
5982 ptr = build_pointer_type (type);
5983 }
5984 size = int_size_in_bytes (type);
5985 valist_type = TREE_TYPE (valist);
5986
5987 /* Args grow down. Not handled by generic routines. */
5988
5989 u = fold_convert (valist_type, size_in_bytes (type));
5990 t = build (MINUS_EXPR, valist_type, valist, u);
5991
5992 /* Copied from va-pa.h, but we probably don't need to align to
5993 word size, since we generate and preserve that invariant. */
5994 u = build_int_2 ((size > 4 ? -8 : -4), -1);
5995 u = fold_convert (valist_type, u);
5996 t = build (BIT_AND_EXPR, valist_type, t, u);
5997
5998 t = build (MODIFY_EXPR, valist_type, valist, t);
5999
6000 ofs = (8 - size) % 4;
6001 if (ofs != 0)
6002 {
6003 u = fold_convert (valist_type, size_int (ofs));
6004 t = build (PLUS_EXPR, valist_type, t, u);
6005 }
6006
6007 t = fold_convert (ptr, t);
6008 t = build_fold_indirect_ref (t);
6009
6010 if (indirect)
6011 t = build_fold_indirect_ref (t);
6012
6013 return t;
6014 }
6015 }
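
/* Worked example for the !TARGET_64BIT path above (illustrative):
   va_arg on a char first decrements valist by 1, masks with -4 (since
   size <= 4) to word-align it, then adds ofs = (8 - 1) % 4 = 3, so the
   byte is read from the end of its word slot -- matching the 32-bit
   runtime's right justification of small arguments.  */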
6016
6017 /* This routine handles all the normal conditional branch sequences we
6018 might need to generate. It handles compare immediate vs compare
6019 register, nullification of delay slots, varying length branches,
6020 negated branches, and all combinations of the above. It returns the
6021    output template appropriate for emitting the branch described by the
6022    given parameters.  */
6023
6024 const char *
6025 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
6026 {
6027 static char buf[100];
6028 int useskip = 0;
6029 rtx xoperands[5];
6030
6031   /* A conditional branch to the following instruction (e.g., the delay slot)
6032 is asking for a disaster. This can happen when not optimizing and
6033 when jump optimization fails.
6034
6035 While it is usually safe to emit nothing, this can fail if the
6036 preceding instruction is a nullified branch with an empty delay
6037 slot and the same branch target as this branch. We could check
6038 for this but jump optimization should eliminate nop jumps. It
6039 is always safe to emit a nop. */
6040 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6041 return "nop";
6042
6043 /* The doubleword form of the cmpib instruction doesn't have the LEU
6044 and GTU conditions while the cmpb instruction does. Since we accept
6045 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6046 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6047 operands[2] = gen_rtx_REG (DImode, 0);
6048
6049 /* If this is a long branch with its delay slot unfilled, set `nullify'
6050 as it can nullify the delay slot and save a nop. */
6051 if (length == 8 && dbr_sequence_length () == 0)
6052 nullify = 1;
6053
6054 /* If this is a short forward conditional branch which did not get
6055 its delay slot filled, the delay slot can still be nullified. */
6056 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6057 nullify = forward_branch_p (insn);
6058
6059 /* A forward branch over a single nullified insn can be done with a
6060 comclr instruction. This avoids a single cycle penalty due to
6061 mis-predicted branch if we fall through (branch not taken). */
6062 if (length == 4
6063 && next_real_insn (insn) != 0
6064 && get_attr_length (next_real_insn (insn)) == 4
6065 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6066 && nullify)
6067 useskip = 1;
6068
6069 switch (length)
6070 {
6071 /* All short conditional branches except backwards with an unfilled
6072 delay slot. */
6073 case 4:
6074 if (useskip)
6075 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6076 else
6077 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6078 if (GET_MODE (operands[1]) == DImode)
6079 strcat (buf, "*");
6080 if (negated)
6081 strcat (buf, "%B3");
6082 else
6083 strcat (buf, "%S3");
6084 if (useskip)
6085 strcat (buf, " %2,%r1,%%r0");
6086 else if (nullify)
6087 strcat (buf, ",n %2,%r1,%0");
6088 else
6089 strcat (buf, " %2,%r1,%0");
6090 break;
6091
6092 /* All long conditionals. Note a short backward branch with an
6093 unfilled delay slot is treated just like a long backward branch
6094 with an unfilled delay slot. */
6095 case 8:
6096 /* Handle weird backwards branch with a filled delay slot
6097 	 which is nullified.  */
6098 if (dbr_sequence_length () != 0
6099 && ! forward_branch_p (insn)
6100 && nullify)
6101 {
6102 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6103 if (GET_MODE (operands[1]) == DImode)
6104 strcat (buf, "*");
6105 if (negated)
6106 strcat (buf, "%S3");
6107 else
6108 strcat (buf, "%B3");
6109 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6110 }
6111 /* Handle short backwards branch with an unfilled delay slot.
6112 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6113 taken and untaken branches. */
6114 else if (dbr_sequence_length () == 0
6115 && ! forward_branch_p (insn)
6116 && INSN_ADDRESSES_SET_P ()
6117 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6118 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6119 {
6120 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6121 if (GET_MODE (operands[1]) == DImode)
6122 strcat (buf, "*");
6123 if (negated)
6124 strcat (buf, "%B3 %2,%r1,%0%#");
6125 else
6126 strcat (buf, "%S3 %2,%r1,%0%#");
6127 }
6128 else
6129 {
6130 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6131 if (GET_MODE (operands[1]) == DImode)
6132 strcat (buf, "*");
6133 if (negated)
6134 strcat (buf, "%S3");
6135 else
6136 strcat (buf, "%B3");
6137 if (nullify)
6138 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6139 else
6140 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6141 }
6142 break;
6143
6144 case 20:
6145 case 28:
6146 xoperands[0] = operands[0];
6147 xoperands[1] = operands[1];
6148 xoperands[2] = operands[2];
6149 xoperands[3] = operands[3];
6150
6151 /* The reversed conditional branch must branch over one additional
6152 instruction if the delay slot is filled. If the delay slot
6153 	 is empty, the instruction after the reversed conditional branch
6154 must be nullified. */
6155 nullify = dbr_sequence_length () == 0;
6156 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6157
6158 /* Create a reversed conditional branch which branches around
6159 the following insns. */
6160 if (GET_MODE (operands[1]) != DImode)
6161 {
6162 if (nullify)
6163 {
6164 if (negated)
6165 strcpy (buf,
6166 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6167 else
6168 strcpy (buf,
6169 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6170 }
6171 else
6172 {
6173 if (negated)
6174 strcpy (buf,
6175 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6176 else
6177 strcpy (buf,
6178 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6179 }
6180 }
6181 else
6182 {
6183 if (nullify)
6184 {
6185 if (negated)
6186 strcpy (buf,
6187 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6188 else
6189 strcpy (buf,
6190 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6191 }
6192 else
6193 {
6194 if (negated)
6195 strcpy (buf,
6196 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6197 else
6198 strcpy (buf,
6199 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6200 }
6201 }
6202
6203 output_asm_insn (buf, xoperands);
6204 return output_lbranch (operands[0], insn);
6205
6206 default:
6207 abort ();
6208 }
6209 return buf;
6210 }
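
/* Example of the template construction above (illustrative): in the
   length == 4 case with nullify set and negated clear, buf ends up as

	{com%I2b,|cmp%I2b,}%S3,n %2,%r1,%0

   which operand substitution turns into something like
   "cmpb,=,n %r5,%r4,L$0012"; the {x|y} construct selects between the
   PA 1.x and PA 2.0 mnemonics.  */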
6211
6212 /* This routine handles long unconditional branches that exceed the
6213 maximum range of a simple branch instruction. */
6214
6215 const char *
6216 output_lbranch (rtx dest, rtx insn)
6217 {
6218 rtx xoperands[2];
6219
6220 xoperands[0] = dest;
6221
6222 /* First, free up the delay slot. */
6223 if (dbr_sequence_length () != 0)
6224 {
6225 /* We can't handle a jump in the delay slot. */
6226 if (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN)
6227 abort ();
6228
6229 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6230 optimize, 0, 0, NULL);
6231
6232 /* Now delete the delay insn. */
6233 PUT_CODE (NEXT_INSN (insn), NOTE);
6234 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6235 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6236 }
6237
6238 /* Output an insn to save %r1. The runtime documentation doesn't
6239      specify whether the "Clean Up" slot in the caller's frame can
6240 be clobbered by the callee. It isn't copied by HP's builtin
6241 alloca, so this suggests that it can be clobbered if necessary.
6242 The "Static Link" location is copied by HP builtin alloca, so
6243 we avoid using it. Using the cleanup slot might be a problem
6244 if we have to interoperate with languages that pass cleanup
6245 information. However, it should be possible to handle these
6246 situations with GCC's asm feature.
6247
6248 The "Current RP" slot is reserved for the called procedure, so
6249 we try to use it when we don't have a frame of our own. It's
6250 rather unlikely that we won't have a frame when we need to emit
6251 a very long branch.
6252
6253      Really the way to go long term is a register scavenger; go to
6254 the target of the jump and find a register which we can use
6255 as a scratch to hold the value in %r1. Then, we wouldn't have
6256 to free up the delay slot or clobber a slot that may be needed
6257 for other purposes. */
6258 if (TARGET_64BIT)
6259 {
6260 if (actual_fsize == 0 && !regs_ever_live[2])
6261 /* Use the return pointer slot in the frame marker. */
6262 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6263 else
6264 /* Use the slot at -40 in the frame marker since HP builtin
6265 alloca doesn't copy it. */
6266 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6267 }
6268 else
6269 {
6270 if (actual_fsize == 0 && !regs_ever_live[2])
6271 /* Use the return pointer slot in the frame marker. */
6272 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6273 else
6274 /* Use the "Clean Up" slot in the frame marker. In GCC,
6275 the only other use of this location is for copying a
6276 floating point double argument from a floating-point
6277 register to two general registers. The copy is done
6278 as an "atomic" operation when outputting a call, so it
6279 won't interfere with our using the location here. */
6280 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6281 }
6282
6283 if (TARGET_PORTABLE_RUNTIME)
6284 {
6285 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6286 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6287 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6288 }
6289 else if (flag_pic)
6290 {
6291 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6292 if (TARGET_SOM || !TARGET_GAS)
6293 {
6294 xoperands[1] = gen_label_rtx ();
6295 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6296 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6297 CODE_LABEL_NUMBER (xoperands[1]));
6298 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6299 }
6300 else
6301 {
6302 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6303 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6304 }
6305 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6306 }
6307 else
6308 /* Now output a very long branch to the original target. */
6309 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6310
6311 /* Now restore the value of %r1 in the delay slot. */
6312 if (TARGET_64BIT)
6313 {
6314 if (actual_fsize == 0 && !regs_ever_live[2])
6315 return "ldd -16(%%r30),%%r1";
6316 else
6317 return "ldd -40(%%r30),%%r1";
6318 }
6319 else
6320 {
6321 if (actual_fsize == 0 && !regs_ever_live[2])
6322 return "ldw -20(%%r30),%%r1";
6323 else
6324 return "ldw -12(%%r30),%%r1";
6325 }
6326 }
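
/* Example (illustrative): for a non-PIC 32-bit function with a frame,
   the sequence emitted above is approximately

	stw %r1,-12(%r30)	; free up %r1 via the Clean Up slot
	ldil L'target,%r1
	be R'target(%sr4,%r1)	; very long branch
	ldw -12(%r30),%r1	; delay slot: restore %r1

   where the final load is the string returned for the delay slot and
   "target" stands for the actual branch destination.  */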
6327
6328 /* This routine handles all the branch-on-bit conditional branch sequences we
6329 might need to generate. It handles nullification of delay slots,
6330 varying length branches, negated branches and all combinations of the
6331    above.  It returns the appropriate output template to emit the branch.  */
6332
6333 const char *
6334 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6335 int negated, rtx insn, int which)
6336 {
6337 static char buf[100];
6338 int useskip = 0;
6339
6340   /* A conditional branch to the following instruction (e.g., the delay slot) is
6341 asking for a disaster. I do not think this can happen as this pattern
6342 is only used when optimizing; jump optimization should eliminate the
6343 jump. But be prepared just in case. */
6344
6345 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6346 return "nop";
6347
6348 /* If this is a long branch with its delay slot unfilled, set `nullify'
6349 as it can nullify the delay slot and save a nop. */
6350 if (length == 8 && dbr_sequence_length () == 0)
6351 nullify = 1;
6352
6353 /* If this is a short forward conditional branch which did not get
6354 its delay slot filled, the delay slot can still be nullified. */
6355 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6356 nullify = forward_branch_p (insn);
6357
6358   /* A forward branch over a single nullified insn can be done with an
6359 extrs instruction. This avoids a single cycle penalty due to
6360 mis-predicted branch if we fall through (branch not taken). */
6361
6362 if (length == 4
6363 && next_real_insn (insn) != 0
6364 && get_attr_length (next_real_insn (insn)) == 4
6365 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6366 && nullify)
6367 useskip = 1;
6368
6369 switch (length)
6370 {
6371
6372 /* All short conditional branches except backwards with an unfilled
6373 delay slot. */
6374 case 4:
6375 if (useskip)
6376 strcpy (buf, "{extrs,|extrw,s,}");
6377 else
6378 strcpy (buf, "bb,");
6379 if (useskip && GET_MODE (operands[0]) == DImode)
6380 strcpy (buf, "extrd,s,*");
6381 else if (GET_MODE (operands[0]) == DImode)
6382 strcpy (buf, "bb,*");
6383 if ((which == 0 && negated)
6384 || (which == 1 && ! negated))
6385 strcat (buf, ">=");
6386 else
6387 strcat (buf, "<");
6388 if (useskip)
6389 strcat (buf, " %0,%1,1,%%r0");
6390 else if (nullify && negated)
6391 strcat (buf, ",n %0,%1,%3");
6392 else if (nullify && ! negated)
6393 strcat (buf, ",n %0,%1,%2");
6394 else if (! nullify && negated)
6395 strcat (buf, "%0,%1,%3");
6396 else if (! nullify && ! negated)
6397 strcat (buf, " %0,%1,%2");
6398 break;
6399
6400 /* All long conditionals. Note a short backward branch with an
6401 unfilled delay slot is treated just like a long backward branch
6402 with an unfilled delay slot. */
6403 case 8:
6404 /* Handle weird backwards branch with a filled delay slot
6405 	 which is nullified.  */
6406 if (dbr_sequence_length () != 0
6407 && ! forward_branch_p (insn)
6408 && nullify)
6409 {
6410 strcpy (buf, "bb,");
6411 if (GET_MODE (operands[0]) == DImode)
6412 strcat (buf, "*");
6413 if ((which == 0 && negated)
6414 || (which == 1 && ! negated))
6415 strcat (buf, "<");
6416 else
6417 strcat (buf, ">=");
6418 if (negated)
6419 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6420 else
6421 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6422 }
6423 /* Handle short backwards branch with an unfilled delay slot.
6424 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6425 taken and untaken branches. */
6426 else if (dbr_sequence_length () == 0
6427 && ! forward_branch_p (insn)
6428 && INSN_ADDRESSES_SET_P ()
6429 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6430 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6431 {
6432 strcpy (buf, "bb,");
6433 if (GET_MODE (operands[0]) == DImode)
6434 strcat (buf, "*");
6435 if ((which == 0 && negated)
6436 || (which == 1 && ! negated))
6437 strcat (buf, ">=");
6438 else
6439 strcat (buf, "<");
6440 if (negated)
6441 strcat (buf, " %0,%1,%3%#");
6442 else
6443 strcat (buf, " %0,%1,%2%#");
6444 }
6445 else
6446 {
6447 strcpy (buf, "{extrs,|extrw,s,}");
6448 if (GET_MODE (operands[0]) == DImode)
6449 strcpy (buf, "extrd,s,*");
6450 if ((which == 0 && negated)
6451 || (which == 1 && ! negated))
6452 strcat (buf, "<");
6453 else
6454 strcat (buf, ">=");
6455 if (nullify && negated)
6456 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6457 else if (nullify && ! negated)
6458 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6459 else if (negated)
6460 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6461 else
6462 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6463 }
6464 break;
6465
6466 default:
6467 abort ();
6468 }
6469 return buf;
6470 }
6471
6472 /* This routine handles all the branch-on-variable-bit conditional branch
6473 sequences we might need to generate. It handles nullification of delay
6474 slots, varying length branches, negated branches and all combinations
6475 of the above. it returns the appropriate output template to emit the
6476 branch. */
6477
6478 const char *
6479 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6480 int negated, rtx insn, int which)
6481 {
6482 static char buf[100];
6483 int useskip = 0;
6484
6485   /* A conditional branch to the following instruction (e.g., the delay slot) is
6486 asking for a disaster. I do not think this can happen as this pattern
6487 is only used when optimizing; jump optimization should eliminate the
6488 jump. But be prepared just in case. */
6489
6490 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6491 return "nop";
6492
6493 /* If this is a long branch with its delay slot unfilled, set `nullify'
6494 as it can nullify the delay slot and save a nop. */
6495 if (length == 8 && dbr_sequence_length () == 0)
6496 nullify = 1;
6497
6498 /* If this is a short forward conditional branch which did not get
6499 its delay slot filled, the delay slot can still be nullified. */
6500 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6501 nullify = forward_branch_p (insn);
6502
6503   /* A forward branch over a single nullified insn can be done with an
6504 extrs instruction. This avoids a single cycle penalty due to
6505 mis-predicted branch if we fall through (branch not taken). */
6506
6507 if (length == 4
6508 && next_real_insn (insn) != 0
6509 && get_attr_length (next_real_insn (insn)) == 4
6510 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6511 && nullify)
6512 useskip = 1;
6513
6514 switch (length)
6515 {
6516
6517 /* All short conditional branches except backwards with an unfilled
6518 delay slot. */
6519 case 4:
6520 if (useskip)
6521 strcpy (buf, "{vextrs,|extrw,s,}");
6522 else
6523 strcpy (buf, "{bvb,|bb,}");
6524 if (useskip && GET_MODE (operands[0]) == DImode)
6525 strcpy (buf, "extrd,s,*");
6526 else if (GET_MODE (operands[0]) == DImode)
6527 strcpy (buf, "bb,*");
6528 if ((which == 0 && negated)
6529 || (which == 1 && ! negated))
6530 strcat (buf, ">=");
6531 else
6532 strcat (buf, "<");
6533 if (useskip)
6534 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6535 else if (nullify && negated)
6536 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6537 else if (nullify && ! negated)
6538 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6539 else if (! nullify && negated)
6540 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6541 else if (! nullify && ! negated)
6542 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6543 break;
6544
6545 /* All long conditionals. Note a short backward branch with an
6546 unfilled delay slot is treated just like a long backward branch
6547 with an unfilled delay slot. */
6548 case 8:
6549 /* Handle weird backwards branch with a filled delay slot
6550 	 which is nullified.  */
6551 if (dbr_sequence_length () != 0
6552 && ! forward_branch_p (insn)
6553 && nullify)
6554 {
6555 strcpy (buf, "{bvb,|bb,}");
6556 if (GET_MODE (operands[0]) == DImode)
6557 strcat (buf, "*");
6558 if ((which == 0 && negated)
6559 || (which == 1 && ! negated))
6560 strcat (buf, "<");
6561 else
6562 strcat (buf, ">=");
6563 if (negated)
6564 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6565 else
6566 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6567 }
6568 /* Handle short backwards branch with an unfilled delay slot.
6569 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6570 taken and untaken branches. */
6571 else if (dbr_sequence_length () == 0
6572 && ! forward_branch_p (insn)
6573 && INSN_ADDRESSES_SET_P ()
6574 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6575 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6576 {
6577 strcpy (buf, "{bvb,|bb,}");
6578 if (GET_MODE (operands[0]) == DImode)
6579 strcat (buf, "*");
6580 if ((which == 0 && negated)
6581 || (which == 1 && ! negated))
6582 strcat (buf, ">=");
6583 else
6584 strcat (buf, "<");
6585 if (negated)
6586 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6587 else
6588 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6589 }
6590 else
6591 {
6592 strcpy (buf, "{vextrs,|extrw,s,}");
6593 if (GET_MODE (operands[0]) == DImode)
6594 strcpy (buf, "extrd,s,*");
6595 if ((which == 0 && negated)
6596 || (which == 1 && ! negated))
6597 strcat (buf, "<");
6598 else
6599 strcat (buf, ">=");
6600 if (nullify && negated)
6601 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6602 else if (nullify && ! negated)
6603 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6604 else if (negated)
6605 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6606 else
6607 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6608 }
6609 break;
6610
6611 default:
6612 abort ();
6613 }
6614 return buf;
6615 }
6616
6617 /* Return the output template for emitting a dbra type insn.
6618
6619 Note it may perform some output operations on its own before
6620 returning the final output string. */
6621 const char *
6622 output_dbra (rtx *operands, rtx insn, int which_alternative)
6623 {
6624
6625 /* A conditional branch to the following instruction (eg the delay slot) is
6626 asking for a disaster. Be prepared! */
6627
6628 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6629 {
6630 if (which_alternative == 0)
6631 return "ldo %1(%0),%0";
6632 else if (which_alternative == 1)
6633 {
6634 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6635 output_asm_insn ("ldw -16(%%r30),%4", operands);
6636 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6637 return "{fldws|fldw} -16(%%r30),%0";
6638 }
6639 else
6640 {
6641 output_asm_insn ("ldw %0,%4", operands);
6642 return "ldo %1(%4),%4\n\tstw %4,%0";
6643 }
6644 }
6645
6646 if (which_alternative == 0)
6647 {
6648 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6649 int length = get_attr_length (insn);
6650
6651 /* If this is a long branch with its delay slot unfilled, set `nullify'
6652 as it can nullify the delay slot and save a nop. */
6653 if (length == 8 && dbr_sequence_length () == 0)
6654 nullify = 1;
6655
6656 /* If this is a short forward conditional branch which did not get
6657 its delay slot filled, the delay slot can still be nullified. */
6658 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6659 nullify = forward_branch_p (insn);
6660
6661 /* Handle short versions first. */
6662 if (length == 4 && nullify)
6663 return "addib,%C2,n %1,%0,%3";
6664 else if (length == 4 && ! nullify)
6665 return "addib,%C2 %1,%0,%3";
6666 else if (length == 8)
6667 {
6668 	  /* Handle weird backwards branch with a filled delay slot
6669 which is nullified. */
6670 if (dbr_sequence_length () != 0
6671 && ! forward_branch_p (insn)
6672 && nullify)
6673 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6674 /* Handle short backwards branch with an unfilled delay slot.
6675 Using a addb;nop rather than addi;bl saves 1 cycle for both
6676 taken and untaken branches. */
6677 else if (dbr_sequence_length () == 0
6678 && ! forward_branch_p (insn)
6679 && INSN_ADDRESSES_SET_P ()
6680 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6681 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6682 return "addib,%C2 %1,%0,%3%#";
6683
6684 /* Handle normal cases. */
6685 if (nullify)
6686 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6687 else
6688 return "addi,%N2 %1,%0,%0\n\tb %3";
6689 }
6690 else
6691 abort ();
6692 }
6693 /* Deal with gross reload from FP register case. */
6694 else if (which_alternative == 1)
6695 {
6696 /* Move loop counter from FP register to MEM then into a GR,
6697 increment the GR, store the GR into MEM, and finally reload
6698 the FP register from MEM from within the branch's delay slot. */
6699 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6700 operands);
6701 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6702 if (get_attr_length (insn) == 24)
6703 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6704 else
6705 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6706 }
6707 /* Deal with gross reload from memory case. */
6708 else
6709 {
6710 /* Reload loop counter from memory, the store back to memory
6711 happens in the branch's delay slot. */
6712 output_asm_insn ("ldw %0,%4", operands);
6713 if (get_attr_length (insn) == 12)
6714 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6715 else
6716 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6717 }
6718 }
6719
6720 /* Return the output template for emitting a movb type insn.
6721
6722 Note it may perform some output operations on its own before
6723 returning the final output string. */
6724 const char *
6725 output_movb (rtx *operands, rtx insn, int which_alternative,
6726 int reverse_comparison)
6727 {
6728
6729   /* A conditional branch to the following instruction (e.g., the delay slot) is
6730 asking for a disaster. Be prepared! */
6731
6732 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6733 {
6734 if (which_alternative == 0)
6735 return "copy %1,%0";
6736 else if (which_alternative == 1)
6737 {
6738 output_asm_insn ("stw %1,-16(%%r30)", operands);
6739 return "{fldws|fldw} -16(%%r30),%0";
6740 }
6741 else if (which_alternative == 2)
6742 return "stw %1,%0";
6743 else
6744 return "mtsar %r1";
6745 }
6746
6747 /* Support the second variant. */
6748 if (reverse_comparison)
6749 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6750
6751 if (which_alternative == 0)
6752 {
6753 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6754 int length = get_attr_length (insn);
6755
6756 /* If this is a long branch with its delay slot unfilled, set `nullify'
6757 as it can nullify the delay slot and save a nop. */
6758 if (length == 8 && dbr_sequence_length () == 0)
6759 nullify = 1;
6760
6761 /* If this is a short forward conditional branch which did not get
6762 its delay slot filled, the delay slot can still be nullified. */
6763 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6764 nullify = forward_branch_p (insn);
6765
6766 /* Handle short versions first. */
6767 if (length == 4 && nullify)
6768 return "movb,%C2,n %1,%0,%3";
6769 else if (length == 4 && ! nullify)
6770 return "movb,%C2 %1,%0,%3";
6771 else if (length == 8)
6772 {
6773 /* Handle weird backwards branch with a filled delay slot
6774 which is nullified. */
6775 if (dbr_sequence_length () != 0
6776 && ! forward_branch_p (insn)
6777 && nullify)
6778 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6779
6780 /* Handle short backwards branch with an unfilled delay slot.
6781 Using a movb;nop rather than or;bl saves 1 cycle for both
6782 taken and untaken branches. */
6783 else if (dbr_sequence_length () == 0
6784 && ! forward_branch_p (insn)
6785 && INSN_ADDRESSES_SET_P ()
6786 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6787 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6788 return "movb,%C2 %1,%0,%3%#";
6789 /* Handle normal cases. */
6790 if (nullify)
6791 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6792 else
6793 return "or,%N2 %1,%%r0,%0\n\tb %3";
6794 }
6795 else
6796 abort ();
6797 }
6798 /* Deal with gross reload from FP register case. */
6799 else if (which_alternative == 1)
6800 {
6801 /* Move loop counter from FP register to MEM then into a GR,
6802 increment the GR, store the GR into MEM, and finally reload
6803 the FP register from MEM from within the branch's delay slot. */
6804 output_asm_insn ("stw %1,-16(%%r30)", operands);
6805 if (get_attr_length (insn) == 12)
6806 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6807 else
6808 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6809 }
6810 /* Deal with gross reload from memory case. */
6811 else if (which_alternative == 2)
6812 {
6813 /* Reload loop counter from memory, the store back to memory
6814 happens in the branch's delay slot. */
6815 if (get_attr_length (insn) == 8)
6816 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6817 else
6818 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6819 }
6820 /* Handle SAR as a destination. */
6821 else
6822 {
6823 if (get_attr_length (insn) == 8)
6824 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6825 else
6826 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tbl %3\n\tmtsar %r1";
6827 }
6828 }
6829
6830 /* Copy any FP arguments in INSN into integer registers. */
6831 static void
6832 copy_fp_args (rtx insn)
6833 {
6834 rtx link;
6835 rtx xoperands[2];
6836
6837 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6838 {
6839 int arg_mode, regno;
6840 rtx use = XEXP (link, 0);
6841
6842 if (! (GET_CODE (use) == USE
6843 && GET_CODE (XEXP (use, 0)) == REG
6844 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6845 continue;
6846
6847 arg_mode = GET_MODE (XEXP (use, 0));
6848 regno = REGNO (XEXP (use, 0));
6849
6850 /* Is it a floating point register? */
6851 if (regno >= 32 && regno <= 39)
6852 {
6853 /* Copy the FP register into an integer register via memory. */
6854 if (arg_mode == SFmode)
6855 {
6856 xoperands[0] = XEXP (use, 0);
6857 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6858 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6859 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6860 }
6861 else
6862 {
6863 xoperands[0] = XEXP (use, 0);
6864 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6865 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6866 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6867 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6868 }
6869 }
6870 }
6871 }
6872
6873 /* Compute length of the FP argument copy sequence for INSN. */
6874 static int
6875 length_fp_args (rtx insn)
6876 {
6877 int length = 0;
6878 rtx link;
6879
6880 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6881 {
6882 int arg_mode, regno;
6883 rtx use = XEXP (link, 0);
6884
6885 if (! (GET_CODE (use) == USE
6886 && GET_CODE (XEXP (use, 0)) == REG
6887 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6888 continue;
6889
6890 arg_mode = GET_MODE (XEXP (use, 0));
6891 regno = REGNO (XEXP (use, 0));
6892
6893 /* Is it a floating point register? */
6894 if (regno >= 32 && regno <= 39)
6895 {
6896 if (arg_mode == SFmode)
6897 length += 8;
6898 else
6899 length += 12;
6900 }
6901 }
6902
6903 return length;
6904 }
6905
6906 /* Return the attribute length for the millicode call instruction INSN.
6907 The length must match the code generated by output_millicode_call.
6908 We include the delay slot in the returned length as it is better to
6909    overestimate the length than to underestimate it.  */
6910
6911 int
6912 attr_length_millicode_call (rtx insn)
6913 {
6914 unsigned long distance = -1;
6915 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6916
6917 if (INSN_ADDRESSES_SET_P ())
6918 {
6919 distance = (total + insn_current_reference_address (insn));
6920 if (distance < total)
6921 distance = -1;
6922 }
6923
6924 if (TARGET_64BIT)
6925 {
6926 if (!TARGET_LONG_CALLS && distance < 7600000)
6927 return 8;
6928
6929 return 20;
6930 }
6931 else if (TARGET_PORTABLE_RUNTIME)
6932 return 24;
6933 else
6934 {
6935 if (!TARGET_LONG_CALLS && distance < 240000)
6936 return 8;
6937
6938 if (TARGET_LONG_ABS_CALL && !flag_pic)
6939 return 12;
6940
6941 return 24;
6942 }
6943 }
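
/* For example (illustrative), on the 32-bit port without -mlong-calls
   a millicode call whose estimated distance is below 240000 bytes is
   assumed to be reachable with a plain branch and gets the minimum
   length of 8 bytes (branch plus delay slot).  */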
6944
6945 /* INSN is a function call. It may have an unconditional jump
6946 in its delay slot.
6947
6948 CALL_DEST is the routine we are calling. */
6949
6950 const char *
6951 output_millicode_call (rtx insn, rtx call_dest)
6952 {
6953 int attr_length = get_attr_length (insn);
6954 int seq_length = dbr_sequence_length ();
6955 int distance;
6956 rtx seq_insn;
6957 rtx xoperands[3];
6958
6959 xoperands[0] = call_dest;
6960 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6961
6962 /* Handle the common case where we are sure that the branch will
6963      reach the beginning of the $CODE$ subspace.  The within-reach
6964 form of the $$sh_func_adrs call has a length of 28. Because
6965 it has an attribute type of multi, it never has a nonzero
6966 sequence length. The length of the $$sh_func_adrs is the same
6967 as certain out of reach PIC calls to other routines. */
6968 if (!TARGET_LONG_CALLS
6969 && ((seq_length == 0
6970 && (attr_length == 12
6971 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6972 || (seq_length != 0 && attr_length == 8)))
6973 {
6974 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6975 }
6976 else
6977 {
6978 if (TARGET_64BIT)
6979 {
6980 /* It might seem that one insn could be saved by accessing
6981 the millicode function using the linkage table. However,
6982 this doesn't work in shared libraries and other dynamically
6983 loaded objects. Using a pc-relative sequence also avoids
6984 problems related to the implicit use of the gp register. */
6985 output_asm_insn ("b,l .+8,%%r1", xoperands);
6986
6987 if (TARGET_GAS)
6988 {
6989 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6990 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6991 }
6992 else
6993 {
6994 xoperands[1] = gen_label_rtx ();
6995 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6996 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6997 CODE_LABEL_NUMBER (xoperands[1]));
6998 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6999 }
7000
7001 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7002 }
7003 else if (TARGET_PORTABLE_RUNTIME)
7004 {
7005 /* Pure portable runtime doesn't allow be/ble; we also don't
7006 have PIC support in the assembler/linker, so this sequence
7007 is needed. */
7008
7009 /* Get the address of our target into %r1. */
7010 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7011 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7012
7013 /* Get our return address into %r31. */
7014 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7015 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7016
7017 /* Jump to our target address in %r1. */
7018 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7019 }
7020 else if (!flag_pic)
7021 {
7022 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7023 if (TARGET_PA_20)
7024 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7025 else
7026 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7027 }
7028 else
7029 {
7030 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7031 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7032
7033 if (TARGET_SOM || !TARGET_GAS)
7034 {
7035 /* The HP assembler can generate relocations for the
7036 difference of two symbols. GAS can do this for a
7037 millicode symbol but not an arbitrary external
7038 symbol when generating SOM output. */
7039 xoperands[1] = gen_label_rtx ();
7040 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7041 CODE_LABEL_NUMBER (xoperands[1]));
7042 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7043 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7044 }
7045 else
7046 {
7047 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7048 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7049 xoperands);
7050 }
7051
7052 /* Jump to our target address in %r1. */
7053 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7054 }
7055 }
7056
7057 if (seq_length == 0)
7058 output_asm_insn ("nop", xoperands);
7059
7060 /* We are done if there isn't a jump in the delay slot. */
7061 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7062 return "";
7063
7064 /* This call has an unconditional jump in its delay slot. */
7065 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7066
7067 /* See if the return address can be adjusted. Use the containing
7068 sequence insn's address. */
7069 if (INSN_ADDRESSES_SET_P ())
7070 {
7071 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7072 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7073 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7074
7075 if (VAL_14_BITS_P (distance))
7076 {
7077 xoperands[1] = gen_label_rtx ();
7078 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7079 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7080 CODE_LABEL_NUMBER (xoperands[1]));
7081 }
7082 else
7083 /* ??? This branch may not reach its target. */
7084 output_asm_insn ("nop\n\tb,n %0", xoperands);
7085 }
7086 else
7087 /* ??? This branch may not reach its target. */
7088 output_asm_insn ("nop\n\tb,n %0", xoperands);
7089
7090 /* Delete the jump. */
7091 PUT_CODE (NEXT_INSN (insn), NOTE);
7092 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7093 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7094
7095 return "";
7096 }
7097
7098 /* Return the attribute length of the call instruction INSN. The SIBCALL
7099 flag indicates whether INSN is a regular call or a sibling call. The
7100 length returned must be longer than the code actually generated by
7101 output_call. Since branch shortening is done before delay branch
7102 sequencing, there is no way to determine whether or not the delay
7103 slot will be filled during branch shortening. Even when the delay
7104 slot is filled, we may have to add a nop if the delay slot contains
7105 a branch that can't reach its target. Thus, we always have to include
7106 the delay slot in the length estimate. This used to be done in
7107 pa_adjust_insn_length but we do it here now as some sequences always
7108 fill the delay slot and we can save four bytes in the estimate for
7109 these sequences. */
7110
7111 int
7112 attr_length_call (rtx insn, int sibcall)
7113 {
7114 int local_call;
7115 rtx call_dest;
7116 tree call_decl;
7117 int length = 0;
7118 rtx pat = PATTERN (insn);
7119 unsigned long distance = -1;
7120
7121 if (INSN_ADDRESSES_SET_P ())
7122 {
7123 unsigned long total;
7124
7125 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7126 distance = (total + insn_current_reference_address (insn));
7127 if (distance < total)
7128 distance = -1;
7129 }
7130
7131 /* Determine if this is a local call. */
7132 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7133 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7134 else
7135 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7136
7137 call_decl = SYMBOL_REF_DECL (call_dest);
7138 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7139
7140 /* pc-relative branch. */
7141 if (!TARGET_LONG_CALLS
7142 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7143 || distance < 240000))
7144 length += 8;
7145
7146 /* 64-bit plabel sequence. */
7147 else if (TARGET_64BIT && !local_call)
7148 length += sibcall ? 28 : 24;
7149
7150 /* non-pic long absolute branch sequence. */
7151 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7152 length += 12;
7153
7154 /* long pc-relative branch sequence. */
7155 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7156 || (TARGET_64BIT && !TARGET_GAS)
7157 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7158 {
7159 length += 20;
7160
7161 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7162 length += 8;
7163 }
7164
7165 /* 32-bit plabel sequence. */
7166 else
7167 {
7168 length += 32;
7169
7170 if (TARGET_SOM)
7171 length += length_fp_args (insn);
7172
7173 if (flag_pic)
7174 length += 4;
7175
7176 if (!TARGET_PA_20)
7177 {
7178 if (!sibcall)
7179 length += 8;
7180
7181 if (!TARGET_NO_SPACE_REGS)
7182 length += 8;
7183 }
7184 }
7185
7186 return length;
7187 }
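
/* Worked example (illustrative): a regular (non-sibling) PIC call
   through a 32-bit plabel on a PA 1.1 ELF32 target with space
   registers enabled totals 32 + 4 (flag_pic) + 8 (!sibcall)
   + 8 (!TARGET_NO_SPACE_REGS) = 52 bytes.  */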
7188
7189 /* INSN is a function call. It may have an unconditional jump
7190 in its delay slot.
7191
7192 CALL_DEST is the routine we are calling. */
7193
7194 const char *
7195 output_call (rtx insn, rtx call_dest, int sibcall)
7196 {
7197 int delay_insn_deleted = 0;
7198 int delay_slot_filled = 0;
7199 int seq_length = dbr_sequence_length ();
7200 tree call_decl = SYMBOL_REF_DECL (call_dest);
7201 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7202 rtx xoperands[2];
7203
7204 xoperands[0] = call_dest;
7205
7206 /* Handle the common case where we're sure that the branch will reach
7207 the beginning of the "$CODE$" subspace. This is the beginning of
7208 the current function if we are in a named section. */
7209 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7210 {
7211 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7212 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7213 }
7214 else
7215 {
7216 if (TARGET_64BIT && !local_call)
7217 {
7218 /* ??? As far as I can tell, the HP linker doesn't support the
7219 long pc-relative sequence described in the 64-bit runtime
7220 architecture. So, we use a slightly longer indirect call. */
7221 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7222
7223 xoperands[0] = p->internal_label;
7224 xoperands[1] = gen_label_rtx ();
7225
7226 /* If this isn't a sibcall, we put the load of %r27 into the
7227 delay slot. We can't do this in a sibcall as we don't
7228 have a second call-clobbered scratch register available. */
7229 if (seq_length != 0
7230 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7231 && !sibcall)
7232 {
7233 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7234 optimize, 0, 0, NULL);
7235
7236 /* Now delete the delay insn. */
7237 PUT_CODE (NEXT_INSN (insn), NOTE);
7238 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7239 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7240 delay_insn_deleted = 1;
7241 }
7242
7243 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7244 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7245 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7246
7247 if (sibcall)
7248 {
7249 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7250 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7251 output_asm_insn ("bve (%%r1)", xoperands);
7252 }
7253 else
7254 {
7255 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7256 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7257 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7258 delay_slot_filled = 1;
7259 }
7260 }
7261 else
7262 {
7263 int indirect_call = 0;
7264
7265 /* Emit a long call. There are several different sequences
7266 of increasing length and complexity. In most cases,
7267 they don't allow an instruction in the delay slot. */
7268 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7269 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7270 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7271 && !TARGET_64BIT)
7272 indirect_call = 1;
7273
7274 if (seq_length != 0
7275 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7276 && !sibcall
7277 && (!TARGET_PA_20 || indirect_call))
7278 {
7279 /* A non-jump insn in the delay slot. By definition we can
7280 emit this insn before the call (and in fact before argument
7281 relocating). */
7282 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0, 0,
7283 NULL);
7284
7285 /* Now delete the delay insn. */
7286 PUT_CODE (NEXT_INSN (insn), NOTE);
7287 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7288 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7289 delay_insn_deleted = 1;
7290 }
7291
7292 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7293 {
7294 /* This is the best sequence for making long calls in
7295 non-pic code. Unfortunately, GNU ld doesn't provide
7296 the stub needed for external calls, and GAS's support
7297 for this with the SOM linker is buggy. It is safe
7298 to use this for local calls. */
7299 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7300 if (sibcall)
7301 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7302 else
7303 {
7304 if (TARGET_PA_20)
7305 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7306 xoperands);
7307 else
7308 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7309
7310 output_asm_insn ("copy %%r31,%%r2", xoperands);
7311 delay_slot_filled = 1;
7312 }
7313 }
7314 else
7315 {
7316 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7317 || (TARGET_64BIT && !TARGET_GAS))
7318 {
7319 /* The HP assembler and linker can handle relocations
7320 for the difference of two symbols. GAS and the HP
7321 linker can't do this when one of the symbols is
7322 external. */
7323 xoperands[1] = gen_label_rtx ();
7324 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7325 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7326 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7327 CODE_LABEL_NUMBER (xoperands[1]));
7328 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7329 }
7330 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7331 {
7332 /* GAS currently can't generate the relocations that
7333 are needed for the SOM linker under HP-UX using this
7334 sequence. The GNU linker doesn't generate the stubs
7335 that are needed for external calls on TARGET_ELF32
7336 with this sequence. For now, we have to use a
7337 longer plabel sequence when using GAS. */
7338 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7339 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7340 xoperands);
7341 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7342 xoperands);
7343 }
7344 else
7345 {
7346 /* Emit a long plabel-based call sequence. This is
7347 essentially an inline implementation of $$dyncall.
7348 We don't actually try to call $$dyncall as this is
7349 as difficult as calling the function itself. */
7350 struct deferred_plabel *p = get_plabel (XSTR (call_dest, 0));
7351
7352 xoperands[0] = p->internal_label;
7353 xoperands[1] = gen_label_rtx ();
7354
7355 /* Since the call is indirect, FP arguments in registers
7356 need to be copied to the general registers. Then, the
7357 argument relocation stub will copy them back. */
7358 if (TARGET_SOM)
7359 copy_fp_args (insn);
7360
7361 if (flag_pic)
7362 {
7363 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7364 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7365 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7366 }
7367 else
7368 {
7369 output_asm_insn ("addil LR'%0-$global$,%%r27",
7370 xoperands);
7371 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7372 xoperands);
7373 }
7374
7375 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7376 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7377 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7378 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7379
7380 if (!sibcall && !TARGET_PA_20)
7381 {
7382 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7383 if (TARGET_NO_SPACE_REGS)
7384 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7385 else
7386 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7387 }
7388 }
7389
7390 if (TARGET_PA_20)
7391 {
7392 if (sibcall)
7393 output_asm_insn ("bve (%%r1)", xoperands);
7394 else
7395 {
7396 if (indirect_call)
7397 {
7398 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7399 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7400 delay_slot_filled = 1;
7401 }
7402 else
7403 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7404 }
7405 }
7406 else
7407 {
7408 if (!TARGET_NO_SPACE_REGS)
7409 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7410 xoperands);
7411
7412 if (sibcall)
7413 {
7414 if (TARGET_NO_SPACE_REGS)
7415 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7416 else
7417 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7418 }
7419 else
7420 {
7421 if (TARGET_NO_SPACE_REGS)
7422 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7423 else
7424 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7425
7426 if (indirect_call)
7427 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7428 else
7429 output_asm_insn ("copy %%r31,%%r2", xoperands);
7430 delay_slot_filled = 1;
7431 }
7432 }
7433 }
7434 }
7435 }
7436
7437 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7438 output_asm_insn ("nop", xoperands);
7439
7440 /* We are done if there isn't a jump in the delay slot. */
7441 if (seq_length == 0
7442 || delay_insn_deleted
7443 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7444 return "";
7445
7446 /* A sibcall should never have a branch in the delay slot. */
7447 if (sibcall)
7448 abort ();
7449
7450 /* This call has an unconditional jump in its delay slot. */
7451 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7452
7453 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7454 {
7455 /* See if the return address can be adjusted. Use the containing
7456 sequence insn's address. */
7457 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7458 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7459 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7460
7461 if (VAL_14_BITS_P (distance))
7462 {
7463 xoperands[1] = gen_label_rtx ();
7464 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7465 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7466 CODE_LABEL_NUMBER (xoperands[1]));
7467 }
7468 else
7469 output_asm_insn ("nop\n\tb,n %0", xoperands);
7470 }
7471 else
7472 output_asm_insn ("b,n %0", xoperands);
7473
7474 /* Delete the jump. */
7475 PUT_CODE (NEXT_INSN (insn), NOTE);
7476 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7477 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7478
7479 return "";
7480 }
7481
7482 /* Return the attribute length of the indirect call instruction INSN.
7483 The length must match the code generated by output_indirect_call.
7484 The returned length includes the delay slot. Currently, the delay
7485 slot of an indirect call sequence is not exposed and it is used by
7486 the sequence itself. */
7487
7488 int
7489 attr_length_indirect_call (rtx insn)
7490 {
7491 unsigned long distance = -1;
7492 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7493
7494 if (INSN_ADDRESSES_SET_P ())
7495 {
7496 distance = (total + insn_current_reference_address (insn));
7497 if (distance < total)
7498 distance = -1;
7499 }
7500
7501 if (TARGET_64BIT)
7502 return 12;
7503
7504 if (TARGET_FAST_INDIRECT_CALLS
7505 || (!TARGET_PORTABLE_RUNTIME
7506 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7507 return 8;
7508
7509 if (flag_pic)
7510 return 24;
7511
7512 if (TARGET_PORTABLE_RUNTIME)
7513 return 20;
7514
7515 /* Out of reach, can use ble. */
7516 return 12;
7517 }
7518
7519 const char *
7520 output_indirect_call (rtx insn, rtx call_dest)
7521 {
7522 rtx xoperands[1];
7523
7524 if (TARGET_64BIT)
7525 {
7526 xoperands[0] = call_dest;
7527 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7528 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7529 return "";
7530 }
7531
7532 /* First the special case for kernels, level 0 systems, etc. */
7533 if (TARGET_FAST_INDIRECT_CALLS)
7534 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7535
7536 /* Now the normal case -- we can reach $$dyncall directly or
7537 we're sure that we can get there via a long-branch stub.
7538
7539 No need to check target flags as the length uniquely identifies
7540 the remaining cases. */
7541 if (attr_length_indirect_call (insn) == 8)
7542 {
7543 /* The HP linker substitutes a BLE for millicode calls using
7544 the short PIC PCREL form. Thus, we must use %r31 as the
7545 link register when generating PA 1.x code. */
7546 if (TARGET_PA_20)
7547 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7548 else
7549 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7550 }
7551
7552 /* Long millicode call, but we are not generating PIC or portable runtime
7553 code. */
7554 if (attr_length_indirect_call (insn) == 12)
7555 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7556
7557 /* Long millicode call for portable runtime. */
7558 if (attr_length_indirect_call (insn) == 20)
7559 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7560
7561 /* We need a long PIC call to $$dyncall. */
7562 xoperands[0] = NULL_RTX;
7563 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7564 if (TARGET_SOM || !TARGET_GAS)
7565 {
7566 xoperands[0] = gen_label_rtx ();
7567 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7568 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7569 CODE_LABEL_NUMBER (xoperands[0]));
7570 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7571 }
7572 else
7573 {
7574 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7575 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7576 xoperands);
7577 }
7578 output_asm_insn ("blr %%r0,%%r2", xoperands);
7579 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7580 return "";
7581 }
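/* For illustration, the shorter sequences returned above expand to the
   following (templates copied from this function with assembler
   escapes removed):

     8 bytes, PA 2.0:     b,l $$dyncall,%r2
                          copy %r2,%r31
     12 bytes, non-PIC:   ldil L'$$dyncall,%r2
                          ble R'$$dyncall(%sr4,%r2)
                          copy %r31,%r2

   The 20- and 24-byte cases load the address of $$dyncall and branch
   through %r31 or %r1 as coded above.  */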
7582
7583 /* Return the total length of the save and restore instructions needed for
7584 the data linkage table pointer (i.e., the PIC register) across the call
7585 instruction INSN. No-return calls do not require a save and restore.
7586 In addition, we may be able to avoid the save and restore for calls
7587 within the same translation unit. */
7588
7589 int
7590 attr_length_save_restore_dltp (rtx insn)
7591 {
7592 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7593 return 0;
7594
7595 return 8;
7596 }
7597
7598 /* In HP-UX 8.0's shared library scheme, special relocations are needed
7599 for function labels if they might be passed to a function
7600 in a shared library (because shared libraries don't live in code
7601 space), and special magic is needed to construct their address. */
7602
7603 void
7604 hppa_encode_label (rtx sym)
7605 {
7606 const char *str = XSTR (sym, 0);
7607 int len = strlen (str) + 1;
7608 char *newstr, *p;
7609
7610 p = newstr = alloca (len + 1);
7611 *p++ = '@';
7612 strcpy (p, str);
7613
7614 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7615 }
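/* For example, hppa_encode_label rewrites a function symbol "foo" to
   "@foo" ("foo" being a hypothetical name; any function label in code
   space is treated this way):

     XSTR (sym, 0): "foo"  ->  "@foo"

   The '@' marks the symbol as a function label so the special plabel
   relocations can be emitted; pa_strip_name_encoding below removes the
   prefix again before the name is printed.  */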
7616
7617 static void
7618 pa_encode_section_info (tree decl, rtx rtl, int first)
7619 {
7620 if (first && TEXT_SPACE_P (decl))
7621 {
7622 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7623 if (TREE_CODE (decl) == FUNCTION_DECL)
7624 hppa_encode_label (XEXP (rtl, 0));
7625 }
7626 }
7627
7628 /* This is roughly the inverse of pa_encode_section_info. */
7629
7630 static const char *
7631 pa_strip_name_encoding (const char *str)
7632 {
7633 str += (*str == '@');
7634 str += (*str == '*');
7635 return str;
7636 }
7637
7638 int
7639 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7640 {
7641 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7642 }
7643
7644 /* Returns 1 if OP is a function label involved in a simple addition
7645 with a constant. Used to keep certain patterns from matching
7646 during instruction combination. */
7647 int
7648 is_function_label_plus_const (rtx op)
7649 {
7650 /* Strip off any CONST. */
7651 if (GET_CODE (op) == CONST)
7652 op = XEXP (op, 0);
7653
7654 return (GET_CODE (op) == PLUS
7655 && function_label_operand (XEXP (op, 0), Pmode)
7656 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7657 }
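/* For example, the following RTL (with a hypothetical encoded symbol
   "@foo") satisfies is_function_label_plus_const:

     (const (plus (symbol_ref "@foo") (const_int 4)))  */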
7658
7659 /* Output assembly code for a thunk to FUNCTION. */
7660
7661 static void
7662 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7663 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7664 tree function)
7665 {
7666 const char *fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
7667 const char *tname = XSTR (XEXP (DECL_RTL (thunk_fndecl), 0), 0);
7668 int val_14 = VAL_14_BITS_P (delta);
7669 int nbytes = 0;
7670 static unsigned int current_thunk_number;
7671 char label[16];
7672
7673 ASM_OUTPUT_LABEL (file, tname);
7674 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7675
7676 fname = (*targetm.strip_name_encoding) (fname);
7677 tname = (*targetm.strip_name_encoding) (tname);
7678
7679 /* Output the thunk. We know that the function is in the same
7680 translation unit (i.e., the same space) as the thunk, and that
7681 thunks are output after their method. Thus, we don't need an
7682 external branch to reach the function. With SOM and GAS,
7683 functions and thunks are effectively in different sections.
7684 Thus, we can always use an IA-relative branch and the linker
7685 will add a long branch stub if necessary.
7686
7687 However, we have to be careful when generating PIC code on the
7688 SOM port to ensure that the sequence does not transfer to an
7689 import stub for the target function as this could clobber the
7690 return value saved at SP-24. This would also apply to the
7691 32-bit linux port if the multi-space model is implemented. */
7692 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7693 && !(flag_pic && TREE_PUBLIC (function))
7694 && (TARGET_GAS || last_address < 262132))
7695 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7696 && ((targetm.have_named_sections
7697 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7698 /* The GNU 64-bit linker has rather poor stub management.
7699 So, we use a long branch from thunks that aren't in
7700 the same section as the target function. */
7701 && ((!TARGET_64BIT
7702 && (DECL_SECTION_NAME (thunk_fndecl)
7703 != DECL_SECTION_NAME (function)))
7704 || ((DECL_SECTION_NAME (thunk_fndecl)
7705 == DECL_SECTION_NAME (function))
7706 && last_address < 262132)))
7707 || (!targetm.have_named_sections && last_address < 262132))))
7708 {
7709 if (val_14)
7710 {
7711 fprintf (file, "\tb %s\n\tldo " HOST_WIDE_INT_PRINT_DEC
7712 "(%%r26),%%r26\n", fname, delta);
7713 nbytes += 8;
7714 }
7715 else
7716 {
7717 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7718 ",%%r26\n", delta);
7719 fprintf (file, "\tb %s\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7720 "(%%r1),%%r26\n", fname, delta);
7721 nbytes += 12;
7722 }
7723 }
7724 else if (TARGET_64BIT)
7725 {
7726 /* We only have one call-clobbered scratch register, so we can't
7727 make use of the delay slot if delta doesn't fit in 14 bits. */
7728 if (!val_14)
7729 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7730 ",%%r26\n\tldo R'" HOST_WIDE_INT_PRINT_DEC
7731 "(%%r1),%%r26\n", delta, delta);
7732
7733 fprintf (file, "\tb,l .+8,%%r1\n");
7734
7735 if (TARGET_GAS)
7736 {
7737 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7738 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r1\n", fname);
7739 }
7740 else
7741 {
7742 int off = val_14 ? 8 : 16;
7743 fprintf (file, "\taddil L'%s-%s-%d,%%r1\n", fname, tname, off);
7744 fprintf (file, "\tldo R'%s-%s-%d(%%r1),%%r1\n", fname, tname, off);
7745 }
7746
7747 if (val_14)
7748 {
7749 fprintf (file, "\tbv %%r0(%%r1)\n\tldo ");
7750 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7751 nbytes += 20;
7752 }
7753 else
7754 {
7755 fprintf (file, "\tbv,n %%r0(%%r1)\n");
7756 nbytes += 24;
7757 }
7758 }
7759 else if (TARGET_PORTABLE_RUNTIME)
7760 {
7761 fprintf (file, "\tldil L'%s,%%r1\n", fname);
7762 fprintf (file, "\tldo R'%s(%%r1),%%r22\n", fname);
7763
7764 if (val_14)
7765 {
7766 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7767 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7768 nbytes += 16;
7769 }
7770 else
7771 {
7772 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7773 ",%%r26\n", delta);
7774 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7775 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7776 nbytes += 20;
7777 }
7778 }
7779 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7780 {
7781 /* The function is accessible from outside this module. The only
7782 way to avoid an import stub between the thunk and function is to
7783 call the function directly with an indirect sequence similar to
7784 that used by $$dyncall. This is possible because $$dyncall acts
7785 as the import stub in an indirect call. */
7786 const char *lab;
7787
7788 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7789 lab = (*targetm.strip_name_encoding) (label);
7790
7791 fprintf (file, "\taddil LT'%s,%%r19\n", lab);
7792 fprintf (file, "\tldw RT'%s(%%r1),%%r22\n", lab);
7793 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7794 fprintf (file, "\tbb,>=,n %%r22,30,.+16\n");
7795 fprintf (file, "\tdepi 0,31,2,%%r22\n");
7796 fprintf (file, "\tldw 4(%%sr0,%%r22),%%r19\n");
7797 fprintf (file, "\tldw 0(%%sr0,%%r22),%%r22\n");
7798 if (!val_14)
7799 {
7800 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7801 ",%%r26\n", delta);
7802 nbytes += 4;
7803 }
7804 if (TARGET_PA_20)
7805 {
7806 fprintf (file, "\tbve (%%r22)\n\tldo ");
7807 nbytes += 36;
7808 }
7809 else
7810 {
7811 if (TARGET_NO_SPACE_REGS)
7812 {
7813 fprintf (file, "\tbe 0(%%sr4,%%r22)\n\tldo ");
7814 nbytes += 36;
7815 }
7816 else
7817 {
7818 fprintf (file, "\tldsid (%%sr0,%%r22),%%r21\n");
7819 fprintf (file, "\tmtsp %%r21,%%sr0\n");
7820 fprintf (file, "\tbe 0(%%sr0,%%r22)\n\tldo ");
7821 nbytes += 44;
7822 }
7823 }
7824
7825 if (val_14)
7826 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7827 else
7828 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7829 }
7830 else if (flag_pic)
7831 {
7832 if (TARGET_PA_20)
7833 fprintf (file, "\tb,l .+8,%%r1\n");
7834 else
7835 fprintf (file, "\tbl .+8,%%r1\n");
7836
7837 if (TARGET_SOM || !TARGET_GAS)
7838 {
7839 fprintf (file, "\taddil L'%s-%s-8,%%r1\n", fname, tname);
7840 fprintf (file, "\tldo R'%s-%s-8(%%r1),%%r22\n", fname, tname);
7841 }
7842 else
7843 {
7844 fprintf (file, "\taddil L'%s-$PIC_pcrel$0+4,%%r1\n", fname);
7845 fprintf (file, "\tldo R'%s-$PIC_pcrel$0+8(%%r1),%%r22\n", fname);
7846 }
7847
7848 if (val_14)
7849 {
7850 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7851 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7852 nbytes += 20;
7853 }
7854 else
7855 {
7856 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC
7857 ",%%r26\n", delta);
7858 fprintf (file, "\tbv %%r0(%%r22)\n\tldo ");
7859 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7860 nbytes += 24;
7861 }
7862 }
7863 else
7864 {
7865 if (!val_14)
7866 fprintf (file, "\taddil L'" HOST_WIDE_INT_PRINT_DEC ",%%r26\n", delta);
7867
7868 fprintf (file, "\tldil L'%s,%%r22\n", fname);
7869 fprintf (file, "\tbe R'%s(%%sr4,%%r22)\n\tldo ", fname);
7870
7871 if (val_14)
7872 {
7873 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%%r26),%%r26\n", delta);
7874 nbytes += 12;
7875 }
7876 else
7877 {
7878 fprintf (file, "R'" HOST_WIDE_INT_PRINT_DEC "(%%r1),%%r26\n", delta);
7879 nbytes += 16;
7880 }
7881 }
7882
7883 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7884
7885 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7886 {
7887 data_section ();
7888 fprintf (file, "\t.align 4\n");
7889 ASM_OUTPUT_LABEL (file, label);
7890 fprintf (file, "\t.word P'%s\n", fname);
7891 function_section (thunk_fndecl);
7892 }
7893
7894 current_thunk_number++;
7895 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7896 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7897 last_address += nbytes;
7898 update_total_code_bytes (nbytes);
7899 }
7900
7901 /* Only direct calls to static functions are allowed to be sibling (tail)
7902 call optimized.
7903
7904 This restriction is necessary because some linker-generated stubs will
7905 store return pointers into rp' in some cases which might clobber a
7906 live value already in rp'.
7907
7908 In a sibcall the current function and the target function share stack
7909 space. Thus if the path to the current function and the path to the
7910 target function save a value in rp', they save the value into the
7911 same stack slot, which has undesirable consequences.
7912
7913 Because of the deferred binding nature of shared libraries any function
7914 with external scope could be in a different load module and thus require
7915 rp' to be saved when calling that function. So sibcall optimizations
7916 can only be safe for static functions.
7917
7918 Note that GCC never needs return value relocations, so we don't have to
7919 worry about static calls with return value relocations (which require
7920 saving rp').
7921
7922 It is safe to perform a sibcall optimization when the target function
7923 will never return. */
7924 static bool
7925 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7926 {
7927 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7928 single subspace mode and the call is not indirect. As far as I know,
7929 there is no operating system support for the multiple subspace mode.
7930 It might be possible to support indirect calls if we didn't use
7931 $$dyncall (see the indirect sequence generated in output_call). */
7932 if (TARGET_ELF32)
7933 return (decl != NULL_TREE);
7934
7935 /* Sibcalls are not ok because the arg pointer register is not a fixed
7936 register. This prevents the sibcall optimization from occurring. In
7937 addition, there are problems with stub placement using GNU ld. This
7938 is because a normal sibcall branch uses a 17-bit relocation while
7939 a regular call branch uses a 22-bit relocation. As a result, more
7940 care needs to be taken in the placement of long-branch stubs. */
7941 if (TARGET_64BIT)
7942 return false;
7943
7944 return (decl
7945 && !TARGET_PORTABLE_RUNTIME
7946 && !TREE_PUBLIC (decl));
7947 }
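/* A sketch of a call that may be sibcall optimized under the default
   rules above (the names are hypothetical): the callee is file-local,
   so DECL is non-NULL and !TREE_PUBLIC (decl) holds.

     static int helper (int x) { return x + 1; }
     int caller (int x) { return helper (x); }

   A call to an extern function, or an indirect call, is rejected
   because it might resolve to a different load module.  */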
7948
7949 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7950 use in fmpyadd instructions. */
7951 int
7952 fmpyaddoperands (rtx *operands)
7953 {
7954 enum machine_mode mode = GET_MODE (operands[0]);
7955
7956 /* Must be a floating point mode. */
7957 if (mode != SFmode && mode != DFmode)
7958 return 0;
7959
7960 /* All modes must be the same. */
7961 if (! (mode == GET_MODE (operands[1])
7962 && mode == GET_MODE (operands[2])
7963 && mode == GET_MODE (operands[3])
7964 && mode == GET_MODE (operands[4])
7965 && mode == GET_MODE (operands[5])))
7966 return 0;
7967
7968 /* All operands must be registers. */
7969 if (! (GET_CODE (operands[1]) == REG
7970 && GET_CODE (operands[2]) == REG
7971 && GET_CODE (operands[3]) == REG
7972 && GET_CODE (operands[4]) == REG
7973 && GET_CODE (operands[5]) == REG))
7974 return 0;
7975
7976 /* Only 2 real operands to the addition. One of the input operands must
7977 be the same as the output operand. */
7978 if (! rtx_equal_p (operands[3], operands[4])
7979 && ! rtx_equal_p (operands[3], operands[5]))
7980 return 0;
7981
7982 /* Inout operand of add cannot conflict with any operands from multiply. */
7983 if (rtx_equal_p (operands[3], operands[0])
7984 || rtx_equal_p (operands[3], operands[1])
7985 || rtx_equal_p (operands[3], operands[2]))
7986 return 0;
7987
7988 /* The multiply cannot feed into the addition operands. */
7989 if (rtx_equal_p (operands[4], operands[0])
7990 || rtx_equal_p (operands[5], operands[0]))
7991 return 0;
7992
7993 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
7994 if (mode == SFmode
7995 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7996 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7997 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7998 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7999 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8000 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8001 return 0;
8002
8003 /* Passed. Operands are suitable for fmpyadd. */
8004 return 1;
8005 }
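/* An illustrative operand set that passes the checks above (register
   choices are hypothetical): the multiply computes fr22 = fr4 * fr5
   (operands 0-2) and the add computes fr6 = fr6 + fr7 (operands 3-5,
   with operands[3] == operands[4]).  Since fr6 and fr7 are disjoint
   from fr22, fr4 and fr5, the two operations are independent and can
   be fused into one fmpyadd.  */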
8006
8007 #if !defined(USE_COLLECT2)
8008 static void
8009 pa_asm_out_constructor (rtx symbol, int priority)
8010 {
8011 if (!function_label_operand (symbol, VOIDmode))
8012 hppa_encode_label (symbol);
8013
8014 #ifdef CTORS_SECTION_ASM_OP
8015 default_ctor_section_asm_out_constructor (symbol, priority);
8016 #else
8017 # ifdef TARGET_ASM_NAMED_SECTION
8018 default_named_section_asm_out_constructor (symbol, priority);
8019 # else
8020 default_stabs_asm_out_constructor (symbol, priority);
8021 # endif
8022 #endif
8023 }
8024
8025 static void
8026 pa_asm_out_destructor (rtx symbol, int priority)
8027 {
8028 if (!function_label_operand (symbol, VOIDmode))
8029 hppa_encode_label (symbol);
8030
8031 #ifdef DTORS_SECTION_ASM_OP
8032 default_dtor_section_asm_out_destructor (symbol, priority);
8033 #else
8034 # ifdef TARGET_ASM_NAMED_SECTION
8035 default_named_section_asm_out_destructor (symbol, priority);
8036 # else
8037 default_stabs_asm_out_destructor (symbol, priority);
8038 # endif
8039 #endif
8040 }
8041 #endif
8042
8043 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8044 use in fmpysub instructions. */
8045 int
8046 fmpysuboperands (rtx *operands)
8047 {
8048 enum machine_mode mode = GET_MODE (operands[0]);
8049
8050 /* Must be a floating point mode. */
8051 if (mode != SFmode && mode != DFmode)
8052 return 0;
8053
8054 /* All modes must be the same. */
8055 if (! (mode == GET_MODE (operands[1])
8056 && mode == GET_MODE (operands[2])
8057 && mode == GET_MODE (operands[3])
8058 && mode == GET_MODE (operands[4])
8059 && mode == GET_MODE (operands[5])))
8060 return 0;
8061
8062 /* All operands must be registers. */
8063 if (! (GET_CODE (operands[1]) == REG
8064 && GET_CODE (operands[2]) == REG
8065 && GET_CODE (operands[3]) == REG
8066 && GET_CODE (operands[4]) == REG
8067 && GET_CODE (operands[5]) == REG))
8068 return 0;
8069
8070 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8071 operation, so operands[4] must be the same as operands[3]. */
8072 if (! rtx_equal_p (operands[3], operands[4]))
8073 return 0;
8074
8075 /* The multiply cannot feed into the subtraction. */
8076 if (rtx_equal_p (operands[5], operands[0]))
8077 return 0;
8078
8079 /* Inout operand of sub cannot conflict with any operands from multiply. */
8080 if (rtx_equal_p (operands[3], operands[0])
8081 || rtx_equal_p (operands[3], operands[1])
8082 || rtx_equal_p (operands[3], operands[2]))
8083 return 0;
8084
8085 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8086 if (mode == SFmode
8087 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8088 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8089 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8090 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8091 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8092 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8093 return 0;
8094
8095 /* Passed. Operands are suitable for fmpysub. */
8096 return 1;
8097 }
8098
8099 int
8100 plus_xor_ior_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8101 {
8102 return (GET_CODE (op) == PLUS || GET_CODE (op) == XOR
8103 || GET_CODE (op) == IOR);
8104 }
8105
8106 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8107 constants for shadd instructions. */
8108 static int
8109 shadd_constant_p (int val)
8110 {
8111 if (val == 2 || val == 4 || val == 8)
8112 return 1;
8113 else
8114 return 0;
8115 }
8116
8117 /* Return 1 if OP is a CONST_INT with the value 2, 4, or 8. These are
8118 the valid constants for shadd instructions. */
8119 int
8120 shadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8121 {
8122 return (GET_CODE (op) == CONST_INT && shadd_constant_p (INTVAL (op)));
8123 }
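/* The constants 2, 4 and 8 correspond to the sh1add, sh2add and sh3add
   instructions, which shift their first operand left by 1, 2 or 3 bits
   before adding.  A hypothetical example:

     sh2add %r26,%r25,%r28    ; %r28 = %r26 * 4 + %r25  */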
8124
8125 /* Return 1 if OP is valid as a base or index register in a
8126 REG+REG address. */
8127
8128 int
8129 borx_reg_operand (rtx op, enum machine_mode mode)
8130 {
8131 if (GET_CODE (op) != REG)
8132 return 0;
8133
8134 /* We must reject virtual registers as the only expressions that
8135 can be instantiated are REG and REG+CONST. */
8136 if (op == virtual_incoming_args_rtx
8137 || op == virtual_stack_vars_rtx
8138 || op == virtual_stack_dynamic_rtx
8139 || op == virtual_outgoing_args_rtx
8140 || op == virtual_cfa_rtx)
8141 return 0;
8142
8143 /* While it's always safe to index off the frame pointer, it's not
8144 profitable to do so when the frame pointer is being eliminated. */
8145 if (!reload_completed
8146 && flag_omit_frame_pointer
8147 && !current_function_calls_alloca
8148 && op == frame_pointer_rtx)
8149 return 0;
8150
8151 return register_operand (op, mode);
8152 }
8153
8154 /* Return 1 if this operand is anything other than a hard register. */
8155
8156 int
8157 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8158 {
8159 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8160 }
8161
8162 /* Return 1 if INSN branches forward. Should be using insn_addresses
8163 to avoid walking through all the insns... */
8164 static int
8165 forward_branch_p (rtx insn)
8166 {
8167 rtx label = JUMP_LABEL (insn);
8168
8169 while (insn)
8170 {
8171 if (insn == label)
8172 break;
8173 else
8174 insn = NEXT_INSN (insn);
8175 }
8176
8177 return (insn == label);
8178 }
8179
8180 /* Return 1 if OP is an equality comparison, else return 0. */
8181 int
8182 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8183 {
8184 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8185 }
8186
8187 /* Return 1 if OP is an operator suitable for use in a movb instruction. */
8188 int
8189 movb_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8190 {
8191 return (GET_CODE (op) == EQ || GET_CODE (op) == NE
8192 || GET_CODE (op) == LT || GET_CODE (op) == GE);
8193 }
8194
8195 /* Return 1 if INSN is in the delay slot of a call instruction. */
8196 int
8197 jump_in_call_delay (rtx insn)
8198 {
8200 if (GET_CODE (insn) != JUMP_INSN)
8201 return 0;
8202
8203 if (PREV_INSN (insn)
8204 && PREV_INSN (PREV_INSN (insn))
8205 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8206 {
8207 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8208
8209 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8210 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8211
8212 }
8213 else
8214 return 0;
8215 }
8216
8217 /* Output an unconditional move and branch insn. */
8218
8219 const char *
8220 output_parallel_movb (rtx *operands, int length)
8221 {
8222 /* These are the cases in which we win. */
8223 if (length == 4)
8224 return "mov%I1b,tr %1,%0,%2";
8225
8226 /* None of these cases wins, but they don't lose either. */
8227 if (dbr_sequence_length () == 0)
8228 {
8229 /* Nothing in the delay slot, fake it by putting the combined
8230 insn (the copy or add) in the delay slot of a bl. */
8231 if (GET_CODE (operands[1]) == CONST_INT)
8232 return "b %2\n\tldi %1,%0";
8233 else
8234 return "b %2\n\tcopy %1,%0";
8235 }
8236 else
8237 {
8238 /* Something in the delay slot, but we've got a long branch. */
8239 if (GET_CODE (operands[1]) == CONST_INT)
8240 return "ldi %1,%0\n\tb %2";
8241 else
8242 return "copy %1,%0\n\tb %2";
8243 }
8244 }
8245
8246 /* Output an unconditional add and branch insn. */
8247
8248 const char *
8249 output_parallel_addb (rtx *operands, int length)
8250 {
8251 /* To make life easy we want operand0 to be the shared input/output
8252 operand and operand1 to be the readonly operand. */
8253 if (operands[0] == operands[1])
8254 operands[1] = operands[2];
8255
8256 /* These are the cases in which we win. */
8257 if (length == 4)
8258 return "add%I1b,tr %1,%0,%3";
8259
8260 /* None of these cases wins, but they don't lose either. */
8261 if (dbr_sequence_length () == 0)
8262 {
8263 /* Nothing in the delay slot, fake it by putting the combined
8264 insn (the copy or add) in the delay slot of a bl. */
8265 return "b %3\n\tadd%I1 %1,%0,%0";
8266 }
8267 else
8268 {
8269 /* Something in the delay slot, but we've got a long branch. */
8270 return "add%I1 %1,%0,%0\n\tb %3";
8271 }
8272 }
8273
8274 /* Return nonzero if INSN (a jump insn) immediately follows a call
8275 to a named function. This is used to avoid filling the delay slot
8276 of the jump since it can usually be eliminated by modifying RP in
8277 the delay slot of the call. */
8278
8279 int
8280 following_call (rtx insn)
8281 {
8282 if (! TARGET_JUMP_IN_DELAY)
8283 return 0;
8284
8285 /* Find the previous real insn, skipping NOTEs. */
8286 insn = PREV_INSN (insn);
8287 while (insn && GET_CODE (insn) == NOTE)
8288 insn = PREV_INSN (insn);
8289
8290 /* Check for CALL_INSNs and millicode calls. */
8291 if (insn
8292 && ((GET_CODE (insn) == CALL_INSN
8293 && get_attr_type (insn) != TYPE_DYNCALL)
8294 || (GET_CODE (insn) == INSN
8295 && GET_CODE (PATTERN (insn)) != SEQUENCE
8296 && GET_CODE (PATTERN (insn)) != USE
8297 && GET_CODE (PATTERN (insn)) != CLOBBER
8298 && get_attr_type (insn) == TYPE_MILLI)))
8299 return 1;
8300
8301 return 0;
8302 }
8303
8304 /* We use this hook to perform a PA specific optimization which is difficult
8305 to do in earlier passes.
8306
8307 We want the delay slots of branches within jump tables to be filled.
8308 None of the compiler passes at the moment even has the notion that a
8309 PA jump table doesn't contain addresses, but instead contains actual
8310 instructions!
8311
8312 Because we actually jump into the table, the addresses of each entry
8313 must stay constant in relation to the beginning of the table (which
8314 itself must stay constant relative to the instruction to jump into
8315 it). I don't believe we can guarantee earlier passes of the compiler
8316 will adhere to those rules.
8317
8318 So, late in the compilation process we find all the jump tables, and
8319 expand them into real code -- e.g., each entry in the jump table vector
8320 will get an appropriate label followed by a jump to the final target.
8321
8322 Reorg and the final jump pass can then optimize these branches and
8323 fill their delay slots. We end up with smaller, more efficient code.
8324
8325 The jump instructions within the table are special; we must be able
8326 to identify them during assembly output (if the jumps don't get filled
8327 we need to emit a nop rather than nullifying the delay slot). We
8328 identify jumps in switch tables by using insns with the attribute
8329 type TYPE_BTABLE_BRANCH.
8330
8331 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8332 insns. This serves two purposes, first it prevents jump.c from
8333 noticing that the last N entries in the table jump to the instruction
8334 immediately after the table and deleting the jumps. Second, those
8335 insns mark where we should emit .begin_brtab and .end_brtab directives
8336 when using GAS (allows for better link time optimizations). */
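/* As a sketch, a two-entry branch table such as

     (addr_vec [(label_ref L1) (label_ref L2)])

   is rewritten by the pass below into approximately

     begin_brtab
     Lt1:   b L1
            nop              ; delay slot, filled later when possible
     Lt2:   b L2
            nop
     end_brtab

   so that a branch into the table always lands on a real
   instruction.  */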
8337
8338 static void
8339 pa_reorg (void)
8340 {
8341 rtx insn;
8342
8343 remove_useless_addtr_insns (1);
8344
8345 if (pa_cpu < PROCESSOR_8000)
8346 pa_combine_instructions ();
8347
8348
8349 /* This is fairly cheap, so always run it if optimizing. */
8350 if (optimize > 0 && !TARGET_BIG_SWITCH)
8351 {
8352 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8353 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8354 {
8355 rtx pattern, tmp, location, label;
8356 unsigned int length, i;
8357
8358 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8359 if (GET_CODE (insn) != JUMP_INSN
8360 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8361 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8362 continue;
8363
8364 /* Emit marker for the beginning of the branch table. */
8365 emit_insn_before (gen_begin_brtab (), insn);
8366
8367 pattern = PATTERN (insn);
8368 location = PREV_INSN (insn);
8369 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8370
8371 for (i = 0; i < length; i++)
8372 {
8373 /* Emit a label before each jump to keep jump.c from
8374 removing this code. */
8375 tmp = gen_label_rtx ();
8376 LABEL_NUSES (tmp) = 1;
8377 emit_label_after (tmp, location);
8378 location = NEXT_INSN (location);
8379
8380 if (GET_CODE (pattern) == ADDR_VEC)
8381 label = XEXP (XVECEXP (pattern, 0, i), 0);
8382 else
8383 label = XEXP (XVECEXP (pattern, 1, i), 0);
8384
8385 tmp = gen_short_jump (label);
8386
8387 /* Emit the jump itself. */
8388 tmp = emit_jump_insn_after (tmp, location);
8389 JUMP_LABEL (tmp) = label;
8390 LABEL_NUSES (label)++;
8391 location = NEXT_INSN (location);
8392
8393 /* Emit a BARRIER after the jump. */
8394 emit_barrier_after (location);
8395 location = NEXT_INSN (location);
8396 }
8397
8398 /* Emit marker for the end of the branch table. */
8399 emit_insn_before (gen_end_brtab (), location);
8400 location = NEXT_INSN (location);
8401 emit_barrier_after (location);
8402
8403 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8404 delete_insn (insn);
8405 }
8406 }
8407 else
8408 {
8409 /* Still need brtab marker insns. FIXME: the presence of these
8410 markers disables output of the branch table to readonly memory,
8411 and any alignment directives that might be needed. Possibly,
8412 the begin_brtab insn should be output before the label for the
8413 table. This doesn't matter at the moment since the tables are
8414 always output in the text section. */
8415 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8416 {
8417 /* Find an ADDR_VEC insn. */
8418 if (GET_CODE (insn) != JUMP_INSN
8419 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8420 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8421 continue;
8422
8423 /* Now generate markers for the beginning and end of the
8424 branch table. */
8425 emit_insn_before (gen_begin_brtab (), insn);
8426 emit_insn_after (gen_end_brtab (), insn);
8427 }
8428 }
8429 }
8430
8431 /* The PA has a number of odd instructions which can perform multiple
8432 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8433 it may be profitable to combine two instructions into one instruction
8434 with two outputs. It's not profitable on PA2.0 machines because the
8435 two outputs would take two slots in the reorder buffers.
8436
8437 This routine finds instructions which can be combined and combines
8438 them. We only support some of the potential combinations, and we
8439 only try common ways to find suitable instructions.
8440
8441 * addb can add two registers or a register and a small integer
8442 and jump to a nearby (+-8k) location. Normally the jump to the
8443 nearby location is conditional on the result of the add, but by
8444 using the "true" condition we can make the jump unconditional.
8445 Thus addb can perform two independent operations in one insn.
8446
8447 * movb is similar to addb in that it can perform a reg->reg
8448 or small immediate->reg copy and jump to a nearby (+-8k) location.
8449
8450 * fmpyadd and fmpysub can perform a FP multiply and either an
8451 FP add or FP sub if the operands of the multiply and add/sub are
8452 independent (there are other minor restrictions). Note both
8453 the fmpy and fadd/fsub can in theory move to better spots according
8454 to data dependencies, but for now we require the fmpy stay at a
8455 fixed location.
8456
8457 * Many of the memory operations can perform pre & post updates
8458 of index registers. GCC's pre/post increment/decrement addressing
8459 is far too simple to take advantage of all the possibilities. This
8460 pass may not be suitable since those insns may not be independent.
8461
8462 * comclr can compare two ints or an int and a register, nullify
8463 the following instruction and zero some other register. This
8464 is more difficult to use as it's harder to find an insn which
8465 will generate a comclr than finding something like an unconditional
8466 branch. (conditional moves & long branches create comclr insns).
8467
8468 * Most arithmetic operations can conditionally skip the next
8469 instruction. They can be viewed as "perform this operation
8470 and conditionally jump to this nearby location" (where nearby
8471 is a few insns away). These are difficult to use due to the
8472 branch length restrictions. */
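/* For example (a hypothetical pair), on a PA1.1 machine the two insns

     fmpy,dbl %fr4,%fr5,%fr22
     fadd,dbl %fr6,%fr7,%fr8

   have independent operands, so the pass below may rewrite them as a
   single two-SET PARALLEL that matches an fmpyadd pattern.  */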
8473
8474 static void
8475 pa_combine_instructions (void)
8476 {
8477 rtx anchor, new;
8478
8479 /* This can get expensive since the basic algorithm is on the
8480 order of O(n^2) (or worse). Only do it for -O2 or higher
8481 levels of optimization. */
8482 if (optimize < 2)
8483 return;
8484
8485 /* Walk down the list of insns looking for "anchor" insns which
8486 may be combined with "floating" insns. As the name implies,
8487 "anchor" instructions don't move, while "floating" insns may
8488 move around. */
8489 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8490 new = make_insn_raw (new);
8491
8492 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8493 {
8494 enum attr_pa_combine_type anchor_attr;
8495 enum attr_pa_combine_type floater_attr;
8496
8497 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8498 Also ignore any special USE insns. */
8499 if ((GET_CODE (anchor) != INSN
8500 && GET_CODE (anchor) != JUMP_INSN
8501 && GET_CODE (anchor) != CALL_INSN)
8502 || GET_CODE (PATTERN (anchor)) == USE
8503 || GET_CODE (PATTERN (anchor)) == CLOBBER
8504 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8505 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8506 continue;
8507
8508 anchor_attr = get_attr_pa_combine_type (anchor);
8509 /* See if anchor is an insn suitable for combination. */
8510 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8511 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8512 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8513 && ! forward_branch_p (anchor)))
8514 {
8515 rtx floater;
8516
8517 for (floater = PREV_INSN (anchor);
8518 floater;
8519 floater = PREV_INSN (floater))
8520 {
8521 if (GET_CODE (floater) == NOTE
8522 || (GET_CODE (floater) == INSN
8523 && (GET_CODE (PATTERN (floater)) == USE
8524 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8525 continue;
8526
8527 /* Anything except a regular INSN will stop our search. */
8528 if (GET_CODE (floater) != INSN
8529 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8530 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8531 {
8532 floater = NULL_RTX;
8533 break;
8534 }
8535
8536 /* See if FLOATER is suitable for combination with the
8537 anchor. */
8538 floater_attr = get_attr_pa_combine_type (floater);
8539 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8540 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8541 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8542 && floater_attr == PA_COMBINE_TYPE_FMPY))
8543 {
8544 /* If ANCHOR and FLOATER can be combined, then we're
8545 done with this pass. */
8546 if (pa_can_combine_p (new, anchor, floater, 0,
8547 SET_DEST (PATTERN (floater)),
8548 XEXP (SET_SRC (PATTERN (floater)), 0),
8549 XEXP (SET_SRC (PATTERN (floater)), 1)))
8550 break;
8551 }
8552
8553 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8554 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8555 {
8556 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8557 {
8558 if (pa_can_combine_p (new, anchor, floater, 0,
8559 SET_DEST (PATTERN (floater)),
8560 XEXP (SET_SRC (PATTERN (floater)), 0),
8561 XEXP (SET_SRC (PATTERN (floater)), 1)))
8562 break;
8563 }
8564 else
8565 {
8566 if (pa_can_combine_p (new, anchor, floater, 0,
8567 SET_DEST (PATTERN (floater)),
8568 SET_SRC (PATTERN (floater)),
8569 SET_SRC (PATTERN (floater))))
8570 break;
8571 }
8572 }
8573 }
8574
8575 /* If we didn't find anything on the backwards scan try forwards. */
8576 if (!floater
8577 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8578 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8579 {
8580 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8581 {
8582 if (GET_CODE (floater) == NOTE
8583 || (GET_CODE (floater) == INSN
8584 && (GET_CODE (PATTERN (floater)) == USE
8585 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8587 continue;
8588
8589 /* Anything except a regular INSN will stop our search. */
8590 if (GET_CODE (floater) != INSN
8591 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8592 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8593 {
8594 floater = NULL_RTX;
8595 break;
8596 }
8597
8598 /* See if FLOATER is suitable for combination with the
8599 anchor. */
8600 floater_attr = get_attr_pa_combine_type (floater);
8601 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8602 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8603 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8604 && floater_attr == PA_COMBINE_TYPE_FMPY))
8605 {
8606 /* If ANCHOR and FLOATER can be combined, then we're
8607 done with this pass. */
8608 if (pa_can_combine_p (new, anchor, floater, 1,
8609 SET_DEST (PATTERN (floater)),
8610 XEXP (SET_SRC (PATTERN (floater)),
8611 0),
8612 XEXP (SET_SRC (PATTERN (floater)),
8613 1)))
8614 break;
8615 }
8616 }
8617 }
8618
8619 /* FLOATER will be nonzero if we found a suitable floating
8620 insn for combination with ANCHOR. */
8621 if (floater
8622 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8623 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8624 {
8625 /* Emit the new instruction and delete the old anchor. */
8626 emit_insn_before (gen_rtx_PARALLEL
8627 (VOIDmode,
8628 gen_rtvec (2, PATTERN (anchor),
8629 PATTERN (floater))),
8630 anchor);
8631
8632 PUT_CODE (anchor, NOTE);
8633 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8634 NOTE_SOURCE_FILE (anchor) = 0;
8635
8636 /* Emit a special USE insn for FLOATER, then delete
8637 the floating insn. */
8638 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8639 delete_insn (floater);
8640
8641 continue;
8642 }
8643 else if (floater
8644 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8645 {
8646 rtx temp;
8647 /* Emit the new_jump instruction and delete the old anchor. */
8648 temp
8649 = emit_jump_insn_before (gen_rtx_PARALLEL
8650 (VOIDmode,
8651 gen_rtvec (2, PATTERN (anchor),
8652 PATTERN (floater))),
8653 anchor);
8654
8655 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8656 PUT_CODE (anchor, NOTE);
8657 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8658 NOTE_SOURCE_FILE (anchor) = 0;
8659
8660 /* Emit a special USE insn for FLOATER, then delete
8661 the floating insn. */
8662 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8663 delete_insn (floater);
8664 continue;
8665 }
8666 }
8667 }
8668 }
8669
8670 static int
8671 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8672 rtx src1, rtx src2)
8673 {
8674 int insn_code_number;
8675 rtx start, end;
8676
8677 /* Create a PARALLEL with the patterns of ANCHOR and
8678 FLOATER, try to recognize it, then test constraints
8679 for the resulting pattern.
8680
8681 If the pattern doesn't match or the constraints
8682 aren't met keep searching for a suitable floater
8683 insn. */
8684 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8685 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8686 INSN_CODE (new) = -1;
8687 insn_code_number = recog_memoized (new);
8688 if (insn_code_number < 0
8689 || (extract_insn (new), ! constrain_operands (1)))
8690 return 0;
8691
8692 if (reversed)
8693 {
8694 start = anchor;
8695 end = floater;
8696 }
8697 else
8698 {
8699 start = floater;
8700 end = anchor;
8701 }
8702
8703 /* There are up to three operands to consider. One
8704 output and two inputs.
8705
8706 The output must not be used between FLOATER & ANCHOR
8707 exclusive. The inputs must not be set between
8708 FLOATER and ANCHOR exclusive. */
8709
8710 if (reg_used_between_p (dest, start, end))
8711 return 0;
8712
8713 if (reg_set_between_p (src1, start, end))
8714 return 0;
8715
8716 if (reg_set_between_p (src2, start, end))
8717 return 0;
8718
8719 /* If we get here, then everything is good. */
8720 return 1;
8721 }
8722
8723 /* Return nonzero if references for INSN are delayed.
8724
8725 Millicode insns are actually function calls with some special
8726 constraints on arguments and register usage.
8727
8728 Millicode calls always expect their arguments in the integer argument
8729 registers, and always return their result in %r29 (ret1). They
8730 are expected to clobber their arguments, %r1, %r29, and the return
8731 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8732
8733 This function tells reorg that the references to arguments and
8734 millicode calls do not appear to happen until after the millicode call.
8735 This allows reorg to put insns which set the argument registers into the
8736 delay slot of the millicode call -- thus they act more like traditional
8737 CALL_INSNs.
8738
8739 Note we cannot consider side effects of the insn to be delayed because
8740 the branch and link insn will clobber the return pointer. If we happened
8741 to use the return pointer in the delay slot of the call, then we lose.
8742
8743 get_attr_type will try to recognize the given insn, so make sure to
8744 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8745 in particular. */
8746 int
8747 insn_refs_are_delayed (rtx insn)
8748 {
8749 return ((GET_CODE (insn) == INSN
8750 && GET_CODE (PATTERN (insn)) != SEQUENCE
8751 && GET_CODE (PATTERN (insn)) != USE
8752 && GET_CODE (PATTERN (insn)) != CLOBBER
8753 && get_attr_type (insn) == TYPE_MILLI));
8754 }
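/* As an illustration of the convention described above, a call to the
   integer-multiply millicode routine $$mulI might look like this
   (hypothetical registers; the first two integer argument registers
   are %r26 and %r25, and the result returns in %r29):

     ldi 42,%r25           ; second argument
     bl $$mulI,%r31        ; millicode call, return pointer in %r31
     copy %r4,%r26         ; first argument, placed in the delay slot
     copy %r29,%r3         ; use the result

   Filling the delay slot with the %r26 setup is legal precisely
   because this function reports the argument references as delayed. */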
8755
8756 /* On the HP-PA the value is found in register(s) 28(-29), unless
8757 the mode is SF or DF. Then the value is returned in fr4 (32).
8758
8759 This must perform the same promotions as PROMOTE_MODE, else
8760 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8761
8762 Small structures must be returned in a PARALLEL on PA64 in order
8763 to match the HP Compiler ABI. */
8764
8765 rtx
8766 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8767 {
8768 enum machine_mode valmode;
8769
8770 /* Aggregates with a size less than or equal to 128 bits are returned
8771 in GR 28(-29). They are left justified. The pad bits are undefined.
8772 Larger aggregates are returned in memory. */
8773 if (TARGET_64BIT && AGGREGATE_TYPE_P (valtype))
8774 {
8775 rtx loc[2];
8776 int i, offset = 0;
8777 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8778
8779 for (i = 0; i < ub; i++)
8780 {
8781 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8782 gen_rtx_REG (DImode, 28 + i),
8783 GEN_INT (offset));
8784 offset += 8;
8785 }
8786
8787 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8788 }
8789
8790 if ((INTEGRAL_TYPE_P (valtype)
8791 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8792 || POINTER_TYPE_P (valtype))
8793 valmode = word_mode;
8794 else
8795 valmode = TYPE_MODE (valtype);
8796
8797 if (TREE_CODE (valtype) == REAL_TYPE
8798 && TYPE_MODE (valtype) != TFmode
8799 && !TARGET_SOFT_FLOAT)
8800 return gen_rtx_REG (valmode, 32);
8801
8802 return gen_rtx_REG (valmode, 28);
8803 }
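/* For example, a 12-byte aggregate returned on PA64 produces

     (parallel:BLK [(expr_list (reg:DI 28) (const_int 0))
                    (expr_list (reg:DI 29) (const_int 8))])

   i.e., the value is left justified in GRs 28 and 29, matching the
   loop above.  */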
8804
8805 /* Return the location of a parameter that is passed in a register or NULL
8806 if the parameter has any component that is passed in memory.
8807
8808 This is new code and will be pushed into the net sources after
8809 further testing.
8810
8811 ??? We might want to restructure this so that it looks more like other
8812 ports. */
8813 rtx
8814 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8815 int named ATTRIBUTE_UNUSED)
8816 {
8817 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8818 int alignment = 0;
8819 int arg_size;
8820 int fpr_reg_base;
8821 int gpr_reg_base;
8822 rtx retval;
8823
8824 if (mode == VOIDmode)
8825 return NULL_RTX;
8826
8827 arg_size = FUNCTION_ARG_SIZE (mode, type);
8828
8829 /* If this arg would be passed partially or totally on the stack, then
8830 this routine should return zero. FUNCTION_ARG_PARTIAL_NREGS will
8831 handle arguments which are split between regs and stack slots if
8832 the ABI mandates split arguments. */
8833 if (! TARGET_64BIT)
8834 {
8835 /* The 32-bit ABI does not split arguments. */
8836 if (cum->words + arg_size > max_arg_words)
8837 return NULL_RTX;
8838 }
8839 else
8840 {
8841 if (arg_size > 1)
8842 alignment = cum->words & 1;
8843 if (cum->words + alignment >= max_arg_words)
8844 return NULL_RTX;
8845 }
8846
8847 /* The 32-bit ABIs and the 64-bit ABIs are rather different,
8848 particularly in their handling of FP registers. We might
8849 be able to cleverly share code between them, but I'm not
8850 going to bother in the hope that splitting them up results
8851 in code that is more easily understood. */
8852
8853 if (TARGET_64BIT)
8854 {
8855 /* Advance the base registers to their current locations.
8856
8857 Remember, gprs grow towards smaller register numbers while
8858 fprs grow to higher register numbers. Also remember that
8859 although FP regs are 32-bit addressable, we pretend that
8860 the registers are 64 bits wide. */
8861 gpr_reg_base = 26 - cum->words;
8862 fpr_reg_base = 32 + cum->words;
8863
8864 /* Arguments wider than one word and small aggregates need special
8865 treatment. */
8866 if (arg_size > 1
8867 || mode == BLKmode
8868 || (type && AGGREGATE_TYPE_P (type)))
8869 {
8870 /* Double-extended precision (80-bit), quad-precision (128-bit)
8871 and aggregates including complex numbers are aligned on
8872 128-bit boundaries. The first eight 64-bit argument slots
8873 are associated one-to-one, with general registers r26
8874 through r19, and also with floating-point registers fr4
8875 through fr11. Arguments larger than one word are always
8876 passed in general registers.
8877
8878 Using a PARALLEL with a word mode register results in left
8879 justified data on a big-endian target. */
8880
8881 rtx loc[8];
8882 int i, offset = 0, ub = arg_size;
8883
8884 /* Align the base register. */
8885 gpr_reg_base -= alignment;
8886
8887 ub = MIN (ub, max_arg_words - cum->words - alignment);
8888 for (i = 0; i < ub; i++)
8889 {
8890 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8891 gen_rtx_REG (DImode, gpr_reg_base),
8892 GEN_INT (offset));
8893 gpr_reg_base -= 1;
8894 offset += 8;
8895 }
8896
8897 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8898 }
8899 }
8900 else
8901 {
8902 /* If the argument is larger than a word, then we know precisely
8903 which registers we must use. */
8904 if (arg_size > 1)
8905 {
8906 if (cum->words)
8907 {
8908 gpr_reg_base = 23;
8909 fpr_reg_base = 38;
8910 }
8911 else
8912 {
8913 gpr_reg_base = 25;
8914 fpr_reg_base = 34;
8915 }
8916
8917 /* Structures 5 to 8 bytes in size are passed in the general
8918 registers in the same manner as other non floating-point
8919 objects. The data is right-justified and zero-extended
8920 to 64 bits. This is the opposite of the normal justification
8921 used on big-endian targets and requires special treatment.
8922 We now define BLOCK_REG_PADDING to pad these objects. */
8923 if (mode == BLKmode)
8924 {
8925 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8926 gen_rtx_REG (DImode, gpr_reg_base),
8927 const0_rtx);
8928 return gen_rtx_PARALLEL (mode, gen_rtvec (1, loc));
8929 }
8930 }
8931 else
8932 {
8933 /* We have a single word (32 bits). A simple computation
8934 will get us the register numbers we need. */
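/* For example, the second argument word (cum->words == 1) gives
   gpr_reg_base == 26 - 1 == 25 and fpr_reg_base == 32 + 2 * 1 == 34. */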
8935 gpr_reg_base = 26 - cum->words;
8936 fpr_reg_base = 32 + 2 * cum->words;
8937 }
8938 }
8939
8940 /* Determine if the argument needs to be passed in both general and
8941 floating point registers. */
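/* Illustrative example (hypothetical code, not from this file):
   given an unprototyped callee

     extern void g ();
     g (1.5);

   the caller cannot know whether G will read its argument from a
   general register or a floating-point register, so the value is
   passed in both, via the two-element PARALLEL built below. */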
8942 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8943 /* If we are doing soft-float with portable runtime, then there
8944 is no need to worry about FP regs. */
8945 && !TARGET_SOFT_FLOAT
8946 /* The parameter must be some kind of float, else we can just
8947 pass it in integer registers. */
8948 && FLOAT_MODE_P (mode)
8949 /* The target function must not have a prototype. */
8950 && cum->nargs_prototype <= 0
8951 /* libcalls do not need to pass items in both FP and general
8952 registers. */
8953 && type != NULL_TREE
8954 /* All this hair applies to "outgoing" args only. This includes
8955 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
8956 && !cum->incoming)
8957 /* Also pass outgoing floating arguments in both registers in indirect
8958 calls with the 32-bit ABI and the HP assembler, since there is
8959 no way to specify the argument locations in static functions. */
8960 || (!TARGET_64BIT
8961 && !TARGET_GAS
8962 && !cum->incoming
8963 && cum->indirect
8964 && FLOAT_MODE_P (mode)))
8965 {
8966 retval
8967 = gen_rtx_PARALLEL
8968 (mode,
8969 gen_rtvec (2,
8970 gen_rtx_EXPR_LIST (VOIDmode,
8971 gen_rtx_REG (mode, fpr_reg_base),
8972 const0_rtx),
8973 gen_rtx_EXPR_LIST (VOIDmode,
8974 gen_rtx_REG (mode, gpr_reg_base),
8975 const0_rtx)));
8976 }
8977 else
8978 {
8979 /* See if we should pass this parameter in a general register. */
8980 if (TARGET_SOFT_FLOAT
8981 /* Indirect calls in the normal 32-bit ABI require all arguments
8982 to be passed in general registers. */
8983 || (!TARGET_PORTABLE_RUNTIME
8984 && !TARGET_64BIT
8985 && !TARGET_ELF32
8986 && cum->indirect)
8987 /* If the parameter is not a floating point parameter, then
8988 it belongs in GPRs. */
8989 || !FLOAT_MODE_P (mode))
8990 retval = gen_rtx_REG (mode, gpr_reg_base);
8991 else
8992 retval = gen_rtx_REG (mode, fpr_reg_base);
8993 }
8994 return retval;
8995 }
8996
8997
8998 /* If this arg would be passed totally in registers or totally on the stack,
8999 then this routine should return zero. It is currently called only for
9000 the 64-bit target. */
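/* For example (illustrative values): a 24-byte aggregate
   (FUNCTION_ARG_SIZE == 3) arriving when cum->words == 6 neither
   fits fully in registers (6 + 3 > 8) nor starts on the stack
   (6 < 8), so we return 8 - 6 - 0 == 2: two words go in registers
   and the remaining word goes on the stack. */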
9001 int
9002 function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9003 tree type, int named ATTRIBUTE_UNUSED)
9004 {
9005 unsigned int max_arg_words = 8;
9006 unsigned int offset = 0;
9007
9008 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9009 offset = 1;
9010
9011 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9012 /* Arg fits fully into registers. */
9013 return 0;
9014 else if (cum->words + offset >= max_arg_words)
9015 /* Arg fully on the stack. */
9016 return 0;
9017 else
9018 /* Arg is split. */
9019 return max_arg_words - cum->words - offset;
9020 }
9021
9022
9023 /* Return 1 if OP is a comparison operator valid in a compare and
9024 branch insn. This allows MATCH_OPERATOR to recognize all the branch insns. */
9025
9026 int
9027 cmpib_comparison_operator (rtx op, enum machine_mode mode)
9028 {
9029 return ((mode == VOIDmode || GET_MODE (op) == mode)
9030 && (GET_CODE (op) == EQ
9031 || GET_CODE (op) == NE
9032 || GET_CODE (op) == GT
9033 || GET_CODE (op) == GTU
9034 || GET_CODE (op) == GE
9035 || GET_CODE (op) == LT
9036 || GET_CODE (op) == LE
9037 || GET_CODE (op) == LEU));
9038 }
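
/* A machine description pattern would use this predicate along the
   following lines (an illustrative sketch, not a verbatim pa.md
   excerpt):

     (match_operator 3 "cmpib_comparison_operator"
       [(match_operand:SI 1 "register_operand" "")
        (match_operand:SI 2 "register_operand" "")])
*/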
9039
9040 /* On hpux10, the linker will give an error if we have a reference
9041 in the read-only data section to a symbol defined in a shared
9042 library. Therefore, expressions that might require a reloc
9043 cannot be placed in the read-only data section. */
9044
9045 static void
9046 pa_select_section (tree exp, int reloc,
9047 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9048 {
9049 if (TREE_CODE (exp) == VAR_DECL
9050 && TREE_READONLY (exp)
9051 && !TREE_THIS_VOLATILE (exp)
9052 && DECL_INITIAL (exp)
9053 && (DECL_INITIAL (exp) == error_mark_node
9054 || TREE_CONSTANT (DECL_INITIAL (exp)))
9055 && !reloc)
9056 readonly_data_section ();
9057 else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
9058 && !reloc)
9059 readonly_data_section ();
9060 else
9061 data_section ();
9062 }
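
/* For instance (hypothetical declarations, for illustration only):

     extern int foo;
     const int *const p = &foo;

   P's initializer needs a relocation against FOO, so RELOC is
   nonzero here and P goes into the data section even though the
   object itself is read-only. */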
9063
9064 static void
9065 pa_globalize_label (FILE *stream, const char *name)
9066 {
9067 /* We only handle DATA objects here; functions are globalized in
9068 ASM_DECLARE_FUNCTION_NAME. */
9069 if (! FUNCTION_NAME_P (name))
9070 {
9071 fputs ("\t.EXPORT ", stream);
9072 assemble_name (stream, name);
9073 fputs (",DATA\n", stream);
9074 }
9075 }
9076
9077 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9078
9079 static rtx
9080 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9081 int incoming ATTRIBUTE_UNUSED)
9082 {
9083 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9084 }
9085
9086 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9087
9088 bool
9089 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9090 {
9091 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9092 PA64 ABI says that objects larger than 128 bits are returned in memory.
9093 Note, int_size_in_bytes can return -1 if the size of the object is
9094 variable or larger than the maximum value that can be expressed as
9095 a HOST_WIDE_INT. It can also return zero for an empty type. The
9096 simplest way to handle variable and empty types is to pass them in
9097 memory. This avoids problems in defining the boundaries of argument
9098 slots, allocating registers, etc. */
9099 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9100 || int_size_in_bytes (type) <= 0);
9101 }
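
/* For example (illustrative): a 12-byte structure is returned in
   memory under the 32-bit (SOM) ABI, since 12 > 8, but in registers
   under the 64-bit ABI, since 12 <= 16. A variable-sized type makes
   int_size_in_bytes return -1 and is therefore returned in memory
   on both targets. */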
9102
9103 #include "gt-pa.h"