1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "debug.h"
49 #include "langhooks.h"
50 #include "optabs.h"
51 #include "gimple.h"
52 #include "df.h"
53 #include "params.h"
54 #include "cfgloop.h"
55 #include "opts.h"
56
57 /* Define the specific costs for a given cpu. */
58
59 struct processor_costs
60 {
61 /* multiplication */
62 const int m; /* cost of an M instruction. */
63 const int mghi; /* cost of an MGHI instruction. */
64 const int mh; /* cost of an MH instruction. */
65 const int mhi; /* cost of an MHI instruction. */
66 const int ml; /* cost of an ML instruction. */
67 const int mr; /* cost of an MR instruction. */
68 const int ms; /* cost of an MS instruction. */
69 const int msg; /* cost of an MSG instruction. */
70 const int msgf; /* cost of an MSGF instruction. */
71 const int msgfr; /* cost of an MSGFR instruction. */
72 const int msgr; /* cost of an MSGR instruction. */
73 const int msr; /* cost of an MSR instruction. */
74 const int mult_df; /* cost of multiplication in DFmode. */
75     const int mxbr;    /* cost of multiplication in TFmode.  */
76 /* square root */
77 const int sqxbr; /* cost of square root in TFmode. */
78 const int sqdbr; /* cost of square root in DFmode. */
79 const int sqebr; /* cost of square root in SFmode. */
80 /* multiply and add */
81 const int madbr; /* cost of multiply and add in DFmode. */
82 const int maebr; /* cost of multiply and add in SFmode. */
83 /* division */
84     const int dxbr;    /* cost of division in TFmode.  */
85     const int ddbr;    /* cost of division in DFmode.  */
86     const int debr;    /* cost of division in SFmode.  */
87     const int dlgr;    /* cost of a DLGR instruction.  */
88     const int dlr;     /* cost of a DLR instruction.  */
89     const int dr;      /* cost of a DR instruction.  */
90     const int dsgfr;   /* cost of a DSGFR instruction.  */
91     const int dsgr;    /* cost of a DSGR instruction.  */
92 };
93
94 const struct processor_costs *s390_cost;
95
96 static const
97 struct processor_costs z900_cost =
98 {
99 COSTS_N_INSNS (5), /* M */
100 COSTS_N_INSNS (10), /* MGHI */
101 COSTS_N_INSNS (5), /* MH */
102 COSTS_N_INSNS (4), /* MHI */
103 COSTS_N_INSNS (5), /* ML */
104 COSTS_N_INSNS (5), /* MR */
105 COSTS_N_INSNS (4), /* MS */
106 COSTS_N_INSNS (15), /* MSG */
107 COSTS_N_INSNS (7), /* MSGF */
108 COSTS_N_INSNS (7), /* MSGFR */
109 COSTS_N_INSNS (10), /* MSGR */
110 COSTS_N_INSNS (4), /* MSR */
111 COSTS_N_INSNS (7), /* multiplication in DFmode */
112 COSTS_N_INSNS (13), /* MXBR */
113 COSTS_N_INSNS (136), /* SQXBR */
114 COSTS_N_INSNS (44), /* SQDBR */
115 COSTS_N_INSNS (35), /* SQEBR */
116 COSTS_N_INSNS (18), /* MADBR */
117 COSTS_N_INSNS (13), /* MAEBR */
118 COSTS_N_INSNS (134), /* DXBR */
119 COSTS_N_INSNS (30), /* DDBR */
120 COSTS_N_INSNS (27), /* DEBR */
121 COSTS_N_INSNS (220), /* DLGR */
122 COSTS_N_INSNS (34), /* DLR */
123 COSTS_N_INSNS (34), /* DR */
124 COSTS_N_INSNS (32), /* DSGFR */
125 COSTS_N_INSNS (32), /* DSGR */
126 };
127
128 static const
129 struct processor_costs z990_cost =
130 {
131 COSTS_N_INSNS (4), /* M */
132 COSTS_N_INSNS (2), /* MGHI */
133 COSTS_N_INSNS (2), /* MH */
134 COSTS_N_INSNS (2), /* MHI */
135 COSTS_N_INSNS (4), /* ML */
136 COSTS_N_INSNS (4), /* MR */
137 COSTS_N_INSNS (5), /* MS */
138 COSTS_N_INSNS (6), /* MSG */
139 COSTS_N_INSNS (4), /* MSGF */
140 COSTS_N_INSNS (4), /* MSGFR */
141 COSTS_N_INSNS (4), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (1), /* multiplication in DFmode */
144 COSTS_N_INSNS (28), /* MXBR */
145 COSTS_N_INSNS (130), /* SQXBR */
146 COSTS_N_INSNS (66), /* SQDBR */
147 COSTS_N_INSNS (38), /* SQEBR */
148 COSTS_N_INSNS (1), /* MADBR */
149 COSTS_N_INSNS (1), /* MAEBR */
150 COSTS_N_INSNS (60), /* DXBR */
151 COSTS_N_INSNS (40), /* DDBR */
152 COSTS_N_INSNS (26), /* DEBR */
153 COSTS_N_INSNS (176), /* DLGR */
154 COSTS_N_INSNS (31), /* DLR */
155 COSTS_N_INSNS (31), /* DR */
156 COSTS_N_INSNS (31), /* DSGFR */
157 COSTS_N_INSNS (31), /* DSGR */
158 };
159
160 static const
161 struct processor_costs z9_109_cost =
162 {
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (30), /* DLGR */
186 COSTS_N_INSNS (23), /* DLR */
187 COSTS_N_INSNS (23), /* DR */
188 COSTS_N_INSNS (24), /* DSGFR */
189 COSTS_N_INSNS (24), /* DSGR */
190 };
191
192 static const
193 struct processor_costs z10_cost =
194 {
195 COSTS_N_INSNS (10), /* M */
196 COSTS_N_INSNS (10), /* MGHI */
197 COSTS_N_INSNS (10), /* MH */
198 COSTS_N_INSNS (10), /* MHI */
199 COSTS_N_INSNS (10), /* ML */
200 COSTS_N_INSNS (10), /* MR */
201 COSTS_N_INSNS (10), /* MS */
202 COSTS_N_INSNS (10), /* MSG */
203 COSTS_N_INSNS (10), /* MSGF */
204 COSTS_N_INSNS (10), /* MSGFR */
205 COSTS_N_INSNS (10), /* MSGR */
206 COSTS_N_INSNS (10), /* MSR */
207 COSTS_N_INSNS (1) , /* multiplication in DFmode */
208 COSTS_N_INSNS (50), /* MXBR */
209 COSTS_N_INSNS (120), /* SQXBR */
210 COSTS_N_INSNS (52), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (111), /* DXBR */
215 COSTS_N_INSNS (39), /* DDBR */
216 COSTS_N_INSNS (32), /* DEBR */
217 COSTS_N_INSNS (160), /* DLGR */
218 COSTS_N_INSNS (71), /* DLR */
219 COSTS_N_INSNS (71), /* DR */
220 COSTS_N_INSNS (71), /* DSGFR */
221 COSTS_N_INSNS (71), /* DSGR */
222 };
223
224 static const
225 struct processor_costs z196_cost =
226 {
227 COSTS_N_INSNS (7), /* M */
228 COSTS_N_INSNS (5), /* MGHI */
229 COSTS_N_INSNS (5), /* MH */
230 COSTS_N_INSNS (5), /* MHI */
231 COSTS_N_INSNS (7), /* ML */
232 COSTS_N_INSNS (7), /* MR */
233 COSTS_N_INSNS (6), /* MS */
234 COSTS_N_INSNS (8), /* MSG */
235 COSTS_N_INSNS (6), /* MSGF */
236 COSTS_N_INSNS (6), /* MSGFR */
237 COSTS_N_INSNS (8), /* MSGR */
238 COSTS_N_INSNS (6), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (40), /* MXBR B+40 */
241 COSTS_N_INSNS (100), /* SQXBR B+100 */
242 COSTS_N_INSNS (42), /* SQDBR B+42 */
243 COSTS_N_INSNS (28), /* SQEBR B+28 */
244 COSTS_N_INSNS (1), /* MADBR B */
245 COSTS_N_INSNS (1), /* MAEBR B */
246 COSTS_N_INSNS (101), /* DXBR B+101 */
247 COSTS_N_INSNS (29), /* DDBR */
248 COSTS_N_INSNS (22), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR cracked */
250 COSTS_N_INSNS (160), /* DLR cracked */
251 COSTS_N_INSNS (160), /* DR expanded */
252 COSTS_N_INSNS (160), /* DSGFR cracked */
253 COSTS_N_INSNS (160), /* DSGR cracked */
254 };
255
256 extern int reload_completed;
257
258 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
259 static rtx last_scheduled_insn;
260
261 /* Structure used to hold the components of a S/390 memory
262 address. A legitimate address on S/390 is of the general
263 form
264 base + index + displacement
265 where any of the components is optional.
266
267 base and index are registers of the class ADDR_REGS,
268 displacement is an unsigned 12-bit immediate constant. */
269
270 struct s390_address
271 {
272 rtx base;
273 rtx indx;
274 rtx disp;
275 bool pointer;
276 bool literal_pool;
277 };
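
/* For illustration: a memory address such as
   (plus (plus (reg %r3) (reg %r2)) (const_int 4092)) decomposes into
   indx = %r3, base = %r2 and disp = 4092, i.e. the operand written as
   4092(%r3,%r2) in assembler syntax.  */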
278
279 /* The following structure is embedded in the machine
280 specific part of struct function. */
281
282 struct GTY (()) s390_frame_layout
283 {
284 /* Offset within stack frame. */
285 HOST_WIDE_INT gprs_offset;
286 HOST_WIDE_INT f0_offset;
287 HOST_WIDE_INT f4_offset;
288 HOST_WIDE_INT f8_offset;
289 HOST_WIDE_INT backchain_offset;
290
291   /* Numbers of the first and last GPRs for which slots in the
292      register save area are reserved.  */
293 int first_save_gpr_slot;
294 int last_save_gpr_slot;
295
296 /* Number of first and last gpr to be saved, restored. */
297 int first_save_gpr;
298 int first_restore_gpr;
299 int last_save_gpr;
300 int last_restore_gpr;
301
302 /* Bits standing for floating point registers. Set, if the
303 respective register has to be saved. Starting with reg 16 (f0)
304 at the rightmost bit.
305 Bit 15 - 8 7 6 5 4 3 2 1 0
306 fpr 15 - 8 7 5 3 1 6 4 2 0
307 reg 31 - 24 23 22 21 20 19 18 17 16 */
308 unsigned int fpr_bitmap;
309
310 /* Number of floating point registers f8-f15 which must be saved. */
311 int high_fprs;
312
313 /* Set if return address needs to be saved.
314 This flag is set by s390_return_addr_rtx if it could not use
315 the initial value of r14 and therefore depends on r14 saved
316 to the stack. */
317 bool save_return_addr_p;
318
319 /* Size of stack frame. */
320 HOST_WIDE_INT frame_size;
321 };
322
323 /* Define the structure for the machine field in struct function. */
324
325 struct GTY(()) machine_function
326 {
327 struct s390_frame_layout frame_layout;
328
329 /* Literal pool base register. */
330 rtx base_reg;
331
332 /* True if we may need to perform branch splitting. */
333 bool split_branches_pending_p;
334
335 /* Some local-dynamic TLS symbol name. */
336 const char *some_ld_name;
337
338 bool has_landing_pad_p;
339 };
340
341 /* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
342
343 #define cfun_frame_layout (cfun->machine->frame_layout)
344 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
345 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
346 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
347 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
348 (1 << (BITNUM)))
349 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
350 (1 << (BITNUM))))
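
/* For illustration: cfun_set_fpr_bit (0) records that f0 (hard reg 16)
   needs a save slot in the frame, and cfun_fpr_bit_p (0) tests that bit
   when the frame layout is computed (cf. the fpr_bitmap comment above).  */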
351
352 /* Number of GPRs and FPRs used for argument passing. */
353 #define GP_ARG_NUM_REG 5
354 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
355
356 /* A couple of shortcuts. */
357 #define CONST_OK_FOR_J(x) \
358 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
359 #define CONST_OK_FOR_K(x) \
360 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
361 #define CONST_OK_FOR_Os(x) \
362 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
363 #define CONST_OK_FOR_Op(x) \
364 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
365 #define CONST_OK_FOR_On(x) \
366 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
367
368 #define REGNO_PAIR_OK(REGNO, MODE) \
369 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
370
371 /* Read-ahead distance of the dynamic branch prediction unit, in
372    bytes, on a z10 (or newer) CPU.  */
373 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
374
375 /* Return the alignment for LABEL. We default to the -falign-labels
376 value except for the literal pool base label. */
377 int
378 s390_label_align (rtx label)
379 {
380 rtx prev_insn = prev_active_insn (label);
381
382 if (prev_insn == NULL_RTX)
383 goto old;
384
385 prev_insn = single_set (prev_insn);
386
387 if (prev_insn == NULL_RTX)
388 goto old;
389
390 prev_insn = SET_SRC (prev_insn);
391
392 /* Don't align literal pool base labels. */
393 if (GET_CODE (prev_insn) == UNSPEC
394 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
395 return 0;
396
397 old:
398 return align_labels_log;
399 }
400
401 static enum machine_mode
402 s390_libgcc_cmp_return_mode (void)
403 {
404 return TARGET_64BIT ? DImode : SImode;
405 }
406
407 static enum machine_mode
408 s390_libgcc_shift_count_mode (void)
409 {
410 return TARGET_64BIT ? DImode : SImode;
411 }
412
413 static enum machine_mode
414 s390_unwind_word_mode (void)
415 {
416 return TARGET_64BIT ? DImode : SImode;
417 }
418
419 /* Return true if the back end supports mode MODE. */
420 static bool
421 s390_scalar_mode_supported_p (enum machine_mode mode)
422 {
423 /* In contrast to the default implementation reject TImode constants on 31bit
424 TARGET_ZARCH for ABI compliance. */
425 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
426 return false;
427
428 if (DECIMAL_FLOAT_MODE_P (mode))
429 return default_decimal_float_supported_p ();
430
431 return default_scalar_mode_supported_p (mode);
432 }
433
434 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
435
436 void
437 s390_set_has_landing_pad_p (bool value)
438 {
439 cfun->machine->has_landing_pad_p = value;
440 }
441
442 /* If two condition code modes are compatible, return a condition code
443 mode which is compatible with both. Otherwise, return
444 VOIDmode. */
445
446 static enum machine_mode
447 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
448 {
449 if (m1 == m2)
450 return m1;
451
452 switch (m1)
453 {
454 case CCZmode:
455 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
456 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
457 return m2;
458 return VOIDmode;
459
460 case CCSmode:
461 case CCUmode:
462 case CCTmode:
463 case CCSRmode:
464 case CCURmode:
465 case CCZ1mode:
466 if (m2 == CCZmode)
467 return m1;
468
469 return VOIDmode;
470
471 default:
472 return VOIDmode;
473 }
474 return VOIDmode;
475 }
476
477 /* Return true if SET either doesn't set the CC register, or else
478 the source and destination have matching CC modes and that
479 CC mode is at least as constrained as REQ_MODE. */
480
481 static bool
482 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
483 {
484 enum machine_mode set_mode;
485
486 gcc_assert (GET_CODE (set) == SET);
487
488 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
489 return 1;
490
491 set_mode = GET_MODE (SET_DEST (set));
492 switch (set_mode)
493 {
494 case CCSmode:
495 case CCSRmode:
496 case CCUmode:
497 case CCURmode:
498 case CCLmode:
499 case CCL1mode:
500 case CCL2mode:
501 case CCL3mode:
502 case CCT1mode:
503 case CCT2mode:
504 case CCT3mode:
505 if (req_mode != set_mode)
506 return 0;
507 break;
508
509 case CCZmode:
510 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
511 && req_mode != CCSRmode && req_mode != CCURmode)
512 return 0;
513 break;
514
515 case CCAPmode:
516 case CCANmode:
517 if (req_mode != CCAmode)
518 return 0;
519 break;
520
521 default:
522 gcc_unreachable ();
523 }
524
525 return (GET_MODE (SET_SRC (set)) == set_mode);
526 }
527
528 /* Return true if every SET in INSN that sets the CC register
529 has source and destination with matching CC modes and that
530 CC mode is at least as constrained as REQ_MODE.
531 If REQ_MODE is VOIDmode, always return false. */
532
533 bool
534 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
535 {
536 int i;
537
538 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
539 if (req_mode == VOIDmode)
540 return false;
541
542 if (GET_CODE (PATTERN (insn)) == SET)
543 return s390_match_ccmode_set (PATTERN (insn), req_mode);
544
545 if (GET_CODE (PATTERN (insn)) == PARALLEL)
546 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
547 {
548 rtx set = XVECEXP (PATTERN (insn), 0, i);
549 if (GET_CODE (set) == SET)
550 if (!s390_match_ccmode_set (set, req_mode))
551 return false;
552 }
553
554 return true;
555 }
556
557 /* If a test-under-mask instruction can be used to implement
558 (compare (and ... OP1) OP2), return the CC mode required
559 to do that. Otherwise, return VOIDmode.
560 MIXED is true if the instruction can distinguish between
561 CC1 and CC2 for mixed selected bits (TMxx), it is false
562 if the instruction cannot (TM). */
563
564 enum machine_mode
565 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
566 {
567 int bit0, bit1;
568
569 /* ??? Fixme: should work on CONST_DOUBLE as well. */
570 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
571 return VOIDmode;
572
573 /* Selected bits all zero: CC0.
574 e.g.: int a; if ((a & (16 + 128)) == 0) */
575 if (INTVAL (op2) == 0)
576 return CCTmode;
577
578 /* Selected bits all one: CC3.
579 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
580 if (INTVAL (op2) == INTVAL (op1))
581 return CCT3mode;
582
583 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
584 int a;
585 if ((a & (16 + 128)) == 16) -> CCT1
586 if ((a & (16 + 128)) == 128) -> CCT2 */
587 if (mixed)
588 {
589 bit1 = exact_log2 (INTVAL (op2));
590 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
591 if (bit0 != -1 && bit1 != -1)
592 return bit0 > bit1 ? CCT1mode : CCT2mode;
593 }
594
595 return VOIDmode;
596 }
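
/* Worked example for the CCT1/CCT2 case above: with op1 = 16 + 128 and
   op2 = 16 we get bit1 = exact_log2 (16) = 4 and
   bit0 = exact_log2 ((16 + 128) ^ 16) = 7, hence bit0 > bit1 and CCT1mode
   is chosen; with op2 = 128 the roles swap and CCT2mode results.  */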
597
598 /* Given a comparison code OP (EQ, NE, etc.) and the operands
599 OP0 and OP1 of a COMPARE, return the mode to be used for the
600 comparison. */
601
602 enum machine_mode
603 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
604 {
605 switch (code)
606 {
607 case EQ:
608 case NE:
609 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
610 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
611 return CCAPmode;
612 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
613 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
614 return CCAPmode;
615 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
616 || GET_CODE (op1) == NEG)
617 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
618 return CCLmode;
619
620 if (GET_CODE (op0) == AND)
621 {
622 /* Check whether we can potentially do it via TM. */
623 enum machine_mode ccmode;
624 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
625 if (ccmode != VOIDmode)
626 {
627 /* Relax CCTmode to CCZmode to allow fall-back to AND
628 if that turns out to be beneficial. */
629 return ccmode == CCTmode ? CCZmode : ccmode;
630 }
631 }
632
633 if (register_operand (op0, HImode)
634 && GET_CODE (op1) == CONST_INT
635 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
636 return CCT3mode;
637 if (register_operand (op0, QImode)
638 && GET_CODE (op1) == CONST_INT
639 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
640 return CCT3mode;
641
642 return CCZmode;
643
644 case LE:
645 case LT:
646 case GE:
647 case GT:
648 /* The only overflow condition of NEG and ABS happens when
649 -INT_MAX is used as parameter, which stays negative. So
650 we have an overflow from a positive value to a negative.
651 Using CCAP mode the resulting cc can be used for comparisons. */
652 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
653 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
654 return CCAPmode;
655
656 /* If constants are involved in an add instruction it is possible to use
657 the resulting cc for comparisons with zero. Knowing the sign of the
658 constant the overflow behavior gets predictable. e.g.:
659 int a, b; if ((b = a + c) > 0)
660 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
661 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
662 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
663 {
664 if (INTVAL (XEXP((op0), 1)) < 0)
665 return CCANmode;
666 else
667 return CCAPmode;
668 }
669 /* Fall through. */
670 case UNORDERED:
671 case ORDERED:
672 case UNEQ:
673 case UNLE:
674 case UNLT:
675 case UNGE:
676 case UNGT:
677 case LTGT:
678 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
679 && GET_CODE (op1) != CONST_INT)
680 return CCSRmode;
681 return CCSmode;
682
683 case LTU:
684 case GEU:
685 if (GET_CODE (op0) == PLUS
686 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
687 return CCL1mode;
688
689 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
690 && GET_CODE (op1) != CONST_INT)
691 return CCURmode;
692 return CCUmode;
693
694 case LEU:
695 case GTU:
696 if (GET_CODE (op0) == MINUS
697 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
698 return CCL2mode;
699
700 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
701 && GET_CODE (op1) != CONST_INT)
702 return CCURmode;
703 return CCUmode;
704
705 default:
706 gcc_unreachable ();
707 }
708 }
709
710 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
711 that we can implement more efficiently. */
712
713 void
714 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
715 {
716 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
717 if ((*code == EQ || *code == NE)
718 && *op1 == const0_rtx
719 && GET_CODE (*op0) == ZERO_EXTRACT
720 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
721 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
722 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
723 {
724 rtx inner = XEXP (*op0, 0);
725 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
726 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
727 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
728
729 if (len > 0 && len < modesize
730 && pos >= 0 && pos + len <= modesize
731 && modesize <= HOST_BITS_PER_WIDE_INT)
732 {
733 unsigned HOST_WIDE_INT block;
734 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
735 block <<= modesize - pos - len;
736
737 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
738 gen_int_mode (block, GET_MODE (inner)));
739 }
740 }
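
/* Example: a 32-bit (zero_extract x 1 7) == 0 test is rewritten here as
   (and x 0x01000000) == 0, since BLOCK ends up as 1 << (32 - 7 - 1);
   the AND form can then be matched by the TM patterns.  */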
741
742 /* Narrow AND of memory against immediate to enable TM. */
743 if ((*code == EQ || *code == NE)
744 && *op1 == const0_rtx
745 && GET_CODE (*op0) == AND
746 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
747 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
748 {
749 rtx inner = XEXP (*op0, 0);
750 rtx mask = XEXP (*op0, 1);
751
752 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
753 if (GET_CODE (inner) == SUBREG
754 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
755 && (GET_MODE_SIZE (GET_MODE (inner))
756 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
757 && ((INTVAL (mask)
758 & GET_MODE_MASK (GET_MODE (inner))
759 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
760 == 0))
761 inner = SUBREG_REG (inner);
762
763 /* Do not change volatile MEMs. */
764 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
765 {
766 int part = s390_single_part (XEXP (*op0, 1),
767 GET_MODE (inner), QImode, 0);
768 if (part >= 0)
769 {
770 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
771 inner = adjust_address_nv (inner, QImode, part);
772 *op0 = gen_rtx_AND (QImode, inner, mask);
773 }
774 }
775 }
776
777 /* Narrow comparisons against 0xffff to HImode if possible. */
778 if ((*code == EQ || *code == NE)
779 && GET_CODE (*op1) == CONST_INT
780 && INTVAL (*op1) == 0xffff
781 && SCALAR_INT_MODE_P (GET_MODE (*op0))
782 && (nonzero_bits (*op0, GET_MODE (*op0))
783 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
784 {
785 *op0 = gen_lowpart (HImode, *op0);
786 *op1 = constm1_rtx;
787 }
788
789 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
790 if (GET_CODE (*op0) == UNSPEC
791 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
792 && XVECLEN (*op0, 0) == 1
793 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
794 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
795 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
796 && *op1 == const0_rtx)
797 {
798 enum rtx_code new_code = UNKNOWN;
799 switch (*code)
800 {
801 case EQ: new_code = EQ; break;
802 case NE: new_code = NE; break;
803 case LT: new_code = GTU; break;
804 case GT: new_code = LTU; break;
805 case LE: new_code = GEU; break;
806 case GE: new_code = LEU; break;
807 default: break;
808 }
809
810 if (new_code != UNKNOWN)
811 {
812 *op0 = XVECEXP (*op0, 0, 0);
813 *code = new_code;
814 }
815 }
816
817 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
818 if (GET_CODE (*op0) == UNSPEC
819 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
820 && XVECLEN (*op0, 0) == 1
821 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
822 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
823 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
824 && *op1 == const0_rtx)
825 {
826 enum rtx_code new_code = UNKNOWN;
827 switch (*code)
828 {
829 case EQ: new_code = EQ; break;
830 case NE: new_code = NE; break;
831 default: break;
832 }
833
834 if (new_code != UNKNOWN)
835 {
836 *op0 = XVECEXP (*op0, 0, 0);
837 *code = new_code;
838 }
839 }
840
841 /* Simplify cascaded EQ, NE with const0_rtx. */
842 if ((*code == NE || *code == EQ)
843 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
844 && GET_MODE (*op0) == SImode
845 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
846 && REG_P (XEXP (*op0, 0))
847 && XEXP (*op0, 1) == const0_rtx
848 && *op1 == const0_rtx)
849 {
850 if ((*code == EQ && GET_CODE (*op0) == NE)
851 || (*code == NE && GET_CODE (*op0) == EQ))
852 *code = EQ;
853 else
854 *code = NE;
855 *op0 = XEXP (*op0, 0);
856 }
857
858 /* Prefer register over memory as first operand. */
859 if (MEM_P (*op0) && REG_P (*op1))
860 {
861 rtx tem = *op0; *op0 = *op1; *op1 = tem;
862 *code = swap_condition (*code);
863 }
864 }
865
866 /* Emit a compare instruction suitable to implement the comparison
867 OP0 CODE OP1. Return the correct condition RTL to be placed in
868 the IF_THEN_ELSE of the conditional branch testing the result. */
869
870 rtx
871 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
872 {
873 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
874 rtx cc;
875
876 /* Do not output a redundant compare instruction if a compare_and_swap
877 pattern already computed the result and the machine modes are compatible. */
878 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
879 {
880 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
881 == GET_MODE (op0));
882 cc = op0;
883 }
884 else
885 {
886 cc = gen_rtx_REG (mode, CC_REGNUM);
887 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
888 }
889
890 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
891 }
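
/* Usage sketch: for a signed GT comparison of two registers this emits
   (set (reg:CCS CC_REGNUM) (compare:CCS op0 op1)) and returns
   (gt (reg:CCS CC_REGNUM) (const_int 0)), which the caller places into
   the IF_THEN_ELSE of the branch, e.g. via s390_emit_jump below.  */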
892
893 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
894 matches CMP.
895 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
896 conditional branch testing the result. */
897
898 static rtx
899 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
900 rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
904 const0_rtx);
905 }
906
907 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
908 unconditional jump, else a conditional jump under condition COND. */
909
910 void
911 s390_emit_jump (rtx target, rtx cond)
912 {
913 rtx insn;
914
915 target = gen_rtx_LABEL_REF (VOIDmode, target);
916 if (cond)
917 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
918
919 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
920 emit_jump_insn (insn);
921 }
922
923 /* Return branch condition mask to implement a branch
924 specified by CODE. Return -1 for invalid comparisons. */
925
926 int
927 s390_branch_condition_mask (rtx code)
928 {
929 const int CC0 = 1 << 3;
930 const int CC1 = 1 << 2;
931 const int CC2 = 1 << 1;
932 const int CC3 = 1 << 0;
933
934 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
935 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
936 gcc_assert (XEXP (code, 1) == const0_rtx);
937
938 switch (GET_MODE (XEXP (code, 0)))
939 {
940 case CCZmode:
941 case CCZ1mode:
942 switch (GET_CODE (code))
943 {
944 case EQ: return CC0;
945 case NE: return CC1 | CC2 | CC3;
946 default: return -1;
947 }
948 break;
949
950 case CCT1mode:
951 switch (GET_CODE (code))
952 {
953 case EQ: return CC1;
954 case NE: return CC0 | CC2 | CC3;
955 default: return -1;
956 }
957 break;
958
959 case CCT2mode:
960 switch (GET_CODE (code))
961 {
962 case EQ: return CC2;
963 case NE: return CC0 | CC1 | CC3;
964 default: return -1;
965 }
966 break;
967
968 case CCT3mode:
969 switch (GET_CODE (code))
970 {
971 case EQ: return CC3;
972 case NE: return CC0 | CC1 | CC2;
973 default: return -1;
974 }
975 break;
976
977 case CCLmode:
978 switch (GET_CODE (code))
979 {
980 case EQ: return CC0 | CC2;
981 case NE: return CC1 | CC3;
982 default: return -1;
983 }
984 break;
985
986 case CCL1mode:
987 switch (GET_CODE (code))
988 {
989 case LTU: return CC2 | CC3; /* carry */
990 case GEU: return CC0 | CC1; /* no carry */
991 default: return -1;
992 }
993 break;
994
995 case CCL2mode:
996 switch (GET_CODE (code))
997 {
998 case GTU: return CC0 | CC1; /* borrow */
999 case LEU: return CC2 | CC3; /* no borrow */
1000 default: return -1;
1001 }
1002 break;
1003
1004 case CCL3mode:
1005 switch (GET_CODE (code))
1006 {
1007 case EQ: return CC0 | CC2;
1008 case NE: return CC1 | CC3;
1009 case LTU: return CC1;
1010 case GTU: return CC3;
1011 case LEU: return CC1 | CC2;
1012 case GEU: return CC2 | CC3;
1013 default: return -1;
1014 }
1015
1016 case CCUmode:
1017 switch (GET_CODE (code))
1018 {
1019 case EQ: return CC0;
1020 case NE: return CC1 | CC2 | CC3;
1021 case LTU: return CC1;
1022 case GTU: return CC2;
1023 case LEU: return CC0 | CC1;
1024 case GEU: return CC0 | CC2;
1025 default: return -1;
1026 }
1027 break;
1028
1029 case CCURmode:
1030 switch (GET_CODE (code))
1031 {
1032 case EQ: return CC0;
1033 case NE: return CC2 | CC1 | CC3;
1034 case LTU: return CC2;
1035 case GTU: return CC1;
1036 case LEU: return CC0 | CC2;
1037 case GEU: return CC0 | CC1;
1038 default: return -1;
1039 }
1040 break;
1041
1042 case CCAPmode:
1043 switch (GET_CODE (code))
1044 {
1045 case EQ: return CC0;
1046 case NE: return CC1 | CC2 | CC3;
1047 case LT: return CC1 | CC3;
1048 case GT: return CC2;
1049 case LE: return CC0 | CC1 | CC3;
1050 case GE: return CC0 | CC2;
1051 default: return -1;
1052 }
1053 break;
1054
1055 case CCANmode:
1056 switch (GET_CODE (code))
1057 {
1058 case EQ: return CC0;
1059 case NE: return CC1 | CC2 | CC3;
1060 case LT: return CC1;
1061 case GT: return CC2 | CC3;
1062 case LE: return CC0 | CC1;
1063 case GE: return CC0 | CC2 | CC3;
1064 default: return -1;
1065 }
1066 break;
1067
1068 case CCSmode:
1069 switch (GET_CODE (code))
1070 {
1071 case EQ: return CC0;
1072 case NE: return CC1 | CC2 | CC3;
1073 case LT: return CC1;
1074 case GT: return CC2;
1075 case LE: return CC0 | CC1;
1076 case GE: return CC0 | CC2;
1077 case UNORDERED: return CC3;
1078 case ORDERED: return CC0 | CC1 | CC2;
1079 case UNEQ: return CC0 | CC3;
1080 case UNLT: return CC1 | CC3;
1081 case UNGT: return CC2 | CC3;
1082 case UNLE: return CC0 | CC1 | CC3;
1083 case UNGE: return CC0 | CC2 | CC3;
1084 case LTGT: return CC1 | CC2;
1085 default: return -1;
1086 }
1087 break;
1088
1089 case CCSRmode:
1090 switch (GET_CODE (code))
1091 {
1092 case EQ: return CC0;
1093 case NE: return CC2 | CC1 | CC3;
1094 case LT: return CC2;
1095 case GT: return CC1;
1096 case LE: return CC0 | CC2;
1097 case GE: return CC0 | CC1;
1098 case UNORDERED: return CC3;
1099 case ORDERED: return CC0 | CC2 | CC1;
1100 case UNEQ: return CC0 | CC3;
1101 case UNLT: return CC2 | CC3;
1102 case UNGT: return CC1 | CC3;
1103 case UNLE: return CC0 | CC2 | CC3;
1104 case UNGE: return CC0 | CC1 | CC3;
1105 case LTGT: return CC2 | CC1;
1106 default: return -1;
1107 }
1108 break;
1109
1110 default:
1111 return -1;
1112 }
1113 }
1114
1115
1116 /* Return branch condition mask to implement a compare and branch
1117 specified by CODE. Return -1 for invalid comparisons. */
1118
1119 int
1120 s390_compare_and_branch_condition_mask (rtx code)
1121 {
1122 const int CC0 = 1 << 3;
1123 const int CC1 = 1 << 2;
1124 const int CC2 = 1 << 1;
1125
1126 switch (GET_CODE (code))
1127 {
1128 case EQ:
1129 return CC0;
1130 case NE:
1131 return CC1 | CC2;
1132 case LT:
1133 case LTU:
1134 return CC1;
1135 case GT:
1136 case GTU:
1137 return CC2;
1138 case LE:
1139 case LEU:
1140 return CC0 | CC1;
1141 case GE:
1142 case GEU:
1143 return CC0 | CC2;
1144 default:
1145 gcc_unreachable ();
1146 }
1147 return -1;
1148 }
1149
1150 /* If INV is false, return assembler mnemonic string to implement
1151 a branch specified by CODE. If INV is true, return mnemonic
1152 for the corresponding inverted branch. */
1153
1154 static const char *
1155 s390_branch_condition_mnemonic (rtx code, int inv)
1156 {
1157 int mask;
1158
1159 static const char *const mnemonic[16] =
1160 {
1161 NULL, "o", "h", "nle",
1162 "l", "nhe", "lh", "ne",
1163 "e", "nlh", "he", "nl",
1164 "le", "nh", "no", NULL
1165 };
1166
1167 if (GET_CODE (XEXP (code, 0)) == REG
1168 && REGNO (XEXP (code, 0)) == CC_REGNUM
1169 && XEXP (code, 1) == const0_rtx)
1170 mask = s390_branch_condition_mask (code);
1171 else
1172 mask = s390_compare_and_branch_condition_mask (code);
1173
1174 gcc_assert (mask >= 0);
1175
1176 if (inv)
1177 mask ^= 15;
1178
1179 gcc_assert (mask >= 1 && mask <= 14);
1180
1181 return mnemonic[mask];
1182 }
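
/* Example: a GT test in CCSmode has mask CC2 == 2, so "h" (branch on
   high) is returned; with INV set the mask becomes 13 and the inverted
   mnemonic "nh" is used instead.  */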
1183
1184 /* Return the part of op which has a value different from def.
1185 The size of the part is determined by mode.
1186 Use this function only if you already know that op really
1187 contains such a part. */
1188
1189 unsigned HOST_WIDE_INT
1190 s390_extract_part (rtx op, enum machine_mode mode, int def)
1191 {
1192 unsigned HOST_WIDE_INT value = 0;
1193 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1194 int part_bits = GET_MODE_BITSIZE (mode);
1195 unsigned HOST_WIDE_INT part_mask
1196 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1197 int i;
1198
1199 for (i = 0; i < max_parts; i++)
1200 {
1201 if (i == 0)
1202 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1203 else
1204 value >>= part_bits;
1205
1206 if ((value & part_mask) != (def & part_mask))
1207 return value & part_mask;
1208 }
1209
1210 gcc_unreachable ();
1211 }
1212
1213 /* If OP is an integer constant of mode MODE with exactly one
1214 part of mode PART_MODE unequal to DEF, return the number of that
1215 part. Otherwise, return -1. */
1216
1217 int
1218 s390_single_part (rtx op,
1219 enum machine_mode mode,
1220 enum machine_mode part_mode,
1221 int def)
1222 {
1223 unsigned HOST_WIDE_INT value = 0;
1224 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1225 unsigned HOST_WIDE_INT part_mask
1226 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1227 int i, part = -1;
1228
1229 if (GET_CODE (op) != CONST_INT)
1230 return -1;
1231
1232 for (i = 0; i < n_parts; i++)
1233 {
1234 if (i == 0)
1235 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1236 else
1237 value >>= GET_MODE_BITSIZE (part_mode);
1238
1239 if ((value & part_mask) != (def & part_mask))
1240 {
1241 if (part != -1)
1242 return -1;
1243 else
1244 part = i;
1245 }
1246 }
1247 return part == -1 ? -1 : n_parts - 1 - part;
1248 }
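
/* Worked example: OP = 0x00ff0000 in SImode with PART_MODE = QImode and
   DEF = 0 has exactly one QImode part unequal to DEF (loop index i = 2),
   so the function returns 4 - 1 - 2 = 1, the part number counted from
   the most significant part.  */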
1249
1250 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1251 bits and no other bits are set in IN. POS and LENGTH can be used
1252 to obtain the start position and the length of the bitfield.
1253
1254 POS gives the position of the first bit of the bitfield counting
1255 from the lowest order bit starting with zero. In order to use this
1256 value for S/390 instructions this has to be converted to "bits big
1257 endian" style. */
1258
1259 bool
1260 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1261 int *pos, int *length)
1262 {
1263 int tmp_pos = 0;
1264 int tmp_length = 0;
1265 int i;
1266 unsigned HOST_WIDE_INT mask = 1ULL;
1267 bool contiguous = false;
1268
1269 for (i = 0; i < size; mask <<= 1, i++)
1270 {
1271 if (contiguous)
1272 {
1273 if (mask & in)
1274 tmp_length++;
1275 else
1276 break;
1277 }
1278 else
1279 {
1280 if (mask & in)
1281 {
1282 contiguous = true;
1283 tmp_length++;
1284 }
1285 else
1286 tmp_pos++;
1287 }
1288 }
1289
1290 if (!tmp_length)
1291 return false;
1292
1293 /* Calculate a mask for all bits beyond the contiguous bits. */
1294 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1295
1296 if (mask & in)
1297 return false;
1298
1299 if (tmp_length + tmp_pos - 1 > size)
1300 return false;
1301
1302 if (length)
1303 *length = tmp_length;
1304
1305 if (pos)
1306 *pos = tmp_pos;
1307
1308 return true;
1309 }
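
/* Worked example: IN = 0x3c with SIZE = 8 succeeds with *POS = 2 and
   *LENGTH = 4 since bits 2..5 form a single contiguous run and no other
   bits are set; IN = 0x5c fails because bit 6 lies outside the run.  */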
1310
1311 /* Check whether we can (and want to) split a double-word
1312 move in mode MODE from SRC to DST into two single-word
1313 moves, moving the subword FIRST_SUBWORD first. */
1314
1315 bool
1316 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1317 {
1318 /* Floating point registers cannot be split. */
1319 if (FP_REG_P (src) || FP_REG_P (dst))
1320 return false;
1321
1322 /* We don't need to split if operands are directly accessible. */
1323 if (s_operand (src, mode) || s_operand (dst, mode))
1324 return false;
1325
1326 /* Non-offsettable memory references cannot be split. */
1327 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1328 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1329 return false;
1330
1331 /* Moving the first subword must not clobber a register
1332 needed to move the second subword. */
1333 if (register_operand (dst, mode))
1334 {
1335 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1336 if (reg_overlap_mentioned_p (subreg, src))
1337 return false;
1338 }
1339
1340 return true;
1341 }
1342
1343 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1344 and [MEM2, MEM2 + SIZE] do overlap and false
1345 otherwise. */
1346
1347 bool
1348 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1349 {
1350 rtx addr1, addr2, addr_delta;
1351 HOST_WIDE_INT delta;
1352
1353 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1354 return true;
1355
1356 if (size == 0)
1357 return false;
1358
1359 addr1 = XEXP (mem1, 0);
1360 addr2 = XEXP (mem2, 0);
1361
1362 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1363
1364 /* This overlapping check is used by peepholes merging memory block operations.
1365 Overlapping operations would otherwise be recognized by the S/390 hardware
1366 and would fall back to a slower implementation. Allowing overlapping
1367 operations would lead to slow code but not to wrong code. Therefore we are
1368 somewhat optimistic if we cannot prove that the memory blocks are
1369 overlapping.
1370 That's why we return false here although this may accept operations on
1371 overlapping memory areas. */
1372 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1373 return false;
1374
1375 delta = INTVAL (addr_delta);
1376
1377 if (delta == 0
1378 || (delta > 0 && delta < size)
1379 || (delta < 0 && -delta < size))
1380 return true;
1381
1382 return false;
1383 }
1384
1385 /* Check whether the address of memory reference MEM2 equals exactly
1386 the address of memory reference MEM1 plus DELTA. Return true if
1387 we can prove this to be the case, false otherwise. */
1388
1389 bool
1390 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1391 {
1392 rtx addr1, addr2, addr_delta;
1393
1394 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1395 return false;
1396
1397 addr1 = XEXP (mem1, 0);
1398 addr2 = XEXP (mem2, 0);
1399
1400 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1401 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1402 return false;
1403
1404 return true;
1405 }
1406
1407 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1408
1409 void
1410 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1411 rtx *operands)
1412 {
1413 enum machine_mode wmode = mode;
1414 rtx dst = operands[0];
1415 rtx src1 = operands[1];
1416 rtx src2 = operands[2];
1417 rtx op, clob, tem;
1418
1419 /* If we cannot handle the operation directly, use a temp register. */
1420 if (!s390_logical_operator_ok_p (operands))
1421 dst = gen_reg_rtx (mode);
1422
1423 /* QImode and HImode patterns make sense only if we have a destination
1424 in memory. Otherwise perform the operation in SImode. */
1425 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1426 wmode = SImode;
1427
1428 /* Widen operands if required. */
1429 if (mode != wmode)
1430 {
1431 if (GET_CODE (dst) == SUBREG
1432 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1433 dst = tem;
1434 else if (REG_P (dst))
1435 dst = gen_rtx_SUBREG (wmode, dst, 0);
1436 else
1437 dst = gen_reg_rtx (wmode);
1438
1439 if (GET_CODE (src1) == SUBREG
1440 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1441 src1 = tem;
1442 else if (GET_MODE (src1) != VOIDmode)
1443 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1444
1445 if (GET_CODE (src2) == SUBREG
1446 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1447 src2 = tem;
1448 else if (GET_MODE (src2) != VOIDmode)
1449 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1450 }
1451
1452 /* Emit the instruction. */
1453 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1454 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1455 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1456
1457 /* Fix up the destination if needed. */
1458 if (dst != operands[0])
1459 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1460 }
1461
1462 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1463
1464 bool
1465 s390_logical_operator_ok_p (rtx *operands)
1466 {
1467 /* If the destination operand is in memory, it needs to coincide
1468 with one of the source operands. After reload, it has to be
1469 the first source operand. */
1470 if (GET_CODE (operands[0]) == MEM)
1471 return rtx_equal_p (operands[0], operands[1])
1472 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1473
1474 return true;
1475 }
1476
1477 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1478 operand IMMOP to switch from SS to SI type instructions. */
1479
1480 void
1481 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1482 {
1483 int def = code == AND ? -1 : 0;
1484 HOST_WIDE_INT mask;
1485 int part;
1486
1487 gcc_assert (GET_CODE (*memop) == MEM);
1488 gcc_assert (!MEM_VOLATILE_P (*memop));
1489
1490 mask = s390_extract_part (*immop, QImode, def);
1491 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1492 gcc_assert (part >= 0);
1493
1494 *memop = adjust_address (*memop, QImode, part);
1495 *immop = gen_int_mode (mask, QImode);
1496 }
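
/* Example: an SImode AND of a memory operand with 0xff00ffff has exactly
   one byte differing from the AND default of all ones, so the operation
   is narrowed to a QImode AND of byte 1 of the operand with mask 0x00,
   which can then be emitted as an NI instruction.  */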
1497
1498
1499 /* How to allocate a 'struct machine_function'. */
1500
1501 static struct machine_function *
1502 s390_init_machine_status (void)
1503 {
1504 return ggc_alloc_cleared_machine_function ();
1505 }
1506
1507 static void
1508 s390_option_override (void)
1509 {
1510 /* Set up function hooks. */
1511 init_machine_status = s390_init_machine_status;
1512
1513 /* Architecture mode defaults according to ABI. */
1514 if (!(target_flags_explicit & MASK_ZARCH))
1515 {
1516 if (TARGET_64BIT)
1517 target_flags |= MASK_ZARCH;
1518 else
1519 target_flags &= ~MASK_ZARCH;
1520 }
1521
1522 /* Set the march default in case it hasn't been specified on
1523 cmdline. */
1524 if (s390_arch == PROCESSOR_max)
1525 {
1526 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1527 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1528 s390_arch_flags = processor_flags_table[(int)s390_arch];
1529 }
1530
1531 /* Determine processor to tune for. */
1532 if (s390_tune == PROCESSOR_max)
1533 {
1534 s390_tune = s390_arch;
1535 s390_tune_flags = s390_arch_flags;
1536 }
1537
1538 /* Sanity checks. */
1539 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1540 error ("z/Architecture mode not supported on %s", s390_arch_string);
1541 if (TARGET_64BIT && !TARGET_ZARCH)
1542 error ("64-bit ABI not supported in ESA/390 mode");
1543
1544 /* Use hardware DFP if available and not explicitly disabled by
1545 user. E.g. with -m31 -march=z10 -mzarch */
1546 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
1547 target_flags |= MASK_HARD_DFP;
1548
1549 if (TARGET_HARD_DFP && !TARGET_DFP)
1550 {
1551 if (target_flags_explicit & MASK_HARD_DFP)
1552 {
1553 if (!TARGET_CPU_DFP)
1554 error ("hardware decimal floating point instructions"
1555 " not available on %s", s390_arch_string);
1556 if (!TARGET_ZARCH)
1557 error ("hardware decimal floating point instructions"
1558 " not available in ESA/390 mode");
1559 }
1560 else
1561 target_flags &= ~MASK_HARD_DFP;
1562 }
1563
1564 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1565 {
1566 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1567 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1568
1569 target_flags &= ~MASK_HARD_DFP;
1570 }
1571
1572 /* Set processor cost function. */
1573 switch (s390_tune)
1574 {
1575 case PROCESSOR_2084_Z990:
1576 s390_cost = &z990_cost;
1577 break;
1578 case PROCESSOR_2094_Z9_109:
1579 s390_cost = &z9_109_cost;
1580 break;
1581 case PROCESSOR_2097_Z10:
1582       s390_cost = &z10_cost;
      break;
1583 case PROCESSOR_2817_Z196:
1584 s390_cost = &z196_cost;
1585 break;
1586 default:
1587 s390_cost = &z900_cost;
1588 }
1589
1590 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1591 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1592 "in combination");
1593
1594 if (s390_stack_size)
1595 {
1596 if (s390_stack_guard >= s390_stack_size)
1597 error ("stack size must be greater than the stack guard value");
1598 else if (s390_stack_size > 1 << 16)
1599 error ("stack size must not be greater than 64k");
1600 }
1601 else if (s390_stack_guard)
1602 error ("-mstack-guard implies use of -mstack-size");
1603
1604 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1605 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1606 target_flags |= MASK_LONG_DOUBLE_128;
1607 #endif
1608
1609 if (s390_tune == PROCESSOR_2097_Z10
1610 || s390_tune == PROCESSOR_2817_Z196)
1611 {
1612 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1619 global_options.x_param_values,
1620 global_options_set.x_param_values);
1621 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1622 global_options.x_param_values,
1623 global_options_set.x_param_values);
1624 }
1625
1626 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1627 global_options.x_param_values,
1628 global_options_set.x_param_values);
1629 /* values for loop prefetching */
1630 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1631 global_options.x_param_values,
1632 global_options_set.x_param_values);
1633 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636   /* s390 has more than 2 cache levels and the sizes are much larger.
1637      Since we are always running virtualized, assume that we only get
1638      a small part of the caches above L1.  */
1639 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1643 global_options.x_param_values,
1644 global_options_set.x_param_values);
1645 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1646 global_options.x_param_values,
1647 global_options_set.x_param_values);
1648
1649 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1650 requires the arch flags to be evaluated already. Since prefetching
1651 is beneficial on s390, we enable it if available. */
1652 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1653 flag_prefetch_loop_arrays = 1;
1654
1655 /* Use the alternative scheduling-pressure algorithm by default. */
1656 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
1657 global_options.x_param_values,
1658 global_options_set.x_param_values);
1659
1660 if (TARGET_TPF)
1661 {
1662 /* Don't emit DWARF3/4 unless specifically selected. The TPF
1663 debuggers do not yet support DWARF 3/4. */
1664 if (!global_options_set.x_dwarf_strict)
1665 dwarf_strict = 1;
1666 if (!global_options_set.x_dwarf_version)
1667 dwarf_version = 2;
1668 }
1669 }
1670
1671 /* Map for smallest class containing reg regno. */
1672
1673 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1674 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1683 ACCESS_REGS, ACCESS_REGS
1684 };
1685
1686 /* Return attribute type of insn. */
1687
1688 static enum attr_type
1689 s390_safe_attr_type (rtx insn)
1690 {
1691 if (recog_memoized (insn) >= 0)
1692 return get_attr_type (insn);
1693 else
1694 return TYPE_NONE;
1695 }
1696
1697 /* Return true if DISP is a valid short displacement. */
1698
1699 static bool
1700 s390_short_displacement (rtx disp)
1701 {
1702 /* No displacement is OK. */
1703 if (!disp)
1704 return true;
1705
1706 /* Without the long displacement facility we don't need to
1707      distinguish between long and short displacements.  */
1708 if (!TARGET_LONG_DISPLACEMENT)
1709 return true;
1710
1711 /* Integer displacement in range. */
1712 if (GET_CODE (disp) == CONST_INT)
1713 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1714
1715 /* GOT offset is not OK, the GOT can be large. */
1716 if (GET_CODE (disp) == CONST
1717 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1718 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1719 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1720 return false;
1721
1722 /* All other symbolic constants are literal pool references,
1723 which are OK as the literal pool must be small. */
1724 if (GET_CODE (disp) == CONST)
1725 return true;
1726
1727 return false;
1728 }
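
/* For illustration: a constant displacement of 4095 still fits the short
   (12-bit) form, whereas 4096 needs the long displacement facility and
   its 20-bit signed displacement field.  */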
1729
1730 /* Decompose a RTL expression ADDR for a memory address into
1731 its components, returned in OUT.
1732
1733 Returns false if ADDR is not a valid memory address, true
1734 otherwise. If OUT is NULL, don't return the components,
1735 but check for validity only.
1736
1737 Note: Only addresses in canonical form are recognized.
1738 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1739 canonical form so that they will be recognized. */
1740
1741 static int
1742 s390_decompose_address (rtx addr, struct s390_address *out)
1743 {
1744 HOST_WIDE_INT offset = 0;
1745 rtx base = NULL_RTX;
1746 rtx indx = NULL_RTX;
1747 rtx disp = NULL_RTX;
1748 rtx orig_disp;
1749 bool pointer = false;
1750 bool base_ptr = false;
1751 bool indx_ptr = false;
1752 bool literal_pool = false;
1753
1754 /* We may need to substitute the literal pool base register into the address
1755 below. However, at this point we do not know which register is going to
1756 be used as base, so we substitute the arg pointer register. This is going
1757 to be treated as holding a pointer below -- it shouldn't be used for any
1758 other purpose. */
1759 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1760
1761 /* Decompose address into base + index + displacement. */
1762
1763 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1764 base = addr;
1765
1766 else if (GET_CODE (addr) == PLUS)
1767 {
1768 rtx op0 = XEXP (addr, 0);
1769 rtx op1 = XEXP (addr, 1);
1770 enum rtx_code code0 = GET_CODE (op0);
1771 enum rtx_code code1 = GET_CODE (op1);
1772
1773 if (code0 == REG || code0 == UNSPEC)
1774 {
1775 if (code1 == REG || code1 == UNSPEC)
1776 {
1777 indx = op0; /* index + base */
1778 base = op1;
1779 }
1780
1781 else
1782 {
1783 base = op0; /* base + displacement */
1784 disp = op1;
1785 }
1786 }
1787
1788 else if (code0 == PLUS)
1789 {
1790 indx = XEXP (op0, 0); /* index + base + disp */
1791 base = XEXP (op0, 1);
1792 disp = op1;
1793 }
1794
1795 else
1796 {
1797 return false;
1798 }
1799 }
1800
1801 else
1802 disp = addr; /* displacement */
1803
1804 /* Extract integer part of displacement. */
1805 orig_disp = disp;
1806 if (disp)
1807 {
1808 if (GET_CODE (disp) == CONST_INT)
1809 {
1810 offset = INTVAL (disp);
1811 disp = NULL_RTX;
1812 }
1813 else if (GET_CODE (disp) == CONST
1814 && GET_CODE (XEXP (disp, 0)) == PLUS
1815 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1816 {
1817 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1818 disp = XEXP (XEXP (disp, 0), 0);
1819 }
1820 }
1821
1822 /* Strip off CONST here to avoid special case tests later. */
1823 if (disp && GET_CODE (disp) == CONST)
1824 disp = XEXP (disp, 0);
1825
1826 /* We can convert literal pool addresses to
1827 displacements by basing them off the base register. */
1828 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1829 {
1830 /* Either base or index must be free to hold the base register. */
1831 if (!base)
1832 base = fake_pool_base, literal_pool = true;
1833 else if (!indx)
1834 indx = fake_pool_base, literal_pool = true;
1835 else
1836 return false;
1837
1838 /* Mark up the displacement. */
1839 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1840 UNSPEC_LTREL_OFFSET);
1841 }
1842
1843 /* Validate base register. */
1844 if (base)
1845 {
1846 if (GET_CODE (base) == UNSPEC)
1847 switch (XINT (base, 1))
1848 {
1849 case UNSPEC_LTREF:
1850 if (!disp)
1851 disp = gen_rtx_UNSPEC (Pmode,
1852 gen_rtvec (1, XVECEXP (base, 0, 0)),
1853 UNSPEC_LTREL_OFFSET);
1854 else
1855 return false;
1856
1857 base = XVECEXP (base, 0, 1);
1858 break;
1859
1860 case UNSPEC_LTREL_BASE:
1861 if (XVECLEN (base, 0) == 1)
1862 base = fake_pool_base, literal_pool = true;
1863 else
1864 base = XVECEXP (base, 0, 1);
1865 break;
1866
1867 default:
1868 return false;
1869 }
1870
1871 if (!REG_P (base)
1872 || (GET_MODE (base) != SImode
1873 && GET_MODE (base) != Pmode))
1874 return false;
1875
1876 if (REGNO (base) == STACK_POINTER_REGNUM
1877 || REGNO (base) == FRAME_POINTER_REGNUM
1878 || ((reload_completed || reload_in_progress)
1879 && frame_pointer_needed
1880 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1881 || REGNO (base) == ARG_POINTER_REGNUM
1882 || (flag_pic
1883 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1884 pointer = base_ptr = true;
1885
1886 if ((reload_completed || reload_in_progress)
1887 && base == cfun->machine->base_reg)
1888 pointer = base_ptr = literal_pool = true;
1889 }
1890
1891 /* Validate index register. */
1892 if (indx)
1893 {
1894 if (GET_CODE (indx) == UNSPEC)
1895 switch (XINT (indx, 1))
1896 {
1897 case UNSPEC_LTREF:
1898 if (!disp)
1899 disp = gen_rtx_UNSPEC (Pmode,
1900 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1901 UNSPEC_LTREL_OFFSET);
1902 else
1903 return false;
1904
1905 indx = XVECEXP (indx, 0, 1);
1906 break;
1907
1908 case UNSPEC_LTREL_BASE:
1909 if (XVECLEN (indx, 0) == 1)
1910 indx = fake_pool_base, literal_pool = true;
1911 else
1912 indx = XVECEXP (indx, 0, 1);
1913 break;
1914
1915 default:
1916 return false;
1917 }
1918
1919 if (!REG_P (indx)
1920 || (GET_MODE (indx) != SImode
1921 && GET_MODE (indx) != Pmode))
1922 return false;
1923
1924 if (REGNO (indx) == STACK_POINTER_REGNUM
1925 || REGNO (indx) == FRAME_POINTER_REGNUM
1926 || ((reload_completed || reload_in_progress)
1927 && frame_pointer_needed
1928 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1929 || REGNO (indx) == ARG_POINTER_REGNUM
1930 || (flag_pic
1931 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1932 pointer = indx_ptr = true;
1933
1934 if ((reload_completed || reload_in_progress)
1935 && indx == cfun->machine->base_reg)
1936 pointer = indx_ptr = literal_pool = true;
1937 }
1938
1939 /* Prefer to use pointer as base, not index. */
1940 if (base && indx && !base_ptr
1941 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1942 {
1943 rtx tmp = base;
1944 base = indx;
1945 indx = tmp;
1946 }
1947
1948 /* Validate displacement. */
1949 if (!disp)
1950 {
1951 /* If virtual registers are involved, the displacement will change later
1952 anyway as the virtual registers get eliminated. This could make a
1953 valid displacement invalid, but it is more likely to make an invalid
1954 displacement valid, because we sometimes access the register save area
1955 via negative offsets to one of those registers.
1956 Thus we don't check the displacement for validity here. If after
1957 elimination the displacement turns out to be invalid after all,
1958 this is fixed up by reload in any case. */
1959 if (base != arg_pointer_rtx
1960 && indx != arg_pointer_rtx
1961 && base != return_address_pointer_rtx
1962 && indx != return_address_pointer_rtx
1963 && base != frame_pointer_rtx
1964 && indx != frame_pointer_rtx
1965 && base != virtual_stack_vars_rtx
1966 && indx != virtual_stack_vars_rtx)
1967 if (!DISP_IN_RANGE (offset))
1968 return false;
1969 }
1970 else
1971 {
1972 /* All the special cases are pointers. */
1973 pointer = true;
1974
1975 /* In the small-PIC case, the linker converts @GOT
1976 and @GOTNTPOFF offsets to possible displacements. */
1977 if (GET_CODE (disp) == UNSPEC
1978 && (XINT (disp, 1) == UNSPEC_GOT
1979 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1980 && flag_pic == 1)
1981 {
1982 ;
1983 }
1984
1985 /* Accept pool label offsets. */
1986 else if (GET_CODE (disp) == UNSPEC
1987 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1988 ;
1989
1990 /* Accept literal pool references. */
1991 else if (GET_CODE (disp) == UNSPEC
1992 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1993 {
1994 /* In case CSE pulled a non-literal-pool reference out of
1995 the pool, we have to reject the address. This is
1996 especially important when loading the GOT pointer on
1997 non-zarch CPUs. In this case the literal pool contains an
1998 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1999 will most likely exceed the displacement range. */
2000 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2001 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2002 return false;
2003
2004 orig_disp = gen_rtx_CONST (Pmode, disp);
2005 if (offset)
2006 {
2007 /* If we have an offset, make sure it does not
2008 exceed the size of the constant pool entry. */
2009 rtx sym = XVECEXP (disp, 0, 0);
2010 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2011 return false;
2012
2013 orig_disp = plus_constant (Pmode, orig_disp, offset);
2014 }
2015 }
2016
2017 else
2018 return false;
2019 }
2020
2021 if (!base && !indx)
2022 pointer = true;
2023
2024 if (out)
2025 {
2026 out->base = base;
2027 out->indx = indx;
2028 out->disp = orig_disp;
2029 out->pointer = pointer;
2030 out->literal_pool = literal_pool;
2031 }
2032
2033 return true;
2034 }
2035
2036 /* Decompose an RTL expression OP for a shift count into its components,
2037 and return the base register in BASE and the offset in OFFSET.
2038
2039 Return true if OP is a valid shift count, false if not. */
2040
2041 bool
2042 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2043 {
2044 HOST_WIDE_INT off = 0;
2045
2046 /* We can have an integer constant, an address register,
2047 or a sum of the two. */
2048 if (GET_CODE (op) == CONST_INT)
2049 {
2050 off = INTVAL (op);
2051 op = NULL_RTX;
2052 }
2053 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2054 {
2055 off = INTVAL (XEXP (op, 1));
2056 op = XEXP (op, 0);
2057 }
2058 while (op && GET_CODE (op) == SUBREG)
2059 op = SUBREG_REG (op);
2060
2061 if (op && GET_CODE (op) != REG)
2062 return false;
2063
2064 if (offset)
2065 *offset = off;
2066 if (base)
2067 *base = op;
2068
2069 return true;
2070 }
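/* Illustrative note (not part of the original sources): based on the
   checks above, typical accepted forms are
     (const_int 12)                -> *base = NULL_RTX, *offset = 12
     (plus (reg 3) (const_int 7))  -> *base = (reg 3),  *offset = 7
     (subreg:SI (reg:DI 4) 0)      -> *base = (reg 4),  *offset = 0
   while anything whose stripped form is not a REG is rejected.  */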
2071
2072
2073 /* Return true if the address of memory operand OP is valid without an index register. */
2074
2075 bool
2076 s390_legitimate_address_without_index_p (rtx op)
2077 {
2078 struct s390_address addr;
2079
2080 if (!s390_decompose_address (XEXP (op, 0), &addr))
2081 return false;
2082 if (addr.indx)
2083 return false;
2084
2085 return true;
2086 }
2087
2088
2089 /* Return true if ADDR is of the form symbol_ref or symbol_ref + const_int
2090 and return these parts in SYMREF and ADDEND. You can pass NULL in
2091 SYMREF and/or ADDEND if you are not interested in these values.
2092 Literal pool references are *not* considered symbol references. */
2093
2094 static bool
2095 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2096 {
2097 HOST_WIDE_INT tmpaddend = 0;
2098
2099 if (GET_CODE (addr) == CONST)
2100 addr = XEXP (addr, 0);
2101
2102 if (GET_CODE (addr) == PLUS)
2103 {
2104 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2105 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2106 && CONST_INT_P (XEXP (addr, 1)))
2107 {
2108 tmpaddend = INTVAL (XEXP (addr, 1));
2109 addr = XEXP (addr, 0);
2110 }
2111 else
2112 return false;
2113 }
2114 else
2115 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2116 return false;
2117
2118 if (symref)
2119 *symref = addr;
2120 if (addend)
2121 *addend = tmpaddend;
2122
2123 return true;
2124 }
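/* Illustrative note (not part of the original sources): for
   (const (plus (symbol_ref "foo") (const_int 8))) the function above
   returns true with *SYMREF = (symbol_ref "foo") and *ADDEND = 8,
   whereas a SYMBOL_REF satisfying CONSTANT_POOL_ADDRESS_P is rejected,
   as stated in the function comment.  */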
2125
2126 /* Return TRUE if ADDR is an operand valid for the load/store relative
2127 instructions. Be aware that the alignment of the operand needs to
2128 be checked separately. */
2129 static bool
2130 s390_loadrelative_operand_p (rtx addr)
2131 {
2132 if (GET_CODE (addr) == CONST)
2133 addr = XEXP (addr, 0);
2134
2135 /* Enable load relative for symbol@GOTENT. */
2136 if (GET_CODE (addr) == UNSPEC
2137 && XINT (addr, 1) == UNSPEC_GOTENT)
2138 return true;
2139
2140 return s390_symref_operand_p (addr, NULL, NULL);
2141 }
2142
2143 /* Return true if the address in OP is valid for constraint letter C
2144 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2145 pool MEMs should be accepted. Only the Q, R, S, T constraint
2146 letters are allowed for C. */
2147
2148 static int
2149 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2150 {
2151 struct s390_address addr;
2152 bool decomposed = false;
2153
2154 /* This check makes sure that no symbolic addresses (except literal
2155 pool references) are accepted by the R or T constraints. */
2156 if (s390_loadrelative_operand_p (op))
2157 return 0;
2158
2159 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2160 if (!lit_pool_ok)
2161 {
2162 if (!s390_decompose_address (op, &addr))
2163 return 0;
2164 if (addr.literal_pool)
2165 return 0;
2166 decomposed = true;
2167 }
2168
2169 switch (c)
2170 {
2171 case 'Q': /* no index short displacement */
2172 if (!decomposed && !s390_decompose_address (op, &addr))
2173 return 0;
2174 if (addr.indx)
2175 return 0;
2176 if (!s390_short_displacement (addr.disp))
2177 return 0;
2178 break;
2179
2180 case 'R': /* with index short displacement */
2181 if (TARGET_LONG_DISPLACEMENT)
2182 {
2183 if (!decomposed && !s390_decompose_address (op, &addr))
2184 return 0;
2185 if (!s390_short_displacement (addr.disp))
2186 return 0;
2187 }
2188 /* Any invalid address here will be fixed up by reload,
2189 so accept it for the most generic constraint. */
2190 break;
2191
2192 case 'S': /* no index long displacement */
2193 if (!TARGET_LONG_DISPLACEMENT)
2194 return 0;
2195 if (!decomposed && !s390_decompose_address (op, &addr))
2196 return 0;
2197 if (addr.indx)
2198 return 0;
2199 if (s390_short_displacement (addr.disp))
2200 return 0;
2201 break;
2202
2203 case 'T': /* with index long displacement */
2204 if (!TARGET_LONG_DISPLACEMENT)
2205 return 0;
2206 /* Any invalid address here will be fixed up by reload,
2207 so accept it for the most generic constraint. */
2208 if ((decomposed || s390_decompose_address (op, &addr))
2209 && s390_short_displacement (addr.disp))
2210 return 0;
2211 break;
2212 default:
2213 return 0;
2214 }
2215 return 1;
2216 }
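/* Summary of the constraint letters handled above (restating the case
   comments): Q = no index, short displacement; R = index allowed, short
   displacement; S = no index, long displacement; T = index allowed,
   long displacement. Without TARGET_LONG_DISPLACEMENT, S and T are
   rejected outright, while R accepts any address and defers fixups
   to reload.  */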
2217
2218
2219 /* Evaluates constraint strings described by the regular expression
2220 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2221 the constraint given in STR, and 0 otherwise. */
2222
2223 int
2224 s390_mem_constraint (const char *str, rtx op)
2225 {
2226 char c = str[0];
2227
2228 switch (c)
2229 {
2230 case 'A':
2231 /* Check for offsettable variants of memory constraints. */
2232 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2233 return 0;
2234 if ((reload_completed || reload_in_progress)
2235 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2236 return 0;
2237 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2238 case 'B':
2239 /* Check for non-literal-pool variants of memory constraints. */
2240 if (!MEM_P (op))
2241 return 0;
2242 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2243 case 'Q':
2244 case 'R':
2245 case 'S':
2246 case 'T':
2247 if (GET_CODE (op) != MEM)
2248 return 0;
2249 return s390_check_qrst_address (c, XEXP (op, 0), true);
2250 case 'U':
2251 return (s390_check_qrst_address ('Q', op, true)
2252 || s390_check_qrst_address ('R', op, true));
2253 case 'W':
2254 return (s390_check_qrst_address ('S', op, true)
2255 || s390_check_qrst_address ('T', op, true));
2256 case 'Y':
2257 /* Simply check for the basic form of a shift count. Reload will
2258 take care of making sure we have a proper base register. */
2259 if (!s390_decompose_shift_count (op, NULL, NULL))
2260 return 0;
2261 break;
2262 case 'Z':
2263 return s390_check_qrst_address (str[1], op, true);
2264 default:
2265 return 0;
2266 }
2267 return 1;
2268 }
2269
2270
2271 /* Evaluates constraint strings starting with letter O. Input
2272 parameter C is the letter following the "O" in the constraint
2273 string. Returns 1 if VALUE meets the respective constraint and 0
2274 otherwise. */
2275
2276 int
2277 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2278 {
2279 if (!TARGET_EXTIMM)
2280 return 0;
2281
2282 switch (c)
2283 {
2284 case 's':
2285 return trunc_int_for_mode (value, SImode) == value;
2286
2287 case 'p':
2288 return value == 0
2289 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2290
2291 case 'n':
2292 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2293
2294 default:
2295 gcc_unreachable ();
2296 }
2297 }
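/* Illustrative note (not part of the original sources): for the 's'
   case above, any VALUE that survives sign-truncation to SImode
   unchanged is accepted, e.g. -1 or 0x7fffffff, while 0x100000000
   is rejected.  */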
2298
2299
2300 /* Evaluates constraint strings starting with letter N. Parameter STR
2301 contains the letters following letter "N" in the constraint string.
2302 Returns true if VALUE matches the constraint. */
2303
2304 int
2305 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2306 {
2307 enum machine_mode mode, part_mode;
2308 int def;
2309 int part, part_goal;
2310
2311
2312 if (str[0] == 'x')
2313 part_goal = -1;
2314 else
2315 part_goal = str[0] - '0';
2316
2317 switch (str[1])
2318 {
2319 case 'Q':
2320 part_mode = QImode;
2321 break;
2322 case 'H':
2323 part_mode = HImode;
2324 break;
2325 case 'S':
2326 part_mode = SImode;
2327 break;
2328 default:
2329 return 0;
2330 }
2331
2332 switch (str[2])
2333 {
2334 case 'H':
2335 mode = HImode;
2336 break;
2337 case 'S':
2338 mode = SImode;
2339 break;
2340 case 'D':
2341 mode = DImode;
2342 break;
2343 default:
2344 return 0;
2345 }
2346
2347 switch (str[3])
2348 {
2349 case '0':
2350 def = 0;
2351 break;
2352 case 'F':
2353 def = -1;
2354 break;
2355 default:
2356 return 0;
2357 }
2358
2359 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2360 return 0;
2361
2362 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2363 if (part < 0)
2364 return 0;
2365 if (part_goal != -1 && part_goal != part)
2366 return 0;
2367
2368 return 1;
2369 }
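/* Illustrative note (not part of the original sources): following the
   parsing above, the string "xQD0" describes a DImode VALUE in which,
   roughly, a single QImode part differs from the default value 0, with
   part_goal 'x' meaning any part number, as decided by
   s390_single_part.  */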
2370
2371
2372 /* Returns true if the input parameter VALUE is a float zero. */
2373
2374 int
2375 s390_float_const_zero_p (rtx value)
2376 {
2377 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2378 && value == CONST0_RTX (GET_MODE (value)));
2379 }
2380
2381 /* Implement TARGET_REGISTER_MOVE_COST. */
2382
2383 static int
2384 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2385 reg_class_t from, reg_class_t to)
2386 {
2387 /* On s390, copy between fprs and gprs is expensive. */
2388 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2389 && reg_classes_intersect_p (to, FP_REGS))
2390 || (reg_classes_intersect_p (from, FP_REGS)
2391 && reg_classes_intersect_p (to, GENERAL_REGS)))
2392 return 10;
2393
2394 return 1;
2395 }
2396
2397 /* Implement TARGET_MEMORY_MOVE_COST. */
2398
2399 static int
2400 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2401 reg_class_t rclass ATTRIBUTE_UNUSED,
2402 bool in ATTRIBUTE_UNUSED)
2403 {
2404 return 1;
2405 }
2406
2407 /* Compute a (partial) cost for rtx X. Return true if the complete
2408 cost has been computed, and false if subexpressions should be
2409 scanned. In either case, *TOTAL contains the cost result.
2410 CODE contains GET_CODE (x), OUTER_CODE contains the code
2411 of the superexpression of x. */
2412
2413 static bool
2414 s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2415 int *total, bool speed ATTRIBUTE_UNUSED)
2416 {
2417 switch (code)
2418 {
2419 case CONST:
2420 case CONST_INT:
2421 case LABEL_REF:
2422 case SYMBOL_REF:
2423 case CONST_DOUBLE:
2424 case MEM:
2425 *total = 0;
2426 return true;
2427
2428 case ASHIFT:
2429 case ASHIFTRT:
2430 case LSHIFTRT:
2431 case ROTATE:
2432 case ROTATERT:
2433 case AND:
2434 case IOR:
2435 case XOR:
2436 case NEG:
2437 case NOT:
2438 *total = COSTS_N_INSNS (1);
2439 return false;
2440
2441 case PLUS:
2442 case MINUS:
2443 *total = COSTS_N_INSNS (1);
2444 return false;
2445
2446 case MULT:
2447 switch (GET_MODE (x))
2448 {
2449 case SImode:
2450 {
2451 rtx left = XEXP (x, 0);
2452 rtx right = XEXP (x, 1);
2453 if (GET_CODE (right) == CONST_INT
2454 && CONST_OK_FOR_K (INTVAL (right)))
2455 *total = s390_cost->mhi;
2456 else if (GET_CODE (left) == SIGN_EXTEND)
2457 *total = s390_cost->mh;
2458 else
2459 *total = s390_cost->ms; /* msr, ms, msy */
2460 break;
2461 }
2462 case DImode:
2463 {
2464 rtx left = XEXP (x, 0);
2465 rtx right = XEXP (x, 1);
2466 if (TARGET_ZARCH)
2467 {
2468 if (GET_CODE (right) == CONST_INT
2469 && CONST_OK_FOR_K (INTVAL (right)))
2470 *total = s390_cost->mghi;
2471 else if (GET_CODE (left) == SIGN_EXTEND)
2472 *total = s390_cost->msgf;
2473 else
2474 *total = s390_cost->msg; /* msgr, msg */
2475 }
2476 else /* TARGET_31BIT */
2477 {
2478 if (GET_CODE (left) == SIGN_EXTEND
2479 && GET_CODE (right) == SIGN_EXTEND)
2480 /* mulsidi case: mr, m */
2481 *total = s390_cost->m;
2482 else if (GET_CODE (left) == ZERO_EXTEND
2483 && GET_CODE (right) == ZERO_EXTEND
2484 && TARGET_CPU_ZARCH)
2485 /* umulsidi case: ml, mlr */
2486 *total = s390_cost->ml;
2487 else
2488 /* Complex calculation is required. */
2489 *total = COSTS_N_INSNS (40);
2490 }
2491 break;
2492 }
2493 case SFmode:
2494 case DFmode:
2495 *total = s390_cost->mult_df;
2496 break;
2497 case TFmode:
2498 *total = s390_cost->mxbr;
2499 break;
2500 default:
2501 return false;
2502 }
2503 return false;
2504
2505 case FMA:
2506 switch (GET_MODE (x))
2507 {
2508 case DFmode:
2509 *total = s390_cost->madbr;
2510 break;
2511 case SFmode:
2512 *total = s390_cost->maebr;
2513 break;
2514 default:
2515 return false;
2516 }
2517 /* A NEG in the third argument is free: FMSUB. */
2518 if (GET_CODE (XEXP (x, 2)) == NEG)
2519 {
2520 *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
2521 + rtx_cost (XEXP (x, 1), FMA, 1, speed)
2522 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
2523 return true;
2524 }
2525 return false;
2526
2527 case UDIV:
2528 case UMOD:
2529 if (GET_MODE (x) == TImode) /* 128 bit division */
2530 *total = s390_cost->dlgr;
2531 else if (GET_MODE (x) == DImode)
2532 {
2533 rtx right = XEXP (x, 1);
2534 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2535 *total = s390_cost->dlr;
2536 else /* 64 by 64 bit division */
2537 *total = s390_cost->dlgr;
2538 }
2539 else if (GET_MODE (x) == SImode) /* 32 bit division */
2540 *total = s390_cost->dlr;
2541 return false;
2542
2543 case DIV:
2544 case MOD:
2545 if (GET_MODE (x) == DImode)
2546 {
2547 rtx right = XEXP (x, 1);
2548 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2549 if (TARGET_ZARCH)
2550 *total = s390_cost->dsgfr;
2551 else
2552 *total = s390_cost->dr;
2553 else /* 64 by 64 bit division */
2554 *total = s390_cost->dsgr;
2555 }
2556 else if (GET_MODE (x) == SImode) /* 32 bit division */
2557 *total = s390_cost->dlr;
2558 else if (GET_MODE (x) == SFmode)
2559 {
2560 *total = s390_cost->debr;
2561 }
2562 else if (GET_MODE (x) == DFmode)
2563 {
2564 *total = s390_cost->ddbr;
2565 }
2566 else if (GET_MODE (x) == TFmode)
2567 {
2568 *total = s390_cost->dxbr;
2569 }
2570 return false;
2571
2572 case SQRT:
2573 if (GET_MODE (x) == SFmode)
2574 *total = s390_cost->sqebr;
2575 else if (GET_MODE (x) == DFmode)
2576 *total = s390_cost->sqdbr;
2577 else /* TFmode */
2578 *total = s390_cost->sqxbr;
2579 return false;
2580
2581 case SIGN_EXTEND:
2582 case ZERO_EXTEND:
2583 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2584 || outer_code == PLUS || outer_code == MINUS
2585 || outer_code == COMPARE)
2586 *total = 0;
2587 return false;
2588
2589 case COMPARE:
2590 *total = COSTS_N_INSNS (1);
2591 if (GET_CODE (XEXP (x, 0)) == AND
2592 && GET_CODE (XEXP (x, 1)) == CONST_INT
2593 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2594 {
2595 rtx op0 = XEXP (XEXP (x, 0), 0);
2596 rtx op1 = XEXP (XEXP (x, 0), 1);
2597 rtx op2 = XEXP (x, 1);
2598
2599 if (memory_operand (op0, GET_MODE (op0))
2600 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2601 return true;
2602 if (register_operand (op0, GET_MODE (op0))
2603 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2604 return true;
2605 }
2606 return false;
2607
2608 default:
2609 return false;
2610 }
2611 }
2612
2613 /* Return the cost of an address rtx ADDR. */
2614
2615 static int
2616 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2617 {
2618 struct s390_address ad;
2619 if (!s390_decompose_address (addr, &ad))
2620 return 1000;
2621
2622 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2623 }
2624
2625 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2626 otherwise return 0. */
2627
2628 int
2629 tls_symbolic_operand (rtx op)
2630 {
2631 if (GET_CODE (op) != SYMBOL_REF)
2632 return 0;
2633 return SYMBOL_REF_TLS_MODEL (op);
2634 }
2635 \f
2636 /* Split DImode access register reference REG (on 64-bit) into its constituent
2637 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2638 gen_highpart cannot be used as they assume all registers are word-sized,
2639 while our access registers have only half that size. */
2640
2641 void
2642 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2643 {
2644 gcc_assert (TARGET_64BIT);
2645 gcc_assert (ACCESS_REG_P (reg));
2646 gcc_assert (GET_MODE (reg) == DImode);
2647 gcc_assert (!(REGNO (reg) & 1));
2648
2649 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2650 *hi = gen_rtx_REG (SImode, REGNO (reg));
2651 }
2652
2653 /* Return true if OP contains a symbol reference. */
2654
2655 bool
2656 symbolic_reference_mentioned_p (rtx op)
2657 {
2658 const char *fmt;
2659 int i;
2660
2661 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2662 return 1;
2663
2664 fmt = GET_RTX_FORMAT (GET_CODE (op));
2665 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2666 {
2667 if (fmt[i] == 'E')
2668 {
2669 int j;
2670
2671 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2672 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2673 return 1;
2674 }
2675
2676 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2677 return 1;
2678 }
2679
2680 return 0;
2681 }
2682
2683 /* Return true if OP contains a reference to a thread-local symbol. */
2684
2685 bool
2686 tls_symbolic_reference_mentioned_p (rtx op)
2687 {
2688 const char *fmt;
2689 int i;
2690
2691 if (GET_CODE (op) == SYMBOL_REF)
2692 return tls_symbolic_operand (op);
2693
2694 fmt = GET_RTX_FORMAT (GET_CODE (op));
2695 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2696 {
2697 if (fmt[i] == 'E')
2698 {
2699 int j;
2700
2701 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2702 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2703 return true;
2704 }
2705
2706 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2707 return true;
2708 }
2709
2710 return false;
2711 }
2712
2713
2714 /* Return true if OP is a legitimate general operand when
2715 generating PIC code. It is given that flag_pic is on
2716 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2717
2718 int
2719 legitimate_pic_operand_p (rtx op)
2720 {
2721 /* Accept all non-symbolic constants. */
2722 if (!SYMBOLIC_CONST (op))
2723 return 1;
2724
2725 /* Reject everything else; must be handled
2726 via emit_symbolic_move. */
2727 return 0;
2728 }
2729
2730 /* Returns true if the constant value OP is a legitimate general operand.
2731 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2732
2733 static bool
2734 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2735 {
2736 /* Accept all non-symbolic constants. */
2737 if (!SYMBOLIC_CONST (op))
2738 return 1;
2739
2740 /* Accept immediate LARL operands. */
2741 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2742 return 1;
2743
2744 /* Thread-local symbols are never legal constants. This is
2745 so that emit_call knows that computing such addresses
2746 might require a function call. */
2747 if (TLS_SYMBOLIC_CONST (op))
2748 return 0;
2749
2750 /* In the PIC case, symbolic constants must *not* be
2751 forced into the literal pool. We accept them here,
2752 so that they will be handled by emit_symbolic_move. */
2753 if (flag_pic)
2754 return 1;
2755
2756 /* All remaining non-PIC symbolic constants are
2757 forced into the literal pool. */
2758 return 0;
2759 }
2760
2761 /* Determine if it's legal to put X into the constant pool. This
2762 is not possible if X contains the address of a symbol that is
2763 not constant (TLS) or not known at final link time (PIC). */
2764
2765 static bool
2766 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2767 {
2768 switch (GET_CODE (x))
2769 {
2770 case CONST_INT:
2771 case CONST_DOUBLE:
2772 /* Accept all non-symbolic constants. */
2773 return false;
2774
2775 case LABEL_REF:
2776 /* Labels are OK iff we are non-PIC. */
2777 return flag_pic != 0;
2778
2779 case SYMBOL_REF:
2780 /* 'Naked' TLS symbol references are never OK,
2781 non-TLS symbols are OK iff we are non-PIC. */
2782 if (tls_symbolic_operand (x))
2783 return true;
2784 else
2785 return flag_pic != 0;
2786
2787 case CONST:
2788 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2789 case PLUS:
2790 case MINUS:
2791 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2792 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2793
2794 case UNSPEC:
2795 switch (XINT (x, 1))
2796 {
2797 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2798 case UNSPEC_LTREL_OFFSET:
2799 case UNSPEC_GOT:
2800 case UNSPEC_GOTOFF:
2801 case UNSPEC_PLTOFF:
2802 case UNSPEC_TLSGD:
2803 case UNSPEC_TLSLDM:
2804 case UNSPEC_NTPOFF:
2805 case UNSPEC_DTPOFF:
2806 case UNSPEC_GOTNTPOFF:
2807 case UNSPEC_INDNTPOFF:
2808 return false;
2809
2810 /* If the literal pool shares the code section, execute
2811 template placeholders may be put into the pool as well. */
2812 case UNSPEC_INSN:
2813 return TARGET_CPU_ZARCH;
2814
2815 default:
2816 return true;
2817 }
2818 break;
2819
2820 default:
2821 gcc_unreachable ();
2822 }
2823 }
2824
2825 /* Returns true if the constant value OP is a legitimate general
2826 operand during and after reload. The difference from
2827 legitimate_constant_p is that this function will not accept
2828 a constant that would need to be forced to the literal pool
2829 before it can be used as an operand.
2830 This function accepts all constants which can be loaded directly
2831 into a GPR. */
2832
2833 bool
2834 legitimate_reload_constant_p (rtx op)
2835 {
2836 /* Accept la(y) operands. */
2837 if (GET_CODE (op) == CONST_INT
2838 && DISP_IN_RANGE (INTVAL (op)))
2839 return true;
2840
2841 /* Accept l(g)hi/l(g)fi operands. */
2842 if (GET_CODE (op) == CONST_INT
2843 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2844 return true;
2845
2846 /* Accept lliXX operands. */
2847 if (TARGET_ZARCH
2848 && GET_CODE (op) == CONST_INT
2849 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2850 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2851 return true;
2852
2853 if (TARGET_EXTIMM
2854 && GET_CODE (op) == CONST_INT
2855 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2856 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2857 return true;
2858
2859 /* Accept larl operands. */
2860 if (TARGET_CPU_ZARCH
2861 && larl_operand (op, VOIDmode))
2862 return true;
2863
2864 /* Accept floating-point zero operands that fit into a single GPR. */
2865 if (GET_CODE (op) == CONST_DOUBLE
2866 && s390_float_const_zero_p (op)
2867 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2868 return true;
2869
2870 /* Accept double-word operands that can be split. */
2871 if (GET_CODE (op) == CONST_INT
2872 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2873 {
2874 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2875 rtx hi = operand_subword (op, 0, 0, dword_mode);
2876 rtx lo = operand_subword (op, 1, 0, dword_mode);
2877 return legitimate_reload_constant_p (hi)
2878 && legitimate_reload_constant_p (lo);
2879 }
2880
2881 /* Everything else cannot be handled without reload. */
2882 return false;
2883 }
2884
2885 /* Returns true if the constant value OP is a legitimate fp operand
2886 during and after reload.
2887 This function accepts all constants which can be loaded directly
2888 into an FPR. */
2889
2890 static bool
2891 legitimate_reload_fp_constant_p (rtx op)
2892 {
2893 /* Accept floating-point zero operands if the load zero instruction
2894 can be used. */
2895 if (TARGET_Z196
2896 && GET_CODE (op) == CONST_DOUBLE
2897 && s390_float_const_zero_p (op))
2898 return true;
2899
2900 return false;
2901 }
2902
2903 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2904 return the class of reg to actually use. */
2905
2906 static reg_class_t
2907 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2908 {
2909 switch (GET_CODE (op))
2910 {
2911 /* Constants we cannot reload into general registers
2912 must be forced into the literal pool. */
2913 case CONST_DOUBLE:
2914 case CONST_INT:
2915 if (reg_class_subset_p (GENERAL_REGS, rclass)
2916 && legitimate_reload_constant_p (op))
2917 return GENERAL_REGS;
2918 else if (reg_class_subset_p (ADDR_REGS, rclass)
2919 && legitimate_reload_constant_p (op))
2920 return ADDR_REGS;
2921 else if (reg_class_subset_p (FP_REGS, rclass)
2922 && legitimate_reload_fp_constant_p (op))
2923 return FP_REGS;
2924 return NO_REGS;
2925
2926 /* If a symbolic constant or a PLUS is reloaded,
2927 it is most likely being used as an address, so
2928 prefer ADDR_REGS. If RCLASS is not a superset
2929 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2930 case LABEL_REF:
2931 case SYMBOL_REF:
2932 case CONST:
2933 if (!legitimate_reload_constant_p (op))
2934 return NO_REGS;
2935 /* fallthrough */
2936 case PLUS:
2937 /* load address will be used. */
2938 if (reg_class_subset_p (ADDR_REGS, rclass))
2939 return ADDR_REGS;
2940 else
2941 return NO_REGS;
2942
2943 default:
2944 break;
2945 }
2946
2947 return rclass;
2948 }
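/* Illustrative note (not part of the original sources), assuming the
   usual s390 class layout where ADDR_REGS is a subset of GENERAL_REGS:
   reloading a reload-legitimate SYMBOL_REF into GENERAL_REGS yields
   ADDR_REGS via the PLUS fallthrough above, while the same constant
   destined for FP_REGS yields NO_REGS and is rejected.  */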
2949
2950 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2951 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2952 aligned. */
2953
2954 bool
2955 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2956 {
2957 HOST_WIDE_INT addend;
2958 rtx symref;
2959
2960 /* Accept symbol@GOTENT with pointer size alignment. */
2961 if (GET_CODE (addr) == CONST
2962 && GET_CODE (XEXP (addr, 0)) == UNSPEC
2963 && XINT (XEXP (addr, 0), 1) == UNSPEC_GOTENT
2964 && alignment <= UNITS_PER_LONG)
2965 return true;
2966
2967 if (!s390_symref_operand_p (addr, &symref, &addend))
2968 return false;
2969
2970 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2971 && !(addend & (alignment - 1)));
2972 }
2973
2974 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2975 operand, SCRATCH is used to load the even part of the address,
2976 and one is then added to form the final value. */
2977
2978 void
2979 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2980 {
2981 HOST_WIDE_INT addend;
2982 rtx symref;
2983
2984 if (!s390_symref_operand_p (addr, &symref, &addend))
2985 gcc_unreachable ();
2986
2987 if (!(addend & 1))
2988 /* Easy case. The addend is even so larl will do fine. */
2989 emit_move_insn (reg, addr);
2990 else
2991 {
2992 /* We can leave the scratch register untouched if the target
2993 register is a valid base register. */
2994 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2995 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2996 scratch = reg;
2997
2998 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2999 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3000
3001 if (addend != 1)
3002 emit_move_insn (scratch,
3003 gen_rtx_CONST (Pmode,
3004 gen_rtx_PLUS (Pmode, symref,
3005 GEN_INT (addend - 1))));
3006 else
3007 emit_move_insn (scratch, symref);
3008
3009 /* Increment the address using la in order to avoid clobbering cc. */
3010 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
3011 }
3012 }
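/* Illustrative note (not part of the original sources): for an odd
   address such as sym + 5, the code above loads sym + 4 into SCRATCH
   (or into REG itself if it is a valid base register) via larl and then
   adds 1 with la, so the condition code is left untouched.  */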
3013
3014 /* Generate what is necessary to move between REG and MEM using
3015 SCRATCH. The direction is given by TOMEM. */
3016
3017 void
3018 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3019 {
3020 /* Reload might have pulled a constant out of the literal pool.
3021 Force it back in. */
3022 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3023 || GET_CODE (mem) == CONST)
3024 mem = force_const_mem (GET_MODE (reg), mem);
3025
3026 gcc_assert (MEM_P (mem));
3027
3028 /* For a load from memory we can leave the scratch register
3029 untouched if the target register is a valid base register. */
3030 if (!tomem
3031 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3032 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3033 && GET_MODE (reg) == GET_MODE (scratch))
3034 scratch = reg;
3035
3036 /* Load address into scratch register. Since we can't have a
3037 secondary reload for a secondary reload, we have to cover the case
3038 where larl would need a secondary reload here as well. */
3039 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3040
3041 /* Now we can use a standard load/store to do the move. */
3042 if (tomem)
3043 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3044 else
3045 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3046 }
3047
3048 /* Inform reload about cases where moving X with a mode MODE to a register in
3049 RCLASS requires an extra scratch or immediate register. Return the class
3050 needed for the immediate register. */
3051
3052 static reg_class_t
3053 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3054 enum machine_mode mode, secondary_reload_info *sri)
3055 {
3056 enum reg_class rclass = (enum reg_class) rclass_i;
3057
3058 /* Intermediate register needed. */
3059 if (reg_classes_intersect_p (CC_REGS, rclass))
3060 return GENERAL_REGS;
3061
3062 if (TARGET_Z10)
3063 {
3064 HOST_WIDE_INT offset;
3065 rtx symref;
3066
3067 /* On z10 several optimizer steps may generate larl operands with
3068 an odd addend. */
3069 if (in_p
3070 && s390_symref_operand_p (x, &symref, &offset)
3071 && mode == Pmode
3072 && !SYMBOL_REF_ALIGN1_P (symref)
3073 && (offset & 1) == 1)
3074 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3075 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3076
3077 /* On z10 we need a scratch register when moving QI, TI or floating
3078 point mode values from or to a memory location with a SYMBOL_REF
3079 or if the symref addend of a SI or DI move is not aligned to the
3080 width of the access. */
3081 if (MEM_P (x)
3082 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3083 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3084 || (!TARGET_ZARCH && mode == DImode)
3085 || ((mode == HImode || mode == SImode || mode == DImode)
3086 && (!s390_check_symref_alignment (XEXP (x, 0),
3087 GET_MODE_SIZE (mode))))))
3088 {
3089 #define __SECONDARY_RELOAD_CASE(M,m) \
3090 case M##mode: \
3091 if (TARGET_64BIT) \
3092 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3093 CODE_FOR_reload##m##di_tomem_z10; \
3094 else \
3095 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3096 CODE_FOR_reload##m##si_tomem_z10; \
3097 break;
3098
3099 switch (GET_MODE (x))
3100 {
3101 __SECONDARY_RELOAD_CASE (QI, qi);
3102 __SECONDARY_RELOAD_CASE (HI, hi);
3103 __SECONDARY_RELOAD_CASE (SI, si);
3104 __SECONDARY_RELOAD_CASE (DI, di);
3105 __SECONDARY_RELOAD_CASE (TI, ti);
3106 __SECONDARY_RELOAD_CASE (SF, sf);
3107 __SECONDARY_RELOAD_CASE (DF, df);
3108 __SECONDARY_RELOAD_CASE (TF, tf);
3109 __SECONDARY_RELOAD_CASE (SD, sd);
3110 __SECONDARY_RELOAD_CASE (DD, dd);
3111 __SECONDARY_RELOAD_CASE (TD, td);
3112
3113 default:
3114 gcc_unreachable ();
3115 }
3116 #undef __SECONDARY_RELOAD_CASE
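/* Illustrative note (not part of the original sources): the macro above
   pastes the mode names together, so e.g. the QImode case on a 64-bit
   target selects CODE_FOR_reloadqidi_toreg_z10 for loads and
   CODE_FOR_reloadqidi_tomem_z10 for stores.  */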
3117 }
3118 }
3119
3120 /* We need a scratch register when loading a PLUS expression which
3121 is not a legitimate operand of the LOAD ADDRESS instruction. */
3122 if (in_p && s390_plus_operand (x, mode))
3123 sri->icode = (TARGET_64BIT ?
3124 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3125
3126 /* When performing a multiword move from or to memory, we have to make
3127 sure the second chunk in memory is addressable without causing a
3128 displacement overflow. If that would be the case, we calculate the
3129 address in a scratch register. */
3130 if (MEM_P (x)
3131 && GET_CODE (XEXP (x, 0)) == PLUS
3132 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3133 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3134 + GET_MODE_SIZE (mode) - 1))
3135 {
3136 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3137 in an s_operand address, since we may fall back to lm/stm. So we
3138 only have to care about overflows in the b+i+d case. */
3139 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3140 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3141 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3142 /* For FP_REGS no lm/stm is available so this check is triggered
3143 for displacement overflows in b+i+d and b+d like addresses. */
3144 || (reg_classes_intersect_p (FP_REGS, rclass)
3145 && s390_class_max_nregs (FP_REGS, mode) > 1))
3146 {
3147 if (in_p)
3148 sri->icode = (TARGET_64BIT ?
3149 CODE_FOR_reloaddi_nonoffmem_in :
3150 CODE_FOR_reloadsi_nonoffmem_in);
3151 else
3152 sri->icode = (TARGET_64BIT ?
3153 CODE_FOR_reloaddi_nonoffmem_out :
3154 CODE_FOR_reloadsi_nonoffmem_out);
3155 }
3156 }
3157
3158 /* A scratch address register is needed when a symbolic constant is
3159 copied to r0 when compiling with -fPIC. In other cases the target
3160 register might be used as a temporary (see legitimize_pic_address). */
3161 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3162 sri->icode = (TARGET_64BIT ?
3163 CODE_FOR_reloaddi_PIC_addr :
3164 CODE_FOR_reloadsi_PIC_addr);
3165
3166 /* Either scratch or no register needed. */
3167 return NO_REGS;
3168 }
3169
3170 /* Generate code to load SRC, which is a PLUS that is not a
3171 legitimate operand for the LA instruction, into TARGET.
3172 SCRATCH may be used as a scratch register. */
3173
3174 void
3175 s390_expand_plus_operand (rtx target, rtx src,
3176 rtx scratch)
3177 {
3178 rtx sum1, sum2;
3179 struct s390_address ad;
3180
3181 /* src must be a PLUS; get its two operands. */
3182 gcc_assert (GET_CODE (src) == PLUS);
3183 gcc_assert (GET_MODE (src) == Pmode);
3184
3185 /* Check if any of the two operands is already scheduled
3186 for replacement by reload. This can happen e.g. when
3187 float registers occur in an address. */
3188 sum1 = find_replacement (&XEXP (src, 0));
3189 sum2 = find_replacement (&XEXP (src, 1));
3190 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3191
3192 /* If the address is already strictly valid, there's nothing to do. */
3193 if (!s390_decompose_address (src, &ad)
3194 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3195 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3196 {
3197 /* Otherwise, one of the operands cannot be an address register;
3198 we reload its value into the scratch register. */
3199 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3200 {
3201 emit_move_insn (scratch, sum1);
3202 sum1 = scratch;
3203 }
3204 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3205 {
3206 emit_move_insn (scratch, sum2);
3207 sum2 = scratch;
3208 }
3209
3210 /* According to the way these invalid addresses are generated
3211 in reload.c, it should never happen (at least on s390) that
3212 *neither* of the PLUS components, after find_replacement
3213 was applied, is an address register. */
3214 if (sum1 == scratch && sum2 == scratch)
3215 {
3216 debug_rtx (src);
3217 gcc_unreachable ();
3218 }
3219
3220 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3221 }
3222
3223 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3224 is only ever performed on addresses, so we can mark the
3225 sum as legitimate for LA in any case. */
3226 s390_load_address (target, src);
3227 }
3228
3229
3230 /* Return true if ADDR is a valid memory address.
3231 STRICT specifies whether strict register checking applies. */
3232
3233 static bool
3234 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3235 {
3236 struct s390_address ad;
3237
3238 if (TARGET_Z10
3239 && larl_operand (addr, VOIDmode)
3240 && (mode == VOIDmode
3241 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3242 return true;
3243
3244 if (!s390_decompose_address (addr, &ad))
3245 return false;
3246
3247 if (strict)
3248 {
3249 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3250 return false;
3251
3252 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3253 return false;
3254 }
3255 else
3256 {
3257 if (ad.base
3258 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3259 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3260 return false;
3261
3262 if (ad.indx
3263 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3264 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3265 return false;
3266 }
3267 return true;
3268 }
3269
3270 /* Return true if OP is a valid operand for the LA instruction.
3271 In 31-bit, we need to prove that the result is used as an
3272 address, as LA performs only a 31-bit addition. */
3273
3274 bool
3275 legitimate_la_operand_p (rtx op)
3276 {
3277 struct s390_address addr;
3278 if (!s390_decompose_address (op, &addr))
3279 return false;
3280
3281 return (TARGET_64BIT || addr.pointer);
3282 }
3283
3284 /* Return true if it is valid *and* preferable to use LA to
3285 compute the sum of OP1 and OP2. */
3286
3287 bool
3288 preferred_la_operand_p (rtx op1, rtx op2)
3289 {
3290 struct s390_address addr;
3291
3292 if (op2 != const0_rtx)
3293 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3294
3295 if (!s390_decompose_address (op1, &addr))
3296 return false;
3297 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3298 return false;
3299 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3300 return false;
3301
3302 /* Avoid LA instructions with index register on z196; it is
3303 preferable to use regular add instructions when possible. */
3304 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3305 return false;
3306
3307 if (!TARGET_64BIT && !addr.pointer)
3308 return false;
3309
3310 if (addr.pointer)
3311 return true;
3312
3313 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3314 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3315 return true;
3316
3317 return false;
3318 }
3319
3320 /* Emit a forced load-address operation to load SRC into DST.
3321 This will use the LOAD ADDRESS instruction even in situations
3322 where legitimate_la_operand_p (SRC) returns false. */
3323
3324 void
3325 s390_load_address (rtx dst, rtx src)
3326 {
3327 if (TARGET_64BIT)
3328 emit_move_insn (dst, src);
3329 else
3330 emit_insn (gen_force_la_31 (dst, src));
3331 }
3332
3333 /* Return a legitimate reference for ORIG (an address) using the
3334 register REG. If REG is 0, a new pseudo is generated.
3335
3336 There are two types of references that must be handled:
3337
3338 1. Global data references must load the address from the GOT, via
3339 the PIC reg. An insn is emitted to do this load, and the reg is
3340 returned.
3341
3342 2. Static data references, constant pool addresses, and code labels
3343 compute the address as an offset from the GOT, whose base is in
3344 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3345 differentiate them from global data objects. The returned
3346 address is the PIC reg + an unspec constant.
3347
3348 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
3349 reg also appears in the address. */
3350
3351 rtx
3352 legitimize_pic_address (rtx orig, rtx reg)
3353 {
3354 rtx addr = orig;
3355 rtx new_rtx = orig;
3356 rtx base;
3357
3358 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3359
3360 if (GET_CODE (addr) == LABEL_REF
3361 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3362 {
3363 /* This is a local symbol. */
3364 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3365 {
3366 /* Access local symbols PC-relative via LARL.
3367 This is the same as in the non-PIC case, so it is
3368 handled automatically ... */
3369 }
3370 else
3371 {
3372 /* Access local symbols relative to the GOT. */
3373
3374 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3375
3376 if (reload_in_progress || reload_completed)
3377 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3378
3379 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3380 addr = gen_rtx_CONST (Pmode, addr);
3381 addr = force_const_mem (Pmode, addr);
3382 emit_move_insn (temp, addr);
3383
3384 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3385 if (reg != 0)
3386 {
3387 s390_load_address (reg, new_rtx);
3388 new_rtx = reg;
3389 }
3390 }
3391 }
3392 else if (GET_CODE (addr) == SYMBOL_REF)
3393 {
3394 if (reg == 0)
3395 reg = gen_reg_rtx (Pmode);
3396
3397 if (flag_pic == 1)
3398 {
3399 /* Assume GOT offset < 4k. This is handled the same way
3400 in both 31- and 64-bit code (@GOT). */
3401
3402 if (reload_in_progress || reload_completed)
3403 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3404
3405 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3406 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3407 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3408 new_rtx = gen_const_mem (Pmode, new_rtx);
3409 emit_move_insn (reg, new_rtx);
3410 new_rtx = reg;
3411 }
3412 else if (TARGET_CPU_ZARCH)
3413 {
3414 /* If the GOT offset might be >= 4k, we determine the position
3415 of the GOT entry via a PC-relative LARL (@GOTENT). */
3416
3417 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3418
3419 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3420 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3421
3422 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3423 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3424
3425 if (!TARGET_Z10)
3426 {
3427 emit_move_insn (temp, new_rtx);
3428 new_rtx = gen_const_mem (Pmode, temp);
3429 }
3430 else
3431 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3432 emit_move_insn (reg, new_rtx);
3433 new_rtx = reg;
3434 }
3435 else
3436 {
3437 /* If the GOT offset might be >= 4k, we have to load it
3438 from the literal pool (@GOT). */
3439
3440 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3441
3442 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3443 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3444
3445 if (reload_in_progress || reload_completed)
3446 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3447
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3449 addr = gen_rtx_CONST (Pmode, addr);
3450 addr = force_const_mem (Pmode, addr);
3451 emit_move_insn (temp, addr);
3452
3453 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3454 new_rtx = gen_const_mem (Pmode, new_rtx);
3455 emit_move_insn (reg, new_rtx);
3456 new_rtx = reg;
3457 }
3458 }
3459 else
3460 {
3461 if (GET_CODE (addr) == CONST)
3462 {
3463 addr = XEXP (addr, 0);
3464 if (GET_CODE (addr) == UNSPEC)
3465 {
3466 gcc_assert (XVECLEN (addr, 0) == 1);
3467 switch (XINT (addr, 1))
3468 {
3469 /* If someone moved a GOT-relative UNSPEC
3470 out of the literal pool, force them back in. */
3471 case UNSPEC_GOTOFF:
3472 case UNSPEC_PLTOFF:
3473 new_rtx = force_const_mem (Pmode, orig);
3474 break;
3475
3476 /* @GOT is OK as is if small. */
3477 case UNSPEC_GOT:
3478 if (flag_pic == 2)
3479 new_rtx = force_const_mem (Pmode, orig);
3480 break;
3481
3482 /* @GOTENT is OK as is. */
3483 case UNSPEC_GOTENT:
3484 break;
3485
3486 /* @PLT is OK as is on 64-bit, must be converted to
3487 GOT-relative @PLTOFF on 31-bit. */
3488 case UNSPEC_PLT:
3489 if (!TARGET_CPU_ZARCH)
3490 {
3491 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3492
3493 if (reload_in_progress || reload_completed)
3494 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3495
3496 addr = XVECEXP (addr, 0, 0);
3497 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3498 UNSPEC_PLTOFF);
3499 addr = gen_rtx_CONST (Pmode, addr);
3500 addr = force_const_mem (Pmode, addr);
3501 emit_move_insn (temp, addr);
3502
3503 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3504 if (reg != 0)
3505 {
3506 s390_load_address (reg, new_rtx);
3507 new_rtx = reg;
3508 }
3509 }
3510 break;
3511
3512 /* Everything else cannot happen. */
3513 default:
3514 gcc_unreachable ();
3515 }
3516 }
3517 else
3518 gcc_assert (GET_CODE (addr) == PLUS);
3519 }
3520 if (GET_CODE (addr) == PLUS)
3521 {
3522 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3523
3524 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3525 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3526
3527 /* Check first to see if this is a constant offset
3528 from a local symbol reference. */
3529 if ((GET_CODE (op0) == LABEL_REF
3530 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3531 && GET_CODE (op1) == CONST_INT)
3532 {
3533 if (TARGET_CPU_ZARCH
3534 && larl_operand (op0, VOIDmode)
3535 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3536 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3537 {
3538 if (INTVAL (op1) & 1)
3539 {
3540 /* LARL can't handle odd offsets, so emit a
3541 pair of LARL and LA. */
3542 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3543
3544 if (!DISP_IN_RANGE (INTVAL (op1)))
3545 {
3546 HOST_WIDE_INT even = INTVAL (op1) - 1;
3547 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3548 op0 = gen_rtx_CONST (Pmode, op0);
3549 op1 = const1_rtx;
3550 }
3551
3552 emit_move_insn (temp, op0);
3553 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3554
3555 if (reg != 0)
3556 {
3557 s390_load_address (reg, new_rtx);
3558 new_rtx = reg;
3559 }
3560 }
3561 else
3562 {
3563 /* If the offset is even, we can just use LARL.
3564 This will happen automatically. */
3565 }
3566 }
3567 else
3568 {
3569 /* Access local symbols relative to the GOT. */
3570
3571 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3572
3573 if (reload_in_progress || reload_completed)
3574 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3575
3576 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3577 UNSPEC_GOTOFF);
3578 addr = gen_rtx_PLUS (Pmode, addr, op1);
3579 addr = gen_rtx_CONST (Pmode, addr);
3580 addr = force_const_mem (Pmode, addr);
3581 emit_move_insn (temp, addr);
3582
3583 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3584 if (reg != 0)
3585 {
3586 s390_load_address (reg, new_rtx);
3587 new_rtx = reg;
3588 }
3589 }
3590 }
3591
3592 /* Now, check whether it is a GOT relative symbol plus offset
3593 that was pulled out of the literal pool. Force it back in. */
3594
3595 else if (GET_CODE (op0) == UNSPEC
3596 && GET_CODE (op1) == CONST_INT
3597 && XINT (op0, 1) == UNSPEC_GOTOFF)
3598 {
3599 gcc_assert (XVECLEN (op0, 0) == 1);
3600
3601 new_rtx = force_const_mem (Pmode, orig);
3602 }
3603
3604 /* Otherwise, compute the sum. */
3605 else
3606 {
3607 base = legitimize_pic_address (XEXP (addr, 0), reg);
3608 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3609 base == reg ? NULL_RTX : reg);
3610 if (GET_CODE (new_rtx) == CONST_INT)
3611 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3612 else
3613 {
3614 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3615 {
3616 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3617 new_rtx = XEXP (new_rtx, 1);
3618 }
3619 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3620 }
3621
3622 if (GET_CODE (new_rtx) == CONST)
3623 new_rtx = XEXP (new_rtx, 0);
3624 new_rtx = force_operand (new_rtx, 0);
3625 }
3626 }
3627 }
3628 return new_rtx;
3629 }
3630
3631 /* Load the thread pointer into a register. */
3632
3633 rtx
3634 s390_get_thread_pointer (void)
3635 {
3636 rtx tp = gen_reg_rtx (Pmode);
3637
3638 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3639 mark_reg_pointer (tp, BITS_PER_WORD);
3640
3641 return tp;
3642 }
3643
3644 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3645 in s390_tls_symbol, which always refers to __tls_get_offset.
3646 The returned offset is written to RESULT_REG and a USE rtx is
3647 generated for TLS_CALL. */
3648
3649 static GTY(()) rtx s390_tls_symbol;
3650
3651 static void
3652 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3653 {
3654 rtx insn;
3655
3656 if (!flag_pic)
3657 emit_insn (s390_load_got ());
3658
3659 if (!s390_tls_symbol)
3660 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3661
3662 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3663 gen_rtx_REG (Pmode, RETURN_REGNUM));
3664
3665 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3666 RTL_CONST_CALL_P (insn) = 1;
3667 }
3668
3669 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3670 this (thread-local) address. REG may be used as temporary. */
3671
3672 static rtx
3673 legitimize_tls_address (rtx addr, rtx reg)
3674 {
3675 rtx new_rtx, tls_call, temp, base, r2, insn;
3676
3677 if (GET_CODE (addr) == SYMBOL_REF)
3678 switch (tls_symbolic_operand (addr))
3679 {
3680 case TLS_MODEL_GLOBAL_DYNAMIC:
3681 start_sequence ();
3682 r2 = gen_rtx_REG (Pmode, 2);
3683 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3684 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3685 new_rtx = force_const_mem (Pmode, new_rtx);
3686 emit_move_insn (r2, new_rtx);
3687 s390_emit_tls_call_insn (r2, tls_call);
3688 insn = get_insns ();
3689 end_sequence ();
3690
3691 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3692 temp = gen_reg_rtx (Pmode);
3693 emit_libcall_block (insn, temp, r2, new_rtx);
3694
3695 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3696 if (reg != 0)
3697 {
3698 s390_load_address (reg, new_rtx);
3699 new_rtx = reg;
3700 }
3701 break;
3702
3703 case TLS_MODEL_LOCAL_DYNAMIC:
3704 start_sequence ();
3705 r2 = gen_rtx_REG (Pmode, 2);
3706 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3707 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3708 new_rtx = force_const_mem (Pmode, new_rtx);
3709 emit_move_insn (r2, new_rtx);
3710 s390_emit_tls_call_insn (r2, tls_call);
3711 insn = get_insns ();
3712 end_sequence ();
3713
3714 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3715 temp = gen_reg_rtx (Pmode);
3716 emit_libcall_block (insn, temp, r2, new_rtx);
3717
3718 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3719 base = gen_reg_rtx (Pmode);
3720 s390_load_address (base, new_rtx);
3721
3722 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3723 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3724 new_rtx = force_const_mem (Pmode, new_rtx);
3725 temp = gen_reg_rtx (Pmode);
3726 emit_move_insn (temp, new_rtx);
3727
3728 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3729 if (reg != 0)
3730 {
3731 s390_load_address (reg, new_rtx);
3732 new_rtx = reg;
3733 }
3734 break;
3735
3736 case TLS_MODEL_INITIAL_EXEC:
3737 if (flag_pic == 1)
3738 {
3739 /* Assume GOT offset < 4k. This is handled the same way
3740 in both 31- and 64-bit code. */
3741
3742 if (reload_in_progress || reload_completed)
3743 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3744
3745 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3746 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3747 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3748 new_rtx = gen_const_mem (Pmode, new_rtx);
3749 temp = gen_reg_rtx (Pmode);
3750 emit_move_insn (temp, new_rtx);
3751 }
3752 else if (TARGET_CPU_ZARCH)
3753 {
3754 /* If the GOT offset might be >= 4k, we determine the position
3755 of the GOT entry via a PC-relative LARL. */
3756
3757 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3758 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3759 temp = gen_reg_rtx (Pmode);
3760 emit_move_insn (temp, new_rtx);
3761
3762 new_rtx = gen_const_mem (Pmode, temp);
3763 temp = gen_reg_rtx (Pmode);
3764 emit_move_insn (temp, new_rtx);
3765 }
3766 else if (flag_pic)
3767 {
3768 /* If the GOT offset might be >= 4k, we have to load it
3769 from the literal pool. */
3770
3771 if (reload_in_progress || reload_completed)
3772 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3773
3774 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3775 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3776 new_rtx = force_const_mem (Pmode, new_rtx);
3777 temp = gen_reg_rtx (Pmode);
3778 emit_move_insn (temp, new_rtx);
3779
3780 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3781 new_rtx = gen_const_mem (Pmode, new_rtx);
3782
3783 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3784 temp = gen_reg_rtx (Pmode);
3785 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3786 }
3787 else
3788 {
3789 /* In position-dependent code, load the absolute address of
3790 the GOT entry from the literal pool. */
3791
3792 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3793 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3794 new_rtx = force_const_mem (Pmode, new_rtx);
3795 temp = gen_reg_rtx (Pmode);
3796 emit_move_insn (temp, new_rtx);
3797
3798 new_rtx = temp;
3799 new_rtx = gen_const_mem (Pmode, new_rtx);
3800 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3801 temp = gen_reg_rtx (Pmode);
3802 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3803 }
3804
3805 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3806 if (reg != 0)
3807 {
3808 s390_load_address (reg, new_rtx);
3809 new_rtx = reg;
3810 }
3811 break;
3812
3813 case TLS_MODEL_LOCAL_EXEC:
3814 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3815 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3816 new_rtx = force_const_mem (Pmode, new_rtx);
3817 temp = gen_reg_rtx (Pmode);
3818 emit_move_insn (temp, new_rtx);
3819
3820 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3821 if (reg != 0)
3822 {
3823 s390_load_address (reg, new_rtx);
3824 new_rtx = reg;
3825 }
3826 break;
3827
3828 default:
3829 gcc_unreachable ();
3830 }
3831
3832 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3833 {
3834 switch (XINT (XEXP (addr, 0), 1))
3835 {
3836 case UNSPEC_INDNTPOFF:
3837 gcc_assert (TARGET_CPU_ZARCH);
3838 new_rtx = addr;
3839 break;
3840
3841 default:
3842 gcc_unreachable ();
3843 }
3844 }
3845
3846 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3847 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3848 {
3849 new_rtx = XEXP (XEXP (addr, 0), 0);
3850 if (GET_CODE (new_rtx) != SYMBOL_REF)
3851 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3852
3853 new_rtx = legitimize_tls_address (new_rtx, reg);
3854 new_rtx = plus_constant (Pmode, new_rtx,
3855 INTVAL (XEXP (XEXP (addr, 0), 1)));
3856 new_rtx = force_operand (new_rtx, 0);
3857 }
3858
3859 else
3860 gcc_unreachable (); /* for now ... */
3861
3862 return new_rtx;
3863 }
3864
3865 /* Emit insns making the address in operands[1] valid for a standard
3866 move to operands[0]. operands[1] is replaced by an address which
3867 should be used instead of the former RTX to emit the move
3868 pattern. */
3869
3870 void
3871 emit_symbolic_move (rtx *operands)
3872 {
3873 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3874
3875 if (GET_CODE (operands[0]) == MEM)
3876 operands[1] = force_reg (Pmode, operands[1]);
3877 else if (TLS_SYMBOLIC_CONST (operands[1]))
3878 operands[1] = legitimize_tls_address (operands[1], temp);
3879 else if (flag_pic)
3880 operands[1] = legitimize_pic_address (operands[1], temp);
3881 }
3882
3883 /* Try machine-dependent ways of modifying an illegitimate address X
3884 to be legitimate. If we find one, return the new, valid address.
3885
3886 OLDX is the address as it was before break_out_memory_refs was called.
3887 In some cases it is useful to look at this to decide what needs to be done.
3888
3889 MODE is the mode of the operand pointed to by X.
3890
3891 When -fpic is used, special handling is needed for symbolic references.
3892 See comments by legitimize_pic_address for details. */
3893
3894 static rtx
3895 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3896 enum machine_mode mode ATTRIBUTE_UNUSED)
3897 {
3898 rtx constant_term = const0_rtx;
3899
3900 if (TLS_SYMBOLIC_CONST (x))
3901 {
3902 x = legitimize_tls_address (x, 0);
3903
3904 if (s390_legitimate_address_p (mode, x, FALSE))
3905 return x;
3906 }
3907 else if (GET_CODE (x) == PLUS
3908 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3909 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3910 {
3911 return x;
3912 }
3913 else if (flag_pic)
3914 {
3915 if (SYMBOLIC_CONST (x)
3916 || (GET_CODE (x) == PLUS
3917 && (SYMBOLIC_CONST (XEXP (x, 0))
3918 || SYMBOLIC_CONST (XEXP (x, 1)))))
3919 x = legitimize_pic_address (x, 0);
3920
3921 if (s390_legitimate_address_p (mode, x, FALSE))
3922 return x;
3923 }
3924
3925 x = eliminate_constant_term (x, &constant_term);
3926
3927 /* Optimize loading of large displacements by splitting them
3928 into a multiple of 4K and the remainder; this allows the
3929 former to be CSE'd if possible.
3930
3931 Don't do this if the displacement is added to a register
3932 pointing into the stack frame, as the offsets will
3933 change later anyway. */
3934
3935 if (GET_CODE (constant_term) == CONST_INT
3936 && !TARGET_LONG_DISPLACEMENT
3937 && !DISP_IN_RANGE (INTVAL (constant_term))
3938 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3939 {
3940 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3941 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
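/* Illustrative values: a displacement of 0x12345 splits into
   upper = 0x12000 and lower = 0x345, so only the 4K-multiple part
   ends up in a register and can be CSE'd.  */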
3942
3943 rtx temp = gen_reg_rtx (Pmode);
3944 rtx val = force_operand (GEN_INT (upper), temp);
3945 if (val != temp)
3946 emit_move_insn (temp, val);
3947
3948 x = gen_rtx_PLUS (Pmode, x, temp);
3949 constant_term = GEN_INT (lower);
3950 }
3951
3952 if (GET_CODE (x) == PLUS)
3953 {
3954 if (GET_CODE (XEXP (x, 0)) == REG)
3955 {
3956 rtx temp = gen_reg_rtx (Pmode);
3957 rtx val = force_operand (XEXP (x, 1), temp);
3958 if (val != temp)
3959 emit_move_insn (temp, val);
3960
3961 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3962 }
3963
3964 else if (GET_CODE (XEXP (x, 1)) == REG)
3965 {
3966 rtx temp = gen_reg_rtx (Pmode);
3967 rtx val = force_operand (XEXP (x, 0), temp);
3968 if (val != temp)
3969 emit_move_insn (temp, val);
3970
3971 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3972 }
3973 }
3974
3975 if (constant_term != const0_rtx)
3976 x = gen_rtx_PLUS (Pmode, x, constant_term);
3977
3978 return x;
3979 }
3980
3981 /* Try a machine-dependent way of reloading an illegitimate address AD
3982 operand. If we find one, push the reload and return the new address.
3983
3984 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3985 and TYPE is the reload type of the current reload. */
3986
3987 rtx
3988 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3989 int opnum, int type)
3990 {
3991 if (!optimize || TARGET_LONG_DISPLACEMENT)
3992 return NULL_RTX;
3993
3994 if (GET_CODE (ad) == PLUS)
3995 {
3996 rtx tem = simplify_binary_operation (PLUS, Pmode,
3997 XEXP (ad, 0), XEXP (ad, 1));
3998 if (tem)
3999 ad = tem;
4000 }
4001
4002 if (GET_CODE (ad) == PLUS
4003 && GET_CODE (XEXP (ad, 0)) == REG
4004 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4005 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4006 {
4007 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4008 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4009 rtx cst, tem, new_rtx;
4010
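/* E.g. for an (illustrative) address reg + 0x12345, upper = 0x12000 is
   reloaded into an address register while lower = 0x345 remains as a
   directly encodable displacement.  */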
4011 cst = GEN_INT (upper);
4012 if (!legitimate_reload_constant_p (cst))
4013 cst = force_const_mem (Pmode, cst);
4014
4015 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4016 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4017
4018 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4019 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4020 opnum, (enum reload_type) type);
4021 return new_rtx;
4022 }
4023
4024 return NULL_RTX;
4025 }
4026
4027 /* Emit code to move LEN bytes from SRC to DST. */
4028
4029 bool
4030 s390_expand_movmem (rtx dst, rtx src, rtx len)
4031 {
4032 /* When tuning for z10 or higher we rely on the Glibc functions to
4033 do the right thing. Inline code is generated only for constant
4034 lengths of 64k or less. */
4035 if (s390_tune >= PROCESSOR_2097_Z10
4036 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4037 return false;
4038
4039 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4040 {
4041 if (INTVAL (len) > 0)
4042 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4043 }
4044
4045 else if (TARGET_MVCLE)
4046 {
4047 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4048 }
4049
4050 else
4051 {
4052 rtx dst_addr, src_addr, count, blocks, temp;
4053 rtx loop_start_label = gen_label_rtx ();
4054 rtx loop_end_label = gen_label_rtx ();
4055 rtx end_label = gen_label_rtx ();
4056 enum machine_mode mode;
4057
4058 mode = GET_MODE (len);
4059 if (mode == VOIDmode)
4060 mode = Pmode;
4061
4062 dst_addr = gen_reg_rtx (Pmode);
4063 src_addr = gen_reg_rtx (Pmode);
4064 count = gen_reg_rtx (mode);
4065 blocks = gen_reg_rtx (mode);
4066
4067 convert_move (count, len, 1);
4068 emit_cmp_and_jump_insns (count, const0_rtx,
4069 EQ, NULL_RTX, mode, 1, end_label);
4070
4071 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4072 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4073 dst = change_address (dst, VOIDmode, dst_addr);
4074 src = change_address (src, VOIDmode, src_addr);
4075
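/* COUNT is loaded with LEN - 1: the bits above the low byte give the
   number of full 256-byte blocks moved in the loop below, while its low
   byte corresponds to the length-minus-one of the final movmem_short
   that copies the remaining 1..256 bytes.  */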
4076 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4077 OPTAB_DIRECT);
4078 if (temp != count)
4079 emit_move_insn (count, temp);
4080
4081 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4082 OPTAB_DIRECT);
4083 if (temp != blocks)
4084 emit_move_insn (blocks, temp);
4085
4086 emit_cmp_and_jump_insns (blocks, const0_rtx,
4087 EQ, NULL_RTX, mode, 1, loop_end_label);
4088
4089 emit_label (loop_start_label);
4090
4091 if (TARGET_Z10
4092 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4093 {
4094 rtx prefetch;
4095
4096 /* Issue a read prefetch for the +3 cache line. */
4097 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4098 const0_rtx, const0_rtx);
4099 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4100 emit_insn (prefetch);
4101
4102 /* Issue a write prefetch for the +3 cache line. */
4103 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4104 const1_rtx, const0_rtx);
4105 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4106 emit_insn (prefetch);
4107 }
4108
4109 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4110 s390_load_address (dst_addr,
4111 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4112 s390_load_address (src_addr,
4113 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4114
4115 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4116 OPTAB_DIRECT);
4117 if (temp != blocks)
4118 emit_move_insn (blocks, temp);
4119
4120 emit_cmp_and_jump_insns (blocks, const0_rtx,
4121 EQ, NULL_RTX, mode, 1, loop_end_label);
4122
4123 emit_jump (loop_start_label);
4124 emit_label (loop_end_label);
4125
4126 emit_insn (gen_movmem_short (dst, src,
4127 convert_to_mode (Pmode, count, 1)));
4128 emit_label (end_label);
4129 }
4130 return true;
4131 }
4132
4133 /* Emit code to set LEN bytes at DST to VAL.
4134 Make use of clrmem if VAL is zero. */
4135
4136 void
4137 s390_expand_setmem (rtx dst, rtx len, rtx val)
4138 {
4139 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4140 return;
4141
4142 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4143
4144 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4145 {
4146 if (val == const0_rtx && INTVAL (len) <= 256)
4147 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4148 else
4149 {
4150 /* Initialize memory by storing the first byte. */
4151 emit_move_insn (adjust_address (dst, QImode, 0), val);
4152
4153 if (INTVAL (len) > 1)
4154 {
4155 /* Initiate 1 byte overlap move.
4156 The first byte of DST is propagated through DSTP1.
4157 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4158 DST is set to size 1 so the rest of the memory location
4159 does not count as source operand. */
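/* E.g. for LEN = 4: after DST[0] = VAL, the overlapping MVC copies
   DST[0..2] to DST[1..3] one byte at a time, replicating VAL over
   all four bytes.  */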
4160 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4161 set_mem_size (dst, 1);
4162
4163 emit_insn (gen_movmem_short (dstp1, dst,
4164 GEN_INT (INTVAL (len) - 2)));
4165 }
4166 }
4167 }
4168
4169 else if (TARGET_MVCLE)
4170 {
4171 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4172 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4173 }
4174
4175 else
4176 {
4177 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4178 rtx loop_start_label = gen_label_rtx ();
4179 rtx loop_end_label = gen_label_rtx ();
4180 rtx end_label = gen_label_rtx ();
4181 enum machine_mode mode;
4182
4183 mode = GET_MODE (len);
4184 if (mode == VOIDmode)
4185 mode = Pmode;
4186
4187 dst_addr = gen_reg_rtx (Pmode);
4188 count = gen_reg_rtx (mode);
4189 blocks = gen_reg_rtx (mode);
4190
4191 convert_move (count, len, 1);
4192 emit_cmp_and_jump_insns (count, const0_rtx,
4193 EQ, NULL_RTX, mode, 1, end_label);
4194
4195 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4196 dst = change_address (dst, VOIDmode, dst_addr);
4197
4198 if (val == const0_rtx)
4199 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4200 OPTAB_DIRECT);
4201 else
4202 {
4203 dstp1 = adjust_address (dst, VOIDmode, 1);
4204 set_mem_size (dst, 1);
4205
4206 /* Initialize memory by storing the first byte. */
4207 emit_move_insn (adjust_address (dst, QImode, 0), val);
4208
4209 /* If count is 1 we are done. */
4210 emit_cmp_and_jump_insns (count, const1_rtx,
4211 EQ, NULL_RTX, mode, 1, end_label);
4212
4213 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4214 OPTAB_DIRECT);
4215 }
4216 if (temp != count)
4217 emit_move_insn (count, temp);
4218
4219 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4220 OPTAB_DIRECT);
4221 if (temp != blocks)
4222 emit_move_insn (blocks, temp);
4223
4224 emit_cmp_and_jump_insns (blocks, const0_rtx,
4225 EQ, NULL_RTX, mode, 1, loop_end_label);
4226
4227 emit_label (loop_start_label);
4228
4229 if (TARGET_Z10
4230 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4231 {
4232 /* Issue a write prefetch for the +4 cache line. */
4233 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4234 GEN_INT (1024)),
4235 const1_rtx, const0_rtx);
4236 emit_insn (prefetch);
4237 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4238 }
4239
4240 if (val == const0_rtx)
4241 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4242 else
4243 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4244 s390_load_address (dst_addr,
4245 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4246
4247 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4248 OPTAB_DIRECT);
4249 if (temp != blocks)
4250 emit_move_insn (blocks, temp);
4251
4252 emit_cmp_and_jump_insns (blocks, const0_rtx,
4253 EQ, NULL_RTX, mode, 1, loop_end_label);
4254
4255 emit_jump (loop_start_label);
4256 emit_label (loop_end_label);
4257
4258 if (val == const0_rtx)
4259 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4260 else
4261 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4262 emit_label (end_label);
4263 }
4264 }
4265
4266 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4267 and return the result in TARGET. */
4268
4269 bool
4270 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4271 {
4272 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4273 rtx tmp;
4274
4275 /* When tuning for z10 or higher we rely on the Glibc functions to
4276 do the right thing. Inline code is generated only for constant
4277 lengths of 64k or less. */
4278 if (s390_tune >= PROCESSOR_2097_Z10
4279 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4280 return false;
4281
4282 /* As the result of CMPINT is inverted compared to what we need,
4283 we have to swap the operands. */
4284 tmp = op0; op0 = op1; op1 = tmp;
4285
4286 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4287 {
4288 if (INTVAL (len) > 0)
4289 {
4290 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4291 emit_insn (gen_cmpint (target, ccreg));
4292 }
4293 else
4294 emit_move_insn (target, const0_rtx);
4295 }
4296 else if (TARGET_MVCLE)
4297 {
4298 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4299 emit_insn (gen_cmpint (target, ccreg));
4300 }
4301 else
4302 {
4303 rtx addr0, addr1, count, blocks, temp;
4304 rtx loop_start_label = gen_label_rtx ();
4305 rtx loop_end_label = gen_label_rtx ();
4306 rtx end_label = gen_label_rtx ();
4307 enum machine_mode mode;
4308
4309 mode = GET_MODE (len);
4310 if (mode == VOIDmode)
4311 mode = Pmode;
4312
4313 addr0 = gen_reg_rtx (Pmode);
4314 addr1 = gen_reg_rtx (Pmode);
4315 count = gen_reg_rtx (mode);
4316 blocks = gen_reg_rtx (mode);
4317
4318 convert_move (count, len, 1);
4319 emit_cmp_and_jump_insns (count, const0_rtx,
4320 EQ, NULL_RTX, mode, 1, end_label);
4321
4322 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4323 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4324 op0 = change_address (op0, VOIDmode, addr0);
4325 op1 = change_address (op1, VOIDmode, addr1);
4326
4327 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4328 OPTAB_DIRECT);
4329 if (temp != count)
4330 emit_move_insn (count, temp);
4331
4332 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4333 OPTAB_DIRECT);
4334 if (temp != blocks)
4335 emit_move_insn (blocks, temp);
4336
4337 emit_cmp_and_jump_insns (blocks, const0_rtx,
4338 EQ, NULL_RTX, mode, 1, loop_end_label);
4339
4340 emit_label (loop_start_label);
4341
4342 if (TARGET_Z10
4343 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4344 {
4345 rtx prefetch;
4346
4347 /* Issue a read prefetch for the +2 cache line of operand 1. */
4348 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4349 const0_rtx, const0_rtx);
4350 emit_insn (prefetch);
4351 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4352
4353 /* Issue a read prefetch for the +2 cache line of operand 2. */
4354 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4355 const0_rtx, const0_rtx);
4356 emit_insn (prefetch);
4357 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4358 }
4359
4360 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4361 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4362 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4363 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4364 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4365 emit_jump_insn (temp);
4366
4367 s390_load_address (addr0,
4368 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4369 s390_load_address (addr1,
4370 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4371
4372 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4373 OPTAB_DIRECT);
4374 if (temp != blocks)
4375 emit_move_insn (blocks, temp);
4376
4377 emit_cmp_and_jump_insns (blocks, const0_rtx,
4378 EQ, NULL_RTX, mode, 1, loop_end_label);
4379
4380 emit_jump (loop_start_label);
4381 emit_label (loop_end_label);
4382
4383 emit_insn (gen_cmpmem_short (op0, op1,
4384 convert_to_mode (Pmode, count, 1)));
4385 emit_label (end_label);
4386
4387 emit_insn (gen_cmpint (target, ccreg));
4388 }
4389 return true;
4390 }
4391
4392
4393 /* Expand conditional increment or decrement using alc/slb instructions.
4394 Should generate code setting DST to either SRC or SRC + INCREMENT,
4395 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4396 Returns true if successful, false otherwise.
4397
4398 That makes it possible to implement some if-constructs without jumps e.g.:
4399 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4400 unsigned int a, b, c;
4401 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4402 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4403 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4404 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4405
4406 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4407 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4408 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4409 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4410 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4411
4412 bool
4413 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4414 rtx dst, rtx src, rtx increment)
4415 {
4416 enum machine_mode cmp_mode;
4417 enum machine_mode cc_mode;
4418 rtx op_res;
4419 rtx insn;
4420 rtvec p;
4421 int ret;
4422
4423 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4424 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4425 cmp_mode = SImode;
4426 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4427 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4428 cmp_mode = DImode;
4429 else
4430 return false;
4431
4432 /* Try ADD LOGICAL WITH CARRY. */
4433 if (increment == const1_rtx)
4434 {
4435 /* Determine CC mode to use. */
4436 if (cmp_code == EQ || cmp_code == NE)
4437 {
4438 if (cmp_op1 != const0_rtx)
4439 {
4440 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4441 NULL_RTX, 0, OPTAB_WIDEN);
4442 cmp_op1 = const0_rtx;
4443 }
4444
4445 cmp_code = cmp_code == EQ ? LEU : GTU;
4446 }
4447
4448 if (cmp_code == LTU || cmp_code == LEU)
4449 {
4450 rtx tem = cmp_op0;
4451 cmp_op0 = cmp_op1;
4452 cmp_op1 = tem;
4453 cmp_code = swap_condition (cmp_code);
4454 }
4455
4456 switch (cmp_code)
4457 {
4458 case GTU:
4459 cc_mode = CCUmode;
4460 break;
4461
4462 case GEU:
4463 cc_mode = CCL3mode;
4464 break;
4465
4466 default:
4467 return false;
4468 }
4469
4470 /* Emit comparison instruction pattern. */
4471 if (!register_operand (cmp_op0, cmp_mode))
4472 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4473
4474 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4475 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4476 /* We use insn_invalid_p here to add clobbers if required. */
4477 ret = insn_invalid_p (emit_insn (insn), false);
4478 gcc_assert (!ret);
4479
4480 /* Emit ALC instruction pattern. */
4481 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4482 gen_rtx_REG (cc_mode, CC_REGNUM),
4483 const0_rtx);
4484
4485 if (src != const0_rtx)
4486 {
4487 if (!register_operand (src, GET_MODE (dst)))
4488 src = force_reg (GET_MODE (dst), src);
4489
4490 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4491 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4492 }
4493
4494 p = rtvec_alloc (2);
4495 RTVEC_ELT (p, 0) =
4496 gen_rtx_SET (VOIDmode, dst, op_res);
4497 RTVEC_ELT (p, 1) =
4498 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4499 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4500
4501 return true;
4502 }
4503
4504 /* Try SUBTRACT LOGICAL WITH BORROW. */
4505 if (increment == constm1_rtx)
4506 {
4507 /* Determine CC mode to use. */
4508 if (cmp_code == EQ || cmp_code == NE)
4509 {
4510 if (cmp_op1 != const0_rtx)
4511 {
4512 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4513 NULL_RTX, 0, OPTAB_WIDEN);
4514 cmp_op1 = const0_rtx;
4515 }
4516
4517 cmp_code = cmp_code == EQ ? LEU : GTU;
4518 }
4519
4520 if (cmp_code == GTU || cmp_code == GEU)
4521 {
4522 rtx tem = cmp_op0;
4523 cmp_op0 = cmp_op1;
4524 cmp_op1 = tem;
4525 cmp_code = swap_condition (cmp_code);
4526 }
4527
4528 switch (cmp_code)
4529 {
4530 case LEU:
4531 cc_mode = CCUmode;
4532 break;
4533
4534 case LTU:
4535 cc_mode = CCL3mode;
4536 break;
4537
4538 default:
4539 return false;
4540 }
4541
4542 /* Emit comparison instruction pattern. */
4543 if (!register_operand (cmp_op0, cmp_mode))
4544 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4545
4546 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4547 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4548 /* We use insn_invalid_p here to add clobbers if required. */
4549 ret = insn_invalid_p (emit_insn (insn), false);
4550 gcc_assert (!ret);
4551
4552 /* Emit SLB instruction pattern. */
4553 if (!register_operand (src, GET_MODE (dst)))
4554 src = force_reg (GET_MODE (dst), src);
4555
4556 op_res = gen_rtx_MINUS (GET_MODE (dst),
4557 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4558 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4559 gen_rtx_REG (cc_mode, CC_REGNUM),
4560 const0_rtx));
4561 p = rtvec_alloc (2);
4562 RTVEC_ELT (p, 0) =
4563 gen_rtx_SET (VOIDmode, dst, op_res);
4564 RTVEC_ELT (p, 1) =
4565 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4566 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4567
4568 return true;
4569 }
4570
4571 return false;
4572 }
4573
4574 /* Expand code for the insv template. Return true if successful. */
4575
4576 bool
4577 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4578 {
4579 int bitsize = INTVAL (op1);
4580 int bitpos = INTVAL (op2);
4581 enum machine_mode mode = GET_MODE (dest);
4582 enum machine_mode smode;
4583 int smode_bsize, mode_bsize;
4584 rtx op, clobber;
4585
4586 /* Generate INSERT IMMEDIATE (IILL et al). */
4587 /* (set (ze (reg)) (const_int)). */
4588 if (TARGET_ZARCH
4589 && register_operand (dest, word_mode)
4590 && (bitpos % 16) == 0
4591 && (bitsize % 16) == 0
4592 && const_int_operand (src, VOIDmode))
4593 {
4594 HOST_WIDE_INT val = INTVAL (src);
4595 int regpos = bitpos + bitsize;
4596
4597 while (regpos > bitpos)
4598 {
4599 enum machine_mode putmode;
4600 int putsize;
4601
4602 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4603 putmode = SImode;
4604 else
4605 putmode = HImode;
4606
4607 putsize = GET_MODE_BITSIZE (putmode);
4608 regpos -= putsize;
4609 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4610 GEN_INT (putsize),
4611 GEN_INT (regpos)),
4612 gen_int_mode (val, putmode));
4613 val >>= putsize;
4614 }
4615 gcc_assert (regpos == bitpos);
4616 return true;
4617 }
4618
4619 smode = smallest_mode_for_size (bitsize, MODE_INT);
4620 smode_bsize = GET_MODE_BITSIZE (smode);
4621 mode_bsize = GET_MODE_BITSIZE (mode);
4622
4623 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4624 if (bitpos == 0
4625 && (bitsize % BITS_PER_UNIT) == 0
4626 && MEM_P (dest)
4627 && (register_operand (src, word_mode)
4628 || const_int_operand (src, VOIDmode)))
4629 {
4630 /* Emit standard pattern if possible. */
4631 if (smode_bsize == bitsize)
4632 {
4633 emit_move_insn (adjust_address (dest, smode, 0),
4634 gen_lowpart (smode, src));
4635 return true;
4636 }
4637
4638 /* (set (ze (mem)) (const_int)). */
4639 else if (const_int_operand (src, VOIDmode))
4640 {
4641 int size = bitsize / BITS_PER_UNIT;
4642 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4643 BLKmode,
4644 UNITS_PER_WORD - size);
4645
4646 dest = adjust_address (dest, BLKmode, 0);
4647 set_mem_size (dest, size);
4648 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4649 return true;
4650 }
4651
4652 /* (set (ze (mem)) (reg)). */
4653 else if (register_operand (src, word_mode))
4654 {
4655 if (bitsize <= 32)
4656 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4657 const0_rtx), src);
4658 else
4659 {
4660 /* Emit st,stcmh sequence. */
4661 int stcmh_width = bitsize - 32;
4662 int size = stcmh_width / BITS_PER_UNIT;
4663
4664 emit_move_insn (adjust_address (dest, SImode, size),
4665 gen_lowpart (SImode, src));
4666 set_mem_size (dest, size);
4667 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4668 GEN_INT (stcmh_width),
4669 const0_rtx),
4670 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4671 }
4672 return true;
4673 }
4674 }
4675
4676 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4677 if ((bitpos % BITS_PER_UNIT) == 0
4678 && (bitsize % BITS_PER_UNIT) == 0
4679 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4680 && MEM_P (src)
4681 && (mode == DImode || mode == SImode)
4682 && register_operand (dest, mode))
4683 {
4684 /* Emit a strict_low_part pattern if possible. */
4685 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4686 {
4687 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4688 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4689 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4690 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4691 return true;
4692 }
4693
4694 /* ??? There are more powerful versions of ICM that are not
4695 completely represented in the md file. */
4696 }
4697
4698 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4699 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4700 {
4701 enum machine_mode mode_s = GET_MODE (src);
4702
4703 if (mode_s == VOIDmode)
4704 {
4705 /* Assume const_int etc already in the proper mode. */
4706 src = force_reg (mode, src);
4707 }
4708 else if (mode_s != mode)
4709 {
4710 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4711 src = force_reg (mode_s, src);
4712 src = gen_lowpart (mode, src);
4713 }
4714
4715 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
4716 op = gen_rtx_SET (VOIDmode, op, src);
4717 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4718 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4719
4720 return true;
4721 }
4722
4723 return false;
4724 }
4725
4726 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4727 register that holds VAL of mode MODE shifted by COUNT bits. */
4728
4729 static inline rtx
4730 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4731 {
4732 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4733 NULL_RTX, 1, OPTAB_DIRECT);
4734 return expand_simple_binop (SImode, ASHIFT, val, count,
4735 NULL_RTX, 1, OPTAB_DIRECT);
4736 }
4737
4738 /* Structure to hold the initial parameters for a compare_and_swap operation
4739 in HImode and QImode. */
4740
4741 struct alignment_context
4742 {
4743 rtx memsi; /* SI aligned memory location. */
4744 rtx shift; /* Bit offset with regard to lsb. */
4745 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4746 rtx modemaski; /* ~modemask */
4747 bool aligned; /* True if memory is aligned, false otherwise. */
4748 };
4749
4750 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4751 structure AC for transparently accessing the containing SImode word; the
4752 access is simplified if the memory alignment is known to be at least
4753 32 bit. MEM is the memory location for the actual operation and MODE its mode. */
4754
4755 static void
4756 init_alignment_context (struct alignment_context *ac, rtx mem,
4757 enum machine_mode mode)
4758 {
4759 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4760 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4761
4762 if (ac->aligned)
4763 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4764 else
4765 {
4766 /* Alignment is unknown. */
4767 rtx byteoffset, addr, align;
4768
4769 /* Force the address into a register. */
4770 addr = force_reg (Pmode, XEXP (mem, 0));
4771
4772 /* Align it to SImode. */
4773 align = expand_simple_binop (Pmode, AND, addr,
4774 GEN_INT (-GET_MODE_SIZE (SImode)),
4775 NULL_RTX, 1, OPTAB_DIRECT);
4776 /* Generate MEM. */
4777 ac->memsi = gen_rtx_MEM (SImode, align);
4778 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4779 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4780 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4781
4782 /* Calculate shiftcount. */
4783 byteoffset = expand_simple_binop (Pmode, AND, addr,
4784 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4785 NULL_RTX, 1, OPTAB_DIRECT);
4786 /* As we already have some offset, evaluate the remaining distance. */
4787 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4788 NULL_RTX, 1, OPTAB_DIRECT);
4789 }
4790
4791 /* Shift is the byte count, but we need the bitcount. */
4792 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4793 NULL_RTX, 1, OPTAB_DIRECT);
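/* E.g. an SImode-aligned QImode access yields shift = (4 - 1) * 8 = 24;
   on this big-endian target the byte occupies bits 31..24 of the
   containing word, i.e. 24 bits above the least significant bit.  */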
4794
4795 /* Calculate masks. */
4796 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4797 GEN_INT (GET_MODE_MASK (mode)),
4798 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4799 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
4800 NULL_RTX, 1);
4801 }
4802
4803 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4804 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4805 perform the merge in SEQ2. */
4806
4807 static rtx
4808 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4809 enum machine_mode mode, rtx val, rtx ins)
4810 {
4811 rtx tmp;
4812
4813 if (ac->aligned)
4814 {
4815 start_sequence ();
4816 tmp = copy_to_mode_reg (SImode, val);
4817 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4818 const0_rtx, ins))
4819 {
4820 *seq1 = NULL;
4821 *seq2 = get_insns ();
4822 end_sequence ();
4823 return tmp;
4824 }
4825 end_sequence ();
4826 }
4827
4828 /* Failed to use insv. Generate a two part shift and mask. */
4829 start_sequence ();
4830 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4831 *seq1 = get_insns ();
4832 end_sequence ();
4833
4834 start_sequence ();
4835 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4836 *seq2 = get_insns ();
4837 end_sequence ();
4838
4839 return tmp;
4840 }
4841
4842 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4843 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4844 value to set if CMP == MEM. */
4845
4846 void
4847 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4848 rtx cmp, rtx new_rtx, bool is_weak)
4849 {
4850 struct alignment_context ac;
4851 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4852 rtx res = gen_reg_rtx (SImode);
4853 rtx csloop = NULL, csend = NULL;
4854
4855 gcc_assert (MEM_P (mem));
4856
4857 init_alignment_context (&ac, mem, mode);
4858
4859 /* Load full word. Subsequent loads are performed by CS. */
4860 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4861 NULL_RTX, 1, OPTAB_DIRECT);
4862
4863 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4864 possible, we try to use insv to make this happen efficiently. If
4865 that fails we'll generate code both inside and outside the loop. */
4866 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4867 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4868
4869 if (seq0)
4870 emit_insn (seq0);
4871 if (seq1)
4872 emit_insn (seq1);
4873
4874 /* Start CS loop. */
4875 if (!is_weak)
4876 {
4877 /* Begin assuming success. */
4878 emit_move_insn (btarget, const1_rtx);
4879
4880 csloop = gen_label_rtx ();
4881 csend = gen_label_rtx ();
4882 emit_label (csloop);
4883 }
4884
4885 /* val = "<mem>00..0<mem>"
4886 * cmp = "00..0<cmp>00..0"
4887 * new = "00..0<new>00..0"
4888 */
4889
4890 emit_insn (seq2);
4891 emit_insn (seq3);
4892
4893 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4894 if (is_weak)
4895 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4896 else
4897 {
4898 rtx tmp;
4899
4900 /* Jump to end if we're done (likely?). */
4901 s390_emit_jump (csend, cc);
4902
4903 /* Check for changes outside mode, and loop internal if so.
4904 Arrange the moves so that the compare is adjacent to the
4905 branch so that we can generate CRJ. */
4906 tmp = copy_to_reg (val);
4907 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4908 1, OPTAB_DIRECT);
4909 cc = s390_emit_compare (NE, val, tmp);
4910 s390_emit_jump (csloop, cc);
4911
4912 /* Failed. */
4913 emit_move_insn (btarget, const0_rtx);
4914 emit_label (csend);
4915 }
4916
4917 /* Return the correct part of the bitfield. */
4918 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4919 NULL_RTX, 1, OPTAB_DIRECT), 1);
4920 }
4921
4922 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4923 and VAL the value to play with. If AFTER is true then store the value
4924 MEM holds after the operation, if AFTER is false then store the value MEM
4925 holds before the operation. If TARGET is zero then discard that value, else
4926 store it to TARGET. */
4927
4928 void
4929 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4930 rtx target, rtx mem, rtx val, bool after)
4931 {
4932 struct alignment_context ac;
4933 rtx cmp;
4934 rtx new_rtx = gen_reg_rtx (SImode);
4935 rtx orig = gen_reg_rtx (SImode);
4936 rtx csloop = gen_label_rtx ();
4937
4938 gcc_assert (!target || register_operand (target, VOIDmode));
4939 gcc_assert (MEM_P (mem));
4940
4941 init_alignment_context (&ac, mem, mode);
4942
4943 /* Shift val to the correct bit positions.
4944 Preserve "icm", but prevent "ex icm". */
4945 if (!(ac.aligned && code == SET && MEM_P (val)))
4946 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4947
4948 /* Further preparation insns. */
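/* For PLUS and MINUS the shifted addend is preserved in ORIG because the
   arithmetic below is redone with a freshly loaded word on each iteration
   of the CS loop; for AND and MULT (NAND) the bits outside the field are
   set to 1 so that they pass through the operation unchanged.  */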
4949 if (code == PLUS || code == MINUS)
4950 emit_move_insn (orig, val);
4951 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4952 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4953 NULL_RTX, 1, OPTAB_DIRECT);
4954
4955 /* Load full word. Subsequent loads are performed by CS. */
4956 cmp = force_reg (SImode, ac.memsi);
4957
4958 /* Start CS loop. */
4959 emit_label (csloop);
4960 emit_move_insn (new_rtx, cmp);
4961
4962 /* Patch new with val at correct position. */
4963 switch (code)
4964 {
4965 case PLUS:
4966 case MINUS:
4967 val = expand_simple_binop (SImode, code, new_rtx, orig,
4968 NULL_RTX, 1, OPTAB_DIRECT);
4969 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4970 NULL_RTX, 1, OPTAB_DIRECT);
4971 /* FALLTHRU */
4972 case SET:
4973 if (ac.aligned && MEM_P (val))
4974 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
4975 0, 0, SImode, val);
4976 else
4977 {
4978 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4979 NULL_RTX, 1, OPTAB_DIRECT);
4980 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4981 NULL_RTX, 1, OPTAB_DIRECT);
4982 }
4983 break;
4984 case AND:
4985 case IOR:
4986 case XOR:
4987 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4988 NULL_RTX, 1, OPTAB_DIRECT);
4989 break;
4990 case MULT: /* NAND */
4991 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4992 NULL_RTX, 1, OPTAB_DIRECT);
4993 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4994 NULL_RTX, 1, OPTAB_DIRECT);
4995 break;
4996 default:
4997 gcc_unreachable ();
4998 }
4999
5000 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5001 ac.memsi, cmp, new_rtx));
5002
5003 /* Return the correct part of the bitfield. */
5004 if (target)
5005 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5006 after ? new_rtx : cmp, ac.shift,
5007 NULL_RTX, 1, OPTAB_DIRECT), 1);
5008 }
5009
5010 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5011 We need to emit DTP-relative relocations. */
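/* For instance, with SIZE == 8 and X a symbol_ref for a (hypothetical)
   symbol foo, this emits ".quad foo@DTPOFF".  */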
5012
5013 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5014
5015 static void
5016 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5017 {
5018 switch (size)
5019 {
5020 case 4:
5021 fputs ("\t.long\t", file);
5022 break;
5023 case 8:
5024 fputs ("\t.quad\t", file);
5025 break;
5026 default:
5027 gcc_unreachable ();
5028 }
5029 output_addr_const (file, x);
5030 fputs ("@DTPOFF", file);
5031 }
5032
5033 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5034 /* Implement TARGET_MANGLE_TYPE. */
5035
5036 static const char *
5037 s390_mangle_type (const_tree type)
5038 {
5039 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5040 && TARGET_LONG_DOUBLE_128)
5041 return "g";
5042
5043 /* For all other types, use normal C++ mangling. */
5044 return NULL;
5045 }
5046 #endif
5047
5048 /* In the name of slightly smaller debug output, and to cater to
5049 general assembler lossage, recognize various UNSPEC sequences
5050 and turn them back into a direct symbol reference. */
5051
5052 static rtx
5053 s390_delegitimize_address (rtx orig_x)
5054 {
5055 rtx x, y;
5056
5057 orig_x = delegitimize_mem_from_attrs (orig_x);
5058 x = orig_x;
5059
5060 /* Extract the symbol ref from:
5061 (plus:SI (reg:SI 12 %r12)
5062 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5063 UNSPEC_GOTOFF/PLTOFF)))
5064 and
5065 (plus:SI (reg:SI 12 %r12)
5066 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5067 UNSPEC_GOTOFF/PLTOFF)
5068 (const_int 4 [0x4])))) */
5069 if (GET_CODE (x) == PLUS
5070 && REG_P (XEXP (x, 0))
5071 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5072 && GET_CODE (XEXP (x, 1)) == CONST)
5073 {
5074 HOST_WIDE_INT offset = 0;
5075
5076 /* The const operand. */
5077 y = XEXP (XEXP (x, 1), 0);
5078
5079 if (GET_CODE (y) == PLUS
5080 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5081 {
5082 offset = INTVAL (XEXP (y, 1));
5083 y = XEXP (y, 0);
5084 }
5085
5086 if (GET_CODE (y) == UNSPEC
5087 && (XINT (y, 1) == UNSPEC_GOTOFF
5088 || XINT (y, 1) == UNSPEC_PLTOFF))
5089 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5090 }
5091
5092 if (GET_CODE (x) != MEM)
5093 return orig_x;
5094
5095 x = XEXP (x, 0);
5096 if (GET_CODE (x) == PLUS
5097 && GET_CODE (XEXP (x, 1)) == CONST
5098 && GET_CODE (XEXP (x, 0)) == REG
5099 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5100 {
5101 y = XEXP (XEXP (x, 1), 0);
5102 if (GET_CODE (y) == UNSPEC
5103 && XINT (y, 1) == UNSPEC_GOT)
5104 y = XVECEXP (y, 0, 0);
5105 else
5106 return orig_x;
5107 }
5108 else if (GET_CODE (x) == CONST)
5109 {
5110 /* Extract the symbol ref from:
5111 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5112 UNSPEC_PLT/GOTENT))) */
5113
5114 y = XEXP (x, 0);
5115 if (GET_CODE (y) == UNSPEC
5116 && (XINT (y, 1) == UNSPEC_GOTENT
5117 || XINT (y, 1) == UNSPEC_PLT))
5118 y = XVECEXP (y, 0, 0);
5119 else
5120 return orig_x;
5121 }
5122 else
5123 return orig_x;
5124
5125 if (GET_MODE (orig_x) != Pmode)
5126 {
5127 if (GET_MODE (orig_x) == BLKmode)
5128 return orig_x;
5129 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5130 if (y == NULL_RTX)
5131 return orig_x;
5132 }
5133 return y;
5134 }
5135
5136 /* Output operand OP to stdio stream FILE.
5137 OP is an address (register + offset) which is not used to address data;
5138 instead the rightmost bits are interpreted as the value. */
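/* E.g. the (illustrative) operand (plus (reg %r1) (const_int 46)) is
   printed as "46(%r1)"; the offset is reduced to its low twelve bits.  */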
5139
5140 static void
5141 print_shift_count_operand (FILE *file, rtx op)
5142 {
5143 HOST_WIDE_INT offset;
5144 rtx base;
5145
5146 /* Extract base register and offset. */
5147 if (!s390_decompose_shift_count (op, &base, &offset))
5148 gcc_unreachable ();
5149
5150 /* Sanity check. */
5151 if (base)
5152 {
5153 gcc_assert (GET_CODE (base) == REG);
5154 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5155 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5156 }
5157
5158 /* Offsets are restricted to twelve bits. */
5159 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5160 if (base)
5161 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5162 }
5163
5164 /* See 'get_some_local_dynamic_name'. */
5165
5166 static int
5167 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5168 {
5169 rtx x = *px;
5170
5171 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5172 {
5173 x = get_pool_constant (x);
5174 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5175 }
5176
5177 if (GET_CODE (x) == SYMBOL_REF
5178 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5179 {
5180 cfun->machine->some_ld_name = XSTR (x, 0);
5181 return 1;
5182 }
5183
5184 return 0;
5185 }
5186
5187 /* Locate some local-dynamic symbol still in use by this function
5188 so that we can print its name in local-dynamic base patterns. */
5189
5190 static const char *
5191 get_some_local_dynamic_name (void)
5192 {
5193 rtx insn;
5194
5195 if (cfun->machine->some_ld_name)
5196 return cfun->machine->some_ld_name;
5197
5198 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5199 if (INSN_P (insn)
5200 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5201 return cfun->machine->some_ld_name;
5202
5203 gcc_unreachable ();
5204 }
5205
5206 /* Output machine-dependent UNSPECs occurring in address constant X
5207 in assembler syntax to stdio stream FILE. Returns true if the
5208 constant X could be recognized, false otherwise. */
5209
5210 static bool
5211 s390_output_addr_const_extra (FILE *file, rtx x)
5212 {
5213 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5214 switch (XINT (x, 1))
5215 {
5216 case UNSPEC_GOTENT:
5217 output_addr_const (file, XVECEXP (x, 0, 0));
5218 fprintf (file, "@GOTENT");
5219 return true;
5220 case UNSPEC_GOT:
5221 output_addr_const (file, XVECEXP (x, 0, 0));
5222 fprintf (file, "@GOT");
5223 return true;
5224 case UNSPEC_GOTOFF:
5225 output_addr_const (file, XVECEXP (x, 0, 0));
5226 fprintf (file, "@GOTOFF");
5227 return true;
5228 case UNSPEC_PLT:
5229 output_addr_const (file, XVECEXP (x, 0, 0));
5230 fprintf (file, "@PLT");
5231 return true;
5232 case UNSPEC_PLTOFF:
5233 output_addr_const (file, XVECEXP (x, 0, 0));
5234 fprintf (file, "@PLTOFF");
5235 return true;
5236 case UNSPEC_TLSGD:
5237 output_addr_const (file, XVECEXP (x, 0, 0));
5238 fprintf (file, "@TLSGD");
5239 return true;
5240 case UNSPEC_TLSLDM:
5241 assemble_name (file, get_some_local_dynamic_name ());
5242 fprintf (file, "@TLSLDM");
5243 return true;
5244 case UNSPEC_DTPOFF:
5245 output_addr_const (file, XVECEXP (x, 0, 0));
5246 fprintf (file, "@DTPOFF");
5247 return true;
5248 case UNSPEC_NTPOFF:
5249 output_addr_const (file, XVECEXP (x, 0, 0));
5250 fprintf (file, "@NTPOFF");
5251 return true;
5252 case UNSPEC_GOTNTPOFF:
5253 output_addr_const (file, XVECEXP (x, 0, 0));
5254 fprintf (file, "@GOTNTPOFF");
5255 return true;
5256 case UNSPEC_INDNTPOFF:
5257 output_addr_const (file, XVECEXP (x, 0, 0));
5258 fprintf (file, "@INDNTPOFF");
5259 return true;
5260 }
5261
5262 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5263 switch (XINT (x, 1))
5264 {
5265 case UNSPEC_POOL_OFFSET:
5266 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5267 output_addr_const (file, x);
5268 return true;
5269 }
5270 return false;
5271 }
5272
5273 /* Output address operand ADDR in assembler syntax to
5274 stdio stream FILE. */
5275
5276 void
5277 print_operand_address (FILE *file, rtx addr)
5278 {
5279 struct s390_address ad;
5280
5281 if (s390_loadrelative_operand_p (addr))
5282 {
5283 if (!TARGET_Z10)
5284 {
5285 output_operand_lossage ("symbolic memory references are "
5286 "only supported on z10 or later");
5287 return;
5288 }
5289 output_addr_const (file, addr);
5290 return;
5291 }
5292
5293 if (!s390_decompose_address (addr, &ad)
5294 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5295 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5296 output_operand_lossage ("cannot decompose address");
5297
5298 if (ad.disp)
5299 output_addr_const (file, ad.disp);
5300 else
5301 fprintf (file, "0");
5302
5303 if (ad.base && ad.indx)
5304 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5305 reg_names[REGNO (ad.base)]);
5306 else if (ad.base)
5307 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5308 }
5309
5310 /* Output operand X in assembler syntax to stdio stream FILE.
5311 CODE specified the format flag. The following format flags
5312 are recognized:
5313
5314 'C': print opcode suffix for branch condition.
5315 'D': print opcode suffix for inverse branch condition.
5316 'E': print opcode suffix for branch on index instruction.
5317 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5318 'G': print the size of the operand in bytes.
5319 'O': print only the displacement of a memory reference.
5320 'R': print only the base register of a memory reference.
5321 'S': print S-type memory reference (base+displacement).
5322 'N': print the second word of a DImode operand.
5323 'M': print the second word of a TImode operand.
5324 'Y': print shift count operand.
5325
5326 'b': print integer X as if it's an unsigned byte.
5327 'c': print integer X as if it's a signed byte.
5328 'x': print integer X as if it's an unsigned halfword.
5329 'h': print integer X as if it's a signed halfword.
5330 'i': print the first nonzero HImode part of X.
5331 'j': print the first HImode part unequal to -1 of X.
5332 'k': print the first nonzero SImode part of X.
5333 'm': print the first SImode part unequal to -1 of X.
5334 'o': print integer X as if it's an unsigned 32bit word. */
5335
5336 void
5337 print_operand (FILE *file, rtx x, int code)
5338 {
5339 switch (code)
5340 {
5341 case 'C':
5342 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5343 return;
5344
5345 case 'D':
5346 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5347 return;
5348
5349 case 'E':
5350 if (GET_CODE (x) == LE)
5351 fprintf (file, "l");
5352 else if (GET_CODE (x) == GT)
5353 fprintf (file, "h");
5354 else
5355 output_operand_lossage ("invalid comparison operator "
5356 "for 'E' output modifier");
5357 return;
5358
5359 case 'J':
5360 if (GET_CODE (x) == SYMBOL_REF)
5361 {
5362 fprintf (file, "%s", ":tls_load:");
5363 output_addr_const (file, x);
5364 }
5365 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5366 {
5367 fprintf (file, "%s", ":tls_gdcall:");
5368 output_addr_const (file, XVECEXP (x, 0, 0));
5369 }
5370 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5371 {
5372 fprintf (file, "%s", ":tls_ldcall:");
5373 assemble_name (file, get_some_local_dynamic_name ());
5374 }
5375 else
5376 output_operand_lossage ("invalid reference for 'J' output modifier");
5377 return;
5378
5379 case 'G':
5380 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5381 return;
5382
5383 case 'O':
5384 {
5385 struct s390_address ad;
5386 int ret;
5387
5388 if (!MEM_P (x))
5389 {
5390 output_operand_lossage ("memory reference expected for "
5391 "'O' output modifier");
5392 return;
5393 }
5394
5395 ret = s390_decompose_address (XEXP (x, 0), &ad);
5396
5397 if (!ret
5398 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5399 || ad.indx)
5400 {
5401 output_operand_lossage ("invalid address for 'O' output modifier");
5402 return;
5403 }
5404
5405 if (ad.disp)
5406 output_addr_const (file, ad.disp);
5407 else
5408 fprintf (file, "0");
5409 }
5410 return;
5411
5412 case 'R':
5413 {
5414 struct s390_address ad;
5415 int ret;
5416
5417 if (!MEM_P (x))
5418 {
5419 output_operand_lossage ("memory reference expected for "
5420 "'R' output modifier");
5421 return;
5422 }
5423
5424 ret = s390_decompose_address (XEXP (x, 0), &ad);
5425
5426 if (!ret
5427 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5428 || ad.indx)
5429 {
5430 output_operand_lossage ("invalid address for 'R' output modifier");
5431 return;
5432 }
5433
5434 if (ad.base)
5435 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5436 else
5437 fprintf (file, "0");
5438 }
5439 return;
5440
5441 case 'S':
5442 {
5443 struct s390_address ad;
5444 int ret;
5445
5446 if (!MEM_P (x))
5447 {
5448 output_operand_lossage ("memory reference expected for "
5449 "'S' output modifier");
5450 return;
5451 }
5452 ret = s390_decompose_address (XEXP (x, 0), &ad);
5453
5454 if (!ret
5455 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5456 || ad.indx)
5457 {
5458 output_operand_lossage ("invalid address for 'S' output modifier");
5459 return;
5460 }
5461
5462 if (ad.disp)
5463 output_addr_const (file, ad.disp);
5464 else
5465 fprintf (file, "0");
5466
5467 if (ad.base)
5468 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5469 }
5470 return;
5471
5472 case 'N':
5473 if (GET_CODE (x) == REG)
5474 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5475 else if (GET_CODE (x) == MEM)
5476 x = change_address (x, VOIDmode,
5477 plus_constant (Pmode, XEXP (x, 0), 4));
5478 else
5479 output_operand_lossage ("register or memory expression expected "
5480 "for 'N' output modifier");
5481 break;
5482
5483 case 'M':
5484 if (GET_CODE (x) == REG)
5485 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5486 else if (GET_CODE (x) == MEM)
5487 x = change_address (x, VOIDmode,
5488 plus_constant (Pmode, XEXP (x, 0), 8));
5489 else
5490 output_operand_lossage ("register or memory expression expected "
5491 "for 'M' output modifier");
5492 break;
5493
5494 case 'Y':
5495 print_shift_count_operand (file, x);
5496 return;
5497 }
5498
5499 switch (GET_CODE (x))
5500 {
5501 case REG:
5502 fprintf (file, "%s", reg_names[REGNO (x)]);
5503 break;
5504
5505 case MEM:
5506 output_address (XEXP (x, 0));
5507 break;
5508
5509 case CONST:
5510 case CODE_LABEL:
5511 case LABEL_REF:
5512 case SYMBOL_REF:
5513 output_addr_const (file, x);
5514 break;
5515
5516 case CONST_INT:
5517 if (code == 'b')
5518 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5519 else if (code == 'c')
5520 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5521 else if (code == 'x')
5522 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5523 else if (code == 'h')
5524 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5525 else if (code == 'i')
5526 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5527 s390_extract_part (x, HImode, 0));
5528 else if (code == 'j')
5529 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5530 s390_extract_part (x, HImode, -1));
5531 else if (code == 'k')
5532 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5533 s390_extract_part (x, SImode, 0));
5534 else if (code == 'm')
5535 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5536 s390_extract_part (x, SImode, -1));
5537 else if (code == 'o')
5538 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5539 else
5540 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5541 break;
5542
5543 case CONST_DOUBLE:
5544 gcc_assert (GET_MODE (x) == VOIDmode);
5545 if (code == 'b')
5546 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5547 else if (code == 'x')
5548 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5549 else if (code == 'h')
5550 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5551 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5552 else
5553 {
5554 if (code == 0)
5555 output_operand_lossage ("invalid constant - try using "
5556 "an output modifier");
5557 else
5558 output_operand_lossage ("invalid constant for output modifier '%c'",
5559 code);
5560 }
5561 break;
5562
5563 default:
5564 if (code == 0)
5565 output_operand_lossage ("invalid expression - try using "
5566 "an output modifier");
5567 else
5568 output_operand_lossage ("invalid expression for output "
5569 "modifier '%c'", code);
5570 break;
5571 }
5572 }
5573
5574 /* Target hook for assembling integer objects. We need to define it
5575 here to work around a bug in some versions of GAS, which couldn't
5576 handle values smaller than INT_MIN when printed in decimal. */
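/* E.g. the (hypothetical) value -2147483649 is emitted as
   ".quad 0xffffffff7fffffff" instead of in decimal.  */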
5577
5578 static bool
5579 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5580 {
5581 if (size == 8 && aligned_p
5582 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5583 {
5584 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5585 INTVAL (x));
5586 return true;
5587 }
5588 return default_assemble_integer (x, size, aligned_p);
5589 }
5590
5591 /* Returns true if register REGNO is used for forming
5592 a memory address in expression X. */
5593
5594 static bool
5595 reg_used_in_mem_p (int regno, rtx x)
5596 {
5597 enum rtx_code code = GET_CODE (x);
5598 int i, j;
5599 const char *fmt;
5600
5601 if (code == MEM)
5602 {
5603 if (refers_to_regno_p (regno, regno+1,
5604 XEXP (x, 0), 0))
5605 return true;
5606 }
5607 else if (code == SET
5608 && GET_CODE (SET_DEST (x)) == PC)
5609 {
5610 if (refers_to_regno_p (regno, regno+1,
5611 SET_SRC (x), 0))
5612 return true;
5613 }
5614
5615 fmt = GET_RTX_FORMAT (code);
5616 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5617 {
5618 if (fmt[i] == 'e'
5619 && reg_used_in_mem_p (regno, XEXP (x, i)))
5620 return true;
5621
5622 else if (fmt[i] == 'E')
5623 for (j = 0; j < XVECLEN (x, i); j++)
5624 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5625 return true;
5626 }
5627 return false;
5628 }
5629
5630 /* Returns true if expression DEP_RTX sets an address register
5631 used by instruction INSN to address memory. */
5632
5633 static bool
5634 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5635 {
5636 rtx target, pat;
5637
5638 if (GET_CODE (dep_rtx) == INSN)
5639 dep_rtx = PATTERN (dep_rtx);
5640
5641 if (GET_CODE (dep_rtx) == SET)
5642 {
5643 target = SET_DEST (dep_rtx);
5644 if (GET_CODE (target) == STRICT_LOW_PART)
5645 target = XEXP (target, 0);
5646 while (GET_CODE (target) == SUBREG)
5647 target = SUBREG_REG (target);
5648
5649 if (GET_CODE (target) == REG)
5650 {
5651 int regno = REGNO (target);
5652
5653 if (s390_safe_attr_type (insn) == TYPE_LA)
5654 {
5655 pat = PATTERN (insn);
5656 if (GET_CODE (pat) == PARALLEL)
5657 {
5658 gcc_assert (XVECLEN (pat, 0) == 2);
5659 pat = XVECEXP (pat, 0, 0);
5660 }
5661 gcc_assert (GET_CODE (pat) == SET);
5662 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5663 }
5664 else if (get_attr_atype (insn) == ATYPE_AGEN)
5665 return reg_used_in_mem_p (regno, PATTERN (insn));
5666 }
5667 }
5668 return false;
5669 }
5670
5671 /* Return 1 if DEP_INSN sets a register that INSN uses in the agen unit. */
5672
5673 int
5674 s390_agen_dep_p (rtx dep_insn, rtx insn)
5675 {
5676 rtx dep_rtx = PATTERN (dep_insn);
5677 int i;
5678
5679 if (GET_CODE (dep_rtx) == SET
5680 && addr_generation_dependency_p (dep_rtx, insn))
5681 return 1;
5682 else if (GET_CODE (dep_rtx) == PARALLEL)
5683 {
5684 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5685 {
5686 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5687 return 1;
5688 }
5689 }
5690 return 0;
5691 }
5692
5693
5694 /* Implement the TARGET_SCHED_ADJUST_PRIORITY hook: update the integer
5695 scheduling priority INSN_PRIORITY (INSN). A higher priority makes the
5696 insn execute earlier, a lower priority makes it execute later.
5697
5698 A STD instruction should be scheduled earlier,
5699 in order to use the bypass. */
5701 static int
5702 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5703 {
5704 if (! INSN_P (insn))
5705 return priority;
5706
5707 if (s390_tune != PROCESSOR_2084_Z990
5708 && s390_tune != PROCESSOR_2094_Z9_109
5709 && s390_tune != PROCESSOR_2097_Z10
5710 && s390_tune != PROCESSOR_2817_Z196)
5711 return priority;
5712
5713 switch (s390_safe_attr_type (insn))
5714 {
5715 case TYPE_FSTOREDF:
5716 case TYPE_FSTORESF:
5717 priority = priority << 3;
5718 break;
5719 case TYPE_STORE:
5720 case TYPE_STM:
5721 priority = priority << 1;
5722 break;
5723 default:
5724 break;
5725 }
5726 return priority;
5727 }
5728
5729
5730 /* The number of instructions that can be issued per cycle. */
5731
5732 static int
5733 s390_issue_rate (void)
5734 {
5735 switch (s390_tune)
5736 {
5737 case PROCESSOR_2084_Z990:
5738 case PROCESSOR_2094_Z9_109:
5739 case PROCESSOR_2817_Z196:
5740 return 3;
5741 case PROCESSOR_2097_Z10:
5742 return 2;
5743 default:
5744 return 1;
5745 }
5746 }
5747
5748 static int
5749 s390_first_cycle_multipass_dfa_lookahead (void)
5750 {
5751 return 4;
5752 }
5753
5754 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5755 Fix up MEMs as required. */
5756
5757 static void
5758 annotate_constant_pool_refs (rtx *x)
5759 {
5760 int i, j;
5761 const char *fmt;
5762
5763 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5764 || !CONSTANT_POOL_ADDRESS_P (*x));
5765
5766 /* Literal pool references can only occur inside a MEM ... */
5767 if (GET_CODE (*x) == MEM)
5768 {
5769 rtx memref = XEXP (*x, 0);
5770
5771 if (GET_CODE (memref) == SYMBOL_REF
5772 && CONSTANT_POOL_ADDRESS_P (memref))
5773 {
5774 rtx base = cfun->machine->base_reg;
5775 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5776 UNSPEC_LTREF);
5777
5778 *x = replace_equiv_address (*x, addr);
5779 return;
5780 }
5781
5782 if (GET_CODE (memref) == CONST
5783 && GET_CODE (XEXP (memref, 0)) == PLUS
5784 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5785 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5786 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5787 {
5788 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5789 rtx sym = XEXP (XEXP (memref, 0), 0);
5790 rtx base = cfun->machine->base_reg;
5791 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5792 UNSPEC_LTREF);
5793
5794 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5795 return;
5796 }
5797 }
5798
5799 /* ... or a load-address type pattern. */
5800 if (GET_CODE (*x) == SET)
5801 {
5802 rtx addrref = SET_SRC (*x);
5803
5804 if (GET_CODE (addrref) == SYMBOL_REF
5805 && CONSTANT_POOL_ADDRESS_P (addrref))
5806 {
5807 rtx base = cfun->machine->base_reg;
5808 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5809 UNSPEC_LTREF);
5810
5811 SET_SRC (*x) = addr;
5812 return;
5813 }
5814
5815 if (GET_CODE (addrref) == CONST
5816 && GET_CODE (XEXP (addrref, 0)) == PLUS
5817 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5818 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5819 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5820 {
5821 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5822 rtx sym = XEXP (XEXP (addrref, 0), 0);
5823 rtx base = cfun->machine->base_reg;
5824 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5825 UNSPEC_LTREF);
5826
5827 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5828 return;
5829 }
5830 }
5831
5832 /* Annotate LTREL_BASE as well. */
5833 if (GET_CODE (*x) == UNSPEC
5834 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5835 {
5836 rtx base = cfun->machine->base_reg;
5837 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5838 UNSPEC_LTREL_BASE);
5839 return;
5840 }
5841
5842 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5843 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5844 {
5845 if (fmt[i] == 'e')
5846 {
5847 annotate_constant_pool_refs (&XEXP (*x, i));
5848 }
5849 else if (fmt[i] == 'E')
5850 {
5851 for (j = 0; j < XVECLEN (*x, i); j++)
5852 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5853 }
5854 }
5855 }
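/* Sketch of the rewrite performed above: a literal pool reference such as

       (mem (symbol_ref ".LC0"))

   becomes

       (mem (unspec [(symbol_ref ".LC0") (reg <base>)] UNSPEC_LTREF))

   so that the dependence on the literal pool base register is explicit in
   the RTL.  ".LC0" and <base> are placeholders, not actual operands.  */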
5856
5857 /* Split all branches that exceed the maximum distance.
5858 Returns true if this created a new literal pool entry. */
5859
5860 static int
5861 s390_split_branches (void)
5862 {
5863 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5864 int new_literal = 0, ret;
5865 rtx insn, pat, tmp, target;
5866 rtx *label;
5867
5868 /* We need correct insn addresses. */
5869
5870 shorten_branches (get_insns ());
5871
5872 /* Find all branches that exceed 64KB, and split them. */
5873
5874 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5875 {
5876 if (GET_CODE (insn) != JUMP_INSN)
5877 continue;
5878
5879 pat = PATTERN (insn);
5880 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5881 pat = XVECEXP (pat, 0, 0);
5882 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5883 continue;
5884
5885 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5886 {
5887 label = &SET_SRC (pat);
5888 }
5889 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5890 {
5891 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5892 label = &XEXP (SET_SRC (pat), 1);
5893 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5894 label = &XEXP (SET_SRC (pat), 2);
5895 else
5896 continue;
5897 }
5898 else
5899 continue;
5900
5901 if (get_attr_length (insn) <= 4)
5902 continue;
5903
5904 /* We are going to use the return register as a scratch register;
5905 make sure it will be saved/restored by the prologue/epilogue. */
5906 cfun_frame_layout.save_return_addr_p = 1;
5907
5908 if (!flag_pic)
5909 {
5910 new_literal = 1;
5911 tmp = force_const_mem (Pmode, *label);
5912 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5913 INSN_ADDRESSES_NEW (tmp, -1);
5914 annotate_constant_pool_refs (&PATTERN (tmp));
5915
5916 target = temp_reg;
5917 }
5918 else
5919 {
5920 new_literal = 1;
5921 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5922 UNSPEC_LTREL_OFFSET);
5923 target = gen_rtx_CONST (Pmode, target);
5924 target = force_const_mem (Pmode, target);
5925 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5926 INSN_ADDRESSES_NEW (tmp, -1);
5927 annotate_constant_pool_refs (&PATTERN (tmp));
5928
5929 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5930 cfun->machine->base_reg),
5931 UNSPEC_LTREL_BASE);
5932 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5933 }
5934
5935 ret = validate_change (insn, label, target, 0);
5936 gcc_assert (ret);
5937 }
5938
5939 return new_literal;
5940 }
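/* The splitting above roughly turns an out-of-range branch

       brc   <cond>,.Lfar

   into a sequence along the lines of

       l     %r14,<pool slot holding the address of .Lfar>
       bcr   <cond>,%r14

   with the target address (or, for PIC, its pool-relative offset) placed
   into the literal pool.  The mnemonics are only illustrative; the actual
   insns come from the machine description patterns.  */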
5941
5942
5943 /* Find an annotated literal pool symbol referenced in RTX X,
5944 and store it at REF. Will abort if X contains references to
5945 more than one such pool symbol; multiple references to the same
5946 symbol are allowed, however.
5947
5948 The rtx pointed to by REF must be initialized to NULL_RTX
5949 by the caller before calling this routine. */
5950
5951 static void
5952 find_constant_pool_ref (rtx x, rtx *ref)
5953 {
5954 int i, j;
5955 const char *fmt;
5956
5957 /* Ignore LTREL_BASE references. */
5958 if (GET_CODE (x) == UNSPEC
5959 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5960 return;
5961 /* Likewise POOL_ENTRY insns. */
5962 if (GET_CODE (x) == UNSPEC_VOLATILE
5963 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5964 return;
5965
5966 gcc_assert (GET_CODE (x) != SYMBOL_REF
5967 || !CONSTANT_POOL_ADDRESS_P (x));
5968
5969 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5970 {
5971 rtx sym = XVECEXP (x, 0, 0);
5972 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5973 && CONSTANT_POOL_ADDRESS_P (sym));
5974
5975 if (*ref == NULL_RTX)
5976 *ref = sym;
5977 else
5978 gcc_assert (*ref == sym);
5979
5980 return;
5981 }
5982
5983 fmt = GET_RTX_FORMAT (GET_CODE (x));
5984 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5985 {
5986 if (fmt[i] == 'e')
5987 {
5988 find_constant_pool_ref (XEXP (x, i), ref);
5989 }
5990 else if (fmt[i] == 'E')
5991 {
5992 for (j = 0; j < XVECLEN (x, i); j++)
5993 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5994 }
5995 }
5996 }
5997
5998 /* Replace every reference to the annotated literal pool
5999 symbol REF in X by its base plus OFFSET. */
6000
6001 static void
6002 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6003 {
6004 int i, j;
6005 const char *fmt;
6006
6007 gcc_assert (*x != ref);
6008
6009 if (GET_CODE (*x) == UNSPEC
6010 && XINT (*x, 1) == UNSPEC_LTREF
6011 && XVECEXP (*x, 0, 0) == ref)
6012 {
6013 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6014 return;
6015 }
6016
6017 if (GET_CODE (*x) == PLUS
6018 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6019 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6020 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6021 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6022 {
6023 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6024 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6025 return;
6026 }
6027
6028 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6029 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6030 {
6031 if (fmt[i] == 'e')
6032 {
6033 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6034 }
6035 else if (fmt[i] == 'E')
6036 {
6037 for (j = 0; j < XVECLEN (*x, i); j++)
6038 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6039 }
6040 }
6041 }
6042
6043 /* Check whether X contains an UNSPEC_LTREL_BASE.
6044 Return its constant pool symbol if found, NULL_RTX otherwise. */
6045
6046 static rtx
6047 find_ltrel_base (rtx x)
6048 {
6049 int i, j;
6050 const char *fmt;
6051
6052 if (GET_CODE (x) == UNSPEC
6053 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6054 return XVECEXP (x, 0, 0);
6055
6056 fmt = GET_RTX_FORMAT (GET_CODE (x));
6057 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6058 {
6059 if (fmt[i] == 'e')
6060 {
6061 rtx fnd = find_ltrel_base (XEXP (x, i));
6062 if (fnd)
6063 return fnd;
6064 }
6065 else if (fmt[i] == 'E')
6066 {
6067 for (j = 0; j < XVECLEN (x, i); j++)
6068 {
6069 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6070 if (fnd)
6071 return fnd;
6072 }
6073 }
6074 }
6075
6076 return NULL_RTX;
6077 }
6078
6079 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6080
6081 static void
6082 replace_ltrel_base (rtx *x)
6083 {
6084 int i, j;
6085 const char *fmt;
6086
6087 if (GET_CODE (*x) == UNSPEC
6088 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6089 {
6090 *x = XVECEXP (*x, 0, 1);
6091 return;
6092 }
6093
6094 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6095 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6096 {
6097 if (fmt[i] == 'e')
6098 {
6099 replace_ltrel_base (&XEXP (*x, i));
6100 }
6101 else if (fmt[i] == 'E')
6102 {
6103 for (j = 0; j < XVECLEN (*x, i); j++)
6104 replace_ltrel_base (&XVECEXP (*x, i, j));
6105 }
6106 }
6107 }
6108
6109
6110 /* We keep a list of constants which we have to add to internal
6111 constant tables in the middle of large functions. */
6112
6113 #define NR_C_MODES 11
6114 enum machine_mode constant_modes[NR_C_MODES] =
6115 {
6116 TFmode, TImode, TDmode,
6117 DFmode, DImode, DDmode,
6118 SFmode, SImode, SDmode,
6119 HImode,
6120 QImode
6121 };
6122
6123 struct constant
6124 {
6125 struct constant *next;
6126 rtx value;
6127 rtx label;
6128 };
6129
6130 struct constant_pool
6131 {
6132 struct constant_pool *next;
6133 rtx first_insn;
6134 rtx pool_insn;
6135 bitmap insns;
6136 rtx emit_pool_after;
6137
6138 struct constant *constants[NR_C_MODES];
6139 struct constant *execute;
6140 rtx label;
6141 int size;
6142 };
6143
6144 /* Allocate new constant_pool structure. */
6145
6146 static struct constant_pool *
6147 s390_alloc_pool (void)
6148 {
6149 struct constant_pool *pool;
6150 int i;
6151
6152 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6153 pool->next = NULL;
6154 for (i = 0; i < NR_C_MODES; i++)
6155 pool->constants[i] = NULL;
6156
6157 pool->execute = NULL;
6158 pool->label = gen_label_rtx ();
6159 pool->first_insn = NULL_RTX;
6160 pool->pool_insn = NULL_RTX;
6161 pool->insns = BITMAP_ALLOC (NULL);
6162 pool->size = 0;
6163 pool->emit_pool_after = NULL_RTX;
6164
6165 return pool;
6166 }
6167
6168 /* Create new constant pool covering instructions starting at INSN
6169 and chain it to the end of POOL_LIST. */
6170
6171 static struct constant_pool *
6172 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6173 {
6174 struct constant_pool *pool, **prev;
6175
6176 pool = s390_alloc_pool ();
6177 pool->first_insn = insn;
6178
6179 for (prev = pool_list; *prev; prev = &(*prev)->next)
6180 ;
6181 *prev = pool;
6182
6183 return pool;
6184 }
6185
6186 /* End range of instructions covered by POOL at INSN and emit
6187 placeholder insn representing the pool. */
6188
6189 static void
6190 s390_end_pool (struct constant_pool *pool, rtx insn)
6191 {
6192 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6193
6194 if (!insn)
6195 insn = get_last_insn ();
6196
6197 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6198 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6199 }
6200
6201 /* Add INSN to the list of insns covered by POOL. */
6202
6203 static void
6204 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6205 {
6206 bitmap_set_bit (pool->insns, INSN_UID (insn));
6207 }
6208
6209 /* Return pool out of POOL_LIST that covers INSN. */
6210
6211 static struct constant_pool *
6212 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6213 {
6214 struct constant_pool *pool;
6215
6216 for (pool = pool_list; pool; pool = pool->next)
6217 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6218 break;
6219
6220 return pool;
6221 }
6222
6223 /* Add constant VAL of mode MODE to the constant pool POOL. */
6224
6225 static void
6226 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6227 {
6228 struct constant *c;
6229 int i;
6230
6231 for (i = 0; i < NR_C_MODES; i++)
6232 if (constant_modes[i] == mode)
6233 break;
6234 gcc_assert (i != NR_C_MODES);
6235
6236 for (c = pool->constants[i]; c != NULL; c = c->next)
6237 if (rtx_equal_p (val, c->value))
6238 break;
6239
6240 if (c == NULL)
6241 {
6242 c = (struct constant *) xmalloc (sizeof *c);
6243 c->value = val;
6244 c->label = gen_label_rtx ();
6245 c->next = pool->constants[i];
6246 pool->constants[i] = c;
6247 pool->size += GET_MODE_SIZE (mode);
6248 }
6249 }
6250
6251 /* Return an rtx that represents the offset of X from the start of
6252 pool POOL. */
6253
6254 static rtx
6255 s390_pool_offset (struct constant_pool *pool, rtx x)
6256 {
6257 rtx label;
6258
6259 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6260 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6261 UNSPEC_POOL_OFFSET);
6262 return gen_rtx_CONST (GET_MODE (x), x);
6263 }
6264
6265 /* Find constant VAL of mode MODE in the constant pool POOL.
6266 Return an RTX describing the distance from the start of
6267 the pool to the location of the new constant. */
6268
6269 static rtx
6270 s390_find_constant (struct constant_pool *pool, rtx val,
6271 enum machine_mode mode)
6272 {
6273 struct constant *c;
6274 int i;
6275
6276 for (i = 0; i < NR_C_MODES; i++)
6277 if (constant_modes[i] == mode)
6278 break;
6279 gcc_assert (i != NR_C_MODES);
6280
6281 for (c = pool->constants[i]; c != NULL; c = c->next)
6282 if (rtx_equal_p (val, c->value))
6283 break;
6284
6285 gcc_assert (c);
6286
6287 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6288 }
6289
6290 /* Check whether INSN is an execute. Return the label_ref to its
6291 execute target template if so, NULL_RTX otherwise. */
6292
6293 static rtx
6294 s390_execute_label (rtx insn)
6295 {
6296 if (GET_CODE (insn) == INSN
6297 && GET_CODE (PATTERN (insn)) == PARALLEL
6298 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6299 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6300 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6301
6302 return NULL_RTX;
6303 }
6304
6305 /* Add execute target for INSN to the constant pool POOL. */
6306
6307 static void
6308 s390_add_execute (struct constant_pool *pool, rtx insn)
6309 {
6310 struct constant *c;
6311
6312 for (c = pool->execute; c != NULL; c = c->next)
6313 if (INSN_UID (insn) == INSN_UID (c->value))
6314 break;
6315
6316 if (c == NULL)
6317 {
6318 c = (struct constant *) xmalloc (sizeof *c);
6319 c->value = insn;
6320 c->label = gen_label_rtx ();
6321 c->next = pool->execute;
6322 pool->execute = c;
6323 pool->size += 6;
6324 }
6325 }
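/* Background note: these entries back the EXECUTE (EX) instruction, which
   runs a single out-of-line instruction template.  Only on CPUs without
   z/Architecture support do the templates need to live in the literal
   pool, and each one is accounted for with 6 bytes, the largest insn
   format.  */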
6326
6327 /* Find execute target for INSN in the constant pool POOL.
6328 Return an RTX describing the distance from the start of
6329 the pool to the location of the execute target. */
6330
6331 static rtx
6332 s390_find_execute (struct constant_pool *pool, rtx insn)
6333 {
6334 struct constant *c;
6335
6336 for (c = pool->execute; c != NULL; c = c->next)
6337 if (INSN_UID (insn) == INSN_UID (c->value))
6338 break;
6339
6340 gcc_assert (c);
6341
6342 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6343 }
6344
6345 /* For an execute INSN, extract the execute target template. */
6346
6347 static rtx
6348 s390_execute_target (rtx insn)
6349 {
6350 rtx pattern = PATTERN (insn);
6351 gcc_assert (s390_execute_label (insn));
6352
6353 if (XVECLEN (pattern, 0) == 2)
6354 {
6355 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6356 }
6357 else
6358 {
6359 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6360 int i;
6361
6362 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6363 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6364
6365 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6366 }
6367
6368 return pattern;
6369 }
6370
6371 /* Indicate that INSN cannot be duplicated. This is the case for
6372 execute insns that carry a unique label. */
6373
6374 static bool
6375 s390_cannot_copy_insn_p (rtx insn)
6376 {
6377 rtx label = s390_execute_label (insn);
6378 return label && label != const0_rtx;
6379 }
6380
6381 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6382 do not emit the pool base label. */
6383
6384 static void
6385 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6386 {
6387 struct constant *c;
6388 rtx insn = pool->pool_insn;
6389 int i;
6390
6391 /* Switch to rodata section. */
6392 if (TARGET_CPU_ZARCH)
6393 {
6394 insn = emit_insn_after (gen_pool_section_start (), insn);
6395 INSN_ADDRESSES_NEW (insn, -1);
6396 }
6397
6398 /* Ensure minimum pool alignment. */
6399 if (TARGET_CPU_ZARCH)
6400 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6401 else
6402 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6403 INSN_ADDRESSES_NEW (insn, -1);
6404
6405 /* Emit pool base label. */
6406 if (!remote_label)
6407 {
6408 insn = emit_label_after (pool->label, insn);
6409 INSN_ADDRESSES_NEW (insn, -1);
6410 }
6411
6412 /* Dump constants in descending alignment requirement order,
6413 ensuring proper alignment for every constant. */
6414 for (i = 0; i < NR_C_MODES; i++)
6415 for (c = pool->constants[i]; c; c = c->next)
6416 {
6417 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6418 rtx value = copy_rtx (c->value);
6419 if (GET_CODE (value) == CONST
6420 && GET_CODE (XEXP (value, 0)) == UNSPEC
6421 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6422 && XVECLEN (XEXP (value, 0), 0) == 1)
6423 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6424
6425 insn = emit_label_after (c->label, insn);
6426 INSN_ADDRESSES_NEW (insn, -1);
6427
6428 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6429 gen_rtvec (1, value),
6430 UNSPECV_POOL_ENTRY);
6431 insn = emit_insn_after (value, insn);
6432 INSN_ADDRESSES_NEW (insn, -1);
6433 }
6434
6435 /* Ensure minimum alignment for instructions. */
6436 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6437 INSN_ADDRESSES_NEW (insn, -1);
6438
6439 /* Output in-pool execute template insns. */
6440 for (c = pool->execute; c; c = c->next)
6441 {
6442 insn = emit_label_after (c->label, insn);
6443 INSN_ADDRESSES_NEW (insn, -1);
6444
6445 insn = emit_insn_after (s390_execute_target (c->value), insn);
6446 INSN_ADDRESSES_NEW (insn, -1);
6447 }
6448
6449 /* Switch back to previous section. */
6450 if (TARGET_CPU_ZARCH)
6451 {
6452 insn = emit_insn_after (gen_pool_section_end (), insn);
6453 INSN_ADDRESSES_NEW (insn, -1);
6454 }
6455
6456 insn = emit_barrier_after (insn);
6457 INSN_ADDRESSES_NEW (insn, -1);
6458
6459 /* Remove placeholder insn. */
6460 remove_insn (pool->pool_insn);
6461 }
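/* The emitted pool thus looks roughly like

   .LPOOL:                        (label omitted if REMOTE_LABEL)
   .LC0:   <constants, widest modes and alignments first>
       ...
           <execute templates, if any>

   preceded by an 8-byte (z/Arch) or 4-byte alignment directive and
   followed by re-alignment for instructions.  The label names above are
   only placeholders for a sketch of the resulting assembly.  */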
6462
6463 /* Free all memory used by POOL. */
6464
6465 static void
6466 s390_free_pool (struct constant_pool *pool)
6467 {
6468 struct constant *c, *next;
6469 int i;
6470
6471 for (i = 0; i < NR_C_MODES; i++)
6472 for (c = pool->constants[i]; c; c = next)
6473 {
6474 next = c->next;
6475 free (c);
6476 }
6477
6478 for (c = pool->execute; c; c = next)
6479 {
6480 next = c->next;
6481 free (c);
6482 }
6483
6484 BITMAP_FREE (pool->insns);
6485 free (pool);
6486 }
6487
6488
6489 /* Collect main literal pool. Return NULL on overflow. */
6490
6491 static struct constant_pool *
6492 s390_mainpool_start (void)
6493 {
6494 struct constant_pool *pool;
6495 rtx insn;
6496
6497 pool = s390_alloc_pool ();
6498
6499 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6500 {
6501 if (GET_CODE (insn) == INSN
6502 && GET_CODE (PATTERN (insn)) == SET
6503 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6504 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6505 {
6506 gcc_assert (!pool->pool_insn);
6507 pool->pool_insn = insn;
6508 }
6509
6510 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6511 {
6512 s390_add_execute (pool, insn);
6513 }
6514 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6515 {
6516 rtx pool_ref = NULL_RTX;
6517 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6518 if (pool_ref)
6519 {
6520 rtx constant = get_pool_constant (pool_ref);
6521 enum machine_mode mode = get_pool_mode (pool_ref);
6522 s390_add_constant (pool, constant, mode);
6523 }
6524 }
6525
6526 /* If hot/cold partitioning is enabled, we have to make sure that
6527 the literal pool is emitted in the same section where the
6528 initialization of the literal pool base pointer takes place.
6529 emit_pool_after is only used in the non-overflow case on
6530 non-z/Arch CPUs where we can emit the literal pool at the end of
6531 the function body within the text section. */
6532 if (NOTE_P (insn)
6533 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6534 && !pool->emit_pool_after)
6535 pool->emit_pool_after = PREV_INSN (insn);
6536 }
6537
6538 gcc_assert (pool->pool_insn || pool->size == 0);
6539
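/* A pool of 4096 bytes or more cannot be addressed off a single base
   register: base+displacement addresses only have a 12-bit unsigned
   displacement (0 through 4095).  Such pools are split into chunks by
   s390_chunkify_start instead.  */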
6540 if (pool->size >= 4096)
6541 {
6542 /* We're going to chunkify the pool, so remove the main
6543 pool placeholder insn. */
6544 remove_insn (pool->pool_insn);
6545
6546 s390_free_pool (pool);
6547 pool = NULL;
6548 }
6549
6550 /* If the function ends with the section where the literal pool
6551 should be emitted, set the marker to its end. */
6552 if (pool && !pool->emit_pool_after)
6553 pool->emit_pool_after = get_last_insn ();
6554
6555 return pool;
6556 }
6557
6558 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6559 Modify the current function to output the pool constants as well as
6560 the pool register setup instruction. */
6561
6562 static void
6563 s390_mainpool_finish (struct constant_pool *pool)
6564 {
6565 rtx base_reg = cfun->machine->base_reg;
6566 rtx insn;
6567
6568 /* If the pool is empty, we're done. */
6569 if (pool->size == 0)
6570 {
6571 /* We don't actually need a base register after all. */
6572 cfun->machine->base_reg = NULL_RTX;
6573
6574 if (pool->pool_insn)
6575 remove_insn (pool->pool_insn);
6576 s390_free_pool (pool);
6577 return;
6578 }
6579
6580 /* We need correct insn addresses. */
6581 shorten_branches (get_insns ());
6582
6583 /* On zSeries, we use a LARL to load the pool register. The pool is
6584 located in the .rodata section, so we emit it after the function. */
6585 if (TARGET_CPU_ZARCH)
6586 {
6587 insn = gen_main_base_64 (base_reg, pool->label);
6588 insn = emit_insn_after (insn, pool->pool_insn);
6589 INSN_ADDRESSES_NEW (insn, -1);
6590 remove_insn (pool->pool_insn);
6591
6592 insn = get_last_insn ();
6593 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6594 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6595
6596 s390_dump_pool (pool, 0);
6597 }
6598
6599 /* On S/390, if the total size of the function's code plus literal pool
6600 does not exceed 4096 bytes, we use BASR to set up a function base
6601 pointer, and emit the literal pool at the end of the function. */
6602 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6603 + pool->size + 8 /* alignment slop */ < 4096)
6604 {
6605 insn = gen_main_base_31_small (base_reg, pool->label);
6606 insn = emit_insn_after (insn, pool->pool_insn);
6607 INSN_ADDRESSES_NEW (insn, -1);
6608 remove_insn (pool->pool_insn);
6609
6610 insn = emit_label_after (pool->label, insn);
6611 INSN_ADDRESSES_NEW (insn, -1);
6612
6613 /* emit_pool_after will be set by s390_mainpool_start to the
6614 last insn of the section where the literal pool should be
6615 emitted. */
6616 insn = pool->emit_pool_after;
6617
6618 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6619 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6620
6621 s390_dump_pool (pool, 1);
6622 }
6623
6624 /* Otherwise, we emit an inline literal pool and use BASR to branch
6625 over it, setting up the pool register at the same time. */
6626 else
6627 {
6628 rtx pool_end = gen_label_rtx ();
6629
6630 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6631 insn = emit_jump_insn_after (insn, pool->pool_insn);
6632 JUMP_LABEL (insn) = pool_end;
6633 INSN_ADDRESSES_NEW (insn, -1);
6634 remove_insn (pool->pool_insn);
6635
6636 insn = emit_label_after (pool->label, insn);
6637 INSN_ADDRESSES_NEW (insn, -1);
6638
6639 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6640 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6641
6642 insn = emit_label_after (pool_end, pool->pool_insn);
6643 INSN_ADDRESSES_NEW (insn, -1);
6644
6645 s390_dump_pool (pool, 1);
6646 }
6647
6648
6649 /* Replace all literal pool references. */
6650
6651 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6652 {
6653 if (INSN_P (insn))
6654 replace_ltrel_base (&PATTERN (insn));
6655
6656 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6657 {
6658 rtx addr, pool_ref = NULL_RTX;
6659 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6660 if (pool_ref)
6661 {
6662 if (s390_execute_label (insn))
6663 addr = s390_find_execute (pool, insn);
6664 else
6665 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6666 get_pool_mode (pool_ref));
6667
6668 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6669 INSN_CODE (insn) = -1;
6670 }
6671 }
6672 }
6673
6674
6675 /* Free the pool. */
6676 s390_free_pool (pool);
6677 }
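/* For the z/Arch case above, the generated code boils down to something
   like

       larl  %r13,.LPOOL          ; pool base loaded PC-relative
       ...                        ; body addresses constants as D(%r13)

   with .LPOOL placed in .rodata after the function, while the two 31-bit
   variants use BASR-based sequences and keep the pool in the text
   section.  %r13 and .LPOOL are only examples; the base register is
   whatever s390_init_frame_layout selected.  */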
6678
6679 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6680 We have decided we cannot use this pool, so revert all changes
6681 to the current function that were done by s390_mainpool_start. */
6682 static void
6683 s390_mainpool_cancel (struct constant_pool *pool)
6684 {
6685 /* We didn't actually change the instruction stream, so simply
6686 free the pool memory. */
6687 s390_free_pool (pool);
6688 }
6689
6690
6691 /* Chunkify the literal pool. */
6692
6693 #define S390_POOL_CHUNK_MIN 0xc00
6694 #define S390_POOL_CHUNK_MAX 0xe00
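/* These targets (0xc00 = 3072 and 0xe00 = 3584 bytes) keep every chunk
   comfortably below the 4096-byte displacement limit, which leaves slack
   for alignment padding and for the base reload insns inserted later,
   while avoiding pointlessly small chunks.  */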
6695
6696 static struct constant_pool *
6697 s390_chunkify_start (void)
6698 {
6699 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6700 int extra_size = 0;
6701 bitmap far_labels;
6702 rtx pending_ltrel = NULL_RTX;
6703 rtx insn;
6704
6705 rtx (*gen_reload_base) (rtx, rtx) =
6706 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6707
6708
6709 /* We need correct insn addresses. */
6710
6711 shorten_branches (get_insns ());
6712
6713 /* Scan all insns and move literals to pool chunks. */
6714
6715 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6716 {
6717 bool section_switch_p = false;
6718
6719 /* Check for pending LTREL_BASE. */
6720 if (INSN_P (insn))
6721 {
6722 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6723 if (ltrel_base)
6724 {
6725 gcc_assert (ltrel_base == pending_ltrel);
6726 pending_ltrel = NULL_RTX;
6727 }
6728 }
6729
6730 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6731 {
6732 if (!curr_pool)
6733 curr_pool = s390_start_pool (&pool_list, insn);
6734
6735 s390_add_execute (curr_pool, insn);
6736 s390_add_pool_insn (curr_pool, insn);
6737 }
6738 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6739 {
6740 rtx pool_ref = NULL_RTX;
6741 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6742 if (pool_ref)
6743 {
6744 rtx constant = get_pool_constant (pool_ref);
6745 enum machine_mode mode = get_pool_mode (pool_ref);
6746
6747 if (!curr_pool)
6748 curr_pool = s390_start_pool (&pool_list, insn);
6749
6750 s390_add_constant (curr_pool, constant, mode);
6751 s390_add_pool_insn (curr_pool, insn);
6752
6753 /* Don't split the pool chunk between a LTREL_OFFSET load
6754 and the corresponding LTREL_BASE. */
6755 if (GET_CODE (constant) == CONST
6756 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6757 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6758 {
6759 gcc_assert (!pending_ltrel);
6760 pending_ltrel = pool_ref;
6761 }
6762 }
6763 }
6764
6765 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6766 {
6767 if (curr_pool)
6768 s390_add_pool_insn (curr_pool, insn);
6769 /* An LTREL_BASE must follow within the same basic block. */
6770 gcc_assert (!pending_ltrel);
6771 }
6772
6773 if (NOTE_P (insn))
6774 switch (NOTE_KIND (insn))
6775 {
6776 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6777 section_switch_p = true;
6778 break;
6779 case NOTE_INSN_VAR_LOCATION:
6780 case NOTE_INSN_CALL_ARG_LOCATION:
6781 continue;
6782 default:
6783 break;
6784 }
6785
6786 if (!curr_pool
6787 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6788 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6789 continue;
6790
6791 if (TARGET_CPU_ZARCH)
6792 {
6793 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6794 continue;
6795
6796 s390_end_pool (curr_pool, NULL_RTX);
6797 curr_pool = NULL;
6798 }
6799 else
6800 {
6801 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6802 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6803 + extra_size;
6804
6805 /* We will later have to insert base register reload insns.
6806 Those will have an effect on code size, which we need to
6807 consider here. This calculation makes rather pessimistic
6808 worst-case assumptions. */
6809 if (GET_CODE (insn) == CODE_LABEL)
6810 extra_size += 6;
6811
6812 if (chunk_size < S390_POOL_CHUNK_MIN
6813 && curr_pool->size < S390_POOL_CHUNK_MIN
6814 && !section_switch_p)
6815 continue;
6816
6817 /* Pool chunks can only be inserted after BARRIERs ... */
6818 if (GET_CODE (insn) == BARRIER)
6819 {
6820 s390_end_pool (curr_pool, insn);
6821 curr_pool = NULL;
6822 extra_size = 0;
6823 }
6824
6825 /* ... so if we don't find one in time, create one. */
6826 else if (chunk_size > S390_POOL_CHUNK_MAX
6827 || curr_pool->size > S390_POOL_CHUNK_MAX
6828 || section_switch_p)
6829 {
6830 rtx label, jump, barrier, next, prev;
6831
6832 if (!section_switch_p)
6833 {
6834 /* We can insert the barrier only after a 'real' insn. */
6835 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6836 continue;
6837 if (get_attr_length (insn) == 0)
6838 continue;
6839 /* Don't separate LTREL_BASE from the corresponding
6840 LTREL_OFFSET load. */
6841 if (pending_ltrel)
6842 continue;
6843 next = insn;
6844 do
6845 {
6846 insn = next;
6847 next = NEXT_INSN (insn);
6848 }
6849 while (next
6850 && NOTE_P (next)
6851 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
6852 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
6853 }
6854 else
6855 {
6856 gcc_assert (!pending_ltrel);
6857
6858 /* The old pool has to end before the section switch
6859 note in order to make it part of the current
6860 section. */
6861 insn = PREV_INSN (insn);
6862 }
6863
6864 label = gen_label_rtx ();
6865 prev = insn;
6866 if (prev && NOTE_P (prev))
6867 prev = prev_nonnote_insn (prev);
6868 if (prev)
6869 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
6870 INSN_LOCATOR (prev));
6871 else
6872 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
6873 barrier = emit_barrier_after (jump);
6874 insn = emit_label_after (label, barrier);
6875 JUMP_LABEL (jump) = label;
6876 LABEL_NUSES (label) = 1;
6877
6878 INSN_ADDRESSES_NEW (jump, -1);
6879 INSN_ADDRESSES_NEW (barrier, -1);
6880 INSN_ADDRESSES_NEW (insn, -1);
6881
6882 s390_end_pool (curr_pool, barrier);
6883 curr_pool = NULL;
6884 extra_size = 0;
6885 }
6886 }
6887 }
6888
6889 if (curr_pool)
6890 s390_end_pool (curr_pool, NULL_RTX);
6891 gcc_assert (!pending_ltrel);
6892
6893 /* Find all labels that are branched into
6894 from an insn belonging to a different chunk. */
6895
6896 far_labels = BITMAP_ALLOC (NULL);
6897
6898 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6899 {
6900 /* Labels marked with LABEL_PRESERVE_P can be the target
6901 of non-local jumps, so we have to mark them.
6902 The same holds for named labels.
6903
6904 Don't do that, however, if it is the label before
6905 a jump table. */
6906
6907 if (GET_CODE (insn) == CODE_LABEL
6908 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6909 {
6910 rtx vec_insn = next_real_insn (insn);
6911 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6912 PATTERN (vec_insn) : NULL_RTX;
6913 if (!vec_pat
6914 || !(GET_CODE (vec_pat) == ADDR_VEC
6915 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6916 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6917 }
6918
6919 /* If we have a direct jump (conditional or unconditional)
6920 or a casesi jump, check all potential targets. */
6921 else if (GET_CODE (insn) == JUMP_INSN)
6922 {
6923 rtx pat = PATTERN (insn);
6924 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6925 pat = XVECEXP (pat, 0, 0);
6926
6927 if (GET_CODE (pat) == SET)
6928 {
6929 rtx label = JUMP_LABEL (insn);
6930 if (label)
6931 {
6932 if (s390_find_pool (pool_list, label)
6933 != s390_find_pool (pool_list, insn))
6934 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6935 }
6936 }
6937 else if (GET_CODE (pat) == PARALLEL
6938 && XVECLEN (pat, 0) == 2
6939 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6940 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6941 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6942 {
6943 /* Find the jump table used by this casesi jump. */
6944 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6945 rtx vec_insn = next_real_insn (vec_label);
6946 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6947 PATTERN (vec_insn) : NULL_RTX;
6948 if (vec_pat
6949 && (GET_CODE (vec_pat) == ADDR_VEC
6950 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6951 {
6952 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6953
6954 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6955 {
6956 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6957
6958 if (s390_find_pool (pool_list, label)
6959 != s390_find_pool (pool_list, insn))
6960 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6961 }
6962 }
6963 }
6964 }
6965 }
6966
6967 /* Insert base register reload insns before every pool. */
6968
6969 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6970 {
6971 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6972 curr_pool->label);
6973 rtx insn = curr_pool->first_insn;
6974 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6975 }
6976
6977 /* Insert base register reload insns at every far label. */
6978
6979 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6980 if (GET_CODE (insn) == CODE_LABEL
6981 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6982 {
6983 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6984 if (pool)
6985 {
6986 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6987 pool->label);
6988 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6989 }
6990 }
6991
6992
6993 BITMAP_FREE (far_labels);
6994
6995
6996 /* Recompute insn addresses. */
6997
6998 init_insn_lengths ();
6999 shorten_branches (get_insns ());
7000
7001 return pool_list;
7002 }
7003
7004 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7005 After we have decided to use this list, finish implementing
7006 all changes to the current function as required. */
7007
7008 static void
7009 s390_chunkify_finish (struct constant_pool *pool_list)
7010 {
7011 struct constant_pool *curr_pool = NULL;
7012 rtx insn;
7013
7014
7015 /* Replace all literal pool references. */
7016
7017 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7018 {
7019 if (INSN_P (insn))
7020 replace_ltrel_base (&PATTERN (insn));
7021
7022 curr_pool = s390_find_pool (pool_list, insn);
7023 if (!curr_pool)
7024 continue;
7025
7026 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
7027 {
7028 rtx addr, pool_ref = NULL_RTX;
7029 find_constant_pool_ref (PATTERN (insn), &pool_ref);
7030 if (pool_ref)
7031 {
7032 if (s390_execute_label (insn))
7033 addr = s390_find_execute (curr_pool, insn);
7034 else
7035 addr = s390_find_constant (curr_pool,
7036 get_pool_constant (pool_ref),
7037 get_pool_mode (pool_ref));
7038
7039 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7040 INSN_CODE (insn) = -1;
7041 }
7042 }
7043 }
7044
7045 /* Dump out all literal pools. */
7046
7047 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7048 s390_dump_pool (curr_pool, 0);
7049
7050 /* Free pool list. */
7051
7052 while (pool_list)
7053 {
7054 struct constant_pool *next = pool_list->next;
7055 s390_free_pool (pool_list);
7056 pool_list = next;
7057 }
7058 }
7059
7060 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7061 We have decided we cannot use this list, so revert all changes
7062 to the current function that were done by s390_chunkify_start. */
7063
7064 static void
7065 s390_chunkify_cancel (struct constant_pool *pool_list)
7066 {
7067 struct constant_pool *curr_pool = NULL;
7068 rtx insn;
7069
7070 /* Remove all pool placeholder insns. */
7071
7072 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7073 {
7074 /* Did we insert an extra barrier? Remove it. */
7075 rtx barrier = PREV_INSN (curr_pool->pool_insn);
7076 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
7077 rtx label = NEXT_INSN (curr_pool->pool_insn);
7078
7079 if (jump && GET_CODE (jump) == JUMP_INSN
7080 && barrier && GET_CODE (barrier) == BARRIER
7081 && label && GET_CODE (label) == CODE_LABEL
7082 && GET_CODE (PATTERN (jump)) == SET
7083 && SET_DEST (PATTERN (jump)) == pc_rtx
7084 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7085 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
7086 {
7087 remove_insn (jump);
7088 remove_insn (barrier);
7089 remove_insn (label);
7090 }
7091
7092 remove_insn (curr_pool->pool_insn);
7093 }
7094
7095 /* Remove all base register reload insns. */
7096
7097 for (insn = get_insns (); insn; )
7098 {
7099 rtx next_insn = NEXT_INSN (insn);
7100
7101 if (GET_CODE (insn) == INSN
7102 && GET_CODE (PATTERN (insn)) == SET
7103 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
7104 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
7105 remove_insn (insn);
7106
7107 insn = next_insn;
7108 }
7109
7110 /* Free pool list. */
7111
7112 while (pool_list)
7113 {
7114 struct constant_pool *next = pool_list->next;
7115 s390_free_pool (pool_list);
7116 pool_list = next;
7117 }
7118 }
7119
7120 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7121
7122 void
7123 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7124 {
7125 REAL_VALUE_TYPE r;
7126
7127 switch (GET_MODE_CLASS (mode))
7128 {
7129 case MODE_FLOAT:
7130 case MODE_DECIMAL_FLOAT:
7131 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7132
7133 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7134 assemble_real (r, mode, align);
7135 break;
7136
7137 case MODE_INT:
7138 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7139 mark_symbol_refs_as_used (exp);
7140 break;
7141
7142 default:
7143 gcc_unreachable ();
7144 }
7145 }
7146
7147
7148 /* Return an RTL expression representing the value of the return address
7149 for the frame COUNT steps up from the current frame. FRAME is the
7150 frame pointer of that frame. */
7151
7152 rtx
7153 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7154 {
7155 int offset;
7156 rtx addr;
7157
7158 /* Without backchain, we fail for all but the current frame. */
7159
7160 if (!TARGET_BACKCHAIN && count > 0)
7161 return NULL_RTX;
7162
7163 /* For the current frame, we need to make sure the initial
7164 value of RETURN_REGNUM is actually saved. */
7165
7166 if (count == 0)
7167 {
7168 /* On non-z architectures branch splitting could overwrite r14. */
7169 if (TARGET_CPU_ZARCH)
7170 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7171 else
7172 {
7173 cfun_frame_layout.save_return_addr_p = true;
7174 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7175 }
7176 }
7177
7178 if (TARGET_PACKED_STACK)
7179 offset = -2 * UNITS_PER_LONG;
7180 else
7181 offset = RETURN_REGNUM * UNITS_PER_LONG;
7182
7183 addr = plus_constant (Pmode, frame, offset);
7184 addr = memory_address (Pmode, addr);
7185 return gen_rtx_MEM (Pmode, addr);
7186 }
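/* As a concrete example for the default (non-packed) stack layout on
   64 bit: RETURN_REGNUM is 14 and UNITS_PER_LONG is 8, so the return
   address of an outer frame is fetched from FRAME + 112, i.e. the %r14
   slot in that frame's register save area (FRAME + 56 on 31 bit).  */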
7187
7188 /* Return an RTL expression representing the back chain stored in
7189 the current stack frame. */
7190
7191 rtx
7192 s390_back_chain_rtx (void)
7193 {
7194 rtx chain;
7195
7196 gcc_assert (TARGET_BACKCHAIN);
7197
7198 if (TARGET_PACKED_STACK)
7199 chain = plus_constant (Pmode, stack_pointer_rtx,
7200 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7201 else
7202 chain = stack_pointer_rtx;
7203
7204 chain = gen_rtx_MEM (Pmode, chain);
7205 return chain;
7206 }
7207
7208 /* Find the first call-clobbered register unused in the current function.
7209 Such a register could be used as the base register in a leaf function
7210 or for holding the return address before the epilogue. */
7211
7212 static int
7213 find_unused_clobbered_reg (void)
7214 {
7215 int i;
7216 for (i = 0; i < 6; i++)
7217 if (!df_regs_ever_live_p (i))
7218 return i;
7219 return 0;
7220 }
7221
7222
7223 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7224 clobbered hard regs in SETREG. */
7225
7226 static void
7227 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7228 {
7229 int *regs_ever_clobbered = (int *)data;
7230 unsigned int i, regno;
7231 enum machine_mode mode = GET_MODE (setreg);
7232
7233 if (GET_CODE (setreg) == SUBREG)
7234 {
7235 rtx inner = SUBREG_REG (setreg);
7236 if (!GENERAL_REG_P (inner))
7237 return;
7238 regno = subreg_regno (setreg);
7239 }
7240 else if (GENERAL_REG_P (setreg))
7241 regno = REGNO (setreg);
7242 else
7243 return;
7244
7245 for (i = regno;
7246 i < regno + HARD_REGNO_NREGS (regno, mode);
7247 i++)
7248 regs_ever_clobbered[i] = 1;
7249 }
7250
7251 /* Walks through all basic blocks of the current function looking
7252 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7253 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7254 each of those regs. */
7255
7256 static void
7257 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7258 {
7259 basic_block cur_bb;
7260 rtx cur_insn;
7261 unsigned int i;
7262
7263 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7264
7265 /* For non-leaf functions we have to consider all call clobbered regs to be
7266 clobbered. */
7267 if (!crtl->is_leaf)
7268 {
7269 for (i = 0; i < 16; i++)
7270 regs_ever_clobbered[i] = call_really_used_regs[i];
7271 }
7272
7273 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7274 this work is done by liveness analysis (mark_regs_live_at_end).
7275 Special care is needed for functions containing landing pads. Landing pads
7276 may use the eh registers, but the code which sets these registers is not
7277 contained in that function. Hence s390_regs_ever_clobbered is not able to
7278 deal with this automatically. */
7279 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7280 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7281 if (crtl->calls_eh_return
7282 || (cfun->machine->has_landing_pad_p
7283 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7284 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7285
7286 /* For nonlocal gotos all call-saved registers have to be saved.
7287 This flag is also set for the unwinding code in libgcc.
7288 See expand_builtin_unwind_init. For regs_ever_live this is done by
7289 reload. */
7290 if (cfun->has_nonlocal_label)
7291 for (i = 0; i < 16; i++)
7292 if (!call_really_used_regs[i])
7293 regs_ever_clobbered[i] = 1;
7294
7295 FOR_EACH_BB (cur_bb)
7296 {
7297 FOR_BB_INSNS (cur_bb, cur_insn)
7298 {
7299 if (INSN_P (cur_insn))
7300 note_stores (PATTERN (cur_insn),
7301 s390_reg_clobbered_rtx,
7302 regs_ever_clobbered);
7303 }
7304 }
7305 }
7306
7307 /* Determine the frame area which actually has to be accessed
7308 in the function epilogue. The values are stored at the
7309 given pointers AREA_BOTTOM (the lowest stack address used)
7310 and AREA_TOP (the address of the first item which does
7311 not belong to the stack frame). */
7312
7313 static void
7314 s390_frame_area (int *area_bottom, int *area_top)
7315 {
7316 int b, t;
7317 int i;
7318
7319 b = INT_MAX;
7320 t = INT_MIN;
7321
7322 if (cfun_frame_layout.first_restore_gpr != -1)
7323 {
7324 b = (cfun_frame_layout.gprs_offset
7325 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7326 t = b + (cfun_frame_layout.last_restore_gpr
7327 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7328 }
7329
7330 if (TARGET_64BIT && cfun_save_high_fprs_p)
7331 {
7332 b = MIN (b, cfun_frame_layout.f8_offset);
7333 t = MAX (t, (cfun_frame_layout.f8_offset
7334 + cfun_frame_layout.high_fprs * 8));
7335 }
7336
7337 if (!TARGET_64BIT)
7338 for (i = 2; i < 4; i++)
7339 if (cfun_fpr_bit_p (i))
7340 {
7341 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7342 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7343 }
7344
7345 *area_bottom = b;
7346 *area_top = t;
7347 }
7348
7349 /* Fill cfun->machine with info about register usage of current function.
7350 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7351
7352 static void
7353 s390_register_info (int clobbered_regs[])
7354 {
7355 int i, j;
7356
7357 /* fprs 8 - 15 are call-saved in the 64-bit ABI. */
7358 cfun_frame_layout.fpr_bitmap = 0;
7359 cfun_frame_layout.high_fprs = 0;
7360 if (TARGET_64BIT)
7361 for (i = 24; i < 32; i++)
7362 if (df_regs_ever_live_p (i) && !global_regs[i])
7363 {
7364 cfun_set_fpr_bit (i - 16);
7365 cfun_frame_layout.high_fprs++;
7366 }
7367
7368 /* Find first and last gpr to be saved. We trust regs_ever_live
7369 data, except that we don't save and restore global registers.
7370
7371 Also, all registers with special meaning to the compiler need
7372 to be handled specially. */
7373
7374 s390_regs_ever_clobbered (clobbered_regs);
7375
7376 for (i = 0; i < 16; i++)
7377 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7378
7379 if (frame_pointer_needed)
7380 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7381
7382 if (flag_pic)
7383 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7384 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7385
7386 clobbered_regs[BASE_REGNUM]
7387 |= (cfun->machine->base_reg
7388 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7389
7390 clobbered_regs[RETURN_REGNUM]
7391 |= (!crtl->is_leaf
7392 || TARGET_TPF_PROFILING
7393 || cfun->machine->split_branches_pending_p
7394 || cfun_frame_layout.save_return_addr_p
7395 || crtl->calls_eh_return
7396 || cfun->stdarg);
7397
7398 clobbered_regs[STACK_POINTER_REGNUM]
7399 |= (!crtl->is_leaf
7400 || TARGET_TPF_PROFILING
7401 || cfun_save_high_fprs_p
7402 || get_frame_size () > 0
7403 || cfun->calls_alloca
7404 || cfun->stdarg);
7405
7406 for (i = 6; i < 16; i++)
7407 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7408 break;
7409 for (j = 15; j > i; j--)
7410 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7411 break;
7412
7413 if (i == 16)
7414 {
7415 /* Nothing to save/restore. */
7416 cfun_frame_layout.first_save_gpr_slot = -1;
7417 cfun_frame_layout.last_save_gpr_slot = -1;
7418 cfun_frame_layout.first_save_gpr = -1;
7419 cfun_frame_layout.first_restore_gpr = -1;
7420 cfun_frame_layout.last_save_gpr = -1;
7421 cfun_frame_layout.last_restore_gpr = -1;
7422 }
7423 else
7424 {
7425 /* Save slots for gprs from i to j. */
7426 cfun_frame_layout.first_save_gpr_slot = i;
7427 cfun_frame_layout.last_save_gpr_slot = j;
7428
7429 for (i = cfun_frame_layout.first_save_gpr_slot;
7430 i < cfun_frame_layout.last_save_gpr_slot + 1;
7431 i++)
7432 if (clobbered_regs[i])
7433 break;
7434
7435 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7436 if (clobbered_regs[j])
7437 break;
7438
7439 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7440 {
7441 /* Nothing to save/restore. */
7442 cfun_frame_layout.first_save_gpr = -1;
7443 cfun_frame_layout.first_restore_gpr = -1;
7444 cfun_frame_layout.last_save_gpr = -1;
7445 cfun_frame_layout.last_restore_gpr = -1;
7446 }
7447 else
7448 {
7449 /* Save / Restore from gpr i to j. */
7450 cfun_frame_layout.first_save_gpr = i;
7451 cfun_frame_layout.first_restore_gpr = i;
7452 cfun_frame_layout.last_save_gpr = j;
7453 cfun_frame_layout.last_restore_gpr = j;
7454 }
7455 }
7456
7457 if (cfun->stdarg)
7458 {
7459 /* Varargs functions need to save gprs 2 to 6. */
7460 if (cfun->va_list_gpr_size
7461 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7462 {
7463 int min_gpr = crtl->args.info.gprs;
7464 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7465 if (max_gpr > GP_ARG_NUM_REG)
7466 max_gpr = GP_ARG_NUM_REG;
7467
7468 if (cfun_frame_layout.first_save_gpr == -1
7469 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7470 {
7471 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7472 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7473 }
7474
7475 if (cfun_frame_layout.last_save_gpr == -1
7476 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7477 {
7478 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7479 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7480 }
7481 }
7482
7483 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7484 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7485 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7486 {
7487 int min_fpr = crtl->args.info.fprs;
7488 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7489 if (max_fpr > FP_ARG_NUM_REG)
7490 max_fpr = FP_ARG_NUM_REG;
7491
7492 /* ??? This is currently required to ensure proper location
7493 of the fpr save slots within the va_list save area. */
7494 if (TARGET_PACKED_STACK)
7495 min_fpr = 0;
7496
7497 for (i = min_fpr; i < max_fpr; i++)
7498 cfun_set_fpr_bit (i);
7499 }
7500 }
7501
7502 if (!TARGET_64BIT)
7503 for (i = 2; i < 4; i++)
7504 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7505 cfun_set_fpr_bit (i);
7506 }
7507
7508 /* Fill cfun->machine with info about frame of current function. */
7509
7510 static void
7511 s390_frame_info (void)
7512 {
7513 int i;
7514
7515 cfun_frame_layout.frame_size = get_frame_size ();
7516 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7517 fatal_error ("total size of local variables exceeds architecture limit");
7518
7519 if (!TARGET_PACKED_STACK)
7520 {
7521 cfun_frame_layout.backchain_offset = 0;
7522 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7523 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7524 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7525 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7526 * UNITS_PER_LONG);
7527 }
7528 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7529 {
7530 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7531 - UNITS_PER_LONG);
7532 cfun_frame_layout.gprs_offset
7533 = (cfun_frame_layout.backchain_offset
7534 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7535 * UNITS_PER_LONG);
7536
7537 if (TARGET_64BIT)
7538 {
7539 cfun_frame_layout.f4_offset
7540 = (cfun_frame_layout.gprs_offset
7541 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7542
7543 cfun_frame_layout.f0_offset
7544 = (cfun_frame_layout.f4_offset
7545 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7546 }
7547 else
7548 {
7549 /* On 31 bit we have to take care of the alignment of the
7550 floating point regs to provide the fastest access. */
7551 cfun_frame_layout.f0_offset
7552 = ((cfun_frame_layout.gprs_offset
7553 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7554 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7555
7556 cfun_frame_layout.f4_offset
7557 = (cfun_frame_layout.f0_offset
7558 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7559 }
7560 }
7561 else /* no backchain */
7562 {
7563 cfun_frame_layout.f4_offset
7564 = (STACK_POINTER_OFFSET
7565 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7566
7567 cfun_frame_layout.f0_offset
7568 = (cfun_frame_layout.f4_offset
7569 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7570
7571 cfun_frame_layout.gprs_offset
7572 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7573 }
7574
7575 if (crtl->is_leaf
7576 && !TARGET_TPF_PROFILING
7577 && cfun_frame_layout.frame_size == 0
7578 && !cfun_save_high_fprs_p
7579 && !cfun->calls_alloca
7580 && !cfun->stdarg)
7581 return;
7582
7583 if (!TARGET_PACKED_STACK)
7584 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7585 + crtl->outgoing_args_size
7586 + cfun_frame_layout.high_fprs * 8);
7587 else
7588 {
7589 if (TARGET_BACKCHAIN)
7590 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7591
7592 /* No alignment trouble here because f8-f15 are only saved under
7593 64 bit. */
7594 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7595 cfun_frame_layout.f4_offset),
7596 cfun_frame_layout.gprs_offset)
7597 - cfun_frame_layout.high_fprs * 8);
7598
7599 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7600
7601 for (i = 0; i < 8; i++)
7602 if (cfun_fpr_bit_p (i))
7603 cfun_frame_layout.frame_size += 8;
7604
7605 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7606
7607 /* If, under 31 bit, an odd number of gprs has to be saved, we have to
7608 adjust the frame size to sustain the 8-byte alignment of stack frames. */
7609 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7610 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7611 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7612
7613 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7614 }
7615 }
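/* Worked example for the default (non-packed) 64-bit layout: a function
   with 40 bytes of locals, no outgoing stack arguments and two of the
   call-saved FPRs f8-f15 live ends up with

       frame_size = 40 + STACK_POINTER_OFFSET (160) + 0 + 2 * 8 = 216

   The figures are only illustrative.  */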
7616
7617 /* Generate frame layout. Fills in register and frame data for the current
7618 function in cfun->machine. This routine can be called multiple times;
7619 it will re-do the complete frame layout every time. */
7620
7621 static void
7622 s390_init_frame_layout (void)
7623 {
7624 HOST_WIDE_INT frame_size;
7625 int base_used;
7626 int clobbered_regs[16];
7627
7628 /* On S/390 machines, we may need to perform branch splitting, which
7629 will require both base and return address register. We have no
7630 choice but to assume we're going to need them until right at the
7631 end of the machine dependent reorg phase. */
7632 if (!TARGET_CPU_ZARCH)
7633 cfun->machine->split_branches_pending_p = true;
7634
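/* The register and frame info are iterated to a fixed point: a large
   frame size can force use of the base register (see base_used below),
   using the base register adds a saved register, and that in turn
   changes the frame size again.  */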
7635 do
7636 {
7637 frame_size = cfun_frame_layout.frame_size;
7638
7639 /* Try to predict whether we'll need the base register. */
7640 base_used = cfun->machine->split_branches_pending_p
7641 || crtl->uses_const_pool
7642 || (!DISP_IN_RANGE (frame_size)
7643 && !CONST_OK_FOR_K (frame_size));
7644
7645 /* Decide which register to use as literal pool base. In small
7646 leaf functions, try to use an unused call-clobbered register
7647 as base register to avoid save/restore overhead. */
7648 if (!base_used)
7649 cfun->machine->base_reg = NULL_RTX;
7650 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
7651 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7652 else
7653 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7654
7655 s390_register_info (clobbered_regs);
7656 s390_frame_info ();
7657 }
7658 while (frame_size != cfun_frame_layout.frame_size);
7659 }
7660
7661 /* Update frame layout. Recompute actual register save data based on
7662 current info and update regs_ever_live for the special registers.
7663 May be called multiple times, but may never cause *more* registers
7664 to be saved than s390_init_frame_layout allocated room for. */
7665
7666 static void
7667 s390_update_frame_layout (void)
7668 {
7669 int clobbered_regs[16];
7670
7671 s390_register_info (clobbered_regs);
7672
7673 df_set_regs_ever_live (BASE_REGNUM,
7674 clobbered_regs[BASE_REGNUM] ? true : false);
7675 df_set_regs_ever_live (RETURN_REGNUM,
7676 clobbered_regs[RETURN_REGNUM] ? true : false);
7677 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7678 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7679
7680 if (cfun->machine->base_reg)
7681 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7682 }
7683
7684 /* Return true if it is legal to put a value with MODE into REGNO. */
7685
7686 bool
7687 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7688 {
7689 switch (REGNO_REG_CLASS (regno))
7690 {
7691 case FP_REGS:
7692 if (REGNO_PAIR_OK (regno, mode))
7693 {
7694 if (mode == SImode || mode == DImode)
7695 return true;
7696
7697 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7698 return true;
7699 }
7700 break;
7701 case ADDR_REGS:
7702 if (FRAME_REGNO_P (regno) && mode == Pmode)
7703 return true;
7704
7705 /* fallthrough */
7706 case GENERAL_REGS:
7707 if (REGNO_PAIR_OK (regno, mode))
7708 {
7709 if (TARGET_ZARCH
7710 || (mode != TFmode && mode != TCmode && mode != TDmode))
7711 return true;
7712 }
7713 break;
7714 case CC_REGS:
7715 if (GET_MODE_CLASS (mode) == MODE_CC)
7716 return true;
7717 break;
7718 case ACCESS_REGS:
7719 if (REGNO_PAIR_OK (regno, mode))
7720 {
7721 if (mode == SImode || mode == Pmode)
7722 return true;
7723 }
7724 break;
7725 default:
7726 return false;
7727 }
7728
7729 return false;
7730 }
7731
7732 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7733
7734 bool
7735 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7736 {
7737 /* Once we've decided upon a register to use as base register, it must
7738 no longer be used for any other purpose. */
7739 if (cfun->machine->base_reg)
7740 if (REGNO (cfun->machine->base_reg) == old_reg
7741 || REGNO (cfun->machine->base_reg) == new_reg)
7742 return false;
7743
7744 return true;
7745 }
7746
7747 /* Maximum number of registers to represent a value of mode MODE
7748 in a register of class RCLASS. */
7749
7750 int
7751 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7752 {
7753 switch (rclass)
7754 {
7755 case FP_REGS:
7756 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7757 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7758 else
7759 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7760 case ACCESS_REGS:
7761 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7762 default:
7763 break;
7764 }
7765 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7766 }
7767
7768 /* Return true if register FROM can be eliminated via register TO. */
7769
7770 static bool
7771 s390_can_eliminate (const int from, const int to)
7772 {
7773 /* On zSeries machines, we have not marked the base register as fixed.
7774 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7775 If a function requires the base register, we say here that this
7776 elimination cannot be performed. This will cause reload to free
7777 up the base register (as if it were fixed). On the other hand,
7778 if the current function does *not* require the base register, we
7779 say here the elimination succeeds, which in turn allows reload
7780 to allocate the base register for any other purpose. */
7781 if (from == BASE_REGNUM && to == BASE_REGNUM)
7782 {
7783 if (TARGET_CPU_ZARCH)
7784 {
7785 s390_init_frame_layout ();
7786 return cfun->machine->base_reg == NULL_RTX;
7787 }
7788
7789 return false;
7790 }
7791
7792 /* Everything else must point into the stack frame. */
7793 gcc_assert (to == STACK_POINTER_REGNUM
7794 || to == HARD_FRAME_POINTER_REGNUM);
7795
7796 gcc_assert (from == FRAME_POINTER_REGNUM
7797 || from == ARG_POINTER_REGNUM
7798 || from == RETURN_ADDRESS_POINTER_REGNUM);
7799
7800 /* Make sure we actually saved the return address. */
7801 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7802 if (!crtl->calls_eh_return
7803 && !cfun->stdarg
7804 && !cfun_frame_layout.save_return_addr_p)
7805 return false;
7806
7807 return true;
7808 }
7809
7810 /* Return the offset between registers FROM and TO immediately after the prologue. */
7811
7812 HOST_WIDE_INT
7813 s390_initial_elimination_offset (int from, int to)
7814 {
7815 HOST_WIDE_INT offset;
7816 int index;
7817
7818 /* ??? Why are we called for non-eliminable pairs? */
7819 if (!s390_can_eliminate (from, to))
7820 return 0;
7821
7822 switch (from)
7823 {
7824 case FRAME_POINTER_REGNUM:
7825 offset = (get_frame_size()
7826 + STACK_POINTER_OFFSET
7827 + crtl->outgoing_args_size);
7828 break;
7829
7830 case ARG_POINTER_REGNUM:
7831 s390_init_frame_layout ();
7832 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7833 break;
7834
7835 case RETURN_ADDRESS_POINTER_REGNUM:
7836 s390_init_frame_layout ();
7837 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7838 gcc_assert (index >= 0);
7839 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7840 offset += index * UNITS_PER_LONG;
7841 break;
7842
7843 case BASE_REGNUM:
7844 offset = 0;
7845 break;
7846
7847 default:
7848 gcc_unreachable ();
7849 }
7850
7851 return offset;
7852 }
7853
7854 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7855 to register BASE. Return generated insn. */
7856
7857 static rtx
7858 save_fpr (rtx base, int offset, int regnum)
7859 {
7860 rtx addr;
7861 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7862
7863 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7864 set_mem_alias_set (addr, get_varargs_alias_set ());
7865 else
7866 set_mem_alias_set (addr, get_frame_alias_set ());
7867
7868 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7869 }
7870
7871 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7872 to register BASE. Return generated insn. */
7873
7874 static rtx
7875 restore_fpr (rtx base, int offset, int regnum)
7876 {
7877 rtx addr;
7878 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
7879 set_mem_alias_set (addr, get_frame_alias_set ());
7880
7881 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7882 }
7883
7884 /* Return true if REGNO is a global register, but not one
7885 of the special ones that need to be saved/restored anyway. */
7886
7887 static inline bool
7888 global_not_special_regno_p (int regno)
7889 {
7890 return (global_regs[regno]
7891 /* These registers are special and need to be
7892 restored in any case. */
7893 && !(regno == STACK_POINTER_REGNUM
7894 || regno == RETURN_REGNUM
7895 || regno == BASE_REGNUM
7896 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7897 }
7898
7899 /* Generate insn to save registers FIRST to LAST into
7900 the register save area located at offset OFFSET
7901 relative to register BASE. */
7902
7903 static rtx
7904 save_gprs (rtx base, int offset, int first, int last)
7905 {
7906 rtx addr, insn, note;
7907 int i;
7908
7909 addr = plus_constant (Pmode, base, offset);
7910 addr = gen_rtx_MEM (Pmode, addr);
7911
7912 set_mem_alias_set (addr, get_frame_alias_set ());
7913
7914 /* Special-case single register. */
7915 if (first == last)
7916 {
7917 if (TARGET_64BIT)
7918 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7919 else
7920 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7921
7922 if (!global_not_special_regno_p (first))
7923 RTX_FRAME_RELATED_P (insn) = 1;
7924 return insn;
7925 }
7926
7927
7928 insn = gen_store_multiple (addr,
7929 gen_rtx_REG (Pmode, first),
7930 GEN_INT (last - first + 1));
7931
7932 if (first <= 6 && cfun->stdarg)
7933 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7934 {
7935 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7936
7937 if (first + i <= 6)
7938 set_mem_alias_set (mem, get_varargs_alias_set ());
7939 }
7940
7941 /* We need to set the FRAME_RELATED flag on all SETs
7942 inside the store-multiple pattern.
7943
7944 However, we must not emit DWARF records for registers 2..5
7945 if they are stored for use by variable arguments ...
7946
7947 ??? Unfortunately, it is not enough to simply not set the
7948 FRAME_RELATED flags for those SETs, because the first SET
7949 of the PARALLEL is always treated as if it had the flag
7950 set, even if it does not. Therefore we emit a new pattern
7951 without those registers as REG_FRAME_RELATED_EXPR note. */
7952
7953 if (first >= 6 && !global_not_special_regno_p (first))
7954 {
7955 rtx pat = PATTERN (insn);
7956
7957 for (i = 0; i < XVECLEN (pat, 0); i++)
7958 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7959 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7960 0, i)))))
7961 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7962
7963 RTX_FRAME_RELATED_P (insn) = 1;
7964 }
7965 else if (last >= 6)
7966 {
7967 int start;
7968
7969 for (start = first >= 6 ? first : 6; start <= last; start++)
7970 if (!global_not_special_regno_p (start))
7971 break;
7972
7973 if (start > last)
7974 return insn;
7975
7976 addr = plus_constant (Pmode, base,
7977 offset + (start - first) * UNITS_PER_LONG);
7978 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7979 gen_rtx_REG (Pmode, start),
7980 GEN_INT (last - start + 1));
7981 note = PATTERN (note);
7982
7983 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7984
7985 for (i = 0; i < XVECLEN (note, 0); i++)
7986 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7987 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7988 0, i)))))
7989 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7990
7991 RTX_FRAME_RELATED_P (insn) = 1;
7992 }
7993
7994 return insn;
7995 }
7996
7997 /* Generate insn to restore registers FIRST to LAST from
7998 the register save area located at offset OFFSET
7999 relative to register BASE. */
8000
8001 static rtx
8002 restore_gprs (rtx base, int offset, int first, int last)
8003 {
8004 rtx addr, insn;
8005
8006 addr = plus_constant (Pmode, base, offset);
8007 addr = gen_rtx_MEM (Pmode, addr);
8008 set_mem_alias_set (addr, get_frame_alias_set ());
8009
8010 /* Special-case single register. */
8011 if (first == last)
8012 {
8013 if (TARGET_64BIT)
8014 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8015 else
8016 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8017
8018 return insn;
8019 }
8020
8021 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8022 addr,
8023 GEN_INT (last - first + 1));
8024 return insn;
8025 }
8026
8027 /* Return insn sequence to load the GOT register. */
8028
8029 static GTY(()) rtx got_symbol;
8030 rtx
8031 s390_load_got (void)
8032 {
8033 rtx insns;
8034
8035 /* We cannot use pic_offset_table_rtx here since this function is
8036 also used for non-pic code when __tls_get_offset is called; in that
8037 case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx is
8038 usable. */
8039 rtx got_rtx = gen_rtx_REG (Pmode, 12);
8040
8041 if (!got_symbol)
8042 {
8043 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8044 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8045 }
8046
8047 start_sequence ();
8048
8049 if (TARGET_CPU_ZARCH)
8050 {
8051 emit_move_insn (got_rtx, got_symbol);
8052 }
8053 else
8054 {
8055 rtx offset;
8056
8057 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8058 UNSPEC_LTREL_OFFSET);
8059 offset = gen_rtx_CONST (Pmode, offset);
8060 offset = force_const_mem (Pmode, offset);
8061
8062 emit_move_insn (got_rtx, offset);
8063
8064 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8065 UNSPEC_LTREL_BASE);
8066 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8067
8068 emit_move_insn (got_rtx, offset);
8069 }
8070
8071 insns = get_insns ();
8072 end_sequence ();
8073 return insns;
8074 }
8075
8076 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8077 and the change to the stack pointer. */
8078
8079 static void
8080 s390_emit_stack_tie (void)
8081 {
8082 rtx mem = gen_frame_mem (BLKmode,
8083 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8084
8085 emit_insn (gen_stack_tie (mem));
8086 }
8087
8088 /* Expand the prologue into a bunch of separate insns. */
8089
8090 void
8091 s390_emit_prologue (void)
8092 {
8093 rtx insn, addr;
8094 rtx temp_reg;
8095 int i;
8096 int offset;
8097 int next_fpr = 0;
8098
8099 /* Complete frame layout. */
8100
8101 s390_update_frame_layout ();
8102
8103 /* Annotate all constant pool references to let the scheduler know
8104 they implicitly use the base register. */
8105
8106 push_topmost_sequence ();
8107
8108 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8109 if (INSN_P (insn))
8110 {
8111 annotate_constant_pool_refs (&PATTERN (insn));
8112 df_insn_rescan (insn);
8113 }
8114
8115 pop_topmost_sequence ();
8116
8117 /* Choose the best register to use as a temporary within the prologue.
8118 See below for why TPF must use register 1. */
8119
8120 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8121 && !crtl->is_leaf
8122 && !TARGET_TPF_PROFILING)
8123 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8124 else
8125 temp_reg = gen_rtx_REG (Pmode, 1);
8126
8127 /* Save call saved gprs. */
8128 if (cfun_frame_layout.first_save_gpr != -1)
8129 {
8130 insn = save_gprs (stack_pointer_rtx,
8131 cfun_frame_layout.gprs_offset +
8132 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8133 - cfun_frame_layout.first_save_gpr_slot),
8134 cfun_frame_layout.first_save_gpr,
8135 cfun_frame_layout.last_save_gpr);
8136 emit_insn (insn);
8137 }
8138
8139 /* Dummy insn to mark literal pool slot. */
8140
8141 if (cfun->machine->base_reg)
8142 emit_insn (gen_main_pool (cfun->machine->base_reg));
8143
8144 offset = cfun_frame_layout.f0_offset;
8145
8146 /* Save f0 and f2. */
8147 for (i = 0; i < 2; i++)
8148 {
8149 if (cfun_fpr_bit_p (i))
8150 {
8151 save_fpr (stack_pointer_rtx, offset, i + 16);
8152 offset += 8;
8153 }
8154 else if (!TARGET_PACKED_STACK)
8155 offset += 8;
8156 }
8157
8158 /* Save f4 and f6. */
8159 offset = cfun_frame_layout.f4_offset;
8160 for (i = 2; i < 4; i++)
8161 {
8162 if (cfun_fpr_bit_p (i))
8163 {
8164 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8165 offset += 8;
8166
8167 /* If f4 and f6 are call-clobbered, they are saved due to stdargs and
8168 are therefore not frame related. */
8169 if (!call_really_used_regs[i + 16])
8170 RTX_FRAME_RELATED_P (insn) = 1;
8171 }
8172 else if (!TARGET_PACKED_STACK)
8173 offset += 8;
8174 }
8175
8176 if (TARGET_PACKED_STACK
8177 && cfun_save_high_fprs_p
8178 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8179 {
8180 offset = (cfun_frame_layout.f8_offset
8181 + (cfun_frame_layout.high_fprs - 1) * 8);
8182
8183 for (i = 15; i > 7 && offset >= 0; i--)
8184 if (cfun_fpr_bit_p (i))
8185 {
8186 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8187
8188 RTX_FRAME_RELATED_P (insn) = 1;
8189 offset -= 8;
8190 }
8191 if (offset >= cfun_frame_layout.f8_offset)
8192 next_fpr = i + 16;
8193 }
8194
8195 if (!TARGET_PACKED_STACK)
8196 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8197
8198 if (flag_stack_usage_info)
8199 current_function_static_stack_size = cfun_frame_layout.frame_size;
8200
8201 /* Decrement stack pointer. */
8202
8203 if (cfun_frame_layout.frame_size > 0)
8204 {
8205 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8206 rtx real_frame_off;
8207
8208 if (s390_stack_size)
8209 {
8210 HOST_WIDE_INT stack_guard;
8211
8212 if (s390_stack_guard)
8213 stack_guard = s390_stack_guard;
8214 else
8215 {
8216 /* If no value for the stack guard is provided, the smallest power of 2
8217 larger than the current frame size is chosen. */
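          /* For example, a frame size of 5000 bytes yields a stack guard
             of 8192.  */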
8218 stack_guard = 1;
8219 while (stack_guard < cfun_frame_layout.frame_size)
8220 stack_guard <<= 1;
8221 }
8222
8223 if (cfun_frame_layout.frame_size >= s390_stack_size)
8224 {
8225 warning (0, "frame size of function %qs is %wd"
8226 " bytes exceeding user provided stack limit of "
8227 "%d bytes. "
8228 "An unconditional trap is added.",
8229 current_function_name(), cfun_frame_layout.frame_size,
8230 s390_stack_size);
8231 emit_insn (gen_trap ());
8232 }
8233 else
8234 {
8235 /* stack_guard has to be smaller than s390_stack_size.
8236 Otherwise we would emit an AND with zero which would
8237 not match the test under mask pattern. */
8238 if (stack_guard >= s390_stack_size)
8239 {
8240 warning (0, "frame size of function %qs is %wd"
8241 " bytes which is more than half the stack size. "
8242 "The dynamic check would not be reliable. "
8243 "No check emitted for this function.",
8244 current_function_name(),
8245 cfun_frame_layout.frame_size);
8246 }
8247 else
8248 {
8249 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8250 & ~(stack_guard - 1));
8251
8252 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8253 GEN_INT (stack_check_mask));
8254 if (TARGET_64BIT)
8255 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8256 t, const0_rtx),
8257 t, const0_rtx, const0_rtx));
8258 else
8259 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8260 t, const0_rtx),
8261 t, const0_rtx, const0_rtx));
8262 }
8263 }
8264 }
8265
8266 if (s390_warn_framesize > 0
8267 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8268 warning (0, "frame size of %qs is %wd bytes",
8269 current_function_name (), cfun_frame_layout.frame_size);
8270
8271 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8272 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8273
8274 /* Save incoming stack pointer into temp reg. */
8275 if (TARGET_BACKCHAIN || next_fpr)
8276 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8277
8278 /* Subtract frame size from stack pointer. */
8279
8280 if (DISP_IN_RANGE (INTVAL (frame_off)))
8281 {
8282 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8283 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8284 frame_off));
8285 insn = emit_insn (insn);
8286 }
8287 else
8288 {
8289 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8290 frame_off = force_const_mem (Pmode, frame_off);
8291
8292 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8293 annotate_constant_pool_refs (&PATTERN (insn));
8294 }
8295
8296 RTX_FRAME_RELATED_P (insn) = 1;
8297 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8298 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8299 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8300 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8301 real_frame_off)));
8302
8303 /* Set backchain. */
8304
8305 if (TARGET_BACKCHAIN)
8306 {
8307 if (cfun_frame_layout.backchain_offset)
8308 addr = gen_rtx_MEM (Pmode,
8309 plus_constant (Pmode, stack_pointer_rtx,
8310 cfun_frame_layout.backchain_offset));
8311 else
8312 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8313 set_mem_alias_set (addr, get_frame_alias_set ());
8314 insn = emit_insn (gen_move_insn (addr, temp_reg));
8315 }
8316
8317 /* If we support non-call exceptions (e.g. for Java),
8318 we need to make sure the backchain pointer is set up
8319 before any possibly trapping memory access. */
8320 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8321 {
8322 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8323 emit_clobber (addr);
8324 }
8325 }
8326
8327 /* Save fprs 8 - 15 (64 bit ABI). */
8328
8329 if (cfun_save_high_fprs_p && next_fpr)
8330 {
8331 /* If the stack might be accessed through a different register
8332 we have to make sure that the stack pointer decrement is not
8333 moved below the use of the stack slots. */
8334 s390_emit_stack_tie ();
8335
8336 insn = emit_insn (gen_add2_insn (temp_reg,
8337 GEN_INT (cfun_frame_layout.f8_offset)));
8338
8339 offset = 0;
8340
8341 for (i = 24; i <= next_fpr; i++)
8342 if (cfun_fpr_bit_p (i - 16))
8343 {
8344 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8345 cfun_frame_layout.frame_size
8346 + cfun_frame_layout.f8_offset
8347 + offset);
8348
8349 insn = save_fpr (temp_reg, offset, i);
8350 offset += 8;
8351 RTX_FRAME_RELATED_P (insn) = 1;
8352 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8353 gen_rtx_SET (VOIDmode,
8354 gen_rtx_MEM (DFmode, addr),
8355 gen_rtx_REG (DFmode, i)));
8356 }
8357 }
8358
8359 /* Set frame pointer, if needed. */
8360
8361 if (frame_pointer_needed)
8362 {
8363 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8364 RTX_FRAME_RELATED_P (insn) = 1;
8365 }
8366
8367 /* Set up got pointer, if needed. */
8368
8369 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8370 {
8371 rtx insns = s390_load_got ();
8372
8373 for (insn = insns; insn; insn = NEXT_INSN (insn))
8374 annotate_constant_pool_refs (&PATTERN (insn));
8375
8376 emit_insn (insns);
8377 }
8378
8379 if (TARGET_TPF_PROFILING)
8380 {
8381 /* Generate a BAS instruction to serve as a function
8382 entry intercept to facilitate the use of tracing
8383 algorithms located at the branch target. */
8384 emit_insn (gen_prologue_tpf ());
8385
8386 /* Emit a blockage here so that all code
8387 lies between the profiling mechanisms. */
8388 emit_insn (gen_blockage ());
8389 }
8390 }
8391
8392 /* Expand the epilogue into a bunch of separate insns. */
8393
8394 void
8395 s390_emit_epilogue (bool sibcall)
8396 {
8397 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8398 int area_bottom, area_top, offset = 0;
8399 int next_offset;
8400 rtvec p;
8401 int i;
8402
8403 if (TARGET_TPF_PROFILING)
8404 {
8405
8406 /* Generate a BAS instruction to serve as a function
8407 exit intercept to facilitate the use of tracing
8408 algorithms located at the branch target. */
8409
8410 /* Emit a blockage here so that all code
8411 lies between the profiling mechanisms. */
8412 emit_insn (gen_blockage ());
8413
8414 emit_insn (gen_epilogue_tpf ());
8415 }
8416
8417 /* Check whether to use frame or stack pointer for restore. */
8418
8419 frame_pointer = (frame_pointer_needed
8420 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8421
8422 s390_frame_area (&area_bottom, &area_top);
8423
8424 /* Check whether we can access the register save area.
8425 If not, increment the frame pointer as required. */
8426
8427 if (area_top <= area_bottom)
8428 {
8429 /* Nothing to restore. */
8430 }
8431 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8432 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8433 {
8434 /* Area is in range. */
8435 offset = cfun_frame_layout.frame_size;
8436 }
8437 else
8438 {
8439 rtx insn, frame_off, cfa;
8440
8441 offset = area_bottom < 0 ? -area_bottom : 0;
8442 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8443
8444 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8445 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8446 if (DISP_IN_RANGE (INTVAL (frame_off)))
8447 {
8448 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8449 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8450 insn = emit_insn (insn);
8451 }
8452 else
8453 {
8454 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8455 frame_off = force_const_mem (Pmode, frame_off);
8456
8457 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8458 annotate_constant_pool_refs (&PATTERN (insn));
8459 }
8460 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8461 RTX_FRAME_RELATED_P (insn) = 1;
8462 }
8463
8464 /* Restore call saved fprs. */
8465
8466 if (TARGET_64BIT)
8467 {
8468 if (cfun_save_high_fprs_p)
8469 {
8470 next_offset = cfun_frame_layout.f8_offset;
8471 for (i = 24; i < 32; i++)
8472 {
8473 if (cfun_fpr_bit_p (i - 16))
8474 {
8475 restore_fpr (frame_pointer,
8476 offset + next_offset, i);
8477 cfa_restores
8478 = alloc_reg_note (REG_CFA_RESTORE,
8479 gen_rtx_REG (DFmode, i), cfa_restores);
8480 next_offset += 8;
8481 }
8482 }
8483 }
8484
8485 }
8486 else
8487 {
8488 next_offset = cfun_frame_layout.f4_offset;
8489 for (i = 18; i < 20; i++)
8490 {
8491 if (cfun_fpr_bit_p (i - 16))
8492 {
8493 restore_fpr (frame_pointer,
8494 offset + next_offset, i);
8495 cfa_restores
8496 = alloc_reg_note (REG_CFA_RESTORE,
8497 gen_rtx_REG (DFmode, i), cfa_restores);
8498 next_offset += 8;
8499 }
8500 else if (!TARGET_PACKED_STACK)
8501 next_offset += 8;
8502 }
8503
8504 }
8505
8506 /* Return register. */
8507
8508 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8509
8510 /* Restore call saved gprs. */
8511
8512 if (cfun_frame_layout.first_restore_gpr != -1)
8513 {
8514 rtx insn, addr;
8515 int i;
8516
8517 /* Check for global registers and store them into the stack slots
8518 from which they will be restored, so the restore leaves them unchanged. */
8519
8520 for (i = cfun_frame_layout.first_restore_gpr;
8521 i <= cfun_frame_layout.last_restore_gpr;
8522 i++)
8523 {
8524 if (global_not_special_regno_p (i))
8525 {
8526 addr = plus_constant (Pmode, frame_pointer,
8527 offset + cfun_frame_layout.gprs_offset
8528 + (i - cfun_frame_layout.first_save_gpr_slot)
8529 * UNITS_PER_LONG);
8530 addr = gen_rtx_MEM (Pmode, addr);
8531 set_mem_alias_set (addr, get_frame_alias_set ());
8532 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8533 }
8534 else
8535 cfa_restores
8536 = alloc_reg_note (REG_CFA_RESTORE,
8537 gen_rtx_REG (Pmode, i), cfa_restores);
8538 }
8539
8540 if (! sibcall)
8541 {
8542 /* Fetch the return address from the stack before the load multiple;
8543 this helps scheduling. */
8544
8545 if (cfun_frame_layout.save_return_addr_p
8546 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8547 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8548 {
8549 int return_regnum = find_unused_clobbered_reg();
8550 if (!return_regnum)
8551 return_regnum = 4;
8552 return_reg = gen_rtx_REG (Pmode, return_regnum);
8553
8554 addr = plus_constant (Pmode, frame_pointer,
8555 offset + cfun_frame_layout.gprs_offset
8556 + (RETURN_REGNUM
8557 - cfun_frame_layout.first_save_gpr_slot)
8558 * UNITS_PER_LONG);
8559 addr = gen_rtx_MEM (Pmode, addr);
8560 set_mem_alias_set (addr, get_frame_alias_set ());
8561 emit_move_insn (return_reg, addr);
8562 }
8563 }
8564
8565 insn = restore_gprs (frame_pointer,
8566 offset + cfun_frame_layout.gprs_offset
8567 + (cfun_frame_layout.first_restore_gpr
8568 - cfun_frame_layout.first_save_gpr_slot)
8569 * UNITS_PER_LONG,
8570 cfun_frame_layout.first_restore_gpr,
8571 cfun_frame_layout.last_restore_gpr);
8572 insn = emit_insn (insn);
8573 REG_NOTES (insn) = cfa_restores;
8574 add_reg_note (insn, REG_CFA_DEF_CFA,
8575 plus_constant (Pmode, stack_pointer_rtx,
8576 STACK_POINTER_OFFSET));
8577 RTX_FRAME_RELATED_P (insn) = 1;
8578 }
8579
8580 if (! sibcall)
8581 {
8582
8583 /* Return to caller. */
8584
8585 p = rtvec_alloc (2);
8586
8587 RTVEC_ELT (p, 0) = ret_rtx;
8588 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8589 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8590 }
8591 }
8592
8593
8594 /* Return the size in bytes of a function argument of
8595 type TYPE and/or mode MODE. At least one of TYPE or
8596 MODE must be specified. */
8597
8598 static int
8599 s390_function_arg_size (enum machine_mode mode, const_tree type)
8600 {
8601 if (type)
8602 return int_size_in_bytes (type);
8603
8604 /* No type info available for some library calls ... */
8605 if (mode != BLKmode)
8606 return GET_MODE_SIZE (mode);
8607
8608 /* If we have neither type nor mode, abort. */
8609 gcc_unreachable ();
8610 }
8611
8612 /* Return true if a function argument of type TYPE and mode MODE
8613 is to be passed in a floating-point register, if available. */
8614
8615 static bool
8616 s390_function_arg_float (enum machine_mode mode, const_tree type)
8617 {
8618 int size = s390_function_arg_size (mode, type);
8619 if (size > 8)
8620 return false;
8621
8622 /* Soft-float changes the ABI: no floating-point registers are used. */
8623 if (TARGET_SOFT_FLOAT)
8624 return false;
8625
8626 /* No type info available for some library calls ... */
8627 if (!type)
8628 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8629
8630 /* The ABI says that record types with a single member are treated
8631 just like that member would be. */
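      /* For example, a type such as  struct { double d; }  is passed
         exactly like a plain double, i.e. in a floating-point register
         when one is available.  */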
8632 while (TREE_CODE (type) == RECORD_TYPE)
8633 {
8634 tree field, single = NULL_TREE;
8635
8636 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8637 {
8638 if (TREE_CODE (field) != FIELD_DECL)
8639 continue;
8640
8641 if (single == NULL_TREE)
8642 single = TREE_TYPE (field);
8643 else
8644 return false;
8645 }
8646
8647 if (single == NULL_TREE)
8648 return false;
8649 else
8650 type = single;
8651 }
8652
8653 return TREE_CODE (type) == REAL_TYPE;
8654 }
8655
8656 /* Return true if a function argument of type TYPE and mode MODE
8657 is to be passed in an integer register, or a pair of integer
8658 registers, if available. */
8659
8660 static bool
8661 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8662 {
8663 int size = s390_function_arg_size (mode, type);
8664 if (size > 8)
8665 return false;
8666
8667 /* No type info available for some library calls ... */
8668 if (!type)
8669 return GET_MODE_CLASS (mode) == MODE_INT
8670 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8671
8672 /* We accept small integral (and similar) types. */
8673 if (INTEGRAL_TYPE_P (type)
8674 || POINTER_TYPE_P (type)
8675 || TREE_CODE (type) == NULLPTR_TYPE
8676 || TREE_CODE (type) == OFFSET_TYPE
8677 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8678 return true;
8679
8680 /* We also accept structs of size 1, 2, 4, 8 that are not
8681 passed in floating-point registers. */
8682 if (AGGREGATE_TYPE_P (type)
8683 && exact_log2 (size) >= 0
8684 && !s390_function_arg_float (mode, type))
8685 return true;
8686
8687 return false;
8688 }
8689
8690 /* Return 1 if a function argument of type TYPE and mode MODE
8691 is to be passed by reference. The ABI specifies that only
8692 structures of size 1, 2, 4, or 8 bytes are passed by value,
8693 all other structures (and complex numbers) are passed by
8694 reference. */
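/* For example, a 4-byte or 8-byte struct is passed by value, while a
   3-byte or 16-byte struct is passed by reference; complex and vector
   types are always passed by reference.  */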
8695
8696 static bool
8697 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
8698 enum machine_mode mode, const_tree type,
8699 bool named ATTRIBUTE_UNUSED)
8700 {
8701 int size = s390_function_arg_size (mode, type);
8702 if (size > 8)
8703 return true;
8704
8705 if (type)
8706 {
8707 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8708 return 1;
8709
8710 if (TREE_CODE (type) == COMPLEX_TYPE
8711 || TREE_CODE (type) == VECTOR_TYPE)
8712 return 1;
8713 }
8714
8715 return 0;
8716 }
8717
8718 /* Update the data in CUM to advance over an argument of mode MODE and
8719 data type TYPE. (TYPE is null for libcalls where that information
8720 may not be available.). The boolean NAMED specifies whether the
8721 argument is a named argument (as opposed to an unnamed argument
8722 matching an ellipsis). */
8723
8724 static void
8725 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
8726 const_tree type, bool named ATTRIBUTE_UNUSED)
8727 {
8728 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8729
8730 if (s390_function_arg_float (mode, type))
8731 {
8732 cum->fprs += 1;
8733 }
8734 else if (s390_function_arg_integer (mode, type))
8735 {
8736 int size = s390_function_arg_size (mode, type);
8737 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8738 }
8739 else
8740 gcc_unreachable ();
8741 }
8742
8743 /* Define where to put the arguments to a function.
8744 Value is zero to push the argument on the stack,
8745 or a hard register in which to store the argument.
8746
8747 MODE is the argument's machine mode.
8748 TYPE is the data type of the argument (as a tree).
8749 This is null for libcalls where that information may
8750 not be available.
8751 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8752 the preceding args and about the function being called.
8753 NAMED is nonzero if this argument is a named parameter
8754 (otherwise it is an extra parameter matching an ellipsis).
8755
8756 On S/390, we use general purpose registers 2 through 6 to
8757 pass integer, pointer, and certain structure arguments, and
8758 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8759 to pass floating point arguments. All remaining arguments
8760 are pushed to the stack. */
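/* For illustration (64-bit ABI): in a call f (int a, double d, long b),
   a is passed in %r2, d in %f0 and b in %r3; a second floating-point
   argument would go into %f2.  */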
8761
8762 static rtx
8763 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
8764 const_tree type, bool named ATTRIBUTE_UNUSED)
8765 {
8766 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
8767
8768 if (s390_function_arg_float (mode, type))
8769 {
8770 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8771 return 0;
8772 else
8773 return gen_rtx_REG (mode, cum->fprs + 16);
8774 }
8775 else if (s390_function_arg_integer (mode, type))
8776 {
8777 int size = s390_function_arg_size (mode, type);
8778 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8779
8780 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8781 return 0;
8782 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8783 return gen_rtx_REG (mode, cum->gprs + 2);
8784 else if (n_gprs == 2)
8785 {
8786 rtvec p = rtvec_alloc (2);
8787
8788 RTVEC_ELT (p, 0)
8789 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8790 const0_rtx);
8791 RTVEC_ELT (p, 1)
8792 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8793 GEN_INT (4));
8794
8795 return gen_rtx_PARALLEL (mode, p);
8796 }
8797 }
8798
8799 /* After the real arguments, expand_call calls us once again
8800 with a void_type_node type. Whatever we return here is
8801 passed as operand 2 to the call expanders.
8802
8803 We don't need this feature ... */
8804 else if (type == void_type_node)
8805 return const0_rtx;
8806
8807 gcc_unreachable ();
8808 }
8809
8810 /* Return true if return values of type TYPE should be returned
8811 in a memory buffer whose address is passed by the caller as
8812 hidden first argument. */
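/* For example, int, long and double values are returned in registers,
   whereas any struct - even a 4-byte one - as well as complex and vector
   values are returned through the hidden memory argument.  */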
8813
8814 static bool
8815 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8816 {
8817 /* We accept small integral (and similar) types. */
8818 if (INTEGRAL_TYPE_P (type)
8819 || POINTER_TYPE_P (type)
8820 || TREE_CODE (type) == OFFSET_TYPE
8821 || TREE_CODE (type) == REAL_TYPE)
8822 return int_size_in_bytes (type) > 8;
8823
8824 /* Aggregates and similar constructs are always returned
8825 in memory. */
8826 if (AGGREGATE_TYPE_P (type)
8827 || TREE_CODE (type) == COMPLEX_TYPE
8828 || TREE_CODE (type) == VECTOR_TYPE)
8829 return true;
8830
8831 /* ??? We get called on all sorts of random stuff from
8832 aggregate_value_p. We can't abort, but it's not clear
8833 what's safe to return. Pretend it's a struct I guess. */
8834 return true;
8835 }
8836
8837 /* Function arguments and return values are promoted to word size. */
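/* For example, with -m64 a 'short' argument is widened to DImode (Pmode)
   here; for pointer types the signedness of the extension is taken from
   POINTERS_EXTEND_UNSIGNED.  */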
8838
8839 static enum machine_mode
8840 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8841 int *punsignedp,
8842 const_tree fntype ATTRIBUTE_UNUSED,
8843 int for_return ATTRIBUTE_UNUSED)
8844 {
8845 if (INTEGRAL_MODE_P (mode)
8846 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8847 {
8848 if (type != NULL_TREE && POINTER_TYPE_P (type))
8849 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8850 return Pmode;
8851 }
8852
8853 return mode;
8854 }
8855
8856 /* Define where to return a (scalar) value of type RET_TYPE.
8857 If RET_TYPE is null, define where to return a (scalar)
8858 value of mode MODE from a libcall. */
8859
8860 static rtx
8861 s390_function_and_libcall_value (enum machine_mode mode,
8862 const_tree ret_type,
8863 const_tree fntype_or_decl,
8864 bool outgoing ATTRIBUTE_UNUSED)
8865 {
8866 /* For normal functions perform the promotion as
8867 promote_function_mode would do. */
8868 if (ret_type)
8869 {
8870 int unsignedp = TYPE_UNSIGNED (ret_type);
8871 mode = promote_function_mode (ret_type, mode, &unsignedp,
8872 fntype_or_decl, 1);
8873 }
8874
8875 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8876 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8877
8878 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8879 return gen_rtx_REG (mode, 16);
8880 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8881 || UNITS_PER_LONG == UNITS_PER_WORD)
8882 return gen_rtx_REG (mode, 2);
8883 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8884 {
8885 /* This case is triggered when returning a 64 bit value with
8886 -m31 -mzarch. Although the value would fit into a single
8887 register, it has to be forced into a 32 bit register pair in
8888 order to match the ABI. */
8889 rtvec p = rtvec_alloc (2);
8890
8891 RTVEC_ELT (p, 0)
8892 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8893 RTVEC_ELT (p, 1)
8894 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8895
8896 return gen_rtx_PARALLEL (mode, p);
8897 }
8898
8899 gcc_unreachable ();
8900 }
8901
8902 /* Define where to return a scalar return value of type RET_TYPE. */
8903
8904 static rtx
8905 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8906 bool outgoing)
8907 {
8908 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8909 fn_decl_or_type, outgoing);
8910 }
8911
8912 /* Define where to return a scalar libcall return value of mode
8913 MODE. */
8914
8915 static rtx
8916 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8917 {
8918 return s390_function_and_libcall_value (mode, NULL_TREE,
8919 NULL_TREE, true);
8920 }
8921
8922
8923 /* Create and return the va_list datatype.
8924
8925 On S/390, va_list is an array type equivalent to
8926
8927 typedef struct __va_list_tag
8928 {
8929 long __gpr;
8930 long __fpr;
8931 void *__overflow_arg_area;
8932 void *__reg_save_area;
8933 } va_list[1];
8934
8935 where __gpr and __fpr hold the number of general purpose
8936 or floating point arguments used up to now, respectively,
8937 __overflow_arg_area points to the stack location of the
8938 next argument passed on the stack, and __reg_save_area
8939 always points to the start of the register area in the
8940 call frame of the current function. The function prologue
8941 saves all registers used for argument passing into this
8942 area if the function uses variable arguments. */
8943
8944 static tree
8945 s390_build_builtin_va_list (void)
8946 {
8947 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8948
8949 record = lang_hooks.types.make_type (RECORD_TYPE);
8950
8951 type_decl =
8952 build_decl (BUILTINS_LOCATION,
8953 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8954
8955 f_gpr = build_decl (BUILTINS_LOCATION,
8956 FIELD_DECL, get_identifier ("__gpr"),
8957 long_integer_type_node);
8958 f_fpr = build_decl (BUILTINS_LOCATION,
8959 FIELD_DECL, get_identifier ("__fpr"),
8960 long_integer_type_node);
8961 f_ovf = build_decl (BUILTINS_LOCATION,
8962 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8963 ptr_type_node);
8964 f_sav = build_decl (BUILTINS_LOCATION,
8965 FIELD_DECL, get_identifier ("__reg_save_area"),
8966 ptr_type_node);
8967
8968 va_list_gpr_counter_field = f_gpr;
8969 va_list_fpr_counter_field = f_fpr;
8970
8971 DECL_FIELD_CONTEXT (f_gpr) = record;
8972 DECL_FIELD_CONTEXT (f_fpr) = record;
8973 DECL_FIELD_CONTEXT (f_ovf) = record;
8974 DECL_FIELD_CONTEXT (f_sav) = record;
8975
8976 TYPE_STUB_DECL (record) = type_decl;
8977 TYPE_NAME (record) = type_decl;
8978 TYPE_FIELDS (record) = f_gpr;
8979 DECL_CHAIN (f_gpr) = f_fpr;
8980 DECL_CHAIN (f_fpr) = f_ovf;
8981 DECL_CHAIN (f_ovf) = f_sav;
8982
8983 layout_type (record);
8984
8985 /* The correct type is an array type of one element. */
8986 return build_array_type (record, build_index_type (size_zero_node));
8987 }
8988
8989 /* Implement va_start by filling the va_list structure VALIST.
8990 STDARG_P is always true, and ignored.
8991 NEXTARG points to the first anonymous stack argument.
8992
8993 The following global variables are used to initialize
8994 the va_list structure:
8995
8996 crtl->args.info:
8997 holds number of gprs and fprs used for named arguments.
8998 crtl->args.arg_offset_rtx:
8999 holds the offset of the first anonymous stack argument
9000 (relative to the virtual arg pointer). */
9001
9002 static void
9003 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9004 {
9005 HOST_WIDE_INT n_gpr, n_fpr;
9006 int off;
9007 tree f_gpr, f_fpr, f_ovf, f_sav;
9008 tree gpr, fpr, ovf, sav, t;
9009
9010 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9011 f_fpr = DECL_CHAIN (f_gpr);
9012 f_ovf = DECL_CHAIN (f_fpr);
9013 f_sav = DECL_CHAIN (f_ovf);
9014
9015 valist = build_simple_mem_ref (valist);
9016 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9017 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9018 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9019 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9020
9021 /* Count number of gp and fp argument registers used. */
9022
9023 n_gpr = crtl->args.info.gprs;
9024 n_fpr = crtl->args.info.fprs;
9025
9026 if (cfun->va_list_gpr_size)
9027 {
9028 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9029 build_int_cst (NULL_TREE, n_gpr));
9030 TREE_SIDE_EFFECTS (t) = 1;
9031 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9032 }
9033
9034 if (cfun->va_list_fpr_size)
9035 {
9036 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9037 build_int_cst (NULL_TREE, n_fpr));
9038 TREE_SIDE_EFFECTS (t) = 1;
9039 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9040 }
9041
9042 /* Find the overflow area. */
9043 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9044 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9045 {
9046 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9047
9048 off = INTVAL (crtl->args.arg_offset_rtx);
9049 off = off < 0 ? 0 : off;
9050 if (TARGET_DEBUG_ARG)
9051 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9052 (int)n_gpr, (int)n_fpr, off);
9053
9054 t = fold_build_pointer_plus_hwi (t, off);
9055
9056 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9057 TREE_SIDE_EFFECTS (t) = 1;
9058 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9059 }
9060
9061 /* Find the register save area. */
9062 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9063 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9064 {
9065 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9066 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9067
9068 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9069 TREE_SIDE_EFFECTS (t) = 1;
9070 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9071 }
9072 }
9073
9074 /* Implement va_arg by updating the va_list structure
9075 VALIST as required to retrieve an argument of type
9076 TYPE, and returning that argument.
9077
9078 Generates code equivalent to:
9079
9080 if (integral value) {
9081 if (size <= 4 && args.gpr < 5 ||
9082 size > 4 && args.gpr < 4 )
9083 ret = args.reg_save_area[args.gpr+8]
9084 else
9085 ret = *args.overflow_arg_area++;
9086 } else if (float value) {
9087 if (args.fgpr < 2)
9088 ret = args.reg_save_area[args.fpr+64]
9089 else
9090 ret = *args.overflow_arg_area++;
9091 } else if (aggregate value) {
9092 if (args.gpr < 5)
9093 ret = *args.reg_save_area[args.gpr]
9094 else
9095 ret = **args.overflow_arg_area++;
9096 } */
9097
9098 static tree
9099 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9100 gimple_seq *post_p ATTRIBUTE_UNUSED)
9101 {
9102 tree f_gpr, f_fpr, f_ovf, f_sav;
9103 tree gpr, fpr, ovf, sav, reg, t, u;
9104 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9105 tree lab_false, lab_over, addr;
9106
9107 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9108 f_fpr = DECL_CHAIN (f_gpr);
9109 f_ovf = DECL_CHAIN (f_fpr);
9110 f_sav = DECL_CHAIN (f_ovf);
9111
9112 valist = build_va_arg_indirect_ref (valist);
9113 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9114 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9115 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9116
9117 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9118 both appear on a lhs. */
9119 valist = unshare_expr (valist);
9120 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9121
9122 size = int_size_in_bytes (type);
9123
9124 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9125 {
9126 if (TARGET_DEBUG_ARG)
9127 {
9128 fprintf (stderr, "va_arg: aggregate type");
9129 debug_tree (type);
9130 }
9131
9132 /* Aggregates are passed by reference. */
9133 indirect_p = 1;
9134 reg = gpr;
9135 n_reg = 1;
9136
9137 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9138 will be added by s390_frame_info, because for va_args an even
9139 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9140 sav_ofs = 2 * UNITS_PER_LONG;
9141 sav_scale = UNITS_PER_LONG;
9142 size = UNITS_PER_LONG;
9143 max_reg = GP_ARG_NUM_REG - n_reg;
9144 }
9145 else if (s390_function_arg_float (TYPE_MODE (type), type))
9146 {
9147 if (TARGET_DEBUG_ARG)
9148 {
9149 fprintf (stderr, "va_arg: float type");
9150 debug_tree (type);
9151 }
9152
9153 /* FP args go in FP registers, if present. */
9154 indirect_p = 0;
9155 reg = fpr;
9156 n_reg = 1;
9157 sav_ofs = 16 * UNITS_PER_LONG;
9158 sav_scale = 8;
9159 max_reg = FP_ARG_NUM_REG - n_reg;
9160 }
9161 else
9162 {
9163 if (TARGET_DEBUG_ARG)
9164 {
9165 fprintf (stderr, "va_arg: other type");
9166 debug_tree (type);
9167 }
9168
9169 /* Otherwise into GP registers. */
9170 indirect_p = 0;
9171 reg = gpr;
9172 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9173
9174 /* Kernel stack layout on 31 bit: it is assumed here that no padding
9175 will be added by s390_frame_info, because for va_args an even
9176 number of GPRs always has to be saved (r15-r2 = 14 regs). */
9177 sav_ofs = 2 * UNITS_PER_LONG;
9178
9179 if (size < UNITS_PER_LONG)
9180 sav_ofs += UNITS_PER_LONG - size;
9181
9182 sav_scale = UNITS_PER_LONG;
9183 max_reg = GP_ARG_NUM_REG - n_reg;
9184 }
9185
9186 /* Pull the value out of the saved registers ... */
9187
9188 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9189 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9190 addr = create_tmp_var (ptr_type_node, "addr");
9191
9192 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9193 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9194 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9195 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9196 gimplify_and_add (t, pre_p);
9197
9198 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9199 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9200 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9201 t = fold_build_pointer_plus (t, u);
9202
9203 gimplify_assign (addr, t, pre_p);
9204
9205 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9206
9207 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9208
9209
9210 /* ... Otherwise out of the overflow area. */
9211
9212 t = ovf;
9213 if (size < UNITS_PER_LONG)
9214 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9215
9216 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9217
9218 gimplify_assign (addr, t, pre_p);
9219
9220 t = fold_build_pointer_plus_hwi (t, size);
9221 gimplify_assign (ovf, t, pre_p);
9222
9223 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9224
9225
9226 /* Increment register save count. */
9227
9228 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9229 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9230 gimplify_and_add (u, pre_p);
9231
9232 if (indirect_p)
9233 {
9234 t = build_pointer_type_for_mode (build_pointer_type (type),
9235 ptr_mode, true);
9236 addr = fold_convert (t, addr);
9237 addr = build_va_arg_indirect_ref (addr);
9238 }
9239 else
9240 {
9241 t = build_pointer_type_for_mode (type, ptr_mode, true);
9242 addr = fold_convert (t, addr);
9243 }
9244
9245 return build_va_arg_indirect_ref (addr);
9246 }
9247
9248
9249 /* Builtins. */
9250
9251 enum s390_builtin
9252 {
9253 S390_BUILTIN_THREAD_POINTER,
9254 S390_BUILTIN_SET_THREAD_POINTER,
9255
9256 S390_BUILTIN_max
9257 };
9258
9259 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9260 CODE_FOR_get_tp_64,
9261 CODE_FOR_set_tp_64
9262 };
9263
9264 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9265 CODE_FOR_get_tp_31,
9266 CODE_FOR_set_tp_31
9267 };
9268
9269 static void
9270 s390_init_builtins (void)
9271 {
9272 tree ftype;
9273
9274 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9275 add_builtin_function ("__builtin_thread_pointer", ftype,
9276 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9277 NULL, NULL_TREE);
9278
9279 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9280 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9281 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9282 NULL, NULL_TREE);
9283 }
9284
9285 /* Expand an expression EXP that calls a built-in function,
9286 with result going to TARGET if that's convenient
9287 (and in mode MODE if that's convenient).
9288 SUBTARGET may be used as the target for computing one of EXP's operands.
9289 IGNORE is nonzero if the value is to be ignored. */
9290
9291 static rtx
9292 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9293 enum machine_mode mode ATTRIBUTE_UNUSED,
9294 int ignore ATTRIBUTE_UNUSED)
9295 {
9296 #define MAX_ARGS 2
9297
9298 enum insn_code const *code_for_builtin =
9299 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9300
9301 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9302 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9303 enum insn_code icode;
9304 rtx op[MAX_ARGS], pat;
9305 int arity;
9306 bool nonvoid;
9307 tree arg;
9308 call_expr_arg_iterator iter;
9309
9310 if (fcode >= S390_BUILTIN_max)
9311 internal_error ("bad builtin fcode");
9312 icode = code_for_builtin[fcode];
9313 if (icode == 0)
9314 internal_error ("bad builtin fcode");
9315
9316 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9317
9318 arity = 0;
9319 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9320 {
9321 const struct insn_operand_data *insn_op;
9322
9323 if (arg == error_mark_node)
9324 return NULL_RTX;
9325 if (arity > MAX_ARGS)
9326 return NULL_RTX;
9327
9328 insn_op = &insn_data[icode].operand[arity + nonvoid];
9329
9330 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9331
9332 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9333 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9334 arity++;
9335 }
9336
9337 if (nonvoid)
9338 {
9339 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9340 if (!target
9341 || GET_MODE (target) != tmode
9342 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9343 target = gen_reg_rtx (tmode);
9344 }
9345
9346 switch (arity)
9347 {
9348 case 0:
9349 pat = GEN_FCN (icode) (target);
9350 break;
9351 case 1:
9352 if (nonvoid)
9353 pat = GEN_FCN (icode) (target, op[0]);
9354 else
9355 pat = GEN_FCN (icode) (op[0]);
9356 break;
9357 case 2:
9358 pat = GEN_FCN (icode) (target, op[0], op[1]);
9359 break;
9360 default:
9361 gcc_unreachable ();
9362 }
9363 if (!pat)
9364 return NULL_RTX;
9365 emit_insn (pat);
9366
9367 if (nonvoid)
9368 return target;
9369 else
9370 return const0_rtx;
9371 }
9372
9373
9374 /* Output assembly code for the trampoline template to
9375 stdio stream FILE.
9376
9377 On S/390, we use gpr 1 internally in the trampoline code;
9378 gpr 0 is used to hold the static chain. */
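/* The template emitted below occupies 2 * UNITS_PER_LONG bytes;
   s390_trampoline_init stores the static chain value at offset
   2 * UNITS_PER_LONG and the target address at offset 3 * UNITS_PER_LONG.
   The basr sets gpr 1 to the address just past itself, so the lmg/lm
   loads those two slots into gpr 0 and gpr 1, and the final br jumps to
   the target.  */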
9379
9380 static void
9381 s390_asm_trampoline_template (FILE *file)
9382 {
9383 rtx op[2];
9384 op[0] = gen_rtx_REG (Pmode, 0);
9385 op[1] = gen_rtx_REG (Pmode, 1);
9386
9387 if (TARGET_64BIT)
9388 {
9389 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9390 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9391 output_asm_insn ("br\t%1", op); /* 2 byte */
9392 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9393 }
9394 else
9395 {
9396 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9397 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9398 output_asm_insn ("br\t%1", op); /* 2 byte */
9399 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9400 }
9401 }
9402
9403 /* Emit RTL insns to initialize the variable parts of a trampoline.
9404 FNADDR is an RTX for the address of the function's pure code.
9405 CXT is an RTX for the static chain value for the function. */
9406
9407 static void
9408 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9409 {
9410 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9411 rtx mem;
9412
9413 emit_block_move (m_tramp, assemble_trampoline_template (),
9414 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9415
9416 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9417 emit_move_insn (mem, cxt);
9418 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9419 emit_move_insn (mem, fnaddr);
9420 }
9421
9422 /* Output assembler code to FILE to increment profiler label # LABELNO
9423 for profiling a function entry. */
9424
9425 void
9426 s390_function_profiler (FILE *file, int labelno)
9427 {
9428 rtx op[7];
9429
9430 char label[128];
9431 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9432
9433 fprintf (file, "# function profiler \n");
9434
9435 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9436 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9437 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
9438
9439 op[2] = gen_rtx_REG (Pmode, 1);
9440 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9441 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9442
9443 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9444 if (flag_pic)
9445 {
9446 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9447 op[4] = gen_rtx_CONST (Pmode, op[4]);
9448 }
9449
9450 if (TARGET_64BIT)
9451 {
9452 output_asm_insn ("stg\t%0,%1", op);
9453 output_asm_insn ("larl\t%2,%3", op);
9454 output_asm_insn ("brasl\t%0,%4", op);
9455 output_asm_insn ("lg\t%0,%1", op);
9456 }
9457 else if (!flag_pic)
9458 {
9459 op[6] = gen_label_rtx ();
9460
9461 output_asm_insn ("st\t%0,%1", op);
9462 output_asm_insn ("bras\t%2,%l6", op);
9463 output_asm_insn (".long\t%4", op);
9464 output_asm_insn (".long\t%3", op);
9465 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9466 output_asm_insn ("l\t%0,0(%2)", op);
9467 output_asm_insn ("l\t%2,4(%2)", op);
9468 output_asm_insn ("basr\t%0,%0", op);
9469 output_asm_insn ("l\t%0,%1", op);
9470 }
9471 else
9472 {
9473 op[5] = gen_label_rtx ();
9474 op[6] = gen_label_rtx ();
9475
9476 output_asm_insn ("st\t%0,%1", op);
9477 output_asm_insn ("bras\t%2,%l6", op);
9478 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9479 output_asm_insn (".long\t%4-%l5", op);
9480 output_asm_insn (".long\t%3-%l5", op);
9481 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9482 output_asm_insn ("lr\t%0,%2", op);
9483 output_asm_insn ("a\t%0,0(%2)", op);
9484 output_asm_insn ("a\t%2,4(%2)", op);
9485 output_asm_insn ("basr\t%0,%0", op);
9486 output_asm_insn ("l\t%0,%1", op);
9487 }
9488 }
9489
9490 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9491 into its SYMBOL_REF_FLAGS. */
9492
9493 static void
9494 s390_encode_section_info (tree decl, rtx rtl, int first)
9495 {
9496 default_encode_section_info (decl, rtl, first);
9497
9498 if (TREE_CODE (decl) == VAR_DECL)
9499 {
9500 /* If a variable has a forced alignment of < 2 bytes, mark it
9501 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9502 operand. */
9503 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9504 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9505 if (!DECL_SIZE (decl)
9506 || !DECL_ALIGN (decl)
9507 || !host_integerp (DECL_SIZE (decl), 0)
9508 || (DECL_ALIGN (decl) <= 64
9509 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9510 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9511 }
9512
9513 /* Literal pool references don't have a decl so they are handled
9514 differently here. We rely on the information in the MEM_ALIGN
9515 entry to decide upon natural alignment. */
9516 if (MEM_P (rtl)
9517 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9518 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9519 && (MEM_ALIGN (rtl) == 0
9520 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9521 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9522 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9523 }
9524
9525 /* Output thunk to FILE that implements a C++ virtual function call (with
9526 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9527 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9528 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9529 relative to the resulting this pointer. */
9530
9531 static void
9532 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9533 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9534 tree function)
9535 {
9536 rtx op[10];
9537 int nonlocal = 0;
9538
9539 /* Make sure unwind info is emitted for the thunk if needed. */
9540 final_start_function (emit_barrier (), file, 1);
9541
9542 /* Operand 0 is the target function. */
9543 op[0] = XEXP (DECL_RTL (function), 0);
9544 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9545 {
9546 nonlocal = 1;
9547 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9548 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9549 op[0] = gen_rtx_CONST (Pmode, op[0]);
9550 }
9551
9552 /* Operand 1 is the 'this' pointer. */
9553 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9554 op[1] = gen_rtx_REG (Pmode, 3);
9555 else
9556 op[1] = gen_rtx_REG (Pmode, 2);
9557
9558 /* Operand 2 is the delta. */
9559 op[2] = GEN_INT (delta);
9560
9561 /* Operand 3 is the vcall_offset. */
9562 op[3] = GEN_INT (vcall_offset);
9563
9564 /* Operand 4 is the temporary register. */
9565 op[4] = gen_rtx_REG (Pmode, 1);
9566
9567 /* Operands 5 to 8 can be used as labels. */
9568 op[5] = NULL_RTX;
9569 op[6] = NULL_RTX;
9570 op[7] = NULL_RTX;
9571 op[8] = NULL_RTX;
9572
9573 /* Operand 9 can be used for temporary register. */
9574 op[9] = NULL_RTX;
9575
9576 /* Generate code. */
9577 if (TARGET_64BIT)
9578 {
9579 /* Set up the literal pool pointer if required. */
9580 if ((!DISP_IN_RANGE (delta)
9581 && !CONST_OK_FOR_K (delta)
9582 && !CONST_OK_FOR_Os (delta))
9583 || (!DISP_IN_RANGE (vcall_offset)
9584 && !CONST_OK_FOR_K (vcall_offset)
9585 && !CONST_OK_FOR_Os (vcall_offset)))
9586 {
9587 op[5] = gen_label_rtx ();
9588 output_asm_insn ("larl\t%4,%5", op);
9589 }
9590
9591 /* Add DELTA to this pointer. */
9592 if (delta)
9593 {
9594 if (CONST_OK_FOR_J (delta))
9595 output_asm_insn ("la\t%1,%2(%1)", op);
9596 else if (DISP_IN_RANGE (delta))
9597 output_asm_insn ("lay\t%1,%2(%1)", op);
9598 else if (CONST_OK_FOR_K (delta))
9599 output_asm_insn ("aghi\t%1,%2", op);
9600 else if (CONST_OK_FOR_Os (delta))
9601 output_asm_insn ("agfi\t%1,%2", op);
9602 else
9603 {
9604 op[6] = gen_label_rtx ();
9605 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9606 }
9607 }
9608
9609 /* Perform vcall adjustment. */
9610 if (vcall_offset)
9611 {
9612 if (DISP_IN_RANGE (vcall_offset))
9613 {
9614 output_asm_insn ("lg\t%4,0(%1)", op);
9615 output_asm_insn ("ag\t%1,%3(%4)", op);
9616 }
9617 else if (CONST_OK_FOR_K (vcall_offset))
9618 {
9619 output_asm_insn ("lghi\t%4,%3", op);
9620 output_asm_insn ("ag\t%4,0(%1)", op);
9621 output_asm_insn ("ag\t%1,0(%4)", op);
9622 }
9623 else if (CONST_OK_FOR_Os (vcall_offset))
9624 {
9625 output_asm_insn ("lgfi\t%4,%3", op);
9626 output_asm_insn ("ag\t%4,0(%1)", op);
9627 output_asm_insn ("ag\t%1,0(%4)", op);
9628 }
9629 else
9630 {
9631 op[7] = gen_label_rtx ();
9632 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9633 output_asm_insn ("ag\t%4,0(%1)", op);
9634 output_asm_insn ("ag\t%1,0(%4)", op);
9635 }
9636 }
9637
9638 /* Jump to target. */
9639 output_asm_insn ("jg\t%0", op);
9640
9641 /* Output literal pool if required. */
9642 if (op[5])
9643 {
9644 output_asm_insn (".align\t4", op);
9645 targetm.asm_out.internal_label (file, "L",
9646 CODE_LABEL_NUMBER (op[5]));
9647 }
9648 if (op[6])
9649 {
9650 targetm.asm_out.internal_label (file, "L",
9651 CODE_LABEL_NUMBER (op[6]));
9652 output_asm_insn (".long\t%2", op);
9653 }
9654 if (op[7])
9655 {
9656 targetm.asm_out.internal_label (file, "L",
9657 CODE_LABEL_NUMBER (op[7]));
9658 output_asm_insn (".long\t%3", op);
9659 }
9660 }
9661 else
9662 {
9663 /* Setup base pointer if required. */
9664 if (!vcall_offset
9665 || (!DISP_IN_RANGE (delta)
9666 && !CONST_OK_FOR_K (delta)
9667 && !CONST_OK_FOR_Os (delta))
9668 || (!DISP_IN_RANGE (delta)
9669 && !CONST_OK_FOR_K (vcall_offset)
9670 && !CONST_OK_FOR_Os (vcall_offset)))
9671 {
9672 op[5] = gen_label_rtx ();
9673 output_asm_insn ("basr\t%4,0", op);
9674 targetm.asm_out.internal_label (file, "L",
9675 CODE_LABEL_NUMBER (op[5]));
9676 }
9677
9678 /* Add DELTA to this pointer. */
9679 if (delta)
9680 {
9681 if (CONST_OK_FOR_J (delta))
9682 output_asm_insn ("la\t%1,%2(%1)", op);
9683 else if (DISP_IN_RANGE (delta))
9684 output_asm_insn ("lay\t%1,%2(%1)", op);
9685 else if (CONST_OK_FOR_K (delta))
9686 output_asm_insn ("ahi\t%1,%2", op);
9687 else if (CONST_OK_FOR_Os (delta))
9688 output_asm_insn ("afi\t%1,%2", op);
9689 else
9690 {
9691 op[6] = gen_label_rtx ();
9692 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9693 }
9694 }
9695
9696 /* Perform vcall adjustment. */
9697 if (vcall_offset)
9698 {
9699 if (CONST_OK_FOR_J (vcall_offset))
9700 {
9701 output_asm_insn ("l\t%4,0(%1)", op);
9702 output_asm_insn ("a\t%1,%3(%4)", op);
9703 }
9704 else if (DISP_IN_RANGE (vcall_offset))
9705 {
9706 output_asm_insn ("l\t%4,0(%1)", op);
9707 output_asm_insn ("ay\t%1,%3(%4)", op);
9708 }
9709 else if (CONST_OK_FOR_K (vcall_offset))
9710 {
9711 output_asm_insn ("lhi\t%4,%3", op);
9712 output_asm_insn ("a\t%4,0(%1)", op);
9713 output_asm_insn ("a\t%1,0(%4)", op);
9714 }
9715 else if (CONST_OK_FOR_Os (vcall_offset))
9716 {
9717 output_asm_insn ("iilf\t%4,%3", op);
9718 output_asm_insn ("a\t%4,0(%1)", op);
9719 output_asm_insn ("a\t%1,0(%4)", op);
9720 }
9721 else
9722 {
9723 op[7] = gen_label_rtx ();
9724 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9725 output_asm_insn ("a\t%4,0(%1)", op);
9726 output_asm_insn ("a\t%1,0(%4)", op);
9727 }
9728
9729 /* We had to clobber the base pointer register.
9730 Re-setup the base pointer (with a different base). */
9731 op[5] = gen_label_rtx ();
9732 output_asm_insn ("basr\t%4,0", op);
9733 targetm.asm_out.internal_label (file, "L",
9734 CODE_LABEL_NUMBER (op[5]));
9735 }
9736
9737 /* Jump to target. */
9738 op[8] = gen_label_rtx ();
9739
9740 if (!flag_pic)
9741 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9742 else if (!nonlocal)
9743 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9744 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9745 else if (flag_pic == 1)
9746 {
9747 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9748 output_asm_insn ("l\t%4,%0(%4)", op);
9749 }
9750 else if (flag_pic == 2)
9751 {
9752 op[9] = gen_rtx_REG (Pmode, 0);
9753 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9754 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9755 output_asm_insn ("ar\t%4,%9", op);
9756 output_asm_insn ("l\t%4,0(%4)", op);
9757 }
9758
9759 output_asm_insn ("br\t%4", op);
9760
9761 /* Output literal pool. */
9762 output_asm_insn (".align\t4", op);
9763
9764 if (nonlocal && flag_pic == 2)
9765 output_asm_insn (".long\t%0", op);
9766 if (nonlocal)
9767 {
9768 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9769 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9770 }
9771
9772 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9773 if (!flag_pic)
9774 output_asm_insn (".long\t%0", op);
9775 else
9776 output_asm_insn (".long\t%0-%5", op);
9777
9778 if (op[6])
9779 {
9780 targetm.asm_out.internal_label (file, "L",
9781 CODE_LABEL_NUMBER (op[6]));
9782 output_asm_insn (".long\t%2", op);
9783 }
9784 if (op[7])
9785 {
9786 targetm.asm_out.internal_label (file, "L",
9787 CODE_LABEL_NUMBER (op[7]));
9788 output_asm_insn (".long\t%3", op);
9789 }
9790 }
9791 final_end_function ();
9792 }
9793
9794 static bool
9795 s390_valid_pointer_mode (enum machine_mode mode)
9796 {
9797 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9798 }
9799
9800 /* Checks whether the given CALL_EXPR would use a call-saved
9801 register. This is used to decide whether sibling call
9802 optimization could be performed on the respective function
9803 call. */
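
/* For example (an illustrative sketch, not part of the testsuite), for

     extern int callee (int, int, int, int, int);
     int caller (void) { return callee (1, 2, 3, 4, 5); }

   the fifth argument is passed in %r6, which is call-saved on s390, so
   this function returns true and sibling call optimization is not
   performed for the call.  */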
9804
9805 static bool
9806 s390_call_saved_register_used (tree call_expr)
9807 {
9808 CUMULATIVE_ARGS cum_v;
9809 cumulative_args_t cum;
9810 tree parameter;
9811 enum machine_mode mode;
9812 tree type;
9813 rtx parm_rtx;
9814 int reg, i;
9815
9816 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
9817 cum = pack_cumulative_args (&cum_v);
9818
9819 for (i = 0; i < call_expr_nargs (call_expr); i++)
9820 {
9821 parameter = CALL_EXPR_ARG (call_expr, i);
9822 gcc_assert (parameter);
9823
9824 /* For an undeclared variable passed as parameter we will get
9825 an ERROR_MARK node here. */
9826 if (TREE_CODE (parameter) == ERROR_MARK)
9827 return true;
9828
9829 type = TREE_TYPE (parameter);
9830 gcc_assert (type);
9831
9832 mode = TYPE_MODE (type);
9833 gcc_assert (mode);
9834
9835 if (pass_by_reference (&cum_v, mode, type, true))
9836 {
9837 mode = Pmode;
9838 type = build_pointer_type (type);
9839 }
9840
9841 parm_rtx = s390_function_arg (cum, mode, type, 0);
9842
9843 s390_function_arg_advance (cum, mode, type, 0);
9844
9845 if (!parm_rtx)
9846 continue;
9847
9848 if (REG_P (parm_rtx))
9849 {
9850 for (reg = 0;
9851 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9852 reg++)
9853 if (!call_used_regs[reg + REGNO (parm_rtx)])
9854 return true;
9855 }
9856
9857 if (GET_CODE (parm_rtx) == PARALLEL)
9858 {
9859 int i;
9860
9861 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9862 {
9863 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9864
9865 gcc_assert (REG_P (r));
9866
9867 for (reg = 0;
9868 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9869 reg++)
9870 if (!call_used_regs[reg + REGNO (r)])
9871 return true;
9872 }
9873 }
9874
9875 }
9876 return false;
9877 }
9878
9879 /* Return true if the given call expression can be
9880 turned into a sibling call.
9881 DECL holds the declaration of the function to be called whereas
9882 EXP is the call expression itself. */
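
/* As an illustrative sketch only: a call whose arguments all fit into
   the call-clobbered argument registers, e.g.

     extern int callee2 (int, int);
     int caller2 (void) { return callee2 (1, 2); }

   passes its arguments in %r2 and %r3 and, barring the TPF and 31 bit
   PIC restrictions checked below, may be emitted as a sibling call.  */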
9883
9884 static bool
9885 s390_function_ok_for_sibcall (tree decl, tree exp)
9886 {
9887 /* The TPF epilogue uses register 1. */
9888 if (TARGET_TPF_PROFILING)
9889 return false;
9890
9891 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
9892 which would have to be restored before the sibcall. */
9893 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9894 return false;
9895
9896 /* Register 6 on s390 is available as an argument register but unfortunately
9897 call-saved. This makes functions needing this register for arguments
9898 not suitable for sibcalls. */
9899 return !s390_call_saved_register_used (exp);
9900 }
9901
9902 /* Return the fixed registers used for condition codes. */
9903
9904 static bool
9905 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9906 {
9907 *p1 = CC_REGNUM;
9908 *p2 = INVALID_REGNUM;
9909
9910 return true;
9911 }
9912
9913 /* This function is used by the call expanders of the machine description.
9914 It emits the call insn itself together with the necessary operations
9915 to adjust the target address and returns the emitted insn.
9916 ADDR_LOCATION is the target address rtx
9917 TLS_CALL the location of the thread-local symbol
9918 RESULT_REG the register where the result of the call should be stored
9919 RETADDR_REG the register where the return address should be stored
9920 If this parameter is NULL_RTX the call is considered
9921 to be a sibling call. */
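
/* Illustrative sketch of the emitted pattern (an assumption about its
   shape, not dumped RTL): for a normal call with a result this is

     (parallel [(set (reg RESULT_REG) (call (mem:QI ADDR) (const_int 0)))
                (clobber (reg RETADDR_REG))])

   plus a (use TLS_CALL) element for TLS calls, while a sibling call
   (RETADDR_REG == NULL_RTX) omits the clobber.  */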
9922
9923 rtx
9924 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9925 rtx retaddr_reg)
9926 {
9927 bool plt_call = false;
9928 rtx insn;
9929 rtx call;
9930 rtx clobber;
9931 rtvec vec;
9932
9933 /* Direct function calls need special treatment. */
9934 if (GET_CODE (addr_location) == SYMBOL_REF)
9935 {
9936 /* When calling a global routine in PIC mode, we must
9937 replace the symbol itself with the PLT stub. */
9938 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9939 {
9940 if (retaddr_reg != NULL_RTX)
9941 {
9942 addr_location = gen_rtx_UNSPEC (Pmode,
9943 gen_rtvec (1, addr_location),
9944 UNSPEC_PLT);
9945 addr_location = gen_rtx_CONST (Pmode, addr_location);
9946 plt_call = true;
9947 }
9948 else
9949 /* For -fpic code the PLT entries might use r12 which is
9950 call-saved. Therefore we cannot do a sibcall when
9951 calling directly using a symbol ref. When reaching
9952 this point we decided (in s390_function_ok_for_sibcall)
9953 to do a sibcall for a function pointer but one of the
9954 optimizers was able to get rid of the function pointer
9955 by propagating the symbol ref into the call. This
9956 optimization is illegal for S/390 so we turn the direct
9957 call into an indirect call again. */
9958 addr_location = force_reg (Pmode, addr_location);
9959 }
9960
9961 /* Unless we can use the bras(l) insn, force the
9962 routine address into a register. */
9963 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9964 {
9965 if (flag_pic)
9966 addr_location = legitimize_pic_address (addr_location, 0);
9967 else
9968 addr_location = force_reg (Pmode, addr_location);
9969 }
9970 }
9971
9972 /* If it is already an indirect call or the code above moved the
9973 SYMBOL_REF to somewhere else make sure the address can be found in
9974 register 1. */
9975 if (retaddr_reg == NULL_RTX
9976 && GET_CODE (addr_location) != SYMBOL_REF
9977 && !plt_call)
9978 {
9979 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9980 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9981 }
9982
9983 addr_location = gen_rtx_MEM (QImode, addr_location);
9984 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9985
9986 if (result_reg != NULL_RTX)
9987 call = gen_rtx_SET (VOIDmode, result_reg, call);
9988
9989 if (retaddr_reg != NULL_RTX)
9990 {
9991 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9992
9993 if (tls_call != NULL_RTX)
9994 vec = gen_rtvec (3, call, clobber,
9995 gen_rtx_USE (VOIDmode, tls_call));
9996 else
9997 vec = gen_rtvec (2, call, clobber);
9998
9999 call = gen_rtx_PARALLEL (VOIDmode, vec);
10000 }
10001
10002 insn = emit_call_insn (call);
10003
10004 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10005 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10006 {
10007 /* s390_function_ok_for_sibcall should
10008 have denied sibcalls in this case. */
10009 gcc_assert (retaddr_reg != NULL_RTX);
10010 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10011 }
10012 return insn;
10013 }
10014
10015 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10016
10017 static void
10018 s390_conditional_register_usage (void)
10019 {
10020 int i;
10021
10022 if (flag_pic)
10023 {
10024 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10025 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10026 }
10027 if (TARGET_CPU_ZARCH)
10028 {
10029 fixed_regs[BASE_REGNUM] = 0;
10030 call_used_regs[BASE_REGNUM] = 0;
10031 fixed_regs[RETURN_REGNUM] = 0;
10032 call_used_regs[RETURN_REGNUM] = 0;
10033 }
10034 if (TARGET_64BIT)
10035 {
10036 for (i = 24; i < 32; i++)
10037 call_used_regs[i] = call_really_used_regs[i] = 0;
10038 }
10039 else
10040 {
10041 for (i = 18; i < 20; i++)
10042 call_used_regs[i] = call_really_used_regs[i] = 0;
10043 }
10044
10045 if (TARGET_SOFT_FLOAT)
10046 {
10047 for (i = 16; i < 32; i++)
10048 call_used_regs[i] = fixed_regs[i] = 1;
10049 }
10050 }
10051
10052 /* Function corresponding to the eh_return expander. */
10053
10054 static GTY(()) rtx s390_tpf_eh_return_symbol;
10055 void
10056 s390_emit_tpf_eh_return (rtx target)
10057 {
10058 rtx insn, reg;
10059
10060 if (!s390_tpf_eh_return_symbol)
10061 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10062
10063 reg = gen_rtx_REG (Pmode, 2);
10064
10065 emit_move_insn (reg, target);
10066 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10067 gen_rtx_REG (Pmode, RETURN_REGNUM));
10068 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10069
10070 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10071 }
10072
10073 /* Rework the prologue/epilogue to avoid saving/restoring
10074 registers unnecessarily. */
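
/* For instance (an illustrative sketch), a prologue insn matching
   "stmg %r6,%r15,48(%r15)" is replaced by "stmg %r14,%r15,112(%r15)"
   when r14 and r15 turn out to be the only registers that actually
   need saving; the new offset is the old one plus
   (first_save_gpr - first) * UNITS_PER_LONG = 48 + 8 * 8 = 112.  */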
10075
10076 static void
10077 s390_optimize_prologue (void)
10078 {
10079 rtx insn, new_insn, next_insn;
10080
10081 /* Do a final recompute of the frame-related data. */
10082
10083 s390_update_frame_layout ();
10084
10085 /* If all special registers are in fact used, there's nothing we
10086 can do, so no point in walking the insn list. */
10087
10088 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10089 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10090 && (TARGET_CPU_ZARCH
10091 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10092 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10093 return;
10094
10095 /* Search for prologue/epilogue insns and replace them. */
10096
10097 for (insn = get_insns (); insn; insn = next_insn)
10098 {
10099 int first, last, off;
10100 rtx set, base, offset;
10101
10102 next_insn = NEXT_INSN (insn);
10103
10104 if (GET_CODE (insn) != INSN)
10105 continue;
10106
10107 if (GET_CODE (PATTERN (insn)) == PARALLEL
10108 && store_multiple_operation (PATTERN (insn), VOIDmode))
10109 {
10110 set = XVECEXP (PATTERN (insn), 0, 0);
10111 first = REGNO (SET_SRC (set));
10112 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10113 offset = const0_rtx;
10114 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10115 off = INTVAL (offset);
10116
10117 if (GET_CODE (base) != REG || off < 0)
10118 continue;
10119 if (cfun_frame_layout.first_save_gpr != -1
10120 && (cfun_frame_layout.first_save_gpr < first
10121 || cfun_frame_layout.last_save_gpr > last))
10122 continue;
10123 if (REGNO (base) != STACK_POINTER_REGNUM
10124 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10125 continue;
10126 if (first > BASE_REGNUM || last < BASE_REGNUM)
10127 continue;
10128
10129 if (cfun_frame_layout.first_save_gpr != -1)
10130 {
10131 new_insn = save_gprs (base,
10132 off + (cfun_frame_layout.first_save_gpr
10133 - first) * UNITS_PER_LONG,
10134 cfun_frame_layout.first_save_gpr,
10135 cfun_frame_layout.last_save_gpr);
10136 new_insn = emit_insn_before (new_insn, insn);
10137 INSN_ADDRESSES_NEW (new_insn, -1);
10138 }
10139
10140 remove_insn (insn);
10141 continue;
10142 }
10143
10144 if (cfun_frame_layout.first_save_gpr == -1
10145 && GET_CODE (PATTERN (insn)) == SET
10146 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
10147 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
10148 || (!TARGET_CPU_ZARCH
10149 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
10150 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
10151 {
10152 set = PATTERN (insn);
10153 first = REGNO (SET_SRC (set));
10154 offset = const0_rtx;
10155 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10156 off = INTVAL (offset);
10157
10158 if (GET_CODE (base) != REG || off < 0)
10159 continue;
10160 if (REGNO (base) != STACK_POINTER_REGNUM
10161 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10162 continue;
10163
10164 remove_insn (insn);
10165 continue;
10166 }
10167
10168 if (GET_CODE (PATTERN (insn)) == PARALLEL
10169 && load_multiple_operation (PATTERN (insn), VOIDmode))
10170 {
10171 set = XVECEXP (PATTERN (insn), 0, 0);
10172 first = REGNO (SET_DEST (set));
10173 last = first + XVECLEN (PATTERN (insn), 0) - 1;
10174 offset = const0_rtx;
10175 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10176 off = INTVAL (offset);
10177
10178 if (GET_CODE (base) != REG || off < 0)
10179 continue;
10180 if (cfun_frame_layout.first_restore_gpr != -1
10181 && (cfun_frame_layout.first_restore_gpr < first
10182 || cfun_frame_layout.last_restore_gpr > last))
10183 continue;
10184 if (REGNO (base) != STACK_POINTER_REGNUM
10185 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10186 continue;
10187 if (first > BASE_REGNUM || last < BASE_REGNUM)
10188 continue;
10189
10190 if (cfun_frame_layout.first_restore_gpr != -1)
10191 {
10192 new_insn = restore_gprs (base,
10193 off + (cfun_frame_layout.first_restore_gpr
10194 - first) * UNITS_PER_LONG,
10195 cfun_frame_layout.first_restore_gpr,
10196 cfun_frame_layout.last_restore_gpr);
10197 new_insn = emit_insn_before (new_insn, insn);
10198 INSN_ADDRESSES_NEW (new_insn, -1);
10199 }
10200
10201 remove_insn (insn);
10202 continue;
10203 }
10204
10205 if (cfun_frame_layout.first_restore_gpr == -1
10206 && GET_CODE (PATTERN (insn)) == SET
10207 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10208 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10209 || (!TARGET_CPU_ZARCH
10210 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10211 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10212 {
10213 set = PATTERN (insn);
10214 first = REGNO (SET_DEST (set));
10215 offset = const0_rtx;
10216 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10217 off = INTVAL (offset);
10218
10219 if (GET_CODE (base) != REG || off < 0)
10220 continue;
10221 if (REGNO (base) != STACK_POINTER_REGNUM
10222 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10223 continue;
10224
10225 remove_insn (insn);
10226 continue;
10227 }
10228 }
10229 }
10230
10231 /* On z10 and later the dynamic branch prediction must see the
10232 backward jump within a certain window. If not, it falls back to
10233 the static prediction. This function rearranges the loop backward
10234 branch in a way which makes the static prediction always correct.
10235 The function returns true if it added an instruction. */
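
/* Illustrative sketch of the rewrite (assumed mnemonics, not dumped
   output):

     loop:  ...                        loop:  ...
            jne   loop          ==>           je    skip
                                              j     loop
                                       skip:

   The remaining backward branch is unconditional and thus always
   predicted correctly.  */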
10236 static bool
10237 s390_fix_long_loop_prediction (rtx insn)
10238 {
10239 rtx set = single_set (insn);
10240 rtx code_label, label_ref, new_label;
10241 rtx uncond_jump;
10242 rtx cur_insn;
10243 rtx tmp;
10244 int distance;
10245
10246 /* This will exclude branch on count and branch on index patterns
10247 since these are correctly statically predicted. */
10248 if (!set
10249 || SET_DEST (set) != pc_rtx
10250 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10251 return false;
10252
10253 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10254 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10255
10256 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10257
10258 code_label = XEXP (label_ref, 0);
10259
10260 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10261 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10262 || (INSN_ADDRESSES (INSN_UID (insn))
10263 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10264 return false;
10265
10266 for (distance = 0, cur_insn = PREV_INSN (insn);
10267 distance < PREDICT_DISTANCE - 6;
10268 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10269 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10270 return false;
10271
10272 new_label = gen_label_rtx ();
10273 uncond_jump = emit_jump_insn_after (
10274 gen_rtx_SET (VOIDmode, pc_rtx,
10275 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10276 insn);
10277 emit_label_after (new_label, uncond_jump);
10278
10279 tmp = XEXP (SET_SRC (set), 1);
10280 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10281 XEXP (SET_SRC (set), 2) = tmp;
10282 INSN_CODE (insn) = -1;
10283
10284 XEXP (label_ref, 0) = new_label;
10285 JUMP_LABEL (insn) = new_label;
10286 JUMP_LABEL (uncond_jump) = code_label;
10287
10288 return true;
10289 }
10290
10291 /* Returns 1 if INSN reads the value of REG for purposes not related
10292 to addressing of memory, and 0 otherwise. */
10293 static int
10294 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10295 {
10296 return reg_referenced_p (reg, PATTERN (insn))
10297 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10298 }
10299
10300 /* Starting from INSN find_cond_jump looks downwards in the insn
10301 stream for a single jump insn which is the last user of the
10302 condition code set in INSN. */
10303 static rtx
10304 find_cond_jump (rtx insn)
10305 {
10306 for (; insn; insn = NEXT_INSN (insn))
10307 {
10308 rtx ite, cc;
10309
10310 if (LABEL_P (insn))
10311 break;
10312
10313 if (!JUMP_P (insn))
10314 {
10315 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10316 break;
10317 continue;
10318 }
10319
10320 /* This will be triggered by a return. */
10321 if (GET_CODE (PATTERN (insn)) != SET)
10322 break;
10323
10324 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10325 ite = SET_SRC (PATTERN (insn));
10326
10327 if (GET_CODE (ite) != IF_THEN_ELSE)
10328 break;
10329
10330 cc = XEXP (XEXP (ite, 0), 0);
10331 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10332 break;
10333
10334 if (find_reg_note (insn, REG_DEAD, cc))
10335 return insn;
10336 break;
10337 }
10338
10339 return NULL_RTX;
10340 }
10341
10342 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10343 the semantics does not change. If NULL_RTX is passed as COND the
10344 function tries to find the conditional jump starting with INSN. */
10345 static void
10346 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10347 {
10348 rtx tmp = *op0;
10349
10350 if (cond == NULL_RTX)
10351 {
10352 rtx jump = find_cond_jump (NEXT_INSN (insn));
10353 jump = jump ? single_set (jump) : NULL_RTX;
10354
10355 if (jump == NULL_RTX)
10356 return;
10357
10358 cond = XEXP (XEXP (jump, 1), 0);
10359 }
10360
10361 *op0 = *op1;
10362 *op1 = tmp;
10363 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10364 }
10365
10366 /* On z10, instructions of the compare-and-branch family have the
10367 property of accessing the register occurring as second operand with
10368 its bits complemented. If such a compare is grouped with a second
10369 instruction that accesses the same register non-complemented, and
10370 if that register's value is delivered via a bypass, then the
10371 pipeline recycles, thereby causing significant performance decline.
10372 This function locates such situations and exchanges the two
10373 operands of the compare. The function returns true whenever it
10374 added an insn. */
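
/* Illustrative sketch (an assumed example, not a pipeline trace): in a
   group such as

     lr    %r3,%r5
     crj   %r2,%r3,...,label

   the crj accesses its second operand %r3 with complemented bits while
   the preceding lr delivers %r3 via a bypass, so the compare operands
   are swapped (and the condition code inverted); if swapping is not
   possible, a nop is emitted after the compare instead.  */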
10375 static bool
10376 s390_z10_optimize_cmp (rtx insn)
10377 {
10378 rtx prev_insn, next_insn;
10379 bool insn_added_p = false;
10380 rtx cond, *op0, *op1;
10381
10382 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10383 {
10384 /* Handle compare and branch and branch on count
10385 instructions. */
10386 rtx pattern = single_set (insn);
10387
10388 if (!pattern
10389 || SET_DEST (pattern) != pc_rtx
10390 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10391 return false;
10392
10393 cond = XEXP (SET_SRC (pattern), 0);
10394 op0 = &XEXP (cond, 0);
10395 op1 = &XEXP (cond, 1);
10396 }
10397 else if (GET_CODE (PATTERN (insn)) == SET)
10398 {
10399 rtx src, dest;
10400
10401 /* Handle normal compare instructions. */
10402 src = SET_SRC (PATTERN (insn));
10403 dest = SET_DEST (PATTERN (insn));
10404
10405 if (!REG_P (dest)
10406 || !CC_REGNO_P (REGNO (dest))
10407 || GET_CODE (src) != COMPARE)
10408 return false;
10409
10410 /* s390_swap_cmp will try to find the conditional
10411 jump when passing NULL_RTX as condition. */
10412 cond = NULL_RTX;
10413 op0 = &XEXP (src, 0);
10414 op1 = &XEXP (src, 1);
10415 }
10416 else
10417 return false;
10418
10419 if (!REG_P (*op0) || !REG_P (*op1))
10420 return false;
10421
10422 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10423 return false;
10424
10425 /* Swap the COMPARE arguments and its mask if there is a
10426 conflicting access in the previous insn. */
10427 prev_insn = prev_active_insn (insn);
10428 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10429 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10430 s390_swap_cmp (cond, op0, op1, insn);
10431
10432 /* Check if there is a conflict with the next insn. If there
10433 was no conflict with the previous insn, then swap the
10434 COMPARE arguments and its mask. If we already swapped
10435 the operands, or if swapping them would cause a conflict
10436 with the previous insn, issue a NOP after the COMPARE in
10437 order to separate the two instructions. */
10438 next_insn = next_active_insn (insn);
10439 if (next_insn != NULL_RTX && INSN_P (next_insn)
10440 && s390_non_addr_reg_read_p (*op1, next_insn))
10441 {
10442 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10443 && s390_non_addr_reg_read_p (*op0, prev_insn))
10444 {
10445 if (REGNO (*op1) == 0)
10446 emit_insn_after (gen_nop1 (), insn);
10447 else
10448 emit_insn_after (gen_nop (), insn);
10449 insn_added_p = true;
10450 }
10451 else
10452 s390_swap_cmp (cond, op0, op1, insn);
10453 }
10454 return insn_added_p;
10455 }
10456
10457 /* Perform machine-dependent processing. */
10458
10459 static void
10460 s390_reorg (void)
10461 {
10462 bool pool_overflow = false;
10463
10464 /* Make sure all splits have been performed; splits after
10465 machine_dependent_reorg might confuse insn length counts. */
10466 split_all_insns_noflow ();
10467
10468 /* Install the main literal pool and the associated base
10469 register load insns.
10470
10471 In addition, there are two problematic situations we need
10472 to correct:
10473
10474 - the literal pool might be > 4096 bytes in size, so that
10475 some of its elements cannot be directly accessed
10476
10477 - a branch target might be > 64K away from the branch, so that
10478 it is not possible to use a PC-relative instruction.
10479
10480 To fix those, we split the single literal pool into multiple
10481 pool chunks, reloading the pool base register at various
10482 points throughout the function to ensure it always points to
10483 the pool chunk the following code expects, and / or replace
10484 PC-relative branches by absolute branches.
10485
10486 However, the two problems are interdependent: splitting the
10487 literal pool can move a branch further away from its target,
10488 causing the 64K limit to overflow, and on the other hand,
10489 replacing a PC-relative branch by an absolute branch means
10490 we need to put the branch target address into the literal
10491 pool, possibly causing it to overflow.
10492
10493 So, we loop trying to fix up both problems until we manage
10494 to satisfy both conditions at the same time. Note that the
10495 loop is guaranteed to terminate as every pass of the loop
10496 strictly decreases the total number of PC-relative branches
10497 in the function. (This is not completely true as there
10498 might be branch-over-pool insns introduced by chunkify_start.
10499 Those never need to be split however.) */
10500
10501 for (;;)
10502 {
10503 struct constant_pool *pool = NULL;
10504
10505 /* Collect the literal pool. */
10506 if (!pool_overflow)
10507 {
10508 pool = s390_mainpool_start ();
10509 if (!pool)
10510 pool_overflow = true;
10511 }
10512
10513 /* If literal pool overflowed, start to chunkify it. */
10514 if (pool_overflow)
10515 pool = s390_chunkify_start ();
10516
10517 /* Split out-of-range branches. If this has created new
10518 literal pool entries, cancel current chunk list and
10519 recompute it. zSeries machines have large branch
10520 instructions, so we never need to split a branch. */
10521 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10522 {
10523 if (pool_overflow)
10524 s390_chunkify_cancel (pool);
10525 else
10526 s390_mainpool_cancel (pool);
10527
10528 continue;
10529 }
10530
10531 /* If we made it up to here, both conditions are satisfied.
10532 Finish up literal pool related changes. */
10533 if (pool_overflow)
10534 s390_chunkify_finish (pool);
10535 else
10536 s390_mainpool_finish (pool);
10537
10538 /* We're done splitting branches. */
10539 cfun->machine->split_branches_pending_p = false;
10540 break;
10541 }
10542
10543 /* Generate out-of-pool execute target insns. */
10544 if (TARGET_CPU_ZARCH)
10545 {
10546 rtx insn, label, target;
10547
10548 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10549 {
10550 label = s390_execute_label (insn);
10551 if (!label)
10552 continue;
10553
10554 gcc_assert (label != const0_rtx);
10555
10556 target = emit_label (XEXP (label, 0));
10557 INSN_ADDRESSES_NEW (target, -1);
10558
10559 target = emit_insn (s390_execute_target (insn));
10560 INSN_ADDRESSES_NEW (target, -1);
10561 }
10562 }
10563
10564 /* Try to optimize prologue and epilogue further. */
10565 s390_optimize_prologue ();
10566
10567 /* Walk over the insns and do some >=z10 specific changes. */
10568 if (s390_tune == PROCESSOR_2097_Z10
10569 || s390_tune == PROCESSOR_2817_Z196)
10570 {
10571 rtx insn;
10572 bool insn_added_p = false;
10573
10574 /* The insn lengths and addresses have to be up to date for the
10575 following manipulations. */
10576 shorten_branches (get_insns ());
10577
10578 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10579 {
10580 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10581 continue;
10582
10583 if (JUMP_P (insn))
10584 insn_added_p |= s390_fix_long_loop_prediction (insn);
10585
10586 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10587 || GET_CODE (PATTERN (insn)) == SET)
10588 && s390_tune == PROCESSOR_2097_Z10)
10589 insn_added_p |= s390_z10_optimize_cmp (insn);
10590 }
10591
10592 /* Adjust branches if we added new instructions. */
10593 if (insn_added_p)
10594 shorten_branches (get_insns ());
10595 }
10596 }
10597
10598 /* Return true if INSN is a fp load insn writing register REGNO. */
10599 static inline bool
10600 s390_fpload_toreg (rtx insn, unsigned int regno)
10601 {
10602 rtx set;
10603 enum attr_type flag = s390_safe_attr_type (insn);
10604
10605 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10606 return false;
10607
10608 set = single_set (insn);
10609
10610 if (set == NULL_RTX)
10611 return false;
10612
10613 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10614 return false;
10615
10616 if (REGNO (SET_DEST (set)) != regno)
10617 return false;
10618
10619 return true;
10620 }
10621
10622 /* This value describes the distance to be avoided between an
10623 arithmetic fp instruction and an fp load writing the same register.
10624 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10625 fine, but the exact value has to be avoided. Otherwise the FP
10626 pipeline will throw an exception causing a major penalty. */
10627 #define Z10_EARLYLOAD_DISTANCE 7
10628
10629 /* Rearrange the ready list in order to avoid the situation described
10630 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10631 moved to the very end of the ready list. */
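
/* For instance (a sketch under the assumptions above): if the insn
   issued Z10_EARLYLOAD_DISTANCE - 1 slots earlier was an FP arithmetic
   insn such as "adbr %f0,%f2", and the ready list still contains a load
   "ld %f0,...", that load is moved to ready[0] so that it is issued as
   late as possible.  */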
10632 static void
10633 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10634 {
10635 unsigned int regno;
10636 int nready = *nready_p;
10637 rtx tmp;
10638 int i;
10639 rtx insn;
10640 rtx set;
10641 enum attr_type flag;
10642 int distance;
10643
10644 /* Skip DISTANCE - 1 active insns. */
10645 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10646 distance > 0 && insn != NULL_RTX;
10647 distance--, insn = prev_active_insn (insn))
10648 if (CALL_P (insn) || JUMP_P (insn))
10649 return;
10650
10651 if (insn == NULL_RTX)
10652 return;
10653
10654 set = single_set (insn);
10655
10656 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10657 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10658 return;
10659
10660 flag = s390_safe_attr_type (insn);
10661
10662 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10663 return;
10664
10665 regno = REGNO (SET_DEST (set));
10666 i = nready - 1;
10667
10668 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10669 i--;
10670
10671 if (!i)
10672 return;
10673
10674 tmp = ready[i];
10675 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10676 ready[0] = tmp;
10677 }
10678
10679 /* This function is called via hook TARGET_SCHED_REORDER before
10680 issuing one insn from list READY which contains *NREADYP entries.
10681 For target z10 it reorders load instructions to avoid early load
10682 conflicts in the floating point pipeline. */
10683 static int
10684 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10685 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10686 {
10687 if (s390_tune == PROCESSOR_2097_Z10)
10688 if (reload_completed && *nreadyp > 1)
10689 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10690
10691 return s390_issue_rate ();
10692 }
10693
10694 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10695 the scheduler has issued INSN. It stores the last issued insn into
10696 last_scheduled_insn in order to make it available for
10697 s390_sched_reorder. */
10698 static int
10699 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10700 int verbose ATTRIBUTE_UNUSED,
10701 rtx insn, int more)
10702 {
10703 last_scheduled_insn = insn;
10704
10705 if (GET_CODE (PATTERN (insn)) != USE
10706 && GET_CODE (PATTERN (insn)) != CLOBBER)
10707 return more - 1;
10708 else
10709 return more;
10710 }
10711
10712 static void
10713 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10714 int verbose ATTRIBUTE_UNUSED,
10715 int max_ready ATTRIBUTE_UNUSED)
10716 {
10717 last_scheduled_insn = NULL_RTX;
10718 }
10719
10720 /* This function checks the whole of insn X for memory references. The
10721 function always returns zero because the framework it is called
10722 from would stop recursively analyzing the insn upon a return value
10723 other than zero. The real result of this function is updating
10724 counter variable MEM_COUNT. */
10725 static int
10726 check_dpu (rtx *x, unsigned *mem_count)
10727 {
10728 if (*x != NULL_RTX && MEM_P (*x))
10729 (*mem_count)++;
10730 return 0;
10731 }
10732
10733 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10734 how many times struct loop *loop should be unrolled when tuning for cpus
10735 with a built-in stride prefetcher.
10736 The loop is analyzed for memory accesses by calling check_dpu for
10737 each rtx of the loop. Depending on the loop_depth and the number of
10738 memory accesses a new number <= nunroll is returned to improve the
10739 behaviour of the hardware prefetch unit. */
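
/* Worked example (illustrative numbers only): with nunroll == 8 and a
   depth 1 loop containing 7 memory references this returns
   MIN (8, 28 / 7) == 4, i.e. the unroll factor is cut in half.  */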
10740 static unsigned
10741 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10742 {
10743 basic_block *bbs;
10744 rtx insn;
10745 unsigned i;
10746 unsigned mem_count = 0;
10747
10748 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10749 return nunroll;
10750
10751 /* Count the number of memory references within the loop body. */
10752 bbs = get_loop_body (loop);
10753 for (i = 0; i < loop->num_nodes; i++)
10754 {
10755 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10756 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10757 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10758 }
10759 free (bbs);
10760
10761 /* Prevent division by zero; no adjustment of nunroll is needed in this case. */
10762 if (mem_count == 0)
10763 return nunroll;
10764
10765 switch (loop_depth(loop))
10766 {
10767 case 1:
10768 return MIN (nunroll, 28 / mem_count);
10769 case 2:
10770 return MIN (nunroll, 22 / mem_count);
10771 default:
10772 return MIN (nunroll, 16 / mem_count);
10773 }
10774 }
10775
10776 /* Initialize GCC target structure. */
10777
10778 #undef TARGET_ASM_ALIGNED_HI_OP
10779 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10780 #undef TARGET_ASM_ALIGNED_DI_OP
10781 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10782 #undef TARGET_ASM_INTEGER
10783 #define TARGET_ASM_INTEGER s390_assemble_integer
10784
10785 #undef TARGET_ASM_OPEN_PAREN
10786 #define TARGET_ASM_OPEN_PAREN ""
10787
10788 #undef TARGET_ASM_CLOSE_PAREN
10789 #define TARGET_ASM_CLOSE_PAREN ""
10790
10791 #undef TARGET_OPTION_OVERRIDE
10792 #define TARGET_OPTION_OVERRIDE s390_option_override
10793
10794 #undef TARGET_ENCODE_SECTION_INFO
10795 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10796
10797 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10798 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10799
10800 #ifdef HAVE_AS_TLS
10801 #undef TARGET_HAVE_TLS
10802 #define TARGET_HAVE_TLS true
10803 #endif
10804 #undef TARGET_CANNOT_FORCE_CONST_MEM
10805 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10806
10807 #undef TARGET_DELEGITIMIZE_ADDRESS
10808 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10809
10810 #undef TARGET_LEGITIMIZE_ADDRESS
10811 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10812
10813 #undef TARGET_RETURN_IN_MEMORY
10814 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10815
10816 #undef TARGET_INIT_BUILTINS
10817 #define TARGET_INIT_BUILTINS s390_init_builtins
10818 #undef TARGET_EXPAND_BUILTIN
10819 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10820
10821 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10822 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10823
10824 #undef TARGET_ASM_OUTPUT_MI_THUNK
10825 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10826 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10827 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10828
10829 #undef TARGET_SCHED_ADJUST_PRIORITY
10830 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10831 #undef TARGET_SCHED_ISSUE_RATE
10832 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10833 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10834 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10835
10836 #undef TARGET_SCHED_VARIABLE_ISSUE
10837 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10838 #undef TARGET_SCHED_REORDER
10839 #define TARGET_SCHED_REORDER s390_sched_reorder
10840 #undef TARGET_SCHED_INIT
10841 #define TARGET_SCHED_INIT s390_sched_init
10842
10843 #undef TARGET_CANNOT_COPY_INSN_P
10844 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10845 #undef TARGET_RTX_COSTS
10846 #define TARGET_RTX_COSTS s390_rtx_costs
10847 #undef TARGET_ADDRESS_COST
10848 #define TARGET_ADDRESS_COST s390_address_cost
10849 #undef TARGET_REGISTER_MOVE_COST
10850 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10851 #undef TARGET_MEMORY_MOVE_COST
10852 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10853
10854 #undef TARGET_MACHINE_DEPENDENT_REORG
10855 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10856
10857 #undef TARGET_VALID_POINTER_MODE
10858 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10859
10860 #undef TARGET_BUILD_BUILTIN_VA_LIST
10861 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10862 #undef TARGET_EXPAND_BUILTIN_VA_START
10863 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10864 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10865 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10866
10867 #undef TARGET_PROMOTE_FUNCTION_MODE
10868 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10869 #undef TARGET_PASS_BY_REFERENCE
10870 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10871
10872 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10873 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10874 #undef TARGET_FUNCTION_ARG
10875 #define TARGET_FUNCTION_ARG s390_function_arg
10876 #undef TARGET_FUNCTION_ARG_ADVANCE
10877 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10878 #undef TARGET_FUNCTION_VALUE
10879 #define TARGET_FUNCTION_VALUE s390_function_value
10880 #undef TARGET_LIBCALL_VALUE
10881 #define TARGET_LIBCALL_VALUE s390_libcall_value
10882
10883 #undef TARGET_FIXED_CONDITION_CODE_REGS
10884 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10885
10886 #undef TARGET_CC_MODES_COMPATIBLE
10887 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10888
10889 #undef TARGET_INVALID_WITHIN_DOLOOP
10890 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10891
10892 #ifdef HAVE_AS_TLS
10893 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10894 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10895 #endif
10896
10897 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10898 #undef TARGET_MANGLE_TYPE
10899 #define TARGET_MANGLE_TYPE s390_mangle_type
10900 #endif
10901
10902 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10903 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10904
10905 #undef TARGET_PREFERRED_RELOAD_CLASS
10906 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10907
10908 #undef TARGET_SECONDARY_RELOAD
10909 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10910
10911 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10912 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10913
10914 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10915 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10916
10917 #undef TARGET_LEGITIMATE_ADDRESS_P
10918 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10919
10920 #undef TARGET_LEGITIMATE_CONSTANT_P
10921 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10922
10923 #undef TARGET_CAN_ELIMINATE
10924 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10925
10926 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10927 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10928
10929 #undef TARGET_LOOP_UNROLL_ADJUST
10930 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10931
10932 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10933 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10934 #undef TARGET_TRAMPOLINE_INIT
10935 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10936
10937 #undef TARGET_UNWIND_WORD_MODE
10938 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10939
10940 struct gcc_target targetm = TARGET_INITIALIZER;
10941
10942 #include "gt-s390.h"