gcc/config/s390/s390.c
1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
5 Ulrich Weigand (uweigand@de.ibm.com) and
6 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "tm_p.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "reload.h"
43 #include "diagnostic-core.h"
44 #include "basic-block.h"
45 #include "integrate.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "debug.h"
50 #include "langhooks.h"
51 #include "optabs.h"
52 #include "gimple.h"
53 #include "df.h"
54 #include "params.h"
55 #include "cfgloop.h"
56 #include "opts.h"
57
58 /* Define the specific costs for a given cpu. */
59
60 struct processor_costs
61 {
62 /* multiplication */
63 const int m; /* cost of an M instruction. */
64 const int mghi; /* cost of an MGHI instruction. */
65 const int mh; /* cost of an MH instruction. */
66 const int mhi; /* cost of an MHI instruction. */
67 const int ml; /* cost of an ML instruction. */
68 const int mr; /* cost of an MR instruction. */
69 const int ms; /* cost of an MS instruction. */
70 const int msg; /* cost of an MSG instruction. */
71 const int msgf; /* cost of an MSGF instruction. */
72 const int msgfr; /* cost of an MSGFR instruction. */
73 const int msgr; /* cost of an MSGR instruction. */
74 const int msr; /* cost of an MSR instruction. */
75 const int mult_df; /* cost of multiplication in DFmode. */
76 const int mxbr; /* cost of multiplication in TFmode. */
77 /* square root */
78 const int sqxbr; /* cost of square root in TFmode. */
79 const int sqdbr; /* cost of square root in DFmode. */
80 const int sqebr; /* cost of square root in SFmode. */
81 /* multiply and add */
82 const int madbr; /* cost of multiply and add in DFmode. */
83 const int maebr; /* cost of multiply and add in SFmode. */
84 /* division */
85 const int dxbr; /* cost of division in TFmode. */
86 const int ddbr; /* cost of division in DFmode. */
87 const int debr; /* cost of division in SFmode. */
88 const int dlgr; /* cost of a DLGR instruction. */
89 const int dlr; /* cost of a DLR instruction. */
90 const int dr; /* cost of a DR instruction. */
91 const int dsgfr; /* cost of a DSGFR instruction. */
92 const int dsgr; /* cost of a DSGR instruction. */
93 };
94
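/* Cost table in effect for the current compilation; set in
s390_option_override based on s390_tune. */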
95 const struct processor_costs *s390_cost;
96
97 static const
98 struct processor_costs z900_cost =
99 {
100 COSTS_N_INSNS (5), /* M */
101 COSTS_N_INSNS (10), /* MGHI */
102 COSTS_N_INSNS (5), /* MH */
103 COSTS_N_INSNS (4), /* MHI */
104 COSTS_N_INSNS (5), /* ML */
105 COSTS_N_INSNS (5), /* MR */
106 COSTS_N_INSNS (4), /* MS */
107 COSTS_N_INSNS (15), /* MSG */
108 COSTS_N_INSNS (7), /* MSGF */
109 COSTS_N_INSNS (7), /* MSGFR */
110 COSTS_N_INSNS (10), /* MSGR */
111 COSTS_N_INSNS (4), /* MSR */
112 COSTS_N_INSNS (7), /* multiplication in DFmode */
113 COSTS_N_INSNS (13), /* MXBR */
114 COSTS_N_INSNS (136), /* SQXBR */
115 COSTS_N_INSNS (44), /* SQDBR */
116 COSTS_N_INSNS (35), /* SQEBR */
117 COSTS_N_INSNS (18), /* MADBR */
118 COSTS_N_INSNS (13), /* MAEBR */
119 COSTS_N_INSNS (134), /* DXBR */
120 COSTS_N_INSNS (30), /* DDBR */
121 COSTS_N_INSNS (27), /* DEBR */
122 COSTS_N_INSNS (220), /* DLGR */
123 COSTS_N_INSNS (34), /* DLR */
124 COSTS_N_INSNS (34), /* DR */
125 COSTS_N_INSNS (32), /* DSGFR */
126 COSTS_N_INSNS (32), /* DSGR */
127 };
128
129 static const
130 struct processor_costs z990_cost =
131 {
132 COSTS_N_INSNS (4), /* M */
133 COSTS_N_INSNS (2), /* MGHI */
134 COSTS_N_INSNS (2), /* MH */
135 COSTS_N_INSNS (2), /* MHI */
136 COSTS_N_INSNS (4), /* ML */
137 COSTS_N_INSNS (4), /* MR */
138 COSTS_N_INSNS (5), /* MS */
139 COSTS_N_INSNS (6), /* MSG */
140 COSTS_N_INSNS (4), /* MSGF */
141 COSTS_N_INSNS (4), /* MSGFR */
142 COSTS_N_INSNS (4), /* MSGR */
143 COSTS_N_INSNS (4), /* MSR */
144 COSTS_N_INSNS (1), /* multiplication in DFmode */
145 COSTS_N_INSNS (28), /* MXBR */
146 COSTS_N_INSNS (130), /* SQXBR */
147 COSTS_N_INSNS (66), /* SQDBR */
148 COSTS_N_INSNS (38), /* SQEBR */
149 COSTS_N_INSNS (1), /* MADBR */
150 COSTS_N_INSNS (1), /* MAEBR */
151 COSTS_N_INSNS (60), /* DXBR */
152 COSTS_N_INSNS (40), /* DDBR */
153 COSTS_N_INSNS (26), /* DEBR */
154 COSTS_N_INSNS (176), /* DLGR */
155 COSTS_N_INSNS (31), /* DLR */
156 COSTS_N_INSNS (31), /* DR */
157 COSTS_N_INSNS (31), /* DSGFR */
158 COSTS_N_INSNS (31), /* DSGR */
159 };
160
161 static const
162 struct processor_costs z9_109_cost =
163 {
164 COSTS_N_INSNS (4), /* M */
165 COSTS_N_INSNS (2), /* MGHI */
166 COSTS_N_INSNS (2), /* MH */
167 COSTS_N_INSNS (2), /* MHI */
168 COSTS_N_INSNS (4), /* ML */
169 COSTS_N_INSNS (4), /* MR */
170 COSTS_N_INSNS (5), /* MS */
171 COSTS_N_INSNS (6), /* MSG */
172 COSTS_N_INSNS (4), /* MSGF */
173 COSTS_N_INSNS (4), /* MSGFR */
174 COSTS_N_INSNS (4), /* MSGR */
175 COSTS_N_INSNS (4), /* MSR */
176 COSTS_N_INSNS (1), /* multiplication in DFmode */
177 COSTS_N_INSNS (28), /* MXBR */
178 COSTS_N_INSNS (130), /* SQXBR */
179 COSTS_N_INSNS (66), /* SQDBR */
180 COSTS_N_INSNS (38), /* SQEBR */
181 COSTS_N_INSNS (1), /* MADBR */
182 COSTS_N_INSNS (1), /* MAEBR */
183 COSTS_N_INSNS (60), /* DXBR */
184 COSTS_N_INSNS (40), /* DDBR */
185 COSTS_N_INSNS (26), /* DEBR */
186 COSTS_N_INSNS (30), /* DLGR */
187 COSTS_N_INSNS (23), /* DLR */
188 COSTS_N_INSNS (23), /* DR */
189 COSTS_N_INSNS (24), /* DSGFR */
190 COSTS_N_INSNS (24), /* DSGR */
191 };
192
193 static const
194 struct processor_costs z10_cost =
195 {
196 COSTS_N_INSNS (10), /* M */
197 COSTS_N_INSNS (10), /* MGHI */
198 COSTS_N_INSNS (10), /* MH */
199 COSTS_N_INSNS (10), /* MHI */
200 COSTS_N_INSNS (10), /* ML */
201 COSTS_N_INSNS (10), /* MR */
202 COSTS_N_INSNS (10), /* MS */
203 COSTS_N_INSNS (10), /* MSG */
204 COSTS_N_INSNS (10), /* MSGF */
205 COSTS_N_INSNS (10), /* MSGFR */
206 COSTS_N_INSNS (10), /* MSGR */
207 COSTS_N_INSNS (10), /* MSR */
208 COSTS_N_INSNS (1) , /* multiplication in DFmode */
209 COSTS_N_INSNS (50), /* MXBR */
210 COSTS_N_INSNS (120), /* SQXBR */
211 COSTS_N_INSNS (52), /* SQDBR */
212 COSTS_N_INSNS (38), /* SQEBR */
213 COSTS_N_INSNS (1), /* MADBR */
214 COSTS_N_INSNS (1), /* MAEBR */
215 COSTS_N_INSNS (111), /* DXBR */
216 COSTS_N_INSNS (39), /* DDBR */
217 COSTS_N_INSNS (32), /* DEBR */
218 COSTS_N_INSNS (160), /* DLGR */
219 COSTS_N_INSNS (71), /* DLR */
220 COSTS_N_INSNS (71), /* DR */
221 COSTS_N_INSNS (71), /* DSGFR */
222 COSTS_N_INSNS (71), /* DSGR */
223 };
224
225 static const
226 struct processor_costs z196_cost =
227 {
228 COSTS_N_INSNS (7), /* M */
229 COSTS_N_INSNS (5), /* MGHI */
230 COSTS_N_INSNS (5), /* MH */
231 COSTS_N_INSNS (5), /* MHI */
232 COSTS_N_INSNS (7), /* ML */
233 COSTS_N_INSNS (7), /* MR */
234 COSTS_N_INSNS (6), /* MS */
235 COSTS_N_INSNS (8), /* MSG */
236 COSTS_N_INSNS (6), /* MSGF */
237 COSTS_N_INSNS (6), /* MSGFR */
238 COSTS_N_INSNS (8), /* MSGR */
239 COSTS_N_INSNS (6), /* MSR */
240 COSTS_N_INSNS (1) , /* multiplication in DFmode */
241 COSTS_N_INSNS (40), /* MXBR B+40 */
242 COSTS_N_INSNS (100), /* SQXBR B+100 */
243 COSTS_N_INSNS (42), /* SQDBR B+42 */
244 COSTS_N_INSNS (28), /* SQEBR B+28 */
245 COSTS_N_INSNS (1), /* MADBR B */
246 COSTS_N_INSNS (1), /* MAEBR B */
247 COSTS_N_INSNS (101), /* DXBR B+101 */
248 COSTS_N_INSNS (29), /* DDBR */
249 COSTS_N_INSNS (22), /* DEBR */
250 COSTS_N_INSNS (160), /* DLGR cracked */
251 COSTS_N_INSNS (160), /* DLR cracked */
252 COSTS_N_INSNS (160), /* DR expanded */
253 COSTS_N_INSNS (160), /* DSGFR cracked */
254 COSTS_N_INSNS (160), /* DSGR cracked */
255 };
256
257 extern int reload_completed;
258
259 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
260 static rtx last_scheduled_insn;
261
262 /* Structure used to hold the components of a S/390 memory
263 address. A legitimate address on S/390 is of the general
264 form
265 base + index + displacement
266 where any of the components is optional.
267
268 base and index are registers of the class ADDR_REGS,
269 displacement is an unsigned 12-bit immediate constant. */
270
271 struct s390_address
272 {
273 rtx base;
274 rtx indx;
275 rtx disp;
276 bool pointer;
277 bool literal_pool;
278 };
279
280 /* The following structure is embedded in the machine
281 specific part of struct function. */
282
283 struct GTY (()) s390_frame_layout
284 {
285 /* Offset within stack frame. */
286 HOST_WIDE_INT gprs_offset;
287 HOST_WIDE_INT f0_offset;
288 HOST_WIDE_INT f4_offset;
289 HOST_WIDE_INT f8_offset;
290 HOST_WIDE_INT backchain_offset;
291
292 /* Numbers of the first and last GPRs for which slots in the
293 register save area are reserved. */
294 int first_save_gpr_slot;
295 int last_save_gpr_slot;
296
297 /* Numbers of the first and last GPRs to be saved and restored. */
298 int first_save_gpr;
299 int first_restore_gpr;
300 int last_save_gpr;
301 int last_restore_gpr;
302
303 /* Bits standing for floating point registers. Set, if the
304 respective register has to be saved. Starting with reg 16 (f0)
305 at the rightmost bit.
306 Bit 15 -  8   7  6  5  4  3  2  1  0
307 fpr 15 -  8   7  5  3  1  6  4  2  0
308 reg 31 - 24  23 22 21 20 19 18 17 16 */
309 unsigned int fpr_bitmap;
310
311 /* Number of floating point registers f8-f15 which must be saved. */
312 int high_fprs;
313
314 /* Set if return address needs to be saved.
315 This flag is set by s390_return_addr_rtx if it could not use
316 the initial value of r14 and therefore depends on r14 saved
317 to the stack. */
318 bool save_return_addr_p;
319
320 /* Size of stack frame. */
321 HOST_WIDE_INT frame_size;
322 };
323
324 /* Define the structure for the machine field in struct function. */
325
326 struct GTY(()) machine_function
327 {
328 struct s390_frame_layout frame_layout;
329
330 /* Literal pool base register. */
331 rtx base_reg;
332
333 /* True if we may need to perform branch splitting. */
334 bool split_branches_pending_p;
335
336 /* Some local-dynamic TLS symbol name. */
337 const char *some_ld_name;
338
339 bool has_landing_pad_p;
340 };
341
342 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
343
344 #define cfun_frame_layout (cfun->machine->frame_layout)
345 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
346 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
347 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
348 #define cfun_set_fpr_bit(BITNUM) (cfun->machine->frame_layout.fpr_bitmap |= \
349 (1 << (BITNUM)))
350 #define cfun_fpr_bit_p(BITNUM) (!!(cfun->machine->frame_layout.fpr_bitmap & \
351 (1 << (BITNUM))))
352
353 /* Number of GPRs and FPRs used for argument passing. */
354 #define GP_ARG_NUM_REG 5
355 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
356
357 /* A couple of shortcuts. */
358 #define CONST_OK_FOR_J(x) \
359 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
360 #define CONST_OK_FOR_K(x) \
361 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
362 #define CONST_OK_FOR_Os(x) \
363 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
364 #define CONST_OK_FOR_Op(x) \
365 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
366 #define CONST_OK_FOR_On(x) \
367 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
368
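/* REGNO is acceptable in MODE if the value fits into a single register
or if REGNO is even, so that an even/odd register pair is available. */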
369 #define REGNO_PAIR_OK(REGNO, MODE) \
370 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
371
372 /* The read-ahead distance, in bytes, of the dynamic branch prediction
373 unit on a z10 (or newer) CPU. */
374 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
375
376 /* Return the alignment for LABEL. We default to the -falign-labels
377 value except for the literal pool base label. */
378 int
379 s390_label_align (rtx label)
380 {
381 rtx prev_insn = prev_active_insn (label);
382
383 if (prev_insn == NULL_RTX)
384 goto old;
385
386 prev_insn = single_set (prev_insn);
387
388 if (prev_insn == NULL_RTX)
389 goto old;
390
391 prev_insn = SET_SRC (prev_insn);
392
393 /* Don't align literal pool base labels. */
394 if (GET_CODE (prev_insn) == UNSPEC
395 && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
396 return 0;
397
398 old:
399 return align_labels_log;
400 }
401
402 static enum machine_mode
403 s390_libgcc_cmp_return_mode (void)
404 {
405 return TARGET_64BIT ? DImode : SImode;
406 }
407
408 static enum machine_mode
409 s390_libgcc_shift_count_mode (void)
410 {
411 return TARGET_64BIT ? DImode : SImode;
412 }
413
414 static enum machine_mode
415 s390_unwind_word_mode (void)
416 {
417 return TARGET_64BIT ? DImode : SImode;
418 }
419
420 /* Return true if the back end supports mode MODE. */
421 static bool
422 s390_scalar_mode_supported_p (enum machine_mode mode)
423 {
424 /* In contrast to the default implementation reject TImode constants on 31bit
425 TARGET_ZARCH for ABI compliance. */
426 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
427 return false;
428
429 if (DECIMAL_FLOAT_MODE_P (mode))
430 return default_decimal_float_supported_p ();
431
432 return default_scalar_mode_supported_p (mode);
433 }
434
435 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
436
437 void
438 s390_set_has_landing_pad_p (bool value)
439 {
440 cfun->machine->has_landing_pad_p = value;
441 }
442
443 /* If two condition code modes are compatible, return a condition code
444 mode which is compatible with both. Otherwise, return
445 VOIDmode. */
446
447 static enum machine_mode
448 s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
449 {
450 if (m1 == m2)
451 return m1;
452
453 switch (m1)
454 {
455 case CCZmode:
456 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
457 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
458 return m2;
459 return VOIDmode;
460
461 case CCSmode:
462 case CCUmode:
463 case CCTmode:
464 case CCSRmode:
465 case CCURmode:
466 case CCZ1mode:
467 if (m2 == CCZmode)
468 return m1;
469
470 return VOIDmode;
471
472 default:
473 return VOIDmode;
474 }
475 return VOIDmode;
476 }
477
478 /* Return true if SET either doesn't set the CC register, or else
479 the source and destination have matching CC modes and that
480 CC mode is at least as constrained as REQ_MODE. */
481
482 static bool
483 s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
484 {
485 enum machine_mode set_mode;
486
487 gcc_assert (GET_CODE (set) == SET);
488
489 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
490 return 1;
491
492 set_mode = GET_MODE (SET_DEST (set));
493 switch (set_mode)
494 {
495 case CCSmode:
496 case CCSRmode:
497 case CCUmode:
498 case CCURmode:
499 case CCLmode:
500 case CCL1mode:
501 case CCL2mode:
502 case CCL3mode:
503 case CCT1mode:
504 case CCT2mode:
505 case CCT3mode:
506 if (req_mode != set_mode)
507 return 0;
508 break;
509
510 case CCZmode:
511 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
512 && req_mode != CCSRmode && req_mode != CCURmode)
513 return 0;
514 break;
515
516 case CCAPmode:
517 case CCANmode:
518 if (req_mode != CCAmode)
519 return 0;
520 break;
521
522 default:
523 gcc_unreachable ();
524 }
525
526 return (GET_MODE (SET_SRC (set)) == set_mode);
527 }
528
529 /* Return true if every SET in INSN that sets the CC register
530 has source and destination with matching CC modes and that
531 CC mode is at least as constrained as REQ_MODE.
532 If REQ_MODE is VOIDmode, always return false. */
533
534 bool
535 s390_match_ccmode (rtx insn, enum machine_mode req_mode)
536 {
537 int i;
538
539 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
540 if (req_mode == VOIDmode)
541 return false;
542
543 if (GET_CODE (PATTERN (insn)) == SET)
544 return s390_match_ccmode_set (PATTERN (insn), req_mode);
545
546 if (GET_CODE (PATTERN (insn)) == PARALLEL)
547 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
548 {
549 rtx set = XVECEXP (PATTERN (insn), 0, i);
550 if (GET_CODE (set) == SET)
551 if (!s390_match_ccmode_set (set, req_mode))
552 return false;
553 }
554
555 return true;
556 }
557
558 /* If a test-under-mask instruction can be used to implement
559 (compare (and ... OP1) OP2), return the CC mode required
560 to do that. Otherwise, return VOIDmode.
561 MIXED is true if the instruction can distinguish between
562 CC1 and CC2 for mixed selected bits (TMxx), it is false
563 if the instruction cannot (TM). */
564
565 enum machine_mode
566 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
567 {
568 int bit0, bit1;
569
570 /* ??? Fixme: should work on CONST_DOUBLE as well. */
571 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
572 return VOIDmode;
573
574 /* Selected bits all zero: CC0.
575 e.g.: int a; if ((a & (16 + 128)) == 0) */
576 if (INTVAL (op2) == 0)
577 return CCTmode;
578
579 /* Selected bits all one: CC3.
580 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
581 if (INTVAL (op2) == INTVAL (op1))
582 return CCT3mode;
583
584 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
585 int a;
586 if ((a & (16 + 128)) == 16) -> CCT1
587 if ((a & (16 + 128)) == 128) -> CCT2 */
588 if (mixed)
589 {
590 bit1 = exact_log2 (INTVAL (op2));
591 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
592 if (bit0 != -1 && bit1 != -1)
593 return bit0 > bit1 ? CCT1mode : CCT2mode;
594 }
595
596 return VOIDmode;
597 }
598
599 /* Given a comparison code OP (EQ, NE, etc.) and the operands
600 OP0 and OP1 of a COMPARE, return the mode to be used for the
601 comparison. */
602
603 enum machine_mode
604 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
605 {
606 switch (code)
607 {
608 case EQ:
609 case NE:
610 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
611 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
612 return CCAPmode;
613 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
614 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
615 return CCAPmode;
616 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
617 || GET_CODE (op1) == NEG)
618 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
619 return CCLmode;
620
621 if (GET_CODE (op0) == AND)
622 {
623 /* Check whether we can potentially do it via TM. */
624 enum machine_mode ccmode;
625 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
626 if (ccmode != VOIDmode)
627 {
628 /* Relax CCTmode to CCZmode to allow fall-back to AND
629 if that turns out to be beneficial. */
630 return ccmode == CCTmode ? CCZmode : ccmode;
631 }
632 }
633
634 if (register_operand (op0, HImode)
635 && GET_CODE (op1) == CONST_INT
636 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
637 return CCT3mode;
638 if (register_operand (op0, QImode)
639 && GET_CODE (op1) == CONST_INT
640 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
641 return CCT3mode;
642
643 return CCZmode;
644
645 case LE:
646 case LT:
647 case GE:
648 case GT:
649 /* The only overflow condition of NEG and ABS happens when
650 INT_MIN is used as operand: the expected positive result cannot
651 be represented and wraps back to the negative INT_MIN.
652 Using CCAP mode the resulting cc can be used for comparisons. */
653 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
654 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
655 return CCAPmode;
656
657 /* If constants are involved in an add instruction it is possible to use
658 the resulting cc for comparisons with zero. Knowing the sign of the
659 constant the overflow behavior gets predictable. e.g.:
660 int a, b; if ((b = a + c) > 0)
661 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
662 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
663 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
664 {
665 if (INTVAL (XEXP((op0), 1)) < 0)
666 return CCANmode;
667 else
668 return CCAPmode;
669 }
670 /* Fall through. */
671 case UNORDERED:
672 case ORDERED:
673 case UNEQ:
674 case UNLE:
675 case UNLT:
676 case UNGE:
677 case UNGT:
678 case LTGT:
679 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
680 && GET_CODE (op1) != CONST_INT)
681 return CCSRmode;
682 return CCSmode;
683
684 case LTU:
685 case GEU:
686 if (GET_CODE (op0) == PLUS
687 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
688 return CCL1mode;
689
690 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
691 && GET_CODE (op1) != CONST_INT)
692 return CCURmode;
693 return CCUmode;
694
695 case LEU:
696 case GTU:
697 if (GET_CODE (op0) == MINUS
698 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
699 return CCL2mode;
700
701 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
702 && GET_CODE (op1) != CONST_INT)
703 return CCURmode;
704 return CCUmode;
705
706 default:
707 gcc_unreachable ();
708 }
709 }
710
711 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
712 that we can implement more efficiently. */
713
714 void
715 s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
716 {
717 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
718 if ((*code == EQ || *code == NE)
719 && *op1 == const0_rtx
720 && GET_CODE (*op0) == ZERO_EXTRACT
721 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
722 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
723 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
724 {
725 rtx inner = XEXP (*op0, 0);
726 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
727 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
728 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
729
730 if (len > 0 && len < modesize
731 && pos >= 0 && pos + len <= modesize
732 && modesize <= HOST_BITS_PER_WIDE_INT)
733 {
734 unsigned HOST_WIDE_INT block;
735 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
736 block <<= modesize - pos - len;
737
738 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
739 gen_int_mode (block, GET_MODE (inner)));
740 }
741 }
742
743 /* Narrow AND of memory against immediate to enable TM. */
744 if ((*code == EQ || *code == NE)
745 && *op1 == const0_rtx
746 && GET_CODE (*op0) == AND
747 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
748 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
749 {
750 rtx inner = XEXP (*op0, 0);
751 rtx mask = XEXP (*op0, 1);
752
753 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
754 if (GET_CODE (inner) == SUBREG
755 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
756 && (GET_MODE_SIZE (GET_MODE (inner))
757 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
758 && ((INTVAL (mask)
759 & GET_MODE_MASK (GET_MODE (inner))
760 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
761 == 0))
762 inner = SUBREG_REG (inner);
763
764 /* Do not change volatile MEMs. */
765 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
766 {
767 int part = s390_single_part (XEXP (*op0, 1),
768 GET_MODE (inner), QImode, 0);
769 if (part >= 0)
770 {
771 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
772 inner = adjust_address_nv (inner, QImode, part);
773 *op0 = gen_rtx_AND (QImode, inner, mask);
774 }
775 }
776 }
777
778 /* Narrow comparisons against 0xffff to HImode if possible. */
779 if ((*code == EQ || *code == NE)
780 && GET_CODE (*op1) == CONST_INT
781 && INTVAL (*op1) == 0xffff
782 && SCALAR_INT_MODE_P (GET_MODE (*op0))
783 && (nonzero_bits (*op0, GET_MODE (*op0))
784 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
785 {
786 *op0 = gen_lowpart (HImode, *op0);
787 *op1 = constm1_rtx;
788 }
789
790 /* Remove redundant UNSPEC_CCU_TO_INT conversions if possible. */
791 if (GET_CODE (*op0) == UNSPEC
792 && XINT (*op0, 1) == UNSPEC_CCU_TO_INT
793 && XVECLEN (*op0, 0) == 1
794 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
795 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
796 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
797 && *op1 == const0_rtx)
798 {
799 enum rtx_code new_code = UNKNOWN;
800 switch (*code)
801 {
802 case EQ: new_code = EQ; break;
803 case NE: new_code = NE; break;
804 case LT: new_code = GTU; break;
805 case GT: new_code = LTU; break;
806 case LE: new_code = GEU; break;
807 case GE: new_code = LEU; break;
808 default: break;
809 }
810
811 if (new_code != UNKNOWN)
812 {
813 *op0 = XVECEXP (*op0, 0, 0);
814 *code = new_code;
815 }
816 }
817
818 /* Remove redundant UNSPEC_CCZ_TO_INT conversions if possible. */
819 if (GET_CODE (*op0) == UNSPEC
820 && XINT (*op0, 1) == UNSPEC_CCZ_TO_INT
821 && XVECLEN (*op0, 0) == 1
822 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCZmode
823 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
824 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
825 && *op1 == const0_rtx)
826 {
827 enum rtx_code new_code = UNKNOWN;
828 switch (*code)
829 {
830 case EQ: new_code = EQ; break;
831 case NE: new_code = NE; break;
832 default: break;
833 }
834
835 if (new_code != UNKNOWN)
836 {
837 *op0 = XVECEXP (*op0, 0, 0);
838 *code = new_code;
839 }
840 }
841
842 /* Simplify cascaded EQ, NE with const0_rtx. */
843 if ((*code == NE || *code == EQ)
844 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
845 && GET_MODE (*op0) == SImode
846 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
847 && REG_P (XEXP (*op0, 0))
848 && XEXP (*op0, 1) == const0_rtx
849 && *op1 == const0_rtx)
850 {
851 if ((*code == EQ && GET_CODE (*op0) == NE)
852 || (*code == NE && GET_CODE (*op0) == EQ))
853 *code = EQ;
854 else
855 *code = NE;
856 *op0 = XEXP (*op0, 0);
857 }
858
859 /* Prefer register over memory as first operand. */
860 if (MEM_P (*op0) && REG_P (*op1))
861 {
862 rtx tem = *op0; *op0 = *op1; *op1 = tem;
863 *code = swap_condition (*code);
864 }
865 }
866
867 /* Emit a compare instruction suitable to implement the comparison
868 OP0 CODE OP1. Return the correct condition RTL to be placed in
869 the IF_THEN_ELSE of the conditional branch testing the result. */
870
871 rtx
872 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
873 {
874 enum machine_mode mode = s390_select_ccmode (code, op0, op1);
875 rtx cc;
876
877 /* Do not output a redundant compare instruction if a compare_and_swap
878 pattern already computed the result and the machine modes are compatible. */
879 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
880 {
881 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
882 == GET_MODE (op0));
883 cc = op0;
884 }
885 else
886 {
887 cc = gen_rtx_REG (mode, CC_REGNUM);
888 emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
889 }
890
891 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
892 }
893
894 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
895 matches CMP.
896 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
897 conditional branch testing the result. */
898
899 static rtx
900 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem, rtx cmp, rtx new_rtx)
901 {
902 emit_insn (gen_sync_compare_and_swapsi (old, mem, cmp, new_rtx));
903 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM), const0_rtx);
904 }
905
906 /* Emit a jump instruction to TARGET. If COND is NULL_RTX, emit an
907 unconditional jump, else a conditional jump under condition COND. */
908
909 void
910 s390_emit_jump (rtx target, rtx cond)
911 {
912 rtx insn;
913
914 target = gen_rtx_LABEL_REF (VOIDmode, target);
915 if (cond)
916 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
917
918 insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
919 emit_jump_insn (insn);
920 }
921
922 /* Return branch condition mask to implement a branch
923 specified by CODE. Return -1 for invalid comparisons. */
924
925 int
926 s390_branch_condition_mask (rtx code)
927 {
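/* Bits of the 4-bit condition-code mask used by the branch-on-condition
instructions; the leftmost bit (value 8) selects condition code 0. */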
928 const int CC0 = 1 << 3;
929 const int CC1 = 1 << 2;
930 const int CC2 = 1 << 1;
931 const int CC3 = 1 << 0;
932
933 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
934 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
935 gcc_assert (XEXP (code, 1) == const0_rtx);
936
937 switch (GET_MODE (XEXP (code, 0)))
938 {
939 case CCZmode:
940 case CCZ1mode:
941 switch (GET_CODE (code))
942 {
943 case EQ: return CC0;
944 case NE: return CC1 | CC2 | CC3;
945 default: return -1;
946 }
947 break;
948
949 case CCT1mode:
950 switch (GET_CODE (code))
951 {
952 case EQ: return CC1;
953 case NE: return CC0 | CC2 | CC3;
954 default: return -1;
955 }
956 break;
957
958 case CCT2mode:
959 switch (GET_CODE (code))
960 {
961 case EQ: return CC2;
962 case NE: return CC0 | CC1 | CC3;
963 default: return -1;
964 }
965 break;
966
967 case CCT3mode:
968 switch (GET_CODE (code))
969 {
970 case EQ: return CC3;
971 case NE: return CC0 | CC1 | CC2;
972 default: return -1;
973 }
974 break;
975
976 case CCLmode:
977 switch (GET_CODE (code))
978 {
979 case EQ: return CC0 | CC2;
980 case NE: return CC1 | CC3;
981 default: return -1;
982 }
983 break;
984
985 case CCL1mode:
986 switch (GET_CODE (code))
987 {
988 case LTU: return CC2 | CC3; /* carry */
989 case GEU: return CC0 | CC1; /* no carry */
990 default: return -1;
991 }
992 break;
993
994 case CCL2mode:
995 switch (GET_CODE (code))
996 {
997 case GTU: return CC0 | CC1; /* borrow */
998 case LEU: return CC2 | CC3; /* no borrow */
999 default: return -1;
1000 }
1001 break;
1002
1003 case CCL3mode:
1004 switch (GET_CODE (code))
1005 {
1006 case EQ: return CC0 | CC2;
1007 case NE: return CC1 | CC3;
1008 case LTU: return CC1;
1009 case GTU: return CC3;
1010 case LEU: return CC1 | CC2;
1011 case GEU: return CC2 | CC3;
1012 default: return -1;
1013 }
1014
1015 case CCUmode:
1016 switch (GET_CODE (code))
1017 {
1018 case EQ: return CC0;
1019 case NE: return CC1 | CC2 | CC3;
1020 case LTU: return CC1;
1021 case GTU: return CC2;
1022 case LEU: return CC0 | CC1;
1023 case GEU: return CC0 | CC2;
1024 default: return -1;
1025 }
1026 break;
1027
1028 case CCURmode:
1029 switch (GET_CODE (code))
1030 {
1031 case EQ: return CC0;
1032 case NE: return CC2 | CC1 | CC3;
1033 case LTU: return CC2;
1034 case GTU: return CC1;
1035 case LEU: return CC0 | CC2;
1036 case GEU: return CC0 | CC1;
1037 default: return -1;
1038 }
1039 break;
1040
1041 case CCAPmode:
1042 switch (GET_CODE (code))
1043 {
1044 case EQ: return CC0;
1045 case NE: return CC1 | CC2 | CC3;
1046 case LT: return CC1 | CC3;
1047 case GT: return CC2;
1048 case LE: return CC0 | CC1 | CC3;
1049 case GE: return CC0 | CC2;
1050 default: return -1;
1051 }
1052 break;
1053
1054 case CCANmode:
1055 switch (GET_CODE (code))
1056 {
1057 case EQ: return CC0;
1058 case NE: return CC1 | CC2 | CC3;
1059 case LT: return CC1;
1060 case GT: return CC2 | CC3;
1061 case LE: return CC0 | CC1;
1062 case GE: return CC0 | CC2 | CC3;
1063 default: return -1;
1064 }
1065 break;
1066
1067 case CCSmode:
1068 switch (GET_CODE (code))
1069 {
1070 case EQ: return CC0;
1071 case NE: return CC1 | CC2 | CC3;
1072 case LT: return CC1;
1073 case GT: return CC2;
1074 case LE: return CC0 | CC1;
1075 case GE: return CC0 | CC2;
1076 case UNORDERED: return CC3;
1077 case ORDERED: return CC0 | CC1 | CC2;
1078 case UNEQ: return CC0 | CC3;
1079 case UNLT: return CC1 | CC3;
1080 case UNGT: return CC2 | CC3;
1081 case UNLE: return CC0 | CC1 | CC3;
1082 case UNGE: return CC0 | CC2 | CC3;
1083 case LTGT: return CC1 | CC2;
1084 default: return -1;
1085 }
1086 break;
1087
1088 case CCSRmode:
1089 switch (GET_CODE (code))
1090 {
1091 case EQ: return CC0;
1092 case NE: return CC2 | CC1 | CC3;
1093 case LT: return CC2;
1094 case GT: return CC1;
1095 case LE: return CC0 | CC2;
1096 case GE: return CC0 | CC1;
1097 case UNORDERED: return CC3;
1098 case ORDERED: return CC0 | CC2 | CC1;
1099 case UNEQ: return CC0 | CC3;
1100 case UNLT: return CC2 | CC3;
1101 case UNGT: return CC1 | CC3;
1102 case UNLE: return CC0 | CC2 | CC3;
1103 case UNGE: return CC0 | CC1 | CC3;
1104 case LTGT: return CC2 | CC1;
1105 default: return -1;
1106 }
1107 break;
1108
1109 default:
1110 return -1;
1111 }
1112 }
1113
1114
1115 /* Return branch condition mask to implement a compare and branch
1116 specified by CODE. Return -1 for invalid comparisons. */
1117
1118 int
1119 s390_compare_and_branch_condition_mask (rtx code)
1120 {
1121 const int CC0 = 1 << 3;
1122 const int CC1 = 1 << 2;
1123 const int CC2 = 1 << 1;
1124
1125 switch (GET_CODE (code))
1126 {
1127 case EQ:
1128 return CC0;
1129 case NE:
1130 return CC1 | CC2;
1131 case LT:
1132 case LTU:
1133 return CC1;
1134 case GT:
1135 case GTU:
1136 return CC2;
1137 case LE:
1138 case LEU:
1139 return CC0 | CC1;
1140 case GE:
1141 case GEU:
1142 return CC0 | CC2;
1143 default:
1144 gcc_unreachable ();
1145 }
1146 return -1;
1147 }
1148
1149 /* If INV is false, return assembler mnemonic string to implement
1150 a branch specified by CODE. If INV is true, return mnemonic
1151 for the corresponding inverted branch. */
1152
1153 static const char *
1154 s390_branch_condition_mnemonic (rtx code, int inv)
1155 {
1156 int mask;
1157
1158 static const char *const mnemonic[16] =
1159 {
1160 NULL, "o", "h", "nle",
1161 "l", "nhe", "lh", "ne",
1162 "e", "nlh", "he", "nl",
1163 "le", "nh", "no", NULL
1164 };
1165
1166 if (GET_CODE (XEXP (code, 0)) == REG
1167 && REGNO (XEXP (code, 0)) == CC_REGNUM
1168 && XEXP (code, 1) == const0_rtx)
1169 mask = s390_branch_condition_mask (code);
1170 else
1171 mask = s390_compare_and_branch_condition_mask (code);
1172
1173 gcc_assert (mask >= 0);
1174
1175 if (inv)
1176 mask ^= 15;
1177
1178 gcc_assert (mask >= 1 && mask <= 14);
1179
1180 return mnemonic[mask];
1181 }
1182
1183 /* Return the part of OP which has a value different from DEF.
1184 The size of the part is determined by MODE.
1185 Use this function only if you already know that OP really
1186 contains such a part. */
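/* For example, s390_extract_part (GEN_INT (0x12340000), HImode, 0)
returns 0x1234. */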
1187
1188 unsigned HOST_WIDE_INT
1189 s390_extract_part (rtx op, enum machine_mode mode, int def)
1190 {
1191 unsigned HOST_WIDE_INT value = 0;
1192 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
1193 int part_bits = GET_MODE_BITSIZE (mode);
1194 unsigned HOST_WIDE_INT part_mask
1195 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
1196 int i;
1197
1198 for (i = 0; i < max_parts; i++)
1199 {
1200 if (i == 0)
1201 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1202 else
1203 value >>= part_bits;
1204
1205 if ((value & part_mask) != (def & part_mask))
1206 return value & part_mask;
1207 }
1208
1209 gcc_unreachable ();
1210 }
1211
1212 /* If OP is an integer constant of mode MODE with exactly one
1213 part of mode PART_MODE unequal to DEF, return the number of that
1214 part. Otherwise, return -1. */
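/* For example, OP = 0xff00 with MODE = DImode, PART_MODE = QImode and
DEF = 0 yields 6; parts are counted starting from the most significant
part of OP. */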
1215
1216 int
1217 s390_single_part (rtx op,
1218 enum machine_mode mode,
1219 enum machine_mode part_mode,
1220 int def)
1221 {
1222 unsigned HOST_WIDE_INT value = 0;
1223 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
1224 unsigned HOST_WIDE_INT part_mask
1225 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
1226 int i, part = -1;
1227
1228 if (GET_CODE (op) != CONST_INT)
1229 return -1;
1230
1231 for (i = 0; i < n_parts; i++)
1232 {
1233 if (i == 0)
1234 value = (unsigned HOST_WIDE_INT) INTVAL (op);
1235 else
1236 value >>= GET_MODE_BITSIZE (part_mode);
1237
1238 if ((value & part_mask) != (def & part_mask))
1239 {
1240 if (part != -1)
1241 return -1;
1242 else
1243 part = i;
1244 }
1245 }
1246 return part == -1 ? -1 : n_parts - 1 - part;
1247 }
1248
1249 /* Return true if IN contains a contiguous bitfield in the lower SIZE
1250 bits and no other bits are set in IN. POS and LENGTH can be used
1251 to obtain the start position and the length of the bitfield.
1252
1253 POS gives the position of the first bit of the bitfield counting
1254 from the lowest order bit starting with zero. In order to use this
1255 value for S/390 instructions this has to be converted to "bits big
1256 endian" style. */
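/* For example, IN = 0xf000 with SIZE = 32 yields *POS = 12 and
*LENGTH = 4. */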
1257
1258 bool
1259 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
1260 int *pos, int *length)
1261 {
1262 int tmp_pos = 0;
1263 int tmp_length = 0;
1264 int i;
1265 unsigned HOST_WIDE_INT mask = 1ULL;
1266 bool contiguous = false;
1267
1268 for (i = 0; i < size; mask <<= 1, i++)
1269 {
1270 if (contiguous)
1271 {
1272 if (mask & in)
1273 tmp_length++;
1274 else
1275 break;
1276 }
1277 else
1278 {
1279 if (mask & in)
1280 {
1281 contiguous = true;
1282 tmp_length++;
1283 }
1284 else
1285 tmp_pos++;
1286 }
1287 }
1288
1289 if (!tmp_length)
1290 return false;
1291
1292 /* Calculate a mask for all bits beyond the contiguous bits. */
1293 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
1294
1295 if (mask & in)
1296 return false;
1297
1298 if (tmp_length + tmp_pos - 1 > size)
1299 return false;
1300
1301 if (length)
1302 *length = tmp_length;
1303
1304 if (pos)
1305 *pos = tmp_pos;
1306
1307 return true;
1308 }
1309
1310 /* Check whether we can (and want to) split a double-word
1311 move in mode MODE from SRC to DST into two single-word
1312 moves, moving the subword FIRST_SUBWORD first. */
1313
1314 bool
1315 s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
1316 {
1317 /* Floating point registers cannot be split. */
1318 if (FP_REG_P (src) || FP_REG_P (dst))
1319 return false;
1320
1321 /* We don't need to split if operands are directly accessible. */
1322 if (s_operand (src, mode) || s_operand (dst, mode))
1323 return false;
1324
1325 /* Non-offsettable memory references cannot be split. */
1326 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
1327 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
1328 return false;
1329
1330 /* Moving the first subword must not clobber a register
1331 needed to move the second subword. */
1332 if (register_operand (dst, mode))
1333 {
1334 rtx subreg = operand_subword (dst, first_subword, 0, mode);
1335 if (reg_overlap_mentioned_p (subreg, src))
1336 return false;
1337 }
1338
1339 return true;
1340 }
1341
1342 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
1343 and [MEM2, MEM2 + SIZE] do overlap and false
1344 otherwise. */
1345
1346 bool
1347 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
1348 {
1349 rtx addr1, addr2, addr_delta;
1350 HOST_WIDE_INT delta;
1351
1352 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1353 return true;
1354
1355 if (size == 0)
1356 return false;
1357
1358 addr1 = XEXP (mem1, 0);
1359 addr2 = XEXP (mem2, 0);
1360
1361 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1362
1363 /* This overlapping check is used by peepholes merging memory block operations.
1364 Overlapping operations would otherwise be recognized by the S/390 hardware
1365 and would fall back to a slower implementation. Allowing overlapping
1366 operations would lead to slow code but not to wrong code. Therefore we are
1367 somewhat optimistic if we cannot prove that the memory blocks are
1368 overlapping.
1369 That's why we return false here although this may accept operations on
1370 overlapping memory areas. */
1371 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
1372 return false;
1373
1374 delta = INTVAL (addr_delta);
1375
1376 if (delta == 0
1377 || (delta > 0 && delta < size)
1378 || (delta < 0 && -delta < size))
1379 return true;
1380
1381 return false;
1382 }
1383
1384 /* Check whether the address of memory reference MEM2 equals exactly
1385 the address of memory reference MEM1 plus DELTA. Return true if
1386 we can prove this to be the case, false otherwise. */
1387
1388 bool
1389 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
1390 {
1391 rtx addr1, addr2, addr_delta;
1392
1393 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
1394 return false;
1395
1396 addr1 = XEXP (mem1, 0);
1397 addr2 = XEXP (mem2, 0);
1398
1399 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
1400 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
1401 return false;
1402
1403 return true;
1404 }
1405
1406 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
1407
1408 void
1409 s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
1410 rtx *operands)
1411 {
1412 enum machine_mode wmode = mode;
1413 rtx dst = operands[0];
1414 rtx src1 = operands[1];
1415 rtx src2 = operands[2];
1416 rtx op, clob, tem;
1417
1418 /* If we cannot handle the operation directly, use a temp register. */
1419 if (!s390_logical_operator_ok_p (operands))
1420 dst = gen_reg_rtx (mode);
1421
1422 /* QImode and HImode patterns make sense only if we have a destination
1423 in memory. Otherwise perform the operation in SImode. */
1424 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
1425 wmode = SImode;
1426
1427 /* Widen operands if required. */
1428 if (mode != wmode)
1429 {
1430 if (GET_CODE (dst) == SUBREG
1431 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
1432 dst = tem;
1433 else if (REG_P (dst))
1434 dst = gen_rtx_SUBREG (wmode, dst, 0);
1435 else
1436 dst = gen_reg_rtx (wmode);
1437
1438 if (GET_CODE (src1) == SUBREG
1439 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
1440 src1 = tem;
1441 else if (GET_MODE (src1) != VOIDmode)
1442 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
1443
1444 if (GET_CODE (src2) == SUBREG
1445 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
1446 src2 = tem;
1447 else if (GET_MODE (src2) != VOIDmode)
1448 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
1449 }
1450
1451 /* Emit the instruction. */
1452 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
1453 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
1454 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
1455
1456 /* Fix up the destination if needed. */
1457 if (dst != operands[0])
1458 emit_move_insn (operands[0], gen_lowpart (mode, dst));
1459 }
1460
1461 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
1462
1463 bool
1464 s390_logical_operator_ok_p (rtx *operands)
1465 {
1466 /* If the destination operand is in memory, it needs to coincide
1467 with one of the source operands. After reload, it has to be
1468 the first source operand. */
1469 if (GET_CODE (operands[0]) == MEM)
1470 return rtx_equal_p (operands[0], operands[1])
1471 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
1472
1473 return true;
1474 }
1475
1476 /* Narrow logical operation CODE of memory operand MEMOP with immediate
1477 operand IMMOP to switch from SS to SI type instructions. */
1478
1479 void
1480 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
1481 {
1482 int def = code == AND ? -1 : 0;
1483 HOST_WIDE_INT mask;
1484 int part;
1485
1486 gcc_assert (GET_CODE (*memop) == MEM);
1487 gcc_assert (!MEM_VOLATILE_P (*memop));
1488
1489 mask = s390_extract_part (*immop, QImode, def);
1490 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
1491 gcc_assert (part >= 0);
1492
1493 *memop = adjust_address (*memop, QImode, part);
1494 *immop = gen_int_mode (mask, QImode);
1495 }
1496
1497
1498 /* How to allocate a 'struct machine_function'. */
1499
1500 static struct machine_function *
1501 s390_init_machine_status (void)
1502 {
1503 return ggc_alloc_cleared_machine_function ();
1504 }
1505
1506 static void
1507 s390_option_override (void)
1508 {
1509 /* Set up function hooks. */
1510 init_machine_status = s390_init_machine_status;
1511
1512 /* Architecture mode defaults according to ABI. */
1513 if (!(target_flags_explicit & MASK_ZARCH))
1514 {
1515 if (TARGET_64BIT)
1516 target_flags |= MASK_ZARCH;
1517 else
1518 target_flags &= ~MASK_ZARCH;
1519 }
1520
1521 /* Set the march default in case it hasn't been specified on
1522 cmdline. */
1523 if (s390_arch == PROCESSOR_max)
1524 {
1525 s390_arch_string = TARGET_ZARCH? "z900" : "g5";
1526 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
1527 s390_arch_flags = processor_flags_table[(int)s390_arch];
1528 }
1529
1530 /* Determine processor to tune for. */
1531 if (s390_tune == PROCESSOR_max)
1532 {
1533 s390_tune = s390_arch;
1534 s390_tune_flags = s390_arch_flags;
1535 }
1536
1537 /* Sanity checks. */
1538 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
1539 error ("z/Architecture mode not supported on %s", s390_arch_string);
1540 if (TARGET_64BIT && !TARGET_ZARCH)
1541 error ("64-bit ABI not supported in ESA/390 mode");
1542
1543 if (TARGET_HARD_DFP && !TARGET_DFP)
1544 {
1545 if (target_flags_explicit & MASK_HARD_DFP)
1546 {
1547 if (!TARGET_CPU_DFP)
1548 error ("hardware decimal floating point instructions"
1549 " not available on %s", s390_arch_string);
1550 if (!TARGET_ZARCH)
1551 error ("hardware decimal floating point instructions"
1552 " not available in ESA/390 mode");
1553 }
1554 else
1555 target_flags &= ~MASK_HARD_DFP;
1556 }
1557
1558 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
1559 {
1560 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
1561 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
1562
1563 target_flags &= ~MASK_HARD_DFP;
1564 }
1565
1566 /* Set processor cost function. */
1567 switch (s390_tune)
1568 {
1569 case PROCESSOR_2084_Z990:
1570 s390_cost = &z990_cost;
1571 break;
1572 case PROCESSOR_2094_Z9_109:
1573 s390_cost = &z9_109_cost;
1574 break;
1575 case PROCESSOR_2097_Z10:
1576 s390_cost = &z10_cost;
break;
1577 case PROCESSOR_2817_Z196:
1578 s390_cost = &z196_cost;
1579 break;
1580 default:
1581 s390_cost = &z900_cost;
1582 }
1583
1584 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
1585 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
1586 "in combination");
1587
1588 if (s390_stack_size)
1589 {
1590 if (s390_stack_guard >= s390_stack_size)
1591 error ("stack size must be greater than the stack guard value");
1592 else if (s390_stack_size > 1 << 16)
1593 error ("stack size must not be greater than 64k");
1594 }
1595 else if (s390_stack_guard)
1596 error ("-mstack-guard implies use of -mstack-size");
1597
1598 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
1599 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
1600 target_flags |= MASK_LONG_DOUBLE_128;
1601 #endif
1602
1603 if (s390_tune == PROCESSOR_2097_Z10
1604 || s390_tune == PROCESSOR_2817_Z196)
1605 {
1606 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
1607 global_options.x_param_values,
1608 global_options_set.x_param_values);
1609 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
1610 global_options.x_param_values,
1611 global_options_set.x_param_values);
1612 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
1613 global_options.x_param_values,
1614 global_options_set.x_param_values);
1615 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
1616 global_options.x_param_values,
1617 global_options_set.x_param_values);
1618 }
1619
1620 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
1621 global_options.x_param_values,
1622 global_options_set.x_param_values);
1623 /* Values for loop prefetching. */
1624 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
1625 global_options.x_param_values,
1626 global_options_set.x_param_values);
1627 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
1628 global_options.x_param_values,
1629 global_options_set.x_param_values);
1630 /* s390 has more than 2 cache levels and the sizes are much larger.
1631 Since we are always running virtualized, assume that we only get a
1632 small part of the caches above L1. */
1633 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
1634 global_options.x_param_values,
1635 global_options_set.x_param_values);
1636 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
1637 global_options.x_param_values,
1638 global_options_set.x_param_values);
1639 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
1640 global_options.x_param_values,
1641 global_options_set.x_param_values);
1642
1643 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
1644 requires the arch flags to be evaluated already. Since prefetching
1645 is beneficial on s390, we enable it if available. */
1646 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
1647 flag_prefetch_loop_arrays = 1;
1648 }
1649
1650 /* Map for smallest class containing reg regno. */
1651
1652 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
1653 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1654 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1655 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1656 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
1657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
1661 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS,
1662 ACCESS_REGS, ACCESS_REGS
1663 };
1664
1665 /* Return attribute type of insn. */
1666
1667 static enum attr_type
1668 s390_safe_attr_type (rtx insn)
1669 {
1670 if (recog_memoized (insn) >= 0)
1671 return get_attr_type (insn);
1672 else
1673 return TYPE_NONE;
1674 }
1675
1676 /* Return true if DISP is a valid short displacement. */
1677
1678 static bool
1679 s390_short_displacement (rtx disp)
1680 {
1681 /* No displacement is OK. */
1682 if (!disp)
1683 return true;
1684
1685 /* Without the long displacement facility we don't need to
1686 distinguish between long and short displacements. */
1687 if (!TARGET_LONG_DISPLACEMENT)
1688 return true;
1689
1690 /* Integer displacement in range. */
1691 if (GET_CODE (disp) == CONST_INT)
1692 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
1693
1694 /* GOT offset is not OK, the GOT can be large. */
1695 if (GET_CODE (disp) == CONST
1696 && GET_CODE (XEXP (disp, 0)) == UNSPEC
1697 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
1698 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
1699 return false;
1700
1701 /* All other symbolic constants are literal pool references,
1702 which are OK as the literal pool must be small. */
1703 if (GET_CODE (disp) == CONST)
1704 return true;
1705
1706 return false;
1707 }
1708
1709 /* Decompose a RTL expression ADDR for a memory address into
1710 its components, returned in OUT.
1711
1712 Returns false if ADDR is not a valid memory address, true
1713 otherwise. If OUT is NULL, don't return the components,
1714 but check for validity only.
1715
1716 Note: Only addresses in canonical form are recognized.
1717 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
1718 canonical form so that they will be recognized. */
1719
1720 static int
1721 s390_decompose_address (rtx addr, struct s390_address *out)
1722 {
1723 HOST_WIDE_INT offset = 0;
1724 rtx base = NULL_RTX;
1725 rtx indx = NULL_RTX;
1726 rtx disp = NULL_RTX;
1727 rtx orig_disp;
1728 bool pointer = false;
1729 bool base_ptr = false;
1730 bool indx_ptr = false;
1731 bool literal_pool = false;
1732
1733 /* We may need to substitute the literal pool base register into the address
1734 below. However, at this point we do not know which register is going to
1735 be used as base, so we substitute the arg pointer register. This is going
1736 to be treated as holding a pointer below -- it shouldn't be used for any
1737 other purpose. */
1738 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
1739
1740 /* Decompose address into base + index + displacement. */
1741
1742 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
1743 base = addr;
1744
1745 else if (GET_CODE (addr) == PLUS)
1746 {
1747 rtx op0 = XEXP (addr, 0);
1748 rtx op1 = XEXP (addr, 1);
1749 enum rtx_code code0 = GET_CODE (op0);
1750 enum rtx_code code1 = GET_CODE (op1);
1751
1752 if (code0 == REG || code0 == UNSPEC)
1753 {
1754 if (code1 == REG || code1 == UNSPEC)
1755 {
1756 indx = op0; /* index + base */
1757 base = op1;
1758 }
1759
1760 else
1761 {
1762 base = op0; /* base + displacement */
1763 disp = op1;
1764 }
1765 }
1766
1767 else if (code0 == PLUS)
1768 {
1769 indx = XEXP (op0, 0); /* index + base + disp */
1770 base = XEXP (op0, 1);
1771 disp = op1;
1772 }
1773
1774 else
1775 {
1776 return false;
1777 }
1778 }
1779
1780 else
1781 disp = addr; /* displacement */
1782
1783 /* Extract integer part of displacement. */
1784 orig_disp = disp;
1785 if (disp)
1786 {
1787 if (GET_CODE (disp) == CONST_INT)
1788 {
1789 offset = INTVAL (disp);
1790 disp = NULL_RTX;
1791 }
1792 else if (GET_CODE (disp) == CONST
1793 && GET_CODE (XEXP (disp, 0)) == PLUS
1794 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
1795 {
1796 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
1797 disp = XEXP (XEXP (disp, 0), 0);
1798 }
1799 }
1800
1801 /* Strip off CONST here to avoid special case tests later. */
1802 if (disp && GET_CODE (disp) == CONST)
1803 disp = XEXP (disp, 0);
1804
1805 /* We can convert literal pool addresses to
1806 displacements by basing them off the base register. */
1807 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
1808 {
1809 /* Either base or index must be free to hold the base register. */
1810 if (!base)
1811 base = fake_pool_base, literal_pool = true;
1812 else if (!indx)
1813 indx = fake_pool_base, literal_pool = true;
1814 else
1815 return false;
1816
1817 /* Mark up the displacement. */
1818 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
1819 UNSPEC_LTREL_OFFSET);
1820 }
1821
1822 /* Validate base register. */
1823 if (base)
1824 {
1825 if (GET_CODE (base) == UNSPEC)
1826 switch (XINT (base, 1))
1827 {
1828 case UNSPEC_LTREF:
1829 if (!disp)
1830 disp = gen_rtx_UNSPEC (Pmode,
1831 gen_rtvec (1, XVECEXP (base, 0, 0)),
1832 UNSPEC_LTREL_OFFSET);
1833 else
1834 return false;
1835
1836 base = XVECEXP (base, 0, 1);
1837 break;
1838
1839 case UNSPEC_LTREL_BASE:
1840 if (XVECLEN (base, 0) == 1)
1841 base = fake_pool_base, literal_pool = true;
1842 else
1843 base = XVECEXP (base, 0, 1);
1844 break;
1845
1846 default:
1847 return false;
1848 }
1849
1850 if (!REG_P (base)
1851 || (GET_MODE (base) != SImode
1852 && GET_MODE (base) != Pmode))
1853 return false;
1854
1855 if (REGNO (base) == STACK_POINTER_REGNUM
1856 || REGNO (base) == FRAME_POINTER_REGNUM
1857 || ((reload_completed || reload_in_progress)
1858 && frame_pointer_needed
1859 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
1860 || REGNO (base) == ARG_POINTER_REGNUM
1861 || (flag_pic
1862 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
1863 pointer = base_ptr = true;
1864
1865 if ((reload_completed || reload_in_progress)
1866 && base == cfun->machine->base_reg)
1867 pointer = base_ptr = literal_pool = true;
1868 }
1869
1870 /* Validate index register. */
1871 if (indx)
1872 {
1873 if (GET_CODE (indx) == UNSPEC)
1874 switch (XINT (indx, 1))
1875 {
1876 case UNSPEC_LTREF:
1877 if (!disp)
1878 disp = gen_rtx_UNSPEC (Pmode,
1879 gen_rtvec (1, XVECEXP (indx, 0, 0)),
1880 UNSPEC_LTREL_OFFSET);
1881 else
1882 return false;
1883
1884 indx = XVECEXP (indx, 0, 1);
1885 break;
1886
1887 case UNSPEC_LTREL_BASE:
1888 if (XVECLEN (indx, 0) == 1)
1889 indx = fake_pool_base, literal_pool = true;
1890 else
1891 indx = XVECEXP (indx, 0, 1);
1892 break;
1893
1894 default:
1895 return false;
1896 }
1897
1898 if (!REG_P (indx)
1899 || (GET_MODE (indx) != SImode
1900 && GET_MODE (indx) != Pmode))
1901 return false;
1902
1903 if (REGNO (indx) == STACK_POINTER_REGNUM
1904 || REGNO (indx) == FRAME_POINTER_REGNUM
1905 || ((reload_completed || reload_in_progress)
1906 && frame_pointer_needed
1907 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
1908 || REGNO (indx) == ARG_POINTER_REGNUM
1909 || (flag_pic
1910 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
1911 pointer = indx_ptr = true;
1912
1913 if ((reload_completed || reload_in_progress)
1914 && indx == cfun->machine->base_reg)
1915 pointer = indx_ptr = literal_pool = true;
1916 }
1917
1918 /* Prefer to use pointer as base, not index. */
1919 if (base && indx && !base_ptr
1920 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
1921 {
1922 rtx tmp = base;
1923 base = indx;
1924 indx = tmp;
1925 }
1926
1927 /* Validate displacement. */
1928 if (!disp)
1929 {
1930 /* If virtual registers are involved, the displacement will change later
1931 anyway as the virtual registers get eliminated. This could make a
1932 valid displacement invalid, but it is more likely to make an invalid
1933 displacement valid, because we sometimes access the register save area
1934 via negative offsets to one of those registers.
1935 Thus we don't check the displacement for validity here. If after
1936 elimination the displacement turns out to be invalid after all,
1937 this is fixed up by reload in any case. */
1938 if (base != arg_pointer_rtx
1939 && indx != arg_pointer_rtx
1940 && base != return_address_pointer_rtx
1941 && indx != return_address_pointer_rtx
1942 && base != frame_pointer_rtx
1943 && indx != frame_pointer_rtx
1944 && base != virtual_stack_vars_rtx
1945 && indx != virtual_stack_vars_rtx)
1946 if (!DISP_IN_RANGE (offset))
1947 return false;
1948 }
1949 else
1950 {
1951 /* All the special cases are pointers. */
1952 pointer = true;
1953
1954 /* In the small-PIC case, the linker converts @GOT
1955 and @GOTNTPOFF offsets to possible displacements. */
1956 if (GET_CODE (disp) == UNSPEC
1957 && (XINT (disp, 1) == UNSPEC_GOT
1958 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
1959 && flag_pic == 1)
1960 {
1961 ;
1962 }
1963
1964 /* Accept pool label offsets. */
1965 else if (GET_CODE (disp) == UNSPEC
1966 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
1967 ;
1968
1969 /* Accept literal pool references. */
1970 else if (GET_CODE (disp) == UNSPEC
1971 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
1972 {
1973 	  /* In case CSE pulled a non-literal-pool reference out of
1974 	     the pool we have to reject the address. This is
1975 	     especially important when loading the GOT pointer on
1976 	     non-zarch CPUs. In this case the literal pool contains an
1977 	     lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
1978 	     will most likely exceed the displacement. */
1979 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
1980 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
1981 return false;
1982
1983 orig_disp = gen_rtx_CONST (Pmode, disp);
1984 if (offset)
1985 {
1986 /* If we have an offset, make sure it does not
1987 exceed the size of the constant pool entry. */
1988 rtx sym = XVECEXP (disp, 0, 0);
1989 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
1990 return false;
1991
1992 orig_disp = plus_constant (orig_disp, offset);
1993 }
1994 }
1995
1996 else
1997 return false;
1998 }
1999
2000 if (!base && !indx)
2001 pointer = true;
2002
2003 if (out)
2004 {
2005 out->base = base;
2006 out->indx = indx;
2007 out->disp = orig_disp;
2008 out->pointer = pointer;
2009 out->literal_pool = literal_pool;
2010 }
2011
2012 return true;
2013 }
2014
2015 /* Decompose an RTL expression OP for a shift count into its components,
2016 and return the base register in BASE and the offset in OFFSET.
2017
2018 Return true if OP is a valid shift count, false if not. */
2019
2020 bool
2021 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2022 {
2023 HOST_WIDE_INT off = 0;
2024
2025 /* We can have an integer constant, an address register,
2026 or a sum of the two. */
2027 if (GET_CODE (op) == CONST_INT)
2028 {
2029 off = INTVAL (op);
2030 op = NULL_RTX;
2031 }
2032 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2033 {
2034 off = INTVAL (XEXP (op, 1));
2035 op = XEXP (op, 0);
2036 }
2037 while (op && GET_CODE (op) == SUBREG)
2038 op = SUBREG_REG (op);
2039
2040 if (op && GET_CODE (op) != REG)
2041 return false;
2042
2043 if (offset)
2044 *offset = off;
2045 if (base)
2046 *base = op;
2047
2048 return true;
2049 }
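
/* For illustration only (a hedged example, not used anywhere): for
   OP = (plus (subreg:SI (reg:DI 5) 4) (const_int 7)) the function above
   strips the SUBREG and returns *BASE = (reg:DI 5) and *OFFSET = 7,
   while a plain (const_int 12) yields *BASE = NULL_RTX and
   *OFFSET = 12.  */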
2050
2051
2052 /* Return true if OP is a valid address without index. */
2053
2054 bool
2055 s390_legitimate_address_without_index_p (rtx op)
2056 {
2057 struct s390_address addr;
2058
2059 if (!s390_decompose_address (XEXP (op, 0), &addr))
2060 return false;
2061 if (addr.indx)
2062 return false;
2063
2064 return true;
2065 }
2066
2067
2068 /* Return true if ADDR is of kind symbol_ref or symbol_ref + const_int
2069 and return these parts in SYMREF and ADDEND. You can pass NULL in
2070 SYMREF and/or ADDEND if you are not interested in these values.
2071 Literal pool references are *not* considered symbol references. */
2072
2073 static bool
2074 s390_symref_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2075 {
2076 HOST_WIDE_INT tmpaddend = 0;
2077
2078 if (GET_CODE (addr) == CONST)
2079 addr = XEXP (addr, 0);
2080
2081 if (GET_CODE (addr) == PLUS)
2082 {
2083 if (GET_CODE (XEXP (addr, 0)) == SYMBOL_REF
2084 && !CONSTANT_POOL_ADDRESS_P (XEXP (addr, 0))
2085 && CONST_INT_P (XEXP (addr, 1)))
2086 {
2087 tmpaddend = INTVAL (XEXP (addr, 1));
2088 addr = XEXP (addr, 0);
2089 }
2090 else
2091 return false;
2092 }
2093 else
2094 if (GET_CODE (addr) != SYMBOL_REF || CONSTANT_POOL_ADDRESS_P (addr))
2095 return false;
2096
2097 if (symref)
2098 *symref = addr;
2099 if (addend)
2100 *addend = tmpaddend;
2101
2102 return true;
2103 }
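
/* For illustration only: given ADDR = (const (plus (symbol_ref "foo")
   (const_int 16))) for some hypothetical non-pool symbol "foo", the
   function above returns true with *SYMREF = (symbol_ref "foo") and
   *ADDEND = 16.  */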
2104
2105
2106 /* Return true if the address in OP is valid for constraint letter C
2107    if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
2108 pool MEMs should be accepted. Only the Q, R, S, T constraint
2109 letters are allowed for C. */
2110
2111 static int
2112 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
2113 {
2114 struct s390_address addr;
2115 bool decomposed = false;
2116
2117   /* This check makes sure that no symbolic addresses (except literal
2118      pool references) are accepted by the R or T constraints. */
2119 if (s390_symref_operand_p (op, NULL, NULL))
2120 return 0;
2121
2122 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
2123 if (!lit_pool_ok)
2124 {
2125 if (!s390_decompose_address (op, &addr))
2126 return 0;
2127 if (addr.literal_pool)
2128 return 0;
2129 decomposed = true;
2130 }
2131
2132 switch (c)
2133 {
2134 case 'Q': /* no index short displacement */
2135 if (!decomposed && !s390_decompose_address (op, &addr))
2136 return 0;
2137 if (addr.indx)
2138 return 0;
2139 if (!s390_short_displacement (addr.disp))
2140 return 0;
2141 break;
2142
2143 case 'R': /* with index short displacement */
2144 if (TARGET_LONG_DISPLACEMENT)
2145 {
2146 if (!decomposed && !s390_decompose_address (op, &addr))
2147 return 0;
2148 if (!s390_short_displacement (addr.disp))
2149 return 0;
2150 }
2151 /* Any invalid address here will be fixed up by reload,
2152 so accept it for the most generic constraint. */
2153 break;
2154
2155 case 'S': /* no index long displacement */
2156 if (!TARGET_LONG_DISPLACEMENT)
2157 return 0;
2158 if (!decomposed && !s390_decompose_address (op, &addr))
2159 return 0;
2160 if (addr.indx)
2161 return 0;
2162 if (s390_short_displacement (addr.disp))
2163 return 0;
2164 break;
2165
2166 case 'T': /* with index long displacement */
2167 if (!TARGET_LONG_DISPLACEMENT)
2168 return 0;
2169 /* Any invalid address here will be fixed up by reload,
2170 so accept it for the most generic constraint. */
2171 if ((decomposed || s390_decompose_address (op, &addr))
2172 && s390_short_displacement (addr.disp))
2173 return 0;
2174 break;
2175 default:
2176 return 0;
2177 }
2178 return 1;
2179 }
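
/* For illustration only, assuming s390_short_displacement accepts the
   unsigned 12-bit range: an address like (plus (reg 2) (const_int 40))
   satisfies 'Q' and 'R' above but not 'S' or 'T', whereas with
   TARGET_LONG_DISPLACEMENT an address such as
   (plus (plus (reg 2) (reg 3)) (const_int 8192)) satisfies only 'T',
   since it uses an index register together with a long displacement.  */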
2180
2181
2182 /* Evaluates constraint strings described by the regular expression
2183 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
2184    the constraint given in STR, and 0 otherwise. */
2185
2186 int
2187 s390_mem_constraint (const char *str, rtx op)
2188 {
2189 char c = str[0];
2190
2191 switch (c)
2192 {
2193 case 'A':
2194 /* Check for offsettable variants of memory constraints. */
2195 if (!MEM_P (op) || MEM_VOLATILE_P (op))
2196 return 0;
2197 if ((reload_completed || reload_in_progress)
2198 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
2199 return 0;
2200 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
2201 case 'B':
2202 /* Check for non-literal-pool variants of memory constraints. */
2203 if (!MEM_P (op))
2204 return 0;
2205 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
2206 case 'Q':
2207 case 'R':
2208 case 'S':
2209 case 'T':
2210 if (GET_CODE (op) != MEM)
2211 return 0;
2212 return s390_check_qrst_address (c, XEXP (op, 0), true);
2213 case 'U':
2214 return (s390_check_qrst_address ('Q', op, true)
2215 || s390_check_qrst_address ('R', op, true));
2216 case 'W':
2217 return (s390_check_qrst_address ('S', op, true)
2218 || s390_check_qrst_address ('T', op, true));
2219 case 'Y':
2220 /* Simply check for the basic form of a shift count. Reload will
2221 take care of making sure we have a proper base register. */
2222 if (!s390_decompose_shift_count (op, NULL, NULL))
2223 return 0;
2224 break;
2225 case 'Z':
2226 return s390_check_qrst_address (str[1], op, true);
2227 default:
2228 return 0;
2229 }
2230 return 1;
2231 }
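
/* For illustration only: a constraint string "AQ" thus means an
   offsettable, non-volatile MEM whose address satisfies 'Q', while "BR"
   means a MEM whose address satisfies 'R' without referring to the
   literal pool.  */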
2232
2233
2234 /* Evaluates constraint strings starting with letter O. Input
2235    parameter C is the letter following the "O" in the constraint
2236    string. Returns 1 if VALUE meets the respective constraint and 0
2237 otherwise. */
2238
2239 int
2240 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
2241 {
2242 if (!TARGET_EXTIMM)
2243 return 0;
2244
2245 switch (c)
2246 {
2247 case 's':
2248 return trunc_int_for_mode (value, SImode) == value;
2249
2250 case 'p':
2251 return value == 0
2252 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
2253
2254 case 'n':
2255 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
2256
2257 default:
2258 gcc_unreachable ();
2259 }
2260 }
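
/* For illustration only: "Os" accepts any constant fitting a signed
   32-bit immediate (-2147483648 .. 2147483647), matching the
   extended-immediate instructions; without TARGET_EXTIMM the function
   above rejects every value.  */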
2261
2262
2263 /* Evaluates constraint strings starting with letter N. Parameter STR
2264 contains the letters following letter "N" in the constraint string.
2265 Returns true if VALUE matches the constraint. */
2266
2267 int
2268 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
2269 {
2270 enum machine_mode mode, part_mode;
2271 int def;
2272 int part, part_goal;
2273
2274
2275 if (str[0] == 'x')
2276 part_goal = -1;
2277 else
2278 part_goal = str[0] - '0';
2279
2280 switch (str[1])
2281 {
2282 case 'Q':
2283 part_mode = QImode;
2284 break;
2285 case 'H':
2286 part_mode = HImode;
2287 break;
2288 case 'S':
2289 part_mode = SImode;
2290 break;
2291 default:
2292 return 0;
2293 }
2294
2295 switch (str[2])
2296 {
2297 case 'H':
2298 mode = HImode;
2299 break;
2300 case 'S':
2301 mode = SImode;
2302 break;
2303 case 'D':
2304 mode = DImode;
2305 break;
2306 default:
2307 return 0;
2308 }
2309
2310 switch (str[3])
2311 {
2312 case '0':
2313 def = 0;
2314 break;
2315 case 'F':
2316 def = -1;
2317 break;
2318 default:
2319 return 0;
2320 }
2321
2322 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
2323 return 0;
2324
2325 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
2326 if (part < 0)
2327 return 0;
2328 if (part_goal != -1 && part_goal != part)
2329 return 0;
2330
2331 return 1;
2332 }
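
/* For illustration only, with a hypothetical STR of "xHD0": the function
   above then accepts a DImode constant in which exactly one 16-bit
   halfword is non-zero; replacing the leading 'x' with a digit further
   requires that halfword to be the part with that index, as counted by
   s390_single_part.  */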
2333
2334
2335 /* Returns true if the input parameter VALUE is a float zero. */
2336
2337 int
2338 s390_float_const_zero_p (rtx value)
2339 {
2340 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
2341 && value == CONST0_RTX (GET_MODE (value)));
2342 }
2343
2344 /* Implement TARGET_REGISTER_MOVE_COST. */
2345
2346 static int
2347 s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2348 reg_class_t from, reg_class_t to)
2349 {
2350 /* On s390, copy between fprs and gprs is expensive. */
2351 if ((reg_classes_intersect_p (from, GENERAL_REGS)
2352 && reg_classes_intersect_p (to, FP_REGS))
2353 || (reg_classes_intersect_p (from, FP_REGS)
2354 && reg_classes_intersect_p (to, GENERAL_REGS)))
2355 return 10;
2356
2357 return 1;
2358 }
2359
2360 /* Implement TARGET_MEMORY_MOVE_COST. */
2361
2362 static int
2363 s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2364 reg_class_t rclass ATTRIBUTE_UNUSED,
2365 bool in ATTRIBUTE_UNUSED)
2366 {
2367 return 1;
2368 }
2369
2370 /* Compute a (partial) cost for rtx X. Return true if the complete
2371 cost has been computed, and false if subexpressions should be
2372 scanned. In either case, *TOTAL contains the cost result.
2373 CODE contains GET_CODE (x), OUTER_CODE contains the code
2374 of the superexpression of x. */
2375
2376 static bool
2377 s390_rtx_costs (rtx x, int code, int outer_code, int *total,
2378 bool speed ATTRIBUTE_UNUSED)
2379 {
2380 switch (code)
2381 {
2382 case CONST:
2383 case CONST_INT:
2384 case LABEL_REF:
2385 case SYMBOL_REF:
2386 case CONST_DOUBLE:
2387 case MEM:
2388 *total = 0;
2389 return true;
2390
2391 case ASHIFT:
2392 case ASHIFTRT:
2393 case LSHIFTRT:
2394 case ROTATE:
2395 case ROTATERT:
2396 case AND:
2397 case IOR:
2398 case XOR:
2399 case NEG:
2400 case NOT:
2401 *total = COSTS_N_INSNS (1);
2402 return false;
2403
2404 case PLUS:
2405 case MINUS:
2406 *total = COSTS_N_INSNS (1);
2407 return false;
2408
2409 case MULT:
2410 switch (GET_MODE (x))
2411 {
2412 case SImode:
2413 {
2414 rtx left = XEXP (x, 0);
2415 rtx right = XEXP (x, 1);
2416 if (GET_CODE (right) == CONST_INT
2417 && CONST_OK_FOR_K (INTVAL (right)))
2418 *total = s390_cost->mhi;
2419 else if (GET_CODE (left) == SIGN_EXTEND)
2420 *total = s390_cost->mh;
2421 else
2422 *total = s390_cost->ms; /* msr, ms, msy */
2423 break;
2424 }
2425 case DImode:
2426 {
2427 rtx left = XEXP (x, 0);
2428 rtx right = XEXP (x, 1);
2429 if (TARGET_ZARCH)
2430 {
2431 if (GET_CODE (right) == CONST_INT
2432 && CONST_OK_FOR_K (INTVAL (right)))
2433 *total = s390_cost->mghi;
2434 else if (GET_CODE (left) == SIGN_EXTEND)
2435 *total = s390_cost->msgf;
2436 else
2437 *total = s390_cost->msg; /* msgr, msg */
2438 }
2439 else /* TARGET_31BIT */
2440 {
2441 if (GET_CODE (left) == SIGN_EXTEND
2442 && GET_CODE (right) == SIGN_EXTEND)
2443 /* mulsidi case: mr, m */
2444 *total = s390_cost->m;
2445 else if (GET_CODE (left) == ZERO_EXTEND
2446 && GET_CODE (right) == ZERO_EXTEND
2447 && TARGET_CPU_ZARCH)
2448 /* umulsidi case: ml, mlr */
2449 *total = s390_cost->ml;
2450 else
2451 /* Complex calculation is required. */
2452 *total = COSTS_N_INSNS (40);
2453 }
2454 break;
2455 }
2456 case SFmode:
2457 case DFmode:
2458 *total = s390_cost->mult_df;
2459 break;
2460 case TFmode:
2461 *total = s390_cost->mxbr;
2462 break;
2463 default:
2464 return false;
2465 }
2466 return false;
2467
2468 case FMA:
2469 switch (GET_MODE (x))
2470 {
2471 case DFmode:
2472 *total = s390_cost->madbr;
2473 break;
2474 case SFmode:
2475 *total = s390_cost->maebr;
2476 break;
2477 default:
2478 return false;
2479 }
2480     /* A negated third argument is free: FMSUB. */
2481 if (GET_CODE (XEXP (x, 2)) == NEG)
2482 {
2483 *total += (rtx_cost (XEXP (x, 0), FMA, speed)
2484 + rtx_cost (XEXP (x, 1), FMA, speed)
2485 + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, speed));
2486 return true;
2487 }
2488 return false;
2489
2490 case UDIV:
2491 case UMOD:
2492 if (GET_MODE (x) == TImode) /* 128 bit division */
2493 *total = s390_cost->dlgr;
2494 else if (GET_MODE (x) == DImode)
2495 {
2496 rtx right = XEXP (x, 1);
2497 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2498 *total = s390_cost->dlr;
2499 else /* 64 by 64 bit division */
2500 *total = s390_cost->dlgr;
2501 }
2502 else if (GET_MODE (x) == SImode) /* 32 bit division */
2503 *total = s390_cost->dlr;
2504 return false;
2505
2506 case DIV:
2507 case MOD:
2508 if (GET_MODE (x) == DImode)
2509 {
2510 rtx right = XEXP (x, 1);
2511 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
2512 if (TARGET_ZARCH)
2513 *total = s390_cost->dsgfr;
2514 else
2515 *total = s390_cost->dr;
2516 else /* 64 by 64 bit division */
2517 *total = s390_cost->dsgr;
2518 }
2519 else if (GET_MODE (x) == SImode) /* 32 bit division */
2520 *total = s390_cost->dlr;
2521 else if (GET_MODE (x) == SFmode)
2522 {
2523 *total = s390_cost->debr;
2524 }
2525 else if (GET_MODE (x) == DFmode)
2526 {
2527 *total = s390_cost->ddbr;
2528 }
2529 else if (GET_MODE (x) == TFmode)
2530 {
2531 *total = s390_cost->dxbr;
2532 }
2533 return false;
2534
2535 case SQRT:
2536 if (GET_MODE (x) == SFmode)
2537 *total = s390_cost->sqebr;
2538 else if (GET_MODE (x) == DFmode)
2539 *total = s390_cost->sqdbr;
2540 else /* TFmode */
2541 *total = s390_cost->sqxbr;
2542 return false;
2543
2544 case SIGN_EXTEND:
2545 case ZERO_EXTEND:
2546 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
2547 || outer_code == PLUS || outer_code == MINUS
2548 || outer_code == COMPARE)
2549 *total = 0;
2550 return false;
2551
2552 case COMPARE:
2553 *total = COSTS_N_INSNS (1);
2554 if (GET_CODE (XEXP (x, 0)) == AND
2555 && GET_CODE (XEXP (x, 1)) == CONST_INT
2556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2557 {
2558 rtx op0 = XEXP (XEXP (x, 0), 0);
2559 rtx op1 = XEXP (XEXP (x, 0), 1);
2560 rtx op2 = XEXP (x, 1);
2561
2562 if (memory_operand (op0, GET_MODE (op0))
2563 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
2564 return true;
2565 if (register_operand (op0, GET_MODE (op0))
2566 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
2567 return true;
2568 }
2569 return false;
2570
2571 default:
2572 return false;
2573 }
2574 }
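
/* For illustration only: for (mult:SI (reg:SI 6) (const_int 3)) the code
   above charges s390_cost->mhi, because the constant passes
   CONST_OK_FOR_K; with a non-constant multiplier it charges
   s390_cost->ms, or s390_cost->mh if the first operand is a
   SIGN_EXTEND.  */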
2575
2576 /* Return the cost of an address rtx ADDR. */
2577
2578 static int
2579 s390_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2580 {
2581 struct s390_address ad;
2582 if (!s390_decompose_address (addr, &ad))
2583 return 1000;
2584
2585 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
2586 }
2587
2588 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2589 otherwise return 0. */
2590
2591 int
2592 tls_symbolic_operand (rtx op)
2593 {
2594 if (GET_CODE (op) != SYMBOL_REF)
2595 return 0;
2596 return SYMBOL_REF_TLS_MODEL (op);
2597 }
2598 \f
2599 /* Split DImode access register reference REG (on 64-bit) into its constituent
2600 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2601 gen_highpart cannot be used as they assume all registers are word-sized,
2602 while our access registers have only half that size. */
2603
2604 void
2605 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2606 {
2607 gcc_assert (TARGET_64BIT);
2608 gcc_assert (ACCESS_REG_P (reg));
2609 gcc_assert (GET_MODE (reg) == DImode);
2610 gcc_assert (!(REGNO (reg) & 1));
2611
2612 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2613 *hi = gen_rtx_REG (SImode, REGNO (reg));
2614 }
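
/* For illustration only: for a DImode access-register pair starting at
   an even register number N, the function above yields
   *HI = (reg:SI N) and *LO = (reg:SI N + 1), matching the 32-bit width
   of the access registers.  */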
2615
2616 /* Return true if OP contains a symbol reference. */
2617
2618 bool
2619 symbolic_reference_mentioned_p (rtx op)
2620 {
2621 const char *fmt;
2622 int i;
2623
2624 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2625 return 1;
2626
2627 fmt = GET_RTX_FORMAT (GET_CODE (op));
2628 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2629 {
2630 if (fmt[i] == 'E')
2631 {
2632 int j;
2633
2634 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2635 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2636 return 1;
2637 }
2638
2639 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2640 return 1;
2641 }
2642
2643 return 0;
2644 }
2645
2646 /* Return true if OP contains a reference to a thread-local symbol. */
2647
2648 bool
2649 tls_symbolic_reference_mentioned_p (rtx op)
2650 {
2651 const char *fmt;
2652 int i;
2653
2654 if (GET_CODE (op) == SYMBOL_REF)
2655 return tls_symbolic_operand (op);
2656
2657 fmt = GET_RTX_FORMAT (GET_CODE (op));
2658 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2659 {
2660 if (fmt[i] == 'E')
2661 {
2662 int j;
2663
2664 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2665 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2666 return true;
2667 }
2668
2669 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2670 return true;
2671 }
2672
2673 return false;
2674 }
2675
2676
2677 /* Return true if OP is a legitimate general operand when
2678 generating PIC code. It is given that flag_pic is on
2679 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2680
2681 int
2682 legitimate_pic_operand_p (rtx op)
2683 {
2684 /* Accept all non-symbolic constants. */
2685 if (!SYMBOLIC_CONST (op))
2686 return 1;
2687
2688 /* Reject everything else; must be handled
2689 via emit_symbolic_move. */
2690 return 0;
2691 }
2692
2693 /* Returns true if the constant value OP is a legitimate general operand.
2694 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2695
2696 static bool
2697 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2698 {
2699 /* Accept all non-symbolic constants. */
2700 if (!SYMBOLIC_CONST (op))
2701 return 1;
2702
2703 /* Accept immediate LARL operands. */
2704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2705 return 1;
2706
2707 /* Thread-local symbols are never legal constants. This is
2708 so that emit_call knows that computing such addresses
2709 might require a function call. */
2710 if (TLS_SYMBOLIC_CONST (op))
2711 return 0;
2712
2713 /* In the PIC case, symbolic constants must *not* be
2714 forced into the literal pool. We accept them here,
2715 so that they will be handled by emit_symbolic_move. */
2716 if (flag_pic)
2717 return 1;
2718
2719 /* All remaining non-PIC symbolic constants are
2720 forced into the literal pool. */
2721 return 0;
2722 }
2723
2724 /* Determine if it's legal to put X into the constant pool. This
2725 is not possible if X contains the address of a symbol that is
2726 not constant (TLS) or not known at final link time (PIC). */
2727
2728 static bool
2729 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2730 {
2731 switch (GET_CODE (x))
2732 {
2733 case CONST_INT:
2734 case CONST_DOUBLE:
2735 /* Accept all non-symbolic constants. */
2736 return false;
2737
2738 case LABEL_REF:
2739 /* Labels are OK iff we are non-PIC. */
2740 return flag_pic != 0;
2741
2742 case SYMBOL_REF:
2743 /* 'Naked' TLS symbol references are never OK,
2744 non-TLS symbols are OK iff we are non-PIC. */
2745 if (tls_symbolic_operand (x))
2746 return true;
2747 else
2748 return flag_pic != 0;
2749
2750 case CONST:
2751 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2752 case PLUS:
2753 case MINUS:
2754 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2755 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2756
2757 case UNSPEC:
2758 switch (XINT (x, 1))
2759 {
2760 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2761 case UNSPEC_LTREL_OFFSET:
2762 case UNSPEC_GOT:
2763 case UNSPEC_GOTOFF:
2764 case UNSPEC_PLTOFF:
2765 case UNSPEC_TLSGD:
2766 case UNSPEC_TLSLDM:
2767 case UNSPEC_NTPOFF:
2768 case UNSPEC_DTPOFF:
2769 case UNSPEC_GOTNTPOFF:
2770 case UNSPEC_INDNTPOFF:
2771 return false;
2772
2773 	  /* If the literal pool shares the code section, execute
2774 	     template placeholders may be put into the pool as well. */
2775 case UNSPEC_INSN:
2776 return TARGET_CPU_ZARCH;
2777
2778 default:
2779 return true;
2780 }
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786 }
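
/* For illustration only: with flag_pic set, a plain
   (symbol_ref "bar") for some hypothetical symbol "bar" is kept out of
   the constant pool by the function above, whereas
   (const (unspec [(symbol_ref "bar")] UNSPEC_GOTOFF)) may be forced into
   the pool, since its value is known at final link time.  */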
2787
2788 /* Returns true if the constant value OP is a legitimate general
2789    operand during and after reload. It differs from
2790    legitimate_constant_p in that it will not accept
2791    a constant that would need to be forced to the literal pool
2792    before it can be used as an operand.
2793 This function accepts all constants which can be loaded directly
2794 into a GPR. */
2795
2796 bool
2797 legitimate_reload_constant_p (rtx op)
2798 {
2799 /* Accept la(y) operands. */
2800 if (GET_CODE (op) == CONST_INT
2801 && DISP_IN_RANGE (INTVAL (op)))
2802 return true;
2803
2804 /* Accept l(g)hi/l(g)fi operands. */
2805 if (GET_CODE (op) == CONST_INT
2806 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2807 return true;
2808
2809 /* Accept lliXX operands. */
2810 if (TARGET_ZARCH
2811 && GET_CODE (op) == CONST_INT
2812 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2813 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2814 return true;
2815
2816 if (TARGET_EXTIMM
2817 && GET_CODE (op) == CONST_INT
2818 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2819 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2820 return true;
2821
2822 /* Accept larl operands. */
2823 if (TARGET_CPU_ZARCH
2824 && larl_operand (op, VOIDmode))
2825 return true;
2826
2827 /* Accept floating-point zero operands that fit into a single GPR. */
2828 if (GET_CODE (op) == CONST_DOUBLE
2829 && s390_float_const_zero_p (op)
2830 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2831 return true;
2832
2833 /* Accept double-word operands that can be split. */
2834 if (GET_CODE (op) == CONST_INT
2835 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2836 {
2837 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2838 rtx hi = operand_subword (op, 0, 0, dword_mode);
2839 rtx lo = operand_subword (op, 1, 0, dword_mode);
2840 return legitimate_reload_constant_p (hi)
2841 && legitimate_reload_constant_p (lo);
2842 }
2843
2844 /* Everything else cannot be handled without reload. */
2845 return false;
2846 }
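
/* For illustration only, on a TARGET_ZARCH target: the function above
   accepts e.g. 4000 (fits la/lay and lghi), 0x7fff0000 (a single
   non-zero halfword, loadable with llilh) and a DFmode +0.0 CONST_DOUBLE
   (fits one 64-bit GPR), but rejects a value like 0x123456789a, which
   would have to go through the literal pool.  */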
2847
2848 /* Returns true if the constant value OP is a legitimate fp operand
2849 during and after reload.
2850 This function accepts all constants which can be loaded directly
2851 into an FPR. */
2852
2853 static bool
2854 legitimate_reload_fp_constant_p (rtx op)
2855 {
2856 /* Accept floating-point zero operands if the load zero instruction
2857 can be used. */
2858 if (TARGET_Z196
2859 && GET_CODE (op) == CONST_DOUBLE
2860 && s390_float_const_zero_p (op))
2861 return true;
2862
2863 return false;
2864 }
2865
2866 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2867 return the class of reg to actually use. */
2868
2869 static reg_class_t
2870 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2871 {
2872 switch (GET_CODE (op))
2873 {
2874 /* Constants we cannot reload into general registers
2875 must be forced into the literal pool. */
2876 case CONST_DOUBLE:
2877 case CONST_INT:
2878 if (reg_class_subset_p (GENERAL_REGS, rclass)
2879 && legitimate_reload_constant_p (op))
2880 return GENERAL_REGS;
2881 else if (reg_class_subset_p (ADDR_REGS, rclass)
2882 && legitimate_reload_constant_p (op))
2883 return ADDR_REGS;
2884 else if (reg_class_subset_p (FP_REGS, rclass)
2885 && legitimate_reload_fp_constant_p (op))
2886 return FP_REGS;
2887 return NO_REGS;
2888
2889 /* If a symbolic constant or a PLUS is reloaded,
2890 it is most likely being used as an address, so
2891       prefer ADDR_REGS. If RCLASS is not a superset
2892 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2893 case LABEL_REF:
2894 case SYMBOL_REF:
2895 case CONST:
2896 if (!legitimate_reload_constant_p (op))
2897 return NO_REGS;
2898 /* fallthrough */
2899 case PLUS:
2900 /* load address will be used. */
2901 if (reg_class_subset_p (ADDR_REGS, rclass))
2902 return ADDR_REGS;
2903 else
2904 return NO_REGS;
2905
2906 default:
2907 break;
2908 }
2909
2910 return rclass;
2911 }
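
/* For illustration only: asking to reload the constant 42 into FP_REGS
   is answered with NO_REGS by the function above (42 is not a valid FPR
   immediate), while reloading it into GENERAL_REGS is allowed because
   legitimate_reload_constant_p accepts it.  */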
2912
2913 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2914 multiple of ALIGNMENT and the SYMBOL_REF being naturally
2915 aligned. */
2916
2917 bool
2918 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2919 {
2920 HOST_WIDE_INT addend;
2921 rtx symref;
2922
2923 if (!s390_symref_operand_p (addr, &symref, &addend))
2924 return false;
2925
2926 return (!SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref)
2927 && !(addend & (alignment - 1)));
2928 }
2929
2930 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
2931    operand, SCRATCH is used to reload the even part of the address,
2932    and one is then added. */
2933
2934 void
2935 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
2936 {
2937 HOST_WIDE_INT addend;
2938 rtx symref;
2939
2940 if (!s390_symref_operand_p (addr, &symref, &addend))
2941 gcc_unreachable ();
2942
2943 if (!(addend & 1))
2944 /* Easy case. The addend is even so larl will do fine. */
2945 emit_move_insn (reg, addr);
2946 else
2947 {
2948 /* We can leave the scratch register untouched if the target
2949 register is a valid base register. */
2950 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
2951 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
2952 scratch = reg;
2953
2954 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
2955 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
2956
2957 if (addend != 1)
2958 emit_move_insn (scratch,
2959 gen_rtx_CONST (Pmode,
2960 gen_rtx_PLUS (Pmode, symref,
2961 GEN_INT (addend - 1))));
2962 else
2963 emit_move_insn (scratch, symref);
2964
2965 /* Increment the address using la in order to avoid clobbering cc. */
2966 emit_move_insn (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
2967 }
2968 }
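
/* For illustration only: for ADDR = sym + 5 (an odd addend larl cannot
   encode, "sym" being a hypothetical symbol) the function above emits
   roughly

       larl  <scratch>, sym+4
       la    <reg>, 1(<scratch>)

   so that the condition code is left untouched.  */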
2969
2970 /* Generate what is necessary to move between REG and MEM using
2971 SCRATCH. The direction is given by TOMEM. */
2972
2973 void
2974 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
2975 {
2976 /* Reload might have pulled a constant out of the literal pool.
2977 Force it back in. */
2978 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
2979 || GET_CODE (mem) == CONST)
2980 mem = force_const_mem (GET_MODE (reg), mem);
2981
2982 gcc_assert (MEM_P (mem));
2983
2984 /* For a load from memory we can leave the scratch register
2985 untouched if the target register is a valid base register. */
2986 if (!tomem
2987 && REGNO (reg) < FIRST_PSEUDO_REGISTER
2988 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
2989 && GET_MODE (reg) == GET_MODE (scratch))
2990 scratch = reg;
2991
2992 /* Load address into scratch register. Since we can't have a
2993 secondary reload for a secondary reload we have to cover the case
2994 where larl would need a secondary reload here as well. */
2995 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
2996
2997 /* Now we can use a standard load/store to do the move. */
2998 if (tomem)
2999 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3000 else
3001 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3002 }
3003
3004 /* Inform reload about cases where moving X with a mode MODE to a register in
3005 RCLASS requires an extra scratch or immediate register. Return the class
3006 needed for the immediate register. */
3007
3008 static reg_class_t
3009 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3010 enum machine_mode mode, secondary_reload_info *sri)
3011 {
3012 enum reg_class rclass = (enum reg_class) rclass_i;
3013
3014 /* Intermediate register needed. */
3015 if (reg_classes_intersect_p (CC_REGS, rclass))
3016 return GENERAL_REGS;
3017
3018 if (TARGET_Z10)
3019 {
3020 HOST_WIDE_INT offset;
3021 rtx symref;
3022
3023 /* On z10 several optimizer steps may generate larl operands with
3024 an odd addend. */
3025 if (in_p
3026 && s390_symref_operand_p (x, &symref, &offset)
3027 && mode == Pmode
3028 && !SYMBOL_REF_ALIGN1_P (symref)
3029 && (offset & 1) == 1)
3030 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3031 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3032
3033 /* On z10 we need a scratch register when moving QI, TI or floating
3034 point mode values from or to a memory location with a SYMBOL_REF
3035 or if the symref addend of a SI or DI move is not aligned to the
3036 width of the access. */
3037 if (MEM_P (x)
3038 && s390_symref_operand_p (XEXP (x, 0), NULL, NULL)
3039 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3040 || (!TARGET_ZARCH && mode == DImode)
3041 || ((mode == HImode || mode == SImode || mode == DImode)
3042 && (!s390_check_symref_alignment (XEXP (x, 0),
3043 GET_MODE_SIZE (mode))))))
3044 {
3045 #define __SECONDARY_RELOAD_CASE(M,m) \
3046 case M##mode: \
3047 if (TARGET_64BIT) \
3048 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3049 CODE_FOR_reload##m##di_tomem_z10; \
3050 else \
3051 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3052 CODE_FOR_reload##m##si_tomem_z10; \
3053 break;
3054
3055 switch (GET_MODE (x))
3056 {
3057 __SECONDARY_RELOAD_CASE (QI, qi);
3058 __SECONDARY_RELOAD_CASE (HI, hi);
3059 __SECONDARY_RELOAD_CASE (SI, si);
3060 __SECONDARY_RELOAD_CASE (DI, di);
3061 __SECONDARY_RELOAD_CASE (TI, ti);
3062 __SECONDARY_RELOAD_CASE (SF, sf);
3063 __SECONDARY_RELOAD_CASE (DF, df);
3064 __SECONDARY_RELOAD_CASE (TF, tf);
3065 __SECONDARY_RELOAD_CASE (SD, sd);
3066 __SECONDARY_RELOAD_CASE (DD, dd);
3067 __SECONDARY_RELOAD_CASE (TD, td);
3068
3069 default:
3070 gcc_unreachable ();
3071 }
3072 #undef __SECONDARY_RELOAD_CASE
3073 }
3074 }
3075
3076 /* We need a scratch register when loading a PLUS expression which
3077 is not a legitimate operand of the LOAD ADDRESS instruction. */
3078 if (in_p && s390_plus_operand (x, mode))
3079 sri->icode = (TARGET_64BIT ?
3080 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3081
3082   /* When performing a multiword move from or to memory we have to make
3083      sure the second chunk in memory is addressable without causing a
3084      displacement overflow. If it is not, we calculate the address in
3085      a scratch register. */
3086 if (MEM_P (x)
3087 && GET_CODE (XEXP (x, 0)) == PLUS
3088 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3089 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3090 + GET_MODE_SIZE (mode) - 1))
3091 {
3092 /* For GENERAL_REGS a displacement overflow is no problem if occurring
3093          in an s_operand address since we may fall back to lm/stm. So we only
3094 have to care about overflows in the b+i+d case. */
3095 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3096 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3097 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3098 /* For FP_REGS no lm/stm is available so this check is triggered
3099 for displacement overflows in b+i+d and b+d like addresses. */
3100 || (reg_classes_intersect_p (FP_REGS, rclass)
3101 && s390_class_max_nregs (FP_REGS, mode) > 1))
3102 {
3103 if (in_p)
3104 sri->icode = (TARGET_64BIT ?
3105 CODE_FOR_reloaddi_nonoffmem_in :
3106 CODE_FOR_reloadsi_nonoffmem_in);
3107 else
3108 sri->icode = (TARGET_64BIT ?
3109 CODE_FOR_reloaddi_nonoffmem_out :
3110 CODE_FOR_reloadsi_nonoffmem_out);
3111 }
3112 }
3113
3114 /* A scratch address register is needed when a symbolic constant is
3115      copied to r0 when compiling with -fPIC. In other cases the target
3116      register might be used as a temporary (see legitimize_pic_address). */
3117 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3118 sri->icode = (TARGET_64BIT ?
3119 CODE_FOR_reloaddi_PIC_addr :
3120 CODE_FOR_reloadsi_PIC_addr);
3121
3122 /* Either scratch or no register needed. */
3123 return NO_REGS;
3124 }
3125
3126 /* Generate code to load SRC, which is a PLUS that is not a
3127    legitimate operand for the LA instruction, into TARGET.
3128    SCRATCH may be used as a scratch register. */
3129
3130 void
3131 s390_expand_plus_operand (rtx target, rtx src,
3132 rtx scratch)
3133 {
3134 rtx sum1, sum2;
3135 struct s390_address ad;
3136
3137 /* src must be a PLUS; get its two operands. */
3138 gcc_assert (GET_CODE (src) == PLUS);
3139 gcc_assert (GET_MODE (src) == Pmode);
3140
3141 /* Check if any of the two operands is already scheduled
3142 for replacement by reload. This can happen e.g. when
3143 float registers occur in an address. */
3144 sum1 = find_replacement (&XEXP (src, 0));
3145 sum2 = find_replacement (&XEXP (src, 1));
3146 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3147
3148 /* If the address is already strictly valid, there's nothing to do. */
3149 if (!s390_decompose_address (src, &ad)
3150 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3151 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3152 {
3153 /* Otherwise, one of the operands cannot be an address register;
3154 we reload its value into the scratch register. */
3155 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3156 {
3157 emit_move_insn (scratch, sum1);
3158 sum1 = scratch;
3159 }
3160 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3161 {
3162 emit_move_insn (scratch, sum2);
3163 sum2 = scratch;
3164 }
3165
3166 /* According to the way these invalid addresses are generated
3167 in reload.c, it should never happen (at least on s390) that
3168 *neither* of the PLUS components, after find_replacements
3169 was applied, is an address register. */
3170 if (sum1 == scratch && sum2 == scratch)
3171 {
3172 debug_rtx (src);
3173 gcc_unreachable ();
3174 }
3175
3176 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3177 }
3178
3179 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3180 is only ever performed on addresses, so we can mark the
3181 sum as legitimate for LA in any case. */
3182 s390_load_address (target, src);
3183 }
3184
3185
3186 /* Return true if ADDR is a valid memory address.
3187 STRICT specifies whether strict register checking applies. */
3188
3189 static bool
3190 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3191 {
3192 struct s390_address ad;
3193
3194 if (TARGET_Z10
3195 && larl_operand (addr, VOIDmode)
3196 && (mode == VOIDmode
3197 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3198 return true;
3199
3200 if (!s390_decompose_address (addr, &ad))
3201 return false;
3202
3203 if (strict)
3204 {
3205 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3206 return false;
3207
3208 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3209 return false;
3210 }
3211 else
3212 {
3213 if (ad.base
3214 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3215 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3216 return false;
3217
3218 if (ad.indx
3219 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3220 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
3221 return false;
3222 }
3223 return true;
3224 }
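
/* For illustration only: the non-strict variant above also accepts any
   pseudo register (REGNO >= FIRST_PSEUDO_REGISTER) as base or index,
   while the strict variant insists on registers satisfying
   REGNO_OK_FOR_BASE_P / REGNO_OK_FOR_INDEX_P.  */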
3225
3226 /* Return true if OP is a valid operand for the LA instruction.
3227 In 31-bit, we need to prove that the result is used as an
3228 address, as LA performs only a 31-bit addition. */
3229
3230 bool
3231 legitimate_la_operand_p (rtx op)
3232 {
3233 struct s390_address addr;
3234 if (!s390_decompose_address (op, &addr))
3235 return false;
3236
3237 return (TARGET_64BIT || addr.pointer);
3238 }
3239
3240 /* Return true if it is valid *and* preferable to use LA to
3241 compute the sum of OP1 and OP2. */
3242
3243 bool
3244 preferred_la_operand_p (rtx op1, rtx op2)
3245 {
3246 struct s390_address addr;
3247
3248 if (op2 != const0_rtx)
3249 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3250
3251 if (!s390_decompose_address (op1, &addr))
3252 return false;
3253 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3254 return false;
3255 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3256 return false;
3257
3258 /* Avoid LA instructions with index register on z196; it is
3259 preferable to use regular add instructions when possible. */
3260 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3261 return false;
3262
3263 if (!TARGET_64BIT && !addr.pointer)
3264 return false;
3265
3266 if (addr.pointer)
3267 return true;
3268
3269 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3270 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3271 return true;
3272
3273 return false;
3274 }
3275
3276 /* Emit a forced load-address operation to load SRC into DST.
3277 This will use the LOAD ADDRESS instruction even in situations
3278 where legitimate_la_operand_p (SRC) returns false. */
3279
3280 void
3281 s390_load_address (rtx dst, rtx src)
3282 {
3283 if (TARGET_64BIT)
3284 emit_move_insn (dst, src);
3285 else
3286 emit_insn (gen_force_la_31 (dst, src));
3287 }
3288
3289 /* Return a legitimate reference for ORIG (an address) using the
3290 register REG. If REG is 0, a new pseudo is generated.
3291
3292 There are two types of references that must be handled:
3293
3294 1. Global data references must load the address from the GOT, via
3295 the PIC reg. An insn is emitted to do this load, and the reg is
3296 returned.
3297
3298 2. Static data references, constant pool addresses, and code labels
3299 compute the address as an offset from the GOT, whose base is in
3300 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3301 differentiate them from global data objects. The returned
3302 address is the PIC reg + an unspec constant.
3303
3304 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3305 reg also appears in the address. */
3306
3307 rtx
3308 legitimize_pic_address (rtx orig, rtx reg)
3309 {
3310 rtx addr = orig;
3311 rtx new_rtx = orig;
3312 rtx base;
3313
3314 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3315
3316 if (GET_CODE (addr) == LABEL_REF
3317 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr)))
3318 {
3319 /* This is a local symbol. */
3320 if (TARGET_CPU_ZARCH && larl_operand (addr, VOIDmode))
3321 {
3322 /* Access local symbols PC-relative via LARL.
3323 This is the same as in the non-PIC case, so it is
3324 handled automatically ... */
3325 }
3326 else
3327 {
3328 /* Access local symbols relative to the GOT. */
3329
3330 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3331
3332 if (reload_in_progress || reload_completed)
3333 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3334
3335 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3336 addr = gen_rtx_CONST (Pmode, addr);
3337 addr = force_const_mem (Pmode, addr);
3338 emit_move_insn (temp, addr);
3339
3340 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3341 if (reg != 0)
3342 {
3343 s390_load_address (reg, new_rtx);
3344 new_rtx = reg;
3345 }
3346 }
3347 }
3348 else if (GET_CODE (addr) == SYMBOL_REF)
3349 {
3350 if (reg == 0)
3351 reg = gen_reg_rtx (Pmode);
3352
3353 if (flag_pic == 1)
3354 {
3355 /* Assume GOT offset < 4k. This is handled the same way
3356 in both 31- and 64-bit code (@GOT). */
3357
3358 if (reload_in_progress || reload_completed)
3359 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3360
3361 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3362 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3363 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3364 new_rtx = gen_const_mem (Pmode, new_rtx);
3365 emit_move_insn (reg, new_rtx);
3366 new_rtx = reg;
3367 }
3368 else if (TARGET_CPU_ZARCH)
3369 {
3370 /* If the GOT offset might be >= 4k, we determine the position
3371 of the GOT entry via a PC-relative LARL (@GOTENT). */
3372
3373 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3374
3375 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3376 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3377
3378 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3379 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3380 emit_move_insn (temp, new_rtx);
3381
3382 new_rtx = gen_const_mem (Pmode, temp);
3383 emit_move_insn (reg, new_rtx);
3384 new_rtx = reg;
3385 }
3386 else
3387 {
3388 /* If the GOT offset might be >= 4k, we have to load it
3389 from the literal pool (@GOT). */
3390
3391 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3392
3393 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3394 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3395
3396 if (reload_in_progress || reload_completed)
3397 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3398
3399 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3400 addr = gen_rtx_CONST (Pmode, addr);
3401 addr = force_const_mem (Pmode, addr);
3402 emit_move_insn (temp, addr);
3403
3404 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3405 new_rtx = gen_const_mem (Pmode, new_rtx);
3406 emit_move_insn (reg, new_rtx);
3407 new_rtx = reg;
3408 }
3409 }
3410 else
3411 {
3412 if (GET_CODE (addr) == CONST)
3413 {
3414 addr = XEXP (addr, 0);
3415 if (GET_CODE (addr) == UNSPEC)
3416 {
3417 gcc_assert (XVECLEN (addr, 0) == 1);
3418 switch (XINT (addr, 1))
3419 {
3420 /* If someone moved a GOT-relative UNSPEC
3421 out of the literal pool, force them back in. */
3422 case UNSPEC_GOTOFF:
3423 case UNSPEC_PLTOFF:
3424 new_rtx = force_const_mem (Pmode, orig);
3425 break;
3426
3427 /* @GOT is OK as is if small. */
3428 case UNSPEC_GOT:
3429 if (flag_pic == 2)
3430 new_rtx = force_const_mem (Pmode, orig);
3431 break;
3432
3433 /* @GOTENT is OK as is. */
3434 case UNSPEC_GOTENT:
3435 break;
3436
3437 /* @PLT is OK as is on 64-bit, must be converted to
3438 GOT-relative @PLTOFF on 31-bit. */
3439 case UNSPEC_PLT:
3440 if (!TARGET_CPU_ZARCH)
3441 {
3442 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3443
3444 if (reload_in_progress || reload_completed)
3445 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3446
3447 addr = XVECEXP (addr, 0, 0);
3448 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3449 UNSPEC_PLTOFF);
3450 addr = gen_rtx_CONST (Pmode, addr);
3451 addr = force_const_mem (Pmode, addr);
3452 emit_move_insn (temp, addr);
3453
3454 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3455 if (reg != 0)
3456 {
3457 s390_load_address (reg, new_rtx);
3458 new_rtx = reg;
3459 }
3460 }
3461 break;
3462
3463 /* Everything else cannot happen. */
3464 default:
3465 gcc_unreachable ();
3466 }
3467 }
3468 else
3469 gcc_assert (GET_CODE (addr) == PLUS);
3470 }
3471 if (GET_CODE (addr) == PLUS)
3472 {
3473 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
3474
3475 gcc_assert (!TLS_SYMBOLIC_CONST (op0));
3476 gcc_assert (!TLS_SYMBOLIC_CONST (op1));
3477
3478 /* Check first to see if this is a constant offset
3479 from a local symbol reference. */
3480 if ((GET_CODE (op0) == LABEL_REF
3481 || (GET_CODE (op0) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op0)))
3482 && GET_CODE (op1) == CONST_INT)
3483 {
3484 if (TARGET_CPU_ZARCH
3485 && larl_operand (op0, VOIDmode)
3486 && INTVAL (op1) < (HOST_WIDE_INT)1 << 31
3487 && INTVAL (op1) >= -((HOST_WIDE_INT)1 << 31))
3488 {
3489 if (INTVAL (op1) & 1)
3490 {
3491 /* LARL can't handle odd offsets, so emit a
3492 pair of LARL and LA. */
3493 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3494
3495 if (!DISP_IN_RANGE (INTVAL (op1)))
3496 {
3497 HOST_WIDE_INT even = INTVAL (op1) - 1;
3498 op0 = gen_rtx_PLUS (Pmode, op0, GEN_INT (even));
3499 op0 = gen_rtx_CONST (Pmode, op0);
3500 op1 = const1_rtx;
3501 }
3502
3503 emit_move_insn (temp, op0);
3504 new_rtx = gen_rtx_PLUS (Pmode, temp, op1);
3505
3506 if (reg != 0)
3507 {
3508 s390_load_address (reg, new_rtx);
3509 new_rtx = reg;
3510 }
3511 }
3512 else
3513 {
3514 /* If the offset is even, we can just use LARL.
3515 This will happen automatically. */
3516 }
3517 }
3518 else
3519 {
3520 /* Access local symbols relative to the GOT. */
3521
3522 rtx temp = reg? reg : gen_reg_rtx (Pmode);
3523
3524 if (reload_in_progress || reload_completed)
3525 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3526
3527 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
3528 UNSPEC_GOTOFF);
3529 addr = gen_rtx_PLUS (Pmode, addr, op1);
3530 addr = gen_rtx_CONST (Pmode, addr);
3531 addr = force_const_mem (Pmode, addr);
3532 emit_move_insn (temp, addr);
3533
3534 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3535 if (reg != 0)
3536 {
3537 s390_load_address (reg, new_rtx);
3538 new_rtx = reg;
3539 }
3540 }
3541 }
3542
3543 /* Now, check whether it is a GOT relative symbol plus offset
3544 that was pulled out of the literal pool. Force it back in. */
3545
3546 else if (GET_CODE (op0) == UNSPEC
3547 && GET_CODE (op1) == CONST_INT
3548 && XINT (op0, 1) == UNSPEC_GOTOFF)
3549 {
3550 gcc_assert (XVECLEN (op0, 0) == 1);
3551
3552 new_rtx = force_const_mem (Pmode, orig);
3553 }
3554
3555 /* Otherwise, compute the sum. */
3556 else
3557 {
3558 base = legitimize_pic_address (XEXP (addr, 0), reg);
3559 new_rtx = legitimize_pic_address (XEXP (addr, 1),
3560 base == reg ? NULL_RTX : reg);
3561 if (GET_CODE (new_rtx) == CONST_INT)
3562 new_rtx = plus_constant (base, INTVAL (new_rtx));
3563 else
3564 {
3565 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3566 {
3567 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3568 new_rtx = XEXP (new_rtx, 1);
3569 }
3570 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3571 }
3572
3573 if (GET_CODE (new_rtx) == CONST)
3574 new_rtx = XEXP (new_rtx, 0);
3575 new_rtx = force_operand (new_rtx, 0);
3576 }
3577 }
3578 }
3579 return new_rtx;
3580 }
3581
3582 /* Load the thread pointer into a register. */
3583
3584 rtx
3585 s390_get_thread_pointer (void)
3586 {
3587 rtx tp = gen_reg_rtx (Pmode);
3588
3589 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3590 mark_reg_pointer (tp, BITS_PER_WORD);
3591
3592 return tp;
3593 }
3594
3595 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
3596    in s390_tls_symbol which always refers to __tls_get_offset.
3597    The returned offset is written to RESULT_REG and a USE rtx is
3598    generated for TLS_CALL. */
3599
3600 static GTY(()) rtx s390_tls_symbol;
3601
3602 static void
3603 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3604 {
3605 rtx insn;
3606
3607 gcc_assert (flag_pic);
3608
3609 if (!s390_tls_symbol)
3610 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3611
3612 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3613 gen_rtx_REG (Pmode, RETURN_REGNUM));
3614
3615 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3616 RTL_CONST_CALL_P (insn) = 1;
3617 }
3618
3619 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3620 this (thread-local) address. REG may be used as temporary. */
3621
3622 static rtx
3623 legitimize_tls_address (rtx addr, rtx reg)
3624 {
3625 rtx new_rtx, tls_call, temp, base, r2, insn;
3626
3627 if (GET_CODE (addr) == SYMBOL_REF)
3628 switch (tls_symbolic_operand (addr))
3629 {
3630 case TLS_MODEL_GLOBAL_DYNAMIC:
3631 start_sequence ();
3632 r2 = gen_rtx_REG (Pmode, 2);
3633 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3634 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3635 new_rtx = force_const_mem (Pmode, new_rtx);
3636 emit_move_insn (r2, new_rtx);
3637 s390_emit_tls_call_insn (r2, tls_call);
3638 insn = get_insns ();
3639 end_sequence ();
3640
3641 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3642 temp = gen_reg_rtx (Pmode);
3643 emit_libcall_block (insn, temp, r2, new_rtx);
3644
3645 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3646 if (reg != 0)
3647 {
3648 s390_load_address (reg, new_rtx);
3649 new_rtx = reg;
3650 }
3651 break;
3652
3653 case TLS_MODEL_LOCAL_DYNAMIC:
3654 start_sequence ();
3655 r2 = gen_rtx_REG (Pmode, 2);
3656 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3657 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3658 new_rtx = force_const_mem (Pmode, new_rtx);
3659 emit_move_insn (r2, new_rtx);
3660 s390_emit_tls_call_insn (r2, tls_call);
3661 insn = get_insns ();
3662 end_sequence ();
3663
3664 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3665 temp = gen_reg_rtx (Pmode);
3666 emit_libcall_block (insn, temp, r2, new_rtx);
3667
3668 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3669 base = gen_reg_rtx (Pmode);
3670 s390_load_address (base, new_rtx);
3671
3672 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3673 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3674 new_rtx = force_const_mem (Pmode, new_rtx);
3675 temp = gen_reg_rtx (Pmode);
3676 emit_move_insn (temp, new_rtx);
3677
3678 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3679 if (reg != 0)
3680 {
3681 s390_load_address (reg, new_rtx);
3682 new_rtx = reg;
3683 }
3684 break;
3685
3686 case TLS_MODEL_INITIAL_EXEC:
3687 if (flag_pic == 1)
3688 {
3689 /* Assume GOT offset < 4k. This is handled the same way
3690 in both 31- and 64-bit code. */
3691
3692 if (reload_in_progress || reload_completed)
3693 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3694
3695 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3696 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3697 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3698 new_rtx = gen_const_mem (Pmode, new_rtx);
3699 temp = gen_reg_rtx (Pmode);
3700 emit_move_insn (temp, new_rtx);
3701 }
3702 else if (TARGET_CPU_ZARCH)
3703 {
3704 /* If the GOT offset might be >= 4k, we determine the position
3705 of the GOT entry via a PC-relative LARL. */
3706
3707 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3708 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3709 temp = gen_reg_rtx (Pmode);
3710 emit_move_insn (temp, new_rtx);
3711
3712 new_rtx = gen_const_mem (Pmode, temp);
3713 temp = gen_reg_rtx (Pmode);
3714 emit_move_insn (temp, new_rtx);
3715 }
3716 else if (flag_pic)
3717 {
3718 /* If the GOT offset might be >= 4k, we have to load it
3719 from the literal pool. */
3720
3721 if (reload_in_progress || reload_completed)
3722 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3723
3724 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3725 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3726 new_rtx = force_const_mem (Pmode, new_rtx);
3727 temp = gen_reg_rtx (Pmode);
3728 emit_move_insn (temp, new_rtx);
3729
3730 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3731 new_rtx = gen_const_mem (Pmode, new_rtx);
3732
3733 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3734 temp = gen_reg_rtx (Pmode);
3735 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3736 }
3737 else
3738 {
3739 /* In position-dependent code, load the absolute address of
3740 the GOT entry from the literal pool. */
3741
3742 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3743 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3744 new_rtx = force_const_mem (Pmode, new_rtx);
3745 temp = gen_reg_rtx (Pmode);
3746 emit_move_insn (temp, new_rtx);
3747
3748 new_rtx = temp;
3749 new_rtx = gen_const_mem (Pmode, new_rtx);
3750 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3751 temp = gen_reg_rtx (Pmode);
3752 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3753 }
3754
3755 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3756 if (reg != 0)
3757 {
3758 s390_load_address (reg, new_rtx);
3759 new_rtx = reg;
3760 }
3761 break;
3762
3763 case TLS_MODEL_LOCAL_EXEC:
3764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3765 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3766 new_rtx = force_const_mem (Pmode, new_rtx);
3767 temp = gen_reg_rtx (Pmode);
3768 emit_move_insn (temp, new_rtx);
3769
3770 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3771 if (reg != 0)
3772 {
3773 s390_load_address (reg, new_rtx);
3774 new_rtx = reg;
3775 }
3776 break;
3777
3778 default:
3779 gcc_unreachable ();
3780 }
3781
3782 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3783 {
3784 switch (XINT (XEXP (addr, 0), 1))
3785 {
3786 case UNSPEC_INDNTPOFF:
3787 gcc_assert (TARGET_CPU_ZARCH);
3788 new_rtx = addr;
3789 break;
3790
3791 default:
3792 gcc_unreachable ();
3793 }
3794 }
3795
3796 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3797 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3798 {
3799 new_rtx = XEXP (XEXP (addr, 0), 0);
3800 if (GET_CODE (new_rtx) != SYMBOL_REF)
3801 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3802
3803 new_rtx = legitimize_tls_address (new_rtx, reg);
3804 new_rtx = plus_constant (new_rtx, INTVAL (XEXP (XEXP (addr, 0), 1)));
3805 new_rtx = force_operand (new_rtx, 0);
3806 }
3807
3808 else
3809 gcc_unreachable (); /* for now ... */
3810
3811 return new_rtx;
3812 }
3813
3814 /* Emit insns making the address in operands[1] valid for a standard
3815 move to operands[0]. operands[1] is replaced by an address which
3816 should be used instead of the former RTX to emit the move
3817 pattern. */
3818
3819 void
3820 emit_symbolic_move (rtx *operands)
3821 {
3822 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3823
3824 if (GET_CODE (operands[0]) == MEM)
3825 operands[1] = force_reg (Pmode, operands[1]);
3826 else if (TLS_SYMBOLIC_CONST (operands[1]))
3827 operands[1] = legitimize_tls_address (operands[1], temp);
3828 else if (flag_pic)
3829 operands[1] = legitimize_pic_address (operands[1], temp);
3830 }
3831
3832 /* Try machine-dependent ways of modifying an illegitimate address X
3833 to be legitimate. If we find one, return the new, valid address.
3834
3835 OLDX is the address as it was before break_out_memory_refs was called.
3836 In some cases it is useful to look at this to decide what needs to be done.
3837
3838 MODE is the mode of the operand pointed to by X.
3839
3840 When -fpic is used, special handling is needed for symbolic references.
3841 See comments by legitimize_pic_address for details. */
3842
3843 static rtx
3844 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3845 enum machine_mode mode ATTRIBUTE_UNUSED)
3846 {
3847 rtx constant_term = const0_rtx;
3848
3849 if (TLS_SYMBOLIC_CONST (x))
3850 {
3851 x = legitimize_tls_address (x, 0);
3852
3853 if (s390_legitimate_address_p (mode, x, FALSE))
3854 return x;
3855 }
3856 else if (GET_CODE (x) == PLUS
3857 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3858 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3859 {
3860 return x;
3861 }
3862 else if (flag_pic)
3863 {
3864 if (SYMBOLIC_CONST (x)
3865 || (GET_CODE (x) == PLUS
3866 && (SYMBOLIC_CONST (XEXP (x, 0))
3867 || SYMBOLIC_CONST (XEXP (x, 1)))))
3868 x = legitimize_pic_address (x, 0);
3869
3870 if (s390_legitimate_address_p (mode, x, FALSE))
3871 return x;
3872 }
3873
3874 x = eliminate_constant_term (x, &constant_term);
3875
3876 /* Optimize loading of large displacements by splitting them
3877    into a multiple of 4K and the rest; this allows the
3878 former to be CSE'd if possible.
3879
3880 Don't do this if the displacement is added to a register
3881 pointing into the stack frame, as the offsets will
3882 change later anyway. */
3883
3884 if (GET_CODE (constant_term) == CONST_INT
3885 && !TARGET_LONG_DISPLACEMENT
3886 && !DISP_IN_RANGE (INTVAL (constant_term))
3887 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3888 {
3889 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3890 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3891
3892 rtx temp = gen_reg_rtx (Pmode);
3893 rtx val = force_operand (GEN_INT (upper), temp);
3894 if (val != temp)
3895 emit_move_insn (temp, val);
3896
3897 x = gen_rtx_PLUS (Pmode, x, temp);
3898 constant_term = GEN_INT (lower);
3899 }
3900
3901 if (GET_CODE (x) == PLUS)
3902 {
3903 if (GET_CODE (XEXP (x, 0)) == REG)
3904 {
3905 rtx temp = gen_reg_rtx (Pmode);
3906 rtx val = force_operand (XEXP (x, 1), temp);
3907 if (val != temp)
3908 emit_move_insn (temp, val);
3909
3910 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3911 }
3912
3913 else if (GET_CODE (XEXP (x, 1)) == REG)
3914 {
3915 rtx temp = gen_reg_rtx (Pmode);
3916 rtx val = force_operand (XEXP (x, 0), temp);
3917 if (val != temp)
3918 emit_move_insn (temp, val);
3919
3920 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
3921 }
3922 }
3923
3924 if (constant_term != const0_rtx)
3925 x = gen_rtx_PLUS (Pmode, x, constant_term);
3926
3927 return x;
3928 }
3929
3930 /* Try a machine-dependent way of reloading an illegitimate address AD
3931 operand. If we find one, push the reload and return the new address.
3932
3933 MODE is the mode of the enclosing MEM. OPNUM is the operand number
3934 and TYPE is the reload type of the current reload. */
3935
3936 rtx
3937 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
3938 int opnum, int type)
3939 {
3940 if (!optimize || TARGET_LONG_DISPLACEMENT)
3941 return NULL_RTX;
3942
3943 if (GET_CODE (ad) == PLUS)
3944 {
3945 rtx tem = simplify_binary_operation (PLUS, Pmode,
3946 XEXP (ad, 0), XEXP (ad, 1));
3947 if (tem)
3948 ad = tem;
3949 }
3950
3951 if (GET_CODE (ad) == PLUS
3952 && GET_CODE (XEXP (ad, 0)) == REG
3953 && GET_CODE (XEXP (ad, 1)) == CONST_INT
3954 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
3955 {
3956 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
3957 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
3958 rtx cst, tem, new_rtx;
3959
3960 cst = GEN_INT (upper);
3961 if (!legitimate_reload_constant_p (cst))
3962 cst = force_const_mem (Pmode, cst);
3963
3964 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
3965 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
3966
3967 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
3968 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
3969 opnum, (enum reload_type) type);
3970 return new_rtx;
3971 }
3972
3973 return NULL_RTX;
3974 }
3975
3976 /* Emit code to move LEN bytes from SRC to DST.  */
3977
3978 void
3979 s390_expand_movmem (rtx dst, rtx src, rtx len)
3980 {
3981 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
3982 {
3983 if (INTVAL (len) > 0)
3984 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
3985 }
3986
3987 else if (TARGET_MVCLE)
3988 {
3989 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
3990 }
3991
3992 else
3993 {
3994 rtx dst_addr, src_addr, count, blocks, temp;
3995 rtx loop_start_label = gen_label_rtx ();
3996 rtx loop_end_label = gen_label_rtx ();
3997 rtx end_label = gen_label_rtx ();
3998 enum machine_mode mode;
3999
4000 mode = GET_MODE (len);
4001 if (mode == VOIDmode)
4002 mode = Pmode;
4003
4004 dst_addr = gen_reg_rtx (Pmode);
4005 src_addr = gen_reg_rtx (Pmode);
4006 count = gen_reg_rtx (mode);
4007 blocks = gen_reg_rtx (mode);
4008
4009 convert_move (count, len, 1);
4010 emit_cmp_and_jump_insns (count, const0_rtx,
4011 EQ, NULL_RTX, mode, 1, end_label);
4012
4013 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4014 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4015 dst = change_address (dst, VOIDmode, dst_addr);
4016 src = change_address (src, VOIDmode, src_addr);
4017
4018 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4019 OPTAB_DIRECT);
4020 if (temp != count)
4021 emit_move_insn (count, temp);
4022
4023 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4024 OPTAB_DIRECT);
4025 if (temp != blocks)
4026 emit_move_insn (blocks, temp);
4027
4028 emit_cmp_and_jump_insns (blocks, const0_rtx,
4029 EQ, NULL_RTX, mode, 1, loop_end_label);
4030
4031 emit_label (loop_start_label);
4032
4033 if (TARGET_Z10
4034 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4035 {
4036 rtx prefetch;
4037
4038 /* Issue a read prefetch for the +3 cache line. */
4039 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4040 const0_rtx, const0_rtx);
4041 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4042 emit_insn (prefetch);
4043
4044 /* Issue a write prefetch for the +3 cache line. */
4045 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4046 const1_rtx, const0_rtx);
4047 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4048 emit_insn (prefetch);
4049 }
4050
4051 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4052 s390_load_address (dst_addr,
4053 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4054 s390_load_address (src_addr,
4055 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4056
4057 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4058 OPTAB_DIRECT);
4059 if (temp != blocks)
4060 emit_move_insn (blocks, temp);
4061
4062 emit_cmp_and_jump_insns (blocks, const0_rtx,
4063 EQ, NULL_RTX, mode, 1, loop_end_label);
4064
4065 emit_jump (loop_start_label);
4066 emit_label (loop_end_label);
4067
4068 emit_insn (gen_movmem_short (dst, src,
4069 convert_to_mode (Pmode, count, 1)));
4070 emit_label (end_label);
4071 }
4072 }
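/* Illustrative sketch of the bookkeeping done by the runtime loop above;
   the helper name is made up and the block is not compiled.  It shows why
   COUNT is decremented first (the MVC length field encodes "bytes - 1")
   and how BLOCKS counts the full 256-byte chunks.  */
#if 0
static void
s390_movmem_loop_model (unsigned char *dst, const unsigned char *src,
			unsigned long len)
{
  unsigned long count, blocks;

  if (len == 0)
    return;

  count = len - 1;		/* MVC length field is "bytes - 1".  */
  blocks = count >> 8;		/* Number of full 256-byte moves.  */

  while (blocks--)
    {
      __builtin_memcpy (dst, src, 256);
      dst += 256;
      src += 256;
    }

  /* Trailing piece: only the low 8 bits of COUNT matter here,
     i.e. (count & 0xff) + 1 bytes remain.  */
  __builtin_memcpy (dst, src, (count & 0xff) + 1);
}
#endif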
4073
4074 /* Emit code to set LEN bytes at DST to VAL.
4075 Make use of clrmem if VAL is zero. */
4076
4077 void
4078 s390_expand_setmem (rtx dst, rtx len, rtx val)
4079 {
4080 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4081 return;
4082
4083 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4084
4085 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4086 {
4087 if (val == const0_rtx && INTVAL (len) <= 256)
4088 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4089 else
4090 {
4091 /* Initialize memory by storing the first byte. */
4092 emit_move_insn (adjust_address (dst, QImode, 0), val);
4093
4094 if (INTVAL (len) > 1)
4095 {
4096 /* Initiate 1 byte overlap move.
4097 The first byte of DST is propagated through DSTP1.
4098 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4099 DST is set to size 1 so the rest of the memory location
4100 does not count as a source operand. */
4101 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4102 set_mem_size (dst, const1_rtx);
4103
4104 emit_insn (gen_movmem_short (dstp1, dst,
4105 GEN_INT (INTVAL (len) - 2)));
4106 }
4107 }
4108 }
4109
4110 else if (TARGET_MVCLE)
4111 {
4112 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4113 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4114 }
4115
4116 else
4117 {
4118 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4119 rtx loop_start_label = gen_label_rtx ();
4120 rtx loop_end_label = gen_label_rtx ();
4121 rtx end_label = gen_label_rtx ();
4122 enum machine_mode mode;
4123
4124 mode = GET_MODE (len);
4125 if (mode == VOIDmode)
4126 mode = Pmode;
4127
4128 dst_addr = gen_reg_rtx (Pmode);
4129 count = gen_reg_rtx (mode);
4130 blocks = gen_reg_rtx (mode);
4131
4132 convert_move (count, len, 1);
4133 emit_cmp_and_jump_insns (count, const0_rtx,
4134 EQ, NULL_RTX, mode, 1, end_label);
4135
4136 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4137 dst = change_address (dst, VOIDmode, dst_addr);
4138
4139 if (val == const0_rtx)
4140 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4141 OPTAB_DIRECT);
4142 else
4143 {
4144 dstp1 = adjust_address (dst, VOIDmode, 1);
4145 set_mem_size (dst, const1_rtx);
4146
4147 /* Initialize memory by storing the first byte. */
4148 emit_move_insn (adjust_address (dst, QImode, 0), val);
4149
4150 /* If count is 1 we are done. */
4151 emit_cmp_and_jump_insns (count, const1_rtx,
4152 EQ, NULL_RTX, mode, 1, end_label);
4153
4154 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4155 OPTAB_DIRECT);
4156 }
4157 if (temp != count)
4158 emit_move_insn (count, temp);
4159
4160 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4161 OPTAB_DIRECT);
4162 if (temp != blocks)
4163 emit_move_insn (blocks, temp);
4164
4165 emit_cmp_and_jump_insns (blocks, const0_rtx,
4166 EQ, NULL_RTX, mode, 1, loop_end_label);
4167
4168 emit_label (loop_start_label);
4169
4170 if (TARGET_Z10
4171 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4172 {
4173 /* Issue a write prefetch for the +4 cache line. */
4174 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4175 GEN_INT (1024)),
4176 const1_rtx, const0_rtx);
4177 emit_insn (prefetch);
4178 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4179 }
4180
4181 if (val == const0_rtx)
4182 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4183 else
4184 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4185 s390_load_address (dst_addr,
4186 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4187
4188 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4189 OPTAB_DIRECT);
4190 if (temp != blocks)
4191 emit_move_insn (blocks, temp);
4192
4193 emit_cmp_and_jump_insns (blocks, const0_rtx,
4194 EQ, NULL_RTX, mode, 1, loop_end_label);
4195
4196 emit_jump (loop_start_label);
4197 emit_label (loop_end_label);
4198
4199 if (val == const0_rtx)
4200 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4201 else
4202 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4203 emit_label (end_label);
4204 }
4205 }
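/* Illustrative sketch of the 1-byte overlap trick used above; the helper
   name is made up and the block is not compiled.  Because MVC copies
   strictly left to right, an overlapping copy DST+1 <- DST replicates the
   seeded first byte across the whole area.  */
#if 0
static void
s390_setmem_overlap_model (unsigned char *dst, unsigned char val,
			   unsigned long len)
{
  unsigned long i;

  if (len == 0)
    return;

  dst[0] = val;			/* Seed the first byte.  */
  for (i = 1; i < len; i++)	/* Byte-by-byte, like MVC dst+1,dst.  */
    dst[i] = dst[i - 1];
}
#endif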
4206
4207 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4208 and return the result in TARGET. */
4209
4210 void
4211 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4212 {
4213 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4214 rtx tmp;
4215
4216 /* As the result of CMPINT is inverted compared to what we need,
4217 we have to swap the operands. */
4218 tmp = op0; op0 = op1; op1 = tmp;
4219
4220 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4221 {
4222 if (INTVAL (len) > 0)
4223 {
4224 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4225 emit_insn (gen_cmpint (target, ccreg));
4226 }
4227 else
4228 emit_move_insn (target, const0_rtx);
4229 }
4230 else if (TARGET_MVCLE)
4231 {
4232 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4233 emit_insn (gen_cmpint (target, ccreg));
4234 }
4235 else
4236 {
4237 rtx addr0, addr1, count, blocks, temp;
4238 rtx loop_start_label = gen_label_rtx ();
4239 rtx loop_end_label = gen_label_rtx ();
4240 rtx end_label = gen_label_rtx ();
4241 enum machine_mode mode;
4242
4243 mode = GET_MODE (len);
4244 if (mode == VOIDmode)
4245 mode = Pmode;
4246
4247 addr0 = gen_reg_rtx (Pmode);
4248 addr1 = gen_reg_rtx (Pmode);
4249 count = gen_reg_rtx (mode);
4250 blocks = gen_reg_rtx (mode);
4251
4252 convert_move (count, len, 1);
4253 emit_cmp_and_jump_insns (count, const0_rtx,
4254 EQ, NULL_RTX, mode, 1, end_label);
4255
4256 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4257 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4258 op0 = change_address (op0, VOIDmode, addr0);
4259 op1 = change_address (op1, VOIDmode, addr1);
4260
4261 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4262 OPTAB_DIRECT);
4263 if (temp != count)
4264 emit_move_insn (count, temp);
4265
4266 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4267 OPTAB_DIRECT);
4268 if (temp != blocks)
4269 emit_move_insn (blocks, temp);
4270
4271 emit_cmp_and_jump_insns (blocks, const0_rtx,
4272 EQ, NULL_RTX, mode, 1, loop_end_label);
4273
4274 emit_label (loop_start_label);
4275
4276 if (TARGET_Z10
4277 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4278 {
4279 rtx prefetch;
4280
4281 /* Issue a read prefetch for the +2 cache line of operand 1. */
4282 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4283 const0_rtx, const0_rtx);
4284 emit_insn (prefetch);
4285 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4286
4287 /* Issue a read prefetch for the +2 cache line of operand 2. */
4288 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4289 const0_rtx, const0_rtx);
4290 emit_insn (prefetch);
4291 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4292 }
4293
4294 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4295 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4296 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4297 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4298 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4299 emit_jump_insn (temp);
4300
4301 s390_load_address (addr0,
4302 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4303 s390_load_address (addr1,
4304 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4305
4306 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4307 OPTAB_DIRECT);
4308 if (temp != blocks)
4309 emit_move_insn (blocks, temp);
4310
4311 emit_cmp_and_jump_insns (blocks, const0_rtx,
4312 EQ, NULL_RTX, mode, 1, loop_end_label);
4313
4314 emit_jump (loop_start_label);
4315 emit_label (loop_end_label);
4316
4317 emit_insn (gen_cmpmem_short (op0, op1,
4318 convert_to_mode (Pmode, count, 1)));
4319 emit_label (end_label);
4320
4321 emit_insn (gen_cmpint (target, ccreg));
4322 }
4323 }
4324
4325
4326 /* Expand conditional increment or decrement using alc/slb instructions.
4327 Should generate code setting DST to either SRC or SRC + INCREMENT,
4328 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4329 Returns true if successful, false otherwise.
4330
4331 That makes it possible to implement some if-constructs without jumps e.g.:
4332 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4333 unsigned int a, b, c;
4334 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4335 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4336 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4337 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4338
4339 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4340 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4341 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4342 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4343 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4344
4345 bool
4346 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4347 rtx dst, rtx src, rtx increment)
4348 {
4349 enum machine_mode cmp_mode;
4350 enum machine_mode cc_mode;
4351 rtx op_res;
4352 rtx insn;
4353 rtvec p;
4354 int ret;
4355
4356 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4357 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4358 cmp_mode = SImode;
4359 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4360 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4361 cmp_mode = DImode;
4362 else
4363 return false;
4364
4365 /* Try ADD LOGICAL WITH CARRY. */
4366 if (increment == const1_rtx)
4367 {
4368 /* Determine CC mode to use. */
4369 if (cmp_code == EQ || cmp_code == NE)
4370 {
4371 if (cmp_op1 != const0_rtx)
4372 {
4373 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4374 NULL_RTX, 0, OPTAB_WIDEN);
4375 cmp_op1 = const0_rtx;
4376 }
4377
4378 cmp_code = cmp_code == EQ ? LEU : GTU;
4379 }
4380
4381 if (cmp_code == LTU || cmp_code == LEU)
4382 {
4383 rtx tem = cmp_op0;
4384 cmp_op0 = cmp_op1;
4385 cmp_op1 = tem;
4386 cmp_code = swap_condition (cmp_code);
4387 }
4388
4389 switch (cmp_code)
4390 {
4391 case GTU:
4392 cc_mode = CCUmode;
4393 break;
4394
4395 case GEU:
4396 cc_mode = CCL3mode;
4397 break;
4398
4399 default:
4400 return false;
4401 }
4402
4403 /* Emit comparison instruction pattern. */
4404 if (!register_operand (cmp_op0, cmp_mode))
4405 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4406
4407 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4408 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4409 /* We use insn_invalid_p here to add clobbers if required. */
4410 ret = insn_invalid_p (emit_insn (insn));
4411 gcc_assert (!ret);
4412
4413 /* Emit ALC instruction pattern. */
4414 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4415 gen_rtx_REG (cc_mode, CC_REGNUM),
4416 const0_rtx);
4417
4418 if (src != const0_rtx)
4419 {
4420 if (!register_operand (src, GET_MODE (dst)))
4421 src = force_reg (GET_MODE (dst), src);
4422
4423 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4424 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4425 }
4426
4427 p = rtvec_alloc (2);
4428 RTVEC_ELT (p, 0) =
4429 gen_rtx_SET (VOIDmode, dst, op_res);
4430 RTVEC_ELT (p, 1) =
4431 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4432 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4433
4434 return true;
4435 }
4436
4437 /* Try SUBTRACT LOGICAL WITH BORROW. */
4438 if (increment == constm1_rtx)
4439 {
4440 /* Determine CC mode to use. */
4441 if (cmp_code == EQ || cmp_code == NE)
4442 {
4443 if (cmp_op1 != const0_rtx)
4444 {
4445 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4446 NULL_RTX, 0, OPTAB_WIDEN);
4447 cmp_op1 = const0_rtx;
4448 }
4449
4450 cmp_code = cmp_code == EQ ? LEU : GTU;
4451 }
4452
4453 if (cmp_code == GTU || cmp_code == GEU)
4454 {
4455 rtx tem = cmp_op0;
4456 cmp_op0 = cmp_op1;
4457 cmp_op1 = tem;
4458 cmp_code = swap_condition (cmp_code);
4459 }
4460
4461 switch (cmp_code)
4462 {
4463 case LEU:
4464 cc_mode = CCUmode;
4465 break;
4466
4467 case LTU:
4468 cc_mode = CCL3mode;
4469 break;
4470
4471 default:
4472 return false;
4473 }
4474
4475 /* Emit comparison instruction pattern. */
4476 if (!register_operand (cmp_op0, cmp_mode))
4477 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4478
4479 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4480 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4481 /* We use insn_invalid_p here to add clobbers if required. */
4482 ret = insn_invalid_p (emit_insn (insn));
4483 gcc_assert (!ret);
4484
4485 /* Emit SLB instruction pattern. */
4486 if (!register_operand (src, GET_MODE (dst)))
4487 src = force_reg (GET_MODE (dst), src);
4488
4489 op_res = gen_rtx_MINUS (GET_MODE (dst),
4490 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4491 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4492 gen_rtx_REG (cc_mode, CC_REGNUM),
4493 const0_rtx));
4494 p = rtvec_alloc (2);
4495 RTVEC_ELT (p, 0) =
4496 gen_rtx_SET (VOIDmode, dst, op_res);
4497 RTVEC_ELT (p, 1) =
4498 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4499 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4500
4501 return true;
4502 }
4503
4504 return false;
4505 }
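/* Illustrative user-level source for the transformation documented above;
   the function name is made up and the block is not compiled.  On a zArch
   target this conditional increment can be emitted branch-free as a
   compare followed by ALC (add logical with carry).  */
#if 0
unsigned int
cond_inc_example (unsigned int a, unsigned int b, unsigned int c)
{
  if (a < b)
    c++;
  return c;
}
#endif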
4506
4507 /* Expand code for the insv template. Return true if successful. */
4508
4509 bool
4510 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4511 {
4512 int bitsize = INTVAL (op1);
4513 int bitpos = INTVAL (op2);
4514
4515 /* On z10 we can use the risbg instruction to implement insv. */
4516 if (TARGET_Z10
4517 && ((GET_MODE (dest) == DImode && GET_MODE (src) == DImode)
4518 || (GET_MODE (dest) == SImode && GET_MODE (src) == SImode)))
4519 {
4520 rtx op;
4521 rtx clobber;
4522
4523 op = gen_rtx_SET (GET_MODE(src),
4524 gen_rtx_ZERO_EXTRACT (GET_MODE (dest), dest, op1, op2),
4525 src);
4526 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4527 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4528
4529 return true;
4530 }
4531
4532 /* We need byte alignment. */
4533 if (bitsize % BITS_PER_UNIT)
4534 return false;
4535
4536 if (bitpos == 0
4537 && memory_operand (dest, VOIDmode)
4538 && (register_operand (src, word_mode)
4539 || const_int_operand (src, VOIDmode)))
4540 {
4541 /* Emit standard pattern if possible. */
4542 enum machine_mode mode = smallest_mode_for_size (bitsize, MODE_INT);
4543 if (GET_MODE_BITSIZE (mode) == bitsize)
4544 emit_move_insn (adjust_address (dest, mode, 0), gen_lowpart (mode, src));
4545
4546 /* (set (ze (mem)) (const_int)). */
4547 else if (const_int_operand (src, VOIDmode))
4548 {
4549 int size = bitsize / BITS_PER_UNIT;
4550 rtx src_mem = adjust_address (force_const_mem (word_mode, src), BLKmode,
4551 GET_MODE_SIZE (word_mode) - size);
4552
4553 dest = adjust_address (dest, BLKmode, 0);
4554 set_mem_size (dest, GEN_INT (size));
4555 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4556 }
4557
4558 /* (set (ze (mem)) (reg)). */
4559 else if (register_operand (src, word_mode))
4560 {
4561 if (bitsize <= GET_MODE_BITSIZE (SImode))
4562 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4563 const0_rtx), src);
4564 else
4565 {
4566 /* Emit st,stcmh sequence. */
4567 int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
4568 int size = stcmh_width / BITS_PER_UNIT;
4569
4570 emit_move_insn (adjust_address (dest, SImode, size),
4571 gen_lowpart (SImode, src));
4572 set_mem_size (dest, GEN_INT (size));
4573 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
4574 (stcmh_width), const0_rtx),
4575 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT
4576 (GET_MODE_BITSIZE (SImode))));
4577 }
4578 }
4579 else
4580 return false;
4581
4582 return true;
4583 }
4584
4585 /* (set (ze (reg)) (const_int)). */
4586 if (TARGET_ZARCH
4587 && register_operand (dest, word_mode)
4588 && (bitpos % 16) == 0
4589 && (bitsize % 16) == 0
4590 && const_int_operand (src, VOIDmode))
4591 {
4592 HOST_WIDE_INT val = INTVAL (src);
4593 int regpos = bitpos + bitsize;
4594
4595 while (regpos > bitpos)
4596 {
4597 enum machine_mode putmode;
4598 int putsize;
4599
4600 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4601 putmode = SImode;
4602 else
4603 putmode = HImode;
4604
4605 putsize = GET_MODE_BITSIZE (putmode);
4606 regpos -= putsize;
4607 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4608 GEN_INT (putsize),
4609 GEN_INT (regpos)),
4610 gen_int_mode (val, putmode));
4611 val >>= putsize;
4612 }
4613 gcc_assert (regpos == bitpos);
4614 return true;
4615 }
4616
4617 return false;
4618 }
4619
4620 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4621 register that holds VAL of mode MODE shifted by COUNT bits. */
4622
4623 static inline rtx
4624 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4625 {
4626 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4627 NULL_RTX, 1, OPTAB_DIRECT);
4628 return expand_simple_binop (SImode, ASHIFT, val, count,
4629 NULL_RTX, 1, OPTAB_DIRECT);
4630 }
4631
4632 /* Structure to hold the initial parameters for a compare_and_swap operation
4633 in HImode and QImode. */
4634
4635 struct alignment_context
4636 {
4637 rtx memsi; /* SI aligned memory location. */
4638 rtx shift; /* Bit offset with regard to lsb. */
4639 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4640 rtx modemaski; /* ~modemask */
4641 bool aligned; /* True if memory is aligned, false else. */
4642 };
4643
4644 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4645 structure AC for transparent simplification, if the memory alignment is known
4646 to be at least 32 bits. MEM is the memory location for the actual operation
4647 and MODE its mode. */
4648
4649 static void
4650 init_alignment_context (struct alignment_context *ac, rtx mem,
4651 enum machine_mode mode)
4652 {
4653 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4654 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4655
4656 if (ac->aligned)
4657 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4658 else
4659 {
4660 /* Alignment is unknown. */
4661 rtx byteoffset, addr, align;
4662
4663 /* Force the address into a register. */
4664 addr = force_reg (Pmode, XEXP (mem, 0));
4665
4666 /* Align it to SImode. */
4667 align = expand_simple_binop (Pmode, AND, addr,
4668 GEN_INT (-GET_MODE_SIZE (SImode)),
4669 NULL_RTX, 1, OPTAB_DIRECT);
4670 /* Generate MEM. */
4671 ac->memsi = gen_rtx_MEM (SImode, align);
4672 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4673 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4674 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4675
4676 /* Calculate shiftcount. */
4677 byteoffset = expand_simple_binop (Pmode, AND, addr,
4678 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4679 NULL_RTX, 1, OPTAB_DIRECT);
4680 /* As we already have some offset, evaluate the remaining distance. */
4681 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4682 NULL_RTX, 1, OPTAB_DIRECT);
4683
4684 }
4685 /* Shift is the byte count, but we need the bitcount. */
4686 ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
4687 NULL_RTX, 1, OPTAB_DIRECT);
4688 /* Calculate masks. */
4689 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4690 GEN_INT (GET_MODE_MASK (mode)), ac->shift,
4691 NULL_RTX, 1, OPTAB_DIRECT);
4692 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
4693 }
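/* Worked example (illustrative): for a QImode operand located at byte
   offset 1 within its aligned SImode word, the code above computes
     shift = ((4 - 1) - 1) * 8 = 16
   i.e. on this big-endian target the byte sits 16 bits above the LSB of
   the containing word, and modemask becomes 0xff << 16.  */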
4694
4695 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4696 the memory location, CMP the old value to compare MEM with and NEW_RTX the value
4697 to set if CMP == MEM.
4698 CMP is never in memory for compare_and_swap_cc because
4699 expand_bool_compare_and_swap puts it into a register for later compare. */
4700
4701 void
4702 s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx new_rtx)
4703 {
4704 struct alignment_context ac;
4705 rtx cmpv, newv, val, resv, cc;
4706 rtx res = gen_reg_rtx (SImode);
4707 rtx csloop = gen_label_rtx ();
4708 rtx csend = gen_label_rtx ();
4709
4710 gcc_assert (register_operand (target, VOIDmode));
4711 gcc_assert (MEM_P (mem));
4712
4713 init_alignment_context (&ac, mem, mode);
4714
4715 /* Shift the values to the correct bit positions. */
4716 if (!(ac.aligned && MEM_P (cmp)))
4717 cmp = s390_expand_mask_and_shift (cmp, mode, ac.shift);
4718 if (!(ac.aligned && MEM_P (new_rtx)))
4719 new_rtx = s390_expand_mask_and_shift (new_rtx, mode, ac.shift);
4720
4721 /* Load full word. Subsequent loads are performed by CS. */
4722 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4723 NULL_RTX, 1, OPTAB_DIRECT);
4724
4725 /* Start CS loop. */
4726 emit_label (csloop);
4727 /* val = "<mem>00..0<mem>"
4728 * cmp = "00..0<cmp>00..0"
4729 * new = "00..0<new>00..0"
4730 */
4731
4732 /* Patch cmp and new with val at correct position. */
4733 if (ac.aligned && MEM_P (cmp))
4734 {
4735 cmpv = force_reg (SImode, val);
4736 store_bit_field (cmpv, GET_MODE_BITSIZE (mode), 0, SImode, cmp);
4737 }
4738 else
4739 cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
4740 NULL_RTX, 1, OPTAB_DIRECT));
4741 if (ac.aligned && MEM_P (new_rtx))
4742 {
4743 newv = force_reg (SImode, val);
4744 store_bit_field (newv, GET_MODE_BITSIZE (mode), 0, SImode, new_rtx);
4745 }
4746 else
4747 newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
4748 NULL_RTX, 1, OPTAB_DIRECT));
4749
4750 /* Jump to end if we're done (likely?). */
4751 s390_emit_jump (csend, s390_emit_compare_and_swap (EQ, res, ac.memsi,
4752 cmpv, newv));
4753
4754 /* Check for changes outside mode. */
4755 resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
4756 NULL_RTX, 1, OPTAB_DIRECT);
4757 cc = s390_emit_compare (NE, resv, val);
4758 emit_move_insn (val, resv);
4759 /* Loop internal if so. */
4760 s390_emit_jump (csloop, cc);
4761
4762 emit_label (csend);
4763
4764 /* Return the correct part of the bitfield. */
4765 convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4766 NULL_RTX, 1, OPTAB_DIRECT), 1);
4767 }
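/* Illustrative user-level code of the kind this expander supports; the
   function name is made up and the block is not compiled.  A sub-word
   compare-and-swap is carried out as a CS loop on the containing aligned
   word.  */
#if 0
unsigned short
cas16_example (unsigned short *p, unsigned short oldv, unsigned short newv)
{
  return __sync_val_compare_and_swap (p, oldv, newv);
}
#endif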
4768
4769 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4770 and VAL the value to play with. If AFTER is true then store the value
4771 MEM holds after the operation, if AFTER is false then store the value MEM
4772 holds before the operation. If TARGET is zero then discard that value, else
4773 store it to TARGET. */
4774
4775 void
4776 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4777 rtx target, rtx mem, rtx val, bool after)
4778 {
4779 struct alignment_context ac;
4780 rtx cmp;
4781 rtx new_rtx = gen_reg_rtx (SImode);
4782 rtx orig = gen_reg_rtx (SImode);
4783 rtx csloop = gen_label_rtx ();
4784
4785 gcc_assert (!target || register_operand (target, VOIDmode));
4786 gcc_assert (MEM_P (mem));
4787
4788 init_alignment_context (&ac, mem, mode);
4789
4790 /* Shift val to the correct bit positions.
4791 Preserve "icm", but prevent "ex icm". */
4792 if (!(ac.aligned && code == SET && MEM_P (val)))
4793 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4794
4795 /* Further preparation insns. */
4796 if (code == PLUS || code == MINUS)
4797 emit_move_insn (orig, val);
4798 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4799 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4800 NULL_RTX, 1, OPTAB_DIRECT);
4801
4802 /* Load full word. Subsequent loads are performed by CS. */
4803 cmp = force_reg (SImode, ac.memsi);
4804
4805 /* Start CS loop. */
4806 emit_label (csloop);
4807 emit_move_insn (new_rtx, cmp);
4808
4809 /* Patch new with val at correct position. */
4810 switch (code)
4811 {
4812 case PLUS:
4813 case MINUS:
4814 val = expand_simple_binop (SImode, code, new_rtx, orig,
4815 NULL_RTX, 1, OPTAB_DIRECT);
4816 val = expand_simple_binop (SImode, AND, val, ac.modemask,
4817 NULL_RTX, 1, OPTAB_DIRECT);
4818 /* FALLTHRU */
4819 case SET:
4820 if (ac.aligned && MEM_P (val))
4821 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
4822 else
4823 {
4824 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
4825 NULL_RTX, 1, OPTAB_DIRECT);
4826 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
4827 NULL_RTX, 1, OPTAB_DIRECT);
4828 }
4829 break;
4830 case AND:
4831 case IOR:
4832 case XOR:
4833 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
4834 NULL_RTX, 1, OPTAB_DIRECT);
4835 break;
4836 case MULT: /* NAND */
4837 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
4838 NULL_RTX, 1, OPTAB_DIRECT);
4839 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
4840 NULL_RTX, 1, OPTAB_DIRECT);
4841 break;
4842 default:
4843 gcc_unreachable ();
4844 }
4845
4846 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
4847 ac.memsi, cmp, new_rtx));
4848
4849 /* Return the correct part of the bitfield. */
4850 if (target)
4851 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
4852 after ? new_rtx : cmp, ac.shift,
4853 NULL_RTX, 1, OPTAB_DIRECT), 1);
4854 }
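/* Illustrative user-level code of the kind this expander handles; the
   function name is made up and the block is not compiled.  A sub-word
   atomic read-modify-write is implemented as a CS loop on the containing
   word; __sync_fetch_and_add returns the old value, i.e. the
   AFTER == false case above.  */
#if 0
unsigned char
fetch_add8_example (unsigned char *p, unsigned char v)
{
  return __sync_fetch_and_add (p, v);
}
#endif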
4855
4856 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4857 We need to emit DTP-relative relocations. */
4858
4859 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
4860
4861 static void
4862 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
4863 {
4864 switch (size)
4865 {
4866 case 4:
4867 fputs ("\t.long\t", file);
4868 break;
4869 case 8:
4870 fputs ("\t.quad\t", file);
4871 break;
4872 default:
4873 gcc_unreachable ();
4874 }
4875 output_addr_const (file, x);
4876 fputs ("@DTPOFF", file);
4877 }
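/* Example of the output (illustrative): for SIZE == 8 and X being a
   symbol `foo', the function above emits
	.quad	foo@DTPOFF
   */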
4878
4879 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
4880 /* Implement TARGET_MANGLE_TYPE. */
4881
4882 static const char *
4883 s390_mangle_type (const_tree type)
4884 {
4885 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
4886 && TARGET_LONG_DOUBLE_128)
4887 return "g";
4888
4889 /* For all other types, use normal C++ mangling. */
4890 return NULL;
4891 }
4892 #endif
4893
4894 /* In the name of slightly smaller debug output, and to cater to
4895 general assembler lossage, recognize various UNSPEC sequences
4896 and turn them back into a direct symbol reference. */
4897
4898 static rtx
4899 s390_delegitimize_address (rtx orig_x)
4900 {
4901 rtx x, y;
4902
4903 orig_x = delegitimize_mem_from_attrs (orig_x);
4904 x = orig_x;
4905
4906 /* Extract the symbol ref from:
4907 (plus:SI (reg:SI 12 %r12)
4908 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
4909 UNSPEC_GOTOFF/PLTOFF)))
4910 and
4911 (plus:SI (reg:SI 12 %r12)
4912 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
4913 UNSPEC_GOTOFF/PLTOFF)
4914 (const_int 4 [0x4])))) */
4915 if (GET_CODE (x) == PLUS
4916 && REG_P (XEXP (x, 0))
4917 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
4918 && GET_CODE (XEXP (x, 1)) == CONST)
4919 {
4920 HOST_WIDE_INT offset = 0;
4921
4922 /* The const operand. */
4923 y = XEXP (XEXP (x, 1), 0);
4924
4925 if (GET_CODE (y) == PLUS
4926 && GET_CODE (XEXP (y, 1)) == CONST_INT)
4927 {
4928 offset = INTVAL (XEXP (y, 1));
4929 y = XEXP (y, 0);
4930 }
4931
4932 if (GET_CODE (y) == UNSPEC
4933 && (XINT (y, 1) == UNSPEC_GOTOFF
4934 || XINT (y, 1) == UNSPEC_PLTOFF))
4935 return plus_constant (XVECEXP (y, 0, 0), offset);
4936 }
4937
4938 if (GET_CODE (x) != MEM)
4939 return orig_x;
4940
4941 x = XEXP (x, 0);
4942 if (GET_CODE (x) == PLUS
4943 && GET_CODE (XEXP (x, 1)) == CONST
4944 && GET_CODE (XEXP (x, 0)) == REG
4945 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
4946 {
4947 y = XEXP (XEXP (x, 1), 0);
4948 if (GET_CODE (y) == UNSPEC
4949 && XINT (y, 1) == UNSPEC_GOT)
4950 y = XVECEXP (y, 0, 0);
4951 else
4952 return orig_x;
4953 }
4954 else if (GET_CODE (x) == CONST)
4955 {
4956 /* Extract the symbol ref from:
4957 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
4958 UNSPEC_PLT/GOTENT))) */
4959
4960 y = XEXP (x, 0);
4961 if (GET_CODE (y) == UNSPEC
4962 && (XINT (y, 1) == UNSPEC_GOTENT
4963 || XINT (y, 1) == UNSPEC_PLT))
4964 y = XVECEXP (y, 0, 0);
4965 else
4966 return orig_x;
4967 }
4968 else
4969 return orig_x;
4970
4971 if (GET_MODE (orig_x) != Pmode)
4972 {
4973 if (GET_MODE (orig_x) == BLKmode)
4974 return orig_x;
4975 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
4976 if (y == NULL_RTX)
4977 return orig_x;
4978 }
4979 return y;
4980 }
4981
4982 /* Output operand OP to stdio stream FILE.
4983 OP is an address (register + offset) which is not used to address data;
4984 instead the rightmost bits are interpreted as the value. */
4985
4986 static void
4987 print_shift_count_operand (FILE *file, rtx op)
4988 {
4989 HOST_WIDE_INT offset;
4990 rtx base;
4991
4992 /* Extract base register and offset. */
4993 if (!s390_decompose_shift_count (op, &base, &offset))
4994 gcc_unreachable ();
4995
4996 /* Sanity check. */
4997 if (base)
4998 {
4999 gcc_assert (GET_CODE (base) == REG);
5000 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5001 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5002 }
5003
5004 /* Offsets are restricted to twelve bits. */
5005 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5006 if (base)
5007 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5008 }
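/* Example of the output (illustrative): for an operand of the form
   (plus (reg %r3) (const_int 7)) the function above prints "7(%r3)";
   when no base register is present only the masked displacement is
   printed.  */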
5009
5010 /* See 'get_some_local_dynamic_name'. */
5011
5012 static int
5013 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5014 {
5015 rtx x = *px;
5016
5017 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5018 {
5019 x = get_pool_constant (x);
5020 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5021 }
5022
5023 if (GET_CODE (x) == SYMBOL_REF
5024 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5025 {
5026 cfun->machine->some_ld_name = XSTR (x, 0);
5027 return 1;
5028 }
5029
5030 return 0;
5031 }
5032
5033 /* Locate some local-dynamic symbol still in use by this function
5034 so that we can print its name in local-dynamic base patterns. */
5035
5036 static const char *
5037 get_some_local_dynamic_name (void)
5038 {
5039 rtx insn;
5040
5041 if (cfun->machine->some_ld_name)
5042 return cfun->machine->some_ld_name;
5043
5044 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5045 if (INSN_P (insn)
5046 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5047 return cfun->machine->some_ld_name;
5048
5049 gcc_unreachable ();
5050 }
5051
5052 /* Output machine-dependent UNSPECs occurring in address constant X
5053 in assembler syntax to stdio stream FILE. Returns true if the
5054 constant X could be recognized, false otherwise. */
5055
5056 static bool
5057 s390_output_addr_const_extra (FILE *file, rtx x)
5058 {
5059 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5060 switch (XINT (x, 1))
5061 {
5062 case UNSPEC_GOTENT:
5063 output_addr_const (file, XVECEXP (x, 0, 0));
5064 fprintf (file, "@GOTENT");
5065 return true;
5066 case UNSPEC_GOT:
5067 output_addr_const (file, XVECEXP (x, 0, 0));
5068 fprintf (file, "@GOT");
5069 return true;
5070 case UNSPEC_GOTOFF:
5071 output_addr_const (file, XVECEXP (x, 0, 0));
5072 fprintf (file, "@GOTOFF");
5073 return true;
5074 case UNSPEC_PLT:
5075 output_addr_const (file, XVECEXP (x, 0, 0));
5076 fprintf (file, "@PLT");
5077 return true;
5078 case UNSPEC_PLTOFF:
5079 output_addr_const (file, XVECEXP (x, 0, 0));
5080 fprintf (file, "@PLTOFF");
5081 return true;
5082 case UNSPEC_TLSGD:
5083 output_addr_const (file, XVECEXP (x, 0, 0));
5084 fprintf (file, "@TLSGD");
5085 return true;
5086 case UNSPEC_TLSLDM:
5087 assemble_name (file, get_some_local_dynamic_name ());
5088 fprintf (file, "@TLSLDM");
5089 return true;
5090 case UNSPEC_DTPOFF:
5091 output_addr_const (file, XVECEXP (x, 0, 0));
5092 fprintf (file, "@DTPOFF");
5093 return true;
5094 case UNSPEC_NTPOFF:
5095 output_addr_const (file, XVECEXP (x, 0, 0));
5096 fprintf (file, "@NTPOFF");
5097 return true;
5098 case UNSPEC_GOTNTPOFF:
5099 output_addr_const (file, XVECEXP (x, 0, 0));
5100 fprintf (file, "@GOTNTPOFF");
5101 return true;
5102 case UNSPEC_INDNTPOFF:
5103 output_addr_const (file, XVECEXP (x, 0, 0));
5104 fprintf (file, "@INDNTPOFF");
5105 return true;
5106 }
5107
5108 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5109 switch (XINT (x, 1))
5110 {
5111 case UNSPEC_POOL_OFFSET:
5112 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5113 output_addr_const (file, x);
5114 return true;
5115 }
5116 return false;
5117 }
5118
5119 /* Output address operand ADDR in assembler syntax to
5120 stdio stream FILE. */
5121
5122 void
5123 print_operand_address (FILE *file, rtx addr)
5124 {
5125 struct s390_address ad;
5126
5127 if (s390_symref_operand_p (addr, NULL, NULL))
5128 {
5129 if (!TARGET_Z10)
5130 {
5131 output_operand_lossage ("symbolic memory references are "
5132 "only supported on z10 or later");
5133 return;
5134 }
5135 output_addr_const (file, addr);
5136 return;
5137 }
5138
5139 if (!s390_decompose_address (addr, &ad)
5140 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5141 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5142 output_operand_lossage ("cannot decompose address");
5143
5144 if (ad.disp)
5145 output_addr_const (file, ad.disp);
5146 else
5147 fprintf (file, "0");
5148
5149 if (ad.base && ad.indx)
5150 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5151 reg_names[REGNO (ad.base)]);
5152 else if (ad.base)
5153 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5154 }
5155
5156 /* Output operand X in assembler syntax to stdio stream FILE.
5157 CODE specified the format flag. The following format flags
5158 are recognized:
5159
5160 'C': print opcode suffix for branch condition.
5161 'D': print opcode suffix for inverse branch condition.
5162 'E': print opcode suffix for branch on index instruction.
5163 'J': print tls_load/tls_gdcall/tls_ldcall suffix.
5164 'G': print the size of the operand in bytes.
5165 'O': print only the displacement of a memory reference.
5166 'R': print only the base register of a memory reference.
5167 'S': print S-type memory reference (base+displacement).
5168 'N': print the second word of a DImode operand.
5169 'M': print the second word of a TImode operand.
5170 'Y': print shift count operand.
5171
5172 'b': print integer X as if it's an unsigned byte.
5173 'c': print integer X as if it's a signed byte.
5174 'x': print integer X as if it's an unsigned halfword.
5175 'h': print integer X as if it's a signed halfword.
5176 'i': print the first nonzero HImode part of X.
5177 'j': print the first HImode part unequal to -1 of X.
5178 'k': print the first nonzero SImode part of X.
5179 'm': print the first SImode part unequal to -1 of X.
5180 'o': print integer X as if it's an unsigned 32-bit word. */
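/* Worked example (illustrative): for X = (const_int 255), the 'b'
   modifier prints 255 (the unsigned low byte) while 'c' prints -1,
   since ((255 & 0xff) ^ 0x80) - 0x80 = -1.  The 'x' and 'h' modifiers
   apply the same treatment to the low halfword.  */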
5181
5182 void
5183 print_operand (FILE *file, rtx x, int code)
5184 {
5185 switch (code)
5186 {
5187 case 'C':
5188 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5189 return;
5190
5191 case 'D':
5192 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5193 return;
5194
5195 case 'E':
5196 if (GET_CODE (x) == LE)
5197 fprintf (file, "l");
5198 else if (GET_CODE (x) == GT)
5199 fprintf (file, "h");
5200 else
5201 output_operand_lossage ("invalid comparison operator "
5202 "for 'E' output modifier");
5203 return;
5204
5205 case 'J':
5206 if (GET_CODE (x) == SYMBOL_REF)
5207 {
5208 fprintf (file, "%s", ":tls_load:");
5209 output_addr_const (file, x);
5210 }
5211 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5212 {
5213 fprintf (file, "%s", ":tls_gdcall:");
5214 output_addr_const (file, XVECEXP (x, 0, 0));
5215 }
5216 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5217 {
5218 fprintf (file, "%s", ":tls_ldcall:");
5219 assemble_name (file, get_some_local_dynamic_name ());
5220 }
5221 else
5222 output_operand_lossage ("invalid reference for 'J' output modifier");
5223 return;
5224
5225 case 'G':
5226 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5227 return;
5228
5229 case 'O':
5230 {
5231 struct s390_address ad;
5232 int ret;
5233
5234 if (!MEM_P (x))
5235 {
5236 output_operand_lossage ("memory reference expected for "
5237 "'O' output modifier");
5238 return;
5239 }
5240
5241 ret = s390_decompose_address (XEXP (x, 0), &ad);
5242
5243 if (!ret
5244 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5245 || ad.indx)
5246 {
5247 output_operand_lossage ("invalid address for 'O' output modifier");
5248 return;
5249 }
5250
5251 if (ad.disp)
5252 output_addr_const (file, ad.disp);
5253 else
5254 fprintf (file, "0");
5255 }
5256 return;
5257
5258 case 'R':
5259 {
5260 struct s390_address ad;
5261 int ret;
5262
5263 if (!MEM_P (x))
5264 {
5265 output_operand_lossage ("memory reference expected for "
5266 "'R' output modifier");
5267 return;
5268 }
5269
5270 ret = s390_decompose_address (XEXP (x, 0), &ad);
5271
5272 if (!ret
5273 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5274 || ad.indx)
5275 {
5276 output_operand_lossage ("invalid address for 'R' output modifier");
5277 return;
5278 }
5279
5280 if (ad.base)
5281 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5282 else
5283 fprintf (file, "0");
5284 }
5285 return;
5286
5287 case 'S':
5288 {
5289 struct s390_address ad;
5290 int ret;
5291
5292 if (!MEM_P (x))
5293 {
5294 output_operand_lossage ("memory reference expected for "
5295 "'S' output modifier");
5296 return;
5297 }
5298 ret = s390_decompose_address (XEXP (x, 0), &ad);
5299
5300 if (!ret
5301 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5302 || ad.indx)
5303 {
5304 output_operand_lossage ("invalid address for 'S' output modifier");
5305 return;
5306 }
5307
5308 if (ad.disp)
5309 output_addr_const (file, ad.disp);
5310 else
5311 fprintf (file, "0");
5312
5313 if (ad.base)
5314 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5315 }
5316 return;
5317
5318 case 'N':
5319 if (GET_CODE (x) == REG)
5320 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5321 else if (GET_CODE (x) == MEM)
5322 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 4));
5323 else
5324 output_operand_lossage ("register or memory expression expected "
5325 "for 'N' output modifier");
5326 break;
5327
5328 case 'M':
5329 if (GET_CODE (x) == REG)
5330 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5331 else if (GET_CODE (x) == MEM)
5332 x = change_address (x, VOIDmode, plus_constant (XEXP (x, 0), 8));
5333 else
5334 output_operand_lossage ("register or memory expression expected "
5335 "for 'M' output modifier");
5336 break;
5337
5338 case 'Y':
5339 print_shift_count_operand (file, x);
5340 return;
5341 }
5342
5343 switch (GET_CODE (x))
5344 {
5345 case REG:
5346 fprintf (file, "%s", reg_names[REGNO (x)]);
5347 break;
5348
5349 case MEM:
5350 output_address (XEXP (x, 0));
5351 break;
5352
5353 case CONST:
5354 case CODE_LABEL:
5355 case LABEL_REF:
5356 case SYMBOL_REF:
5357 output_addr_const (file, x);
5358 break;
5359
5360 case CONST_INT:
5361 if (code == 'b')
5362 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xff);
5363 else if (code == 'c')
5364 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xff) ^ 0x80) - 0x80);
5365 else if (code == 'x')
5366 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
5367 else if (code == 'h')
5368 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
5369 else if (code == 'i')
5370 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5371 s390_extract_part (x, HImode, 0));
5372 else if (code == 'j')
5373 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5374 s390_extract_part (x, HImode, -1));
5375 else if (code == 'k')
5376 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5377 s390_extract_part (x, SImode, 0));
5378 else if (code == 'm')
5379 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5380 s390_extract_part (x, SImode, -1));
5381 else if (code == 'o')
5382 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffffffff);
5383 else
5384 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5385 break;
5386
5387 case CONST_DOUBLE:
5388 gcc_assert (GET_MODE (x) == VOIDmode);
5389 if (code == 'b')
5390 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5391 else if (code == 'x')
5392 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5393 else if (code == 'h')
5394 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5395 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5396 else
5397 {
5398 if (code == 0)
5399 output_operand_lossage ("invalid constant - try using "
5400 "an output modifier");
5401 else
5402 output_operand_lossage ("invalid constant for output modifier '%c'",
5403 code);
5404 }
5405 break;
5406
5407 default:
5408 if (code == 0)
5409 output_operand_lossage ("invalid expression - try using "
5410 "an output modifier");
5411 else
5412 output_operand_lossage ("invalid expression for output "
5413 "modifier '%c'", code);
5414 break;
5415 }
5416 }
5417
5418 /* Target hook for assembling integer objects. We need to define it
5419 here to work around a bug in some versions of GAS, which couldn't
5420 handle values smaller than INT_MIN when printed in decimal. */
5421
5422 static bool
5423 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5424 {
5425 if (size == 8 && aligned_p
5426 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5427 {
5428 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5429 INTVAL (x));
5430 return true;
5431 }
5432 return default_assemble_integer (x, size, aligned_p);
5433 }
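/* Example (illustrative): for an aligned 8-byte value of -2147483649
   (just below INT_MIN) the hook above prints the hexadecimal form
	.quad	0xffffffff7fffffff
   instead of the decimal form that trips up the affected GAS versions.  */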
5434
5435 /* Returns true if register REGNO is used for forming
5436 a memory address in expression X. */
5437
5438 static bool
5439 reg_used_in_mem_p (int regno, rtx x)
5440 {
5441 enum rtx_code code = GET_CODE (x);
5442 int i, j;
5443 const char *fmt;
5444
5445 if (code == MEM)
5446 {
5447 if (refers_to_regno_p (regno, regno+1,
5448 XEXP (x, 0), 0))
5449 return true;
5450 }
5451 else if (code == SET
5452 && GET_CODE (SET_DEST (x)) == PC)
5453 {
5454 if (refers_to_regno_p (regno, regno+1,
5455 SET_SRC (x), 0))
5456 return true;
5457 }
5458
5459 fmt = GET_RTX_FORMAT (code);
5460 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5461 {
5462 if (fmt[i] == 'e'
5463 && reg_used_in_mem_p (regno, XEXP (x, i)))
5464 return true;
5465
5466 else if (fmt[i] == 'E')
5467 for (j = 0; j < XVECLEN (x, i); j++)
5468 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5469 return true;
5470 }
5471 return false;
5472 }
5473
5474 /* Returns true if expression DEP_RTX sets an address register
5475 used by instruction INSN to address memory. */
5476
5477 static bool
5478 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5479 {
5480 rtx target, pat;
5481
5482 if (GET_CODE (dep_rtx) == INSN)
5483 dep_rtx = PATTERN (dep_rtx);
5484
5485 if (GET_CODE (dep_rtx) == SET)
5486 {
5487 target = SET_DEST (dep_rtx);
5488 if (GET_CODE (target) == STRICT_LOW_PART)
5489 target = XEXP (target, 0);
5490 while (GET_CODE (target) == SUBREG)
5491 target = SUBREG_REG (target);
5492
5493 if (GET_CODE (target) == REG)
5494 {
5495 int regno = REGNO (target);
5496
5497 if (s390_safe_attr_type (insn) == TYPE_LA)
5498 {
5499 pat = PATTERN (insn);
5500 if (GET_CODE (pat) == PARALLEL)
5501 {
5502 gcc_assert (XVECLEN (pat, 0) == 2);
5503 pat = XVECEXP (pat, 0, 0);
5504 }
5505 gcc_assert (GET_CODE (pat) == SET);
5506 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5507 }
5508 else if (get_attr_atype (insn) == ATYPE_AGEN)
5509 return reg_used_in_mem_p (regno, PATTERN (insn));
5510 }
5511 }
5512 return false;
5513 }
5514
5515 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
5516
5517 int
5518 s390_agen_dep_p (rtx dep_insn, rtx insn)
5519 {
5520 rtx dep_rtx = PATTERN (dep_insn);
5521 int i;
5522
5523 if (GET_CODE (dep_rtx) == SET
5524 && addr_generation_dependency_p (dep_rtx, insn))
5525 return 1;
5526 else if (GET_CODE (dep_rtx) == PARALLEL)
5527 {
5528 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5529 {
5530 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5531 return 1;
5532 }
5533 }
5534 return 0;
5535 }
5536
5537
5538 /* A C statement (sans semicolon) to update the integer scheduling priority
5539 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5540 reduce the priority to execute INSN later. Do not define this macro if
5541 you do not need to adjust the scheduling priorities of insns.
5542
5543 A STD instruction should be scheduled earlier,
5544 in order to use the bypass. */
5545 static int
5546 s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
5547 {
5548 if (! INSN_P (insn))
5549 return priority;
5550
5551 if (s390_tune != PROCESSOR_2084_Z990
5552 && s390_tune != PROCESSOR_2094_Z9_109
5553 && s390_tune != PROCESSOR_2097_Z10
5554 && s390_tune != PROCESSOR_2817_Z196)
5555 return priority;
5556
5557 switch (s390_safe_attr_type (insn))
5558 {
5559 case TYPE_FSTOREDF:
5560 case TYPE_FSTORESF:
5561 priority = priority << 3;
5562 break;
5563 case TYPE_STORE:
5564 case TYPE_STM:
5565 priority = priority << 1;
5566 break;
5567 default:
5568 break;
5569 }
5570 return priority;
5571 }
5572
5573
5574 /* The number of instructions that can be issued per cycle. */
5575
5576 static int
5577 s390_issue_rate (void)
5578 {
5579 switch (s390_tune)
5580 {
5581 case PROCESSOR_2084_Z990:
5582 case PROCESSOR_2094_Z9_109:
5583 case PROCESSOR_2817_Z196:
5584 return 3;
5585 case PROCESSOR_2097_Z10:
5586 return 2;
5587 default:
5588 return 1;
5589 }
5590 }
5591
5592 static int
5593 s390_first_cycle_multipass_dfa_lookahead (void)
5594 {
5595 return 4;
5596 }
5597
5598 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5599 Fix up MEMs as required. */
5600
5601 static void
5602 annotate_constant_pool_refs (rtx *x)
5603 {
5604 int i, j;
5605 const char *fmt;
5606
5607 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5608 || !CONSTANT_POOL_ADDRESS_P (*x));
5609
5610 /* Literal pool references can only occur inside a MEM ... */
5611 if (GET_CODE (*x) == MEM)
5612 {
5613 rtx memref = XEXP (*x, 0);
5614
5615 if (GET_CODE (memref) == SYMBOL_REF
5616 && CONSTANT_POOL_ADDRESS_P (memref))
5617 {
5618 rtx base = cfun->machine->base_reg;
5619 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5620 UNSPEC_LTREF);
5621
5622 *x = replace_equiv_address (*x, addr);
5623 return;
5624 }
5625
5626 if (GET_CODE (memref) == CONST
5627 && GET_CODE (XEXP (memref, 0)) == PLUS
5628 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5629 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5630 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5631 {
5632 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5633 rtx sym = XEXP (XEXP (memref, 0), 0);
5634 rtx base = cfun->machine->base_reg;
5635 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5636 UNSPEC_LTREF);
5637
5638 *x = replace_equiv_address (*x, plus_constant (addr, off));
5639 return;
5640 }
5641 }
5642
5643 /* ... or a load-address type pattern. */
5644 if (GET_CODE (*x) == SET)
5645 {
5646 rtx addrref = SET_SRC (*x);
5647
5648 if (GET_CODE (addrref) == SYMBOL_REF
5649 && CONSTANT_POOL_ADDRESS_P (addrref))
5650 {
5651 rtx base = cfun->machine->base_reg;
5652 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5653 UNSPEC_LTREF);
5654
5655 SET_SRC (*x) = addr;
5656 return;
5657 }
5658
5659 if (GET_CODE (addrref) == CONST
5660 && GET_CODE (XEXP (addrref, 0)) == PLUS
5661 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5662 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5663 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5664 {
5665 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5666 rtx sym = XEXP (XEXP (addrref, 0), 0);
5667 rtx base = cfun->machine->base_reg;
5668 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5669 UNSPEC_LTREF);
5670
5671 SET_SRC (*x) = plus_constant (addr, off);
5672 return;
5673 }
5674 }
5675
5676 /* Annotate LTREL_BASE as well. */
5677 if (GET_CODE (*x) == UNSPEC
5678 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5679 {
5680 rtx base = cfun->machine->base_reg;
5681 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
5682 UNSPEC_LTREL_BASE);
5683 return;
5684 }
5685
5686 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5687 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5688 {
5689 if (fmt[i] == 'e')
5690 {
5691 annotate_constant_pool_refs (&XEXP (*x, i));
5692 }
5693 else if (fmt[i] == 'E')
5694 {
5695 for (j = 0; j < XVECLEN (*x, i); j++)
5696 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
5697 }
5698 }
5699 }
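/* Example of the rewrite performed above (illustrative): a literal pool
   reference such as
     (mem (symbol_ref ".LC0"))
   becomes
     (mem (unspec [(symbol_ref ".LC0") (reg base)] UNSPEC_LTREF))
   making the use of the literal pool base register explicit.  */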
5700
5701 /* Split all branches that exceed the maximum distance.
5702 Returns true if this created a new literal pool entry. */
5703
5704 static int
5705 s390_split_branches (void)
5706 {
5707 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
5708 int new_literal = 0, ret;
5709 rtx insn, pat, tmp, target;
5710 rtx *label;
5711
5712 /* We need correct insn addresses. */
5713
5714 shorten_branches (get_insns ());
5715
5716 /* Find all branches that exceed 64KB, and split them. */
5717
5718 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5719 {
5720 if (GET_CODE (insn) != JUMP_INSN)
5721 continue;
5722
5723 pat = PATTERN (insn);
5724 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
5725 pat = XVECEXP (pat, 0, 0);
5726 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
5727 continue;
5728
5729 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
5730 {
5731 label = &SET_SRC (pat);
5732 }
5733 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
5734 {
5735 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
5736 label = &XEXP (SET_SRC (pat), 1);
5737 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
5738 label = &XEXP (SET_SRC (pat), 2);
5739 else
5740 continue;
5741 }
5742 else
5743 continue;
5744
5745 if (get_attr_length (insn) <= 4)
5746 continue;
5747
5748 /* We are going to use the return register as scratch register,
5749 make sure it will be saved/restored by the prologue/epilogue. */
5750 cfun_frame_layout.save_return_addr_p = 1;
5751
5752 if (!flag_pic)
5753 {
5754 new_literal = 1;
5755 tmp = force_const_mem (Pmode, *label);
5756 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, tmp), insn);
5757 INSN_ADDRESSES_NEW (tmp, -1);
5758 annotate_constant_pool_refs (&PATTERN (tmp));
5759
5760 target = temp_reg;
5761 }
5762 else
5763 {
5764 new_literal = 1;
5765 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
5766 UNSPEC_LTREL_OFFSET);
5767 target = gen_rtx_CONST (Pmode, target);
5768 target = force_const_mem (Pmode, target);
5769 tmp = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
5770 INSN_ADDRESSES_NEW (tmp, -1);
5771 annotate_constant_pool_refs (&PATTERN (tmp));
5772
5773 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
5774 cfun->machine->base_reg),
5775 UNSPEC_LTREL_BASE);
5776 target = gen_rtx_PLUS (Pmode, temp_reg, target);
5777 }
5778
5779 ret = validate_change (insn, label, target, 0);
5780 gcc_assert (ret);
5781 }
5782
5783 return new_literal;
5784 }
5785
5786
5787 /* Find an annotated literal pool symbol referenced in RTX X,
5788 and store it at REF. Will abort if X contains references to
5789 more than one such pool symbol; multiple references to the same
5790 symbol are allowed, however.
5791
5792 The rtx pointed to by REF must be initialized to NULL_RTX
5793 by the caller before calling this routine. */
5794
5795 static void
5796 find_constant_pool_ref (rtx x, rtx *ref)
5797 {
5798 int i, j;
5799 const char *fmt;
5800
5801 /* Ignore LTREL_BASE references. */
5802 if (GET_CODE (x) == UNSPEC
5803 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5804 return;
5805 /* Likewise POOL_ENTRY insns. */
5806 if (GET_CODE (x) == UNSPEC_VOLATILE
5807 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
5808 return;
5809
5810 gcc_assert (GET_CODE (x) != SYMBOL_REF
5811 || !CONSTANT_POOL_ADDRESS_P (x));
5812
5813 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
5814 {
5815 rtx sym = XVECEXP (x, 0, 0);
5816 gcc_assert (GET_CODE (sym) == SYMBOL_REF
5817 && CONSTANT_POOL_ADDRESS_P (sym));
5818
5819 if (*ref == NULL_RTX)
5820 *ref = sym;
5821 else
5822 gcc_assert (*ref == sym);
5823
5824 return;
5825 }
5826
5827 fmt = GET_RTX_FORMAT (GET_CODE (x));
5828 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5829 {
5830 if (fmt[i] == 'e')
5831 {
5832 find_constant_pool_ref (XEXP (x, i), ref);
5833 }
5834 else if (fmt[i] == 'E')
5835 {
5836 for (j = 0; j < XVECLEN (x, i); j++)
5837 find_constant_pool_ref (XVECEXP (x, i, j), ref);
5838 }
5839 }
5840 }
5841
5842 /* Replace every reference to the annotated literal pool
5843 symbol REF in X by its base plus OFFSET. */
5844
5845 static void
5846 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
5847 {
5848 int i, j;
5849 const char *fmt;
5850
5851 gcc_assert (*x != ref);
5852
5853 if (GET_CODE (*x) == UNSPEC
5854 && XINT (*x, 1) == UNSPEC_LTREF
5855 && XVECEXP (*x, 0, 0) == ref)
5856 {
5857 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
5858 return;
5859 }
5860
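/* Likewise handle a reference with an additional constant offset, folding the offset into the replacement address.  */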
5861 if (GET_CODE (*x) == PLUS
5862 && GET_CODE (XEXP (*x, 1)) == CONST_INT
5863 && GET_CODE (XEXP (*x, 0)) == UNSPEC
5864 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
5865 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
5866 {
5867 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
5868 *x = plus_constant (addr, INTVAL (XEXP (*x, 1)));
5869 return;
5870 }
5871
5872 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5873 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5874 {
5875 if (fmt[i] == 'e')
5876 {
5877 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
5878 }
5879 else if (fmt[i] == 'E')
5880 {
5881 for (j = 0; j < XVECLEN (*x, i); j++)
5882 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
5883 }
5884 }
5885 }
5886
5887 /* Check whether X contains an UNSPEC_LTREL_BASE.
5888 Return its constant pool symbol if found, NULL_RTX otherwise. */
5889
5890 static rtx
5891 find_ltrel_base (rtx x)
5892 {
5893 int i, j;
5894 const char *fmt;
5895
5896 if (GET_CODE (x) == UNSPEC
5897 && XINT (x, 1) == UNSPEC_LTREL_BASE)
5898 return XVECEXP (x, 0, 0);
5899
5900 fmt = GET_RTX_FORMAT (GET_CODE (x));
5901 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5902 {
5903 if (fmt[i] == 'e')
5904 {
5905 rtx fnd = find_ltrel_base (XEXP (x, i));
5906 if (fnd)
5907 return fnd;
5908 }
5909 else if (fmt[i] == 'E')
5910 {
5911 for (j = 0; j < XVECLEN (x, i); j++)
5912 {
5913 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
5914 if (fnd)
5915 return fnd;
5916 }
5917 }
5918 }
5919
5920 return NULL_RTX;
5921 }
5922
5923 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
5924
5925 static void
5926 replace_ltrel_base (rtx *x)
5927 {
5928 int i, j;
5929 const char *fmt;
5930
5931 if (GET_CODE (*x) == UNSPEC
5932 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
5933 {
5934 *x = XVECEXP (*x, 0, 1);
5935 return;
5936 }
5937
5938 fmt = GET_RTX_FORMAT (GET_CODE (*x));
5939 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
5940 {
5941 if (fmt[i] == 'e')
5942 {
5943 replace_ltrel_base (&XEXP (*x, i));
5944 }
5945 else if (fmt[i] == 'E')
5946 {
5947 for (j = 0; j < XVECLEN (*x, i); j++)
5948 replace_ltrel_base (&XVECEXP (*x, i, j));
5949 }
5950 }
5951 }
5952
5953
5954 /* We keep a list of constants which we have to add to internal
5955 constant tables in the middle of large functions. */
5956
5957 #define NR_C_MODES 11
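/* The entry modes below are ordered by decreasing size and hence decreasing alignment requirement; s390_dump_pool relies on this ordering to keep every pool entry naturally aligned.  */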
5958 enum machine_mode constant_modes[NR_C_MODES] =
5959 {
5960 TFmode, TImode, TDmode,
5961 DFmode, DImode, DDmode,
5962 SFmode, SImode, SDmode,
5963 HImode,
5964 QImode
5965 };
5966
5967 struct constant
5968 {
5969 struct constant *next;
5970 rtx value;
5971 rtx label;
5972 };
5973
5974 struct constant_pool
5975 {
5976 struct constant_pool *next;
5977 rtx first_insn;
5978 rtx pool_insn;
5979 bitmap insns;
5980 rtx emit_pool_after;
5981
5982 struct constant *constants[NR_C_MODES];
5983 struct constant *execute;
5984 rtx label;
5985 int size;
5986 };
5987
5988 /* Allocate new constant_pool structure. */
5989
5990 static struct constant_pool *
5991 s390_alloc_pool (void)
5992 {
5993 struct constant_pool *pool;
5994 int i;
5995
5996 pool = (struct constant_pool *) xmalloc (sizeof *pool);
5997 pool->next = NULL;
5998 for (i = 0; i < NR_C_MODES; i++)
5999 pool->constants[i] = NULL;
6000
6001 pool->execute = NULL;
6002 pool->label = gen_label_rtx ();
6003 pool->first_insn = NULL_RTX;
6004 pool->pool_insn = NULL_RTX;
6005 pool->insns = BITMAP_ALLOC (NULL);
6006 pool->size = 0;
6007 pool->emit_pool_after = NULL_RTX;
6008
6009 return pool;
6010 }
6011
6012 /* Create new constant pool covering instructions starting at INSN
6013 and chain it to the end of POOL_LIST. */
6014
6015 static struct constant_pool *
6016 s390_start_pool (struct constant_pool **pool_list, rtx insn)
6017 {
6018 struct constant_pool *pool, **prev;
6019
6020 pool = s390_alloc_pool ();
6021 pool->first_insn = insn;
6022
6023 for (prev = pool_list; *prev; prev = &(*prev)->next)
6024 ;
6025 *prev = pool;
6026
6027 return pool;
6028 }
6029
6030 /* End range of instructions covered by POOL at INSN and emit
6031 placeholder insn representing the pool. */
6032
6033 static void
6034 s390_end_pool (struct constant_pool *pool, rtx insn)
6035 {
6036 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6037
6038 if (!insn)
6039 insn = get_last_insn ();
6040
6041 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6042 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6043 }
6044
6045 /* Add INSN to the list of insns covered by POOL. */
6046
6047 static void
6048 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6049 {
6050 bitmap_set_bit (pool->insns, INSN_UID (insn));
6051 }
6052
6053 /* Return pool out of POOL_LIST that covers INSN. */
6054
6055 static struct constant_pool *
6056 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6057 {
6058 struct constant_pool *pool;
6059
6060 for (pool = pool_list; pool; pool = pool->next)
6061 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6062 break;
6063
6064 return pool;
6065 }
6066
6067 /* Add constant VAL of mode MODE to the constant pool POOL. */
6068
6069 static void
6070 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6071 {
6072 struct constant *c;
6073 int i;
6074
6075 for (i = 0; i < NR_C_MODES; i++)
6076 if (constant_modes[i] == mode)
6077 break;
6078 gcc_assert (i != NR_C_MODES);
6079
6080 for (c = pool->constants[i]; c != NULL; c = c->next)
6081 if (rtx_equal_p (val, c->value))
6082 break;
6083
6084 if (c == NULL)
6085 {
6086 c = (struct constant *) xmalloc (sizeof *c);
6087 c->value = val;
6088 c->label = gen_label_rtx ();
6089 c->next = pool->constants[i];
6090 pool->constants[i] = c;
6091 pool->size += GET_MODE_SIZE (mode);
6092 }
6093 }
6094
6095 /* Return an rtx that represents the offset of X from the start of
6096 pool POOL. */
6097
6098 static rtx
6099 s390_pool_offset (struct constant_pool *pool, rtx x)
6100 {
6101 rtx label;
6102
6103 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6104 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6105 UNSPEC_POOL_OFFSET);
6106 return gen_rtx_CONST (GET_MODE (x), x);
6107 }
6108
6109 /* Find constant VAL of mode MODE in the constant pool POOL.
6110 Return an RTX describing the distance from the start of
6111 the pool to the location of the new constant. */
6112
6113 static rtx
6114 s390_find_constant (struct constant_pool *pool, rtx val,
6115 enum machine_mode mode)
6116 {
6117 struct constant *c;
6118 int i;
6119
6120 for (i = 0; i < NR_C_MODES; i++)
6121 if (constant_modes[i] == mode)
6122 break;
6123 gcc_assert (i != NR_C_MODES);
6124
6125 for (c = pool->constants[i]; c != NULL; c = c->next)
6126 if (rtx_equal_p (val, c->value))
6127 break;
6128
6129 gcc_assert (c);
6130
6131 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6132 }
6133
6134 /* Check whether INSN is an execute. Return the label_ref to its
6135 execute target template if so, NULL_RTX otherwise. */
6136
6137 static rtx
6138 s390_execute_label (rtx insn)
6139 {
6140 if (GET_CODE (insn) == INSN
6141 && GET_CODE (PATTERN (insn)) == PARALLEL
6142 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6143 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6144 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6145
6146 return NULL_RTX;
6147 }
6148
6149 /* Add execute target for INSN to the constant pool POOL. */
6150
6151 static void
6152 s390_add_execute (struct constant_pool *pool, rtx insn)
6153 {
6154 struct constant *c;
6155
6156 for (c = pool->execute; c != NULL; c = c->next)
6157 if (INSN_UID (insn) == INSN_UID (c->value))
6158 break;
6159
6160 if (c == NULL)
6161 {
6162 c = (struct constant *) xmalloc (sizeof *c);
6163 c->value = insn;
6164 c->label = gen_label_rtx ();
6165 c->next = pool->execute;
6166 pool->execute = c;
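/* An execute target template is a single insn of at most 6 bytes, hence the fixed size increment.  */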
6167 pool->size += 6;
6168 }
6169 }
6170
6171 /* Find execute target for INSN in the constant pool POOL.
6172 Return an RTX describing the distance from the start of
6173 the pool to the location of the execute target. */
6174
6175 static rtx
6176 s390_find_execute (struct constant_pool *pool, rtx insn)
6177 {
6178 struct constant *c;
6179
6180 for (c = pool->execute; c != NULL; c = c->next)
6181 if (INSN_UID (insn) == INSN_UID (c->value))
6182 break;
6183
6184 gcc_assert (c);
6185
6186 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6187 }
6188
6189 /* For an execute INSN, extract the execute target template. */
6190
6191 static rtx
6192 s390_execute_target (rtx insn)
6193 {
6194 rtx pattern = PATTERN (insn);
6195 gcc_assert (s390_execute_label (insn));
6196
6197 if (XVECLEN (pattern, 0) == 2)
6198 {
6199 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6200 }
6201 else
6202 {
6203 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6204 int i;
6205
6206 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6207 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6208
6209 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6210 }
6211
6212 return pattern;
6213 }
6214
6215 /* Indicate that INSN cannot be duplicated. This is the case for
6216 execute insns that carry a unique label. */
6217
6218 static bool
6219 s390_cannot_copy_insn_p (rtx insn)
6220 {
6221 rtx label = s390_execute_label (insn);
6222 return label && label != const0_rtx;
6223 }
6224
6225 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6226 do not emit the pool base label. */
6227
6228 static void
6229 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6230 {
6231 struct constant *c;
6232 rtx insn = pool->pool_insn;
6233 int i;
6234
6235 /* Switch to rodata section. */
6236 if (TARGET_CPU_ZARCH)
6237 {
6238 insn = emit_insn_after (gen_pool_section_start (), insn);
6239 INSN_ADDRESSES_NEW (insn, -1);
6240 }
6241
6242 /* Ensure minimum pool alignment. */
6243 if (TARGET_CPU_ZARCH)
6244 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6245 else
6246 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6247 INSN_ADDRESSES_NEW (insn, -1);
6248
6249 /* Emit pool base label. */
6250 if (!remote_label)
6251 {
6252 insn = emit_label_after (pool->label, insn);
6253 INSN_ADDRESSES_NEW (insn, -1);
6254 }
6255
6256 /* Dump constants in descending alignment requirement order,
6257 ensuring proper alignment for every constant. */
6258 for (i = 0; i < NR_C_MODES; i++)
6259 for (c = pool->constants[i]; c; c = c->next)
6260 {
6261 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6262 rtx value = copy_rtx (c->value);
6263 if (GET_CODE (value) == CONST
6264 && GET_CODE (XEXP (value, 0)) == UNSPEC
6265 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6266 && XVECLEN (XEXP (value, 0), 0) == 1)
6267 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6268
6269 insn = emit_label_after (c->label, insn);
6270 INSN_ADDRESSES_NEW (insn, -1);
6271
6272 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6273 gen_rtvec (1, value),
6274 UNSPECV_POOL_ENTRY);
6275 insn = emit_insn_after (value, insn);
6276 INSN_ADDRESSES_NEW (insn, -1);
6277 }
6278
6279 /* Ensure minimum alignment for instructions. */
6280 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6281 INSN_ADDRESSES_NEW (insn, -1);
6282
6283 /* Output in-pool execute template insns. */
6284 for (c = pool->execute; c; c = c->next)
6285 {
6286 insn = emit_label_after (c->label, insn);
6287 INSN_ADDRESSES_NEW (insn, -1);
6288
6289 insn = emit_insn_after (s390_execute_target (c->value), insn);
6290 INSN_ADDRESSES_NEW (insn, -1);
6291 }
6292
6293 /* Switch back to previous section. */
6294 if (TARGET_CPU_ZARCH)
6295 {
6296 insn = emit_insn_after (gen_pool_section_end (), insn);
6297 INSN_ADDRESSES_NEW (insn, -1);
6298 }
6299
6300 insn = emit_barrier_after (insn);
6301 INSN_ADDRESSES_NEW (insn, -1);
6302
6303 /* Remove placeholder insn. */
6304 remove_insn (pool->pool_insn);
6305 }
6306
6307 /* Free all memory used by POOL. */
6308
6309 static void
6310 s390_free_pool (struct constant_pool *pool)
6311 {
6312 struct constant *c, *next;
6313 int i;
6314
6315 for (i = 0; i < NR_C_MODES; i++)
6316 for (c = pool->constants[i]; c; c = next)
6317 {
6318 next = c->next;
6319 free (c);
6320 }
6321
6322 for (c = pool->execute; c; c = next)
6323 {
6324 next = c->next;
6325 free (c);
6326 }
6327
6328 BITMAP_FREE (pool->insns);
6329 free (pool);
6330 }
6331
6332
6333 /* Collect main literal pool. Return NULL on overflow. */
6334
6335 static struct constant_pool *
6336 s390_mainpool_start (void)
6337 {
6338 struct constant_pool *pool;
6339 rtx insn;
6340
6341 pool = s390_alloc_pool ();
6342
6343 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6344 {
6345 if (GET_CODE (insn) == INSN
6346 && GET_CODE (PATTERN (insn)) == SET
6347 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6348 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6349 {
6350 gcc_assert (!pool->pool_insn);
6351 pool->pool_insn = insn;
6352 }
6353
6354 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6355 {
6356 s390_add_execute (pool, insn);
6357 }
6358 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6359 {
6360 rtx pool_ref = NULL_RTX;
6361 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6362 if (pool_ref)
6363 {
6364 rtx constant = get_pool_constant (pool_ref);
6365 enum machine_mode mode = get_pool_mode (pool_ref);
6366 s390_add_constant (pool, constant, mode);
6367 }
6368 }
6369
6370 /* If hot/cold partitioning is enabled, we have to make sure that
6371 the literal pool is emitted in the same section where the
6372 initialization of the literal pool base pointer takes place.
6373 emit_pool_after is only used in the non-overflow case on
6374 non-Z CPUs, where we can emit the literal pool at the end of the
6375 function body within the text section. */
6376 if (NOTE_P (insn)
6377 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6378 && !pool->emit_pool_after)
6379 pool->emit_pool_after = PREV_INSN (insn);
6380 }
6381
6382 gcc_assert (pool->pool_insn || pool->size == 0);
6383
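/* Pool entries are addressed via a 12-bit unsigned displacement from the base register, so a single pool must stay below 4 KB; otherwise we fall back to chunkification.  */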
6384 if (pool->size >= 4096)
6385 {
6386 /* We're going to chunkify the pool, so remove the main
6387 pool placeholder insn. */
6388 remove_insn (pool->pool_insn);
6389
6390 s390_free_pool (pool);
6391 pool = NULL;
6392 }
6393
6394 /* If the function ends with the section where the literal pool
6395 should be emitted, set the marker to its end. */
6396 if (pool && !pool->emit_pool_after)
6397 pool->emit_pool_after = get_last_insn ();
6398
6399 return pool;
6400 }
6401
6402 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6403 Modify the current function to output the pool constants as well as
6404 the pool register setup instruction. */
6405
6406 static void
6407 s390_mainpool_finish (struct constant_pool *pool)
6408 {
6409 rtx base_reg = cfun->machine->base_reg;
6410 rtx insn;
6411
6412 /* If the pool is empty, we're done. */
6413 if (pool->size == 0)
6414 {
6415 /* We don't actually need a base register after all. */
6416 cfun->machine->base_reg = NULL_RTX;
6417
6418 if (pool->pool_insn)
6419 remove_insn (pool->pool_insn);
6420 s390_free_pool (pool);
6421 return;
6422 }
6423
6424 /* We need correct insn addresses. */
6425 shorten_branches (get_insns ());
6426
6427 /* On zSeries, we use a LARL to load the pool register. The pool is
6428 located in the .rodata section, so we emit it after the function. */
6429 if (TARGET_CPU_ZARCH)
6430 {
6431 insn = gen_main_base_64 (base_reg, pool->label);
6432 insn = emit_insn_after (insn, pool->pool_insn);
6433 INSN_ADDRESSES_NEW (insn, -1);
6434 remove_insn (pool->pool_insn);
6435
6436 insn = get_last_insn ();
6437 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6438 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6439
6440 s390_dump_pool (pool, 0);
6441 }
6442
6443 /* On S/390, if the total size of the function's code plus literal pool
6444 does not exceed 4096 bytes, we use BASR to set up a function base
6445 pointer, and emit the literal pool at the end of the function. */
6446 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6447 + pool->size + 8 /* alignment slop */ < 4096)
6448 {
6449 insn = gen_main_base_31_small (base_reg, pool->label);
6450 insn = emit_insn_after (insn, pool->pool_insn);
6451 INSN_ADDRESSES_NEW (insn, -1);
6452 remove_insn (pool->pool_insn);
6453
6454 insn = emit_label_after (pool->label, insn);
6455 INSN_ADDRESSES_NEW (insn, -1);
6456
6457 /* emit_pool_after will be set by s390_mainpool_start to the
6458 last insn of the section where the literal pool should be
6459 emitted. */
6460 insn = pool->emit_pool_after;
6461
6462 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6463 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6464
6465 s390_dump_pool (pool, 1);
6466 }
6467
6468 /* Otherwise, we emit an inline literal pool and use BASR to branch
6469 over it, setting up the pool register at the same time. */
6470 else
6471 {
6472 rtx pool_end = gen_label_rtx ();
6473
6474 insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
6475 insn = emit_insn_after (insn, pool->pool_insn);
6476 INSN_ADDRESSES_NEW (insn, -1);
6477 remove_insn (pool->pool_insn);
6478
6479 insn = emit_label_after (pool->label, insn);
6480 INSN_ADDRESSES_NEW (insn, -1);
6481
6482 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6483 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6484
6485 insn = emit_label_after (pool_end, pool->pool_insn);
6486 INSN_ADDRESSES_NEW (insn, -1);
6487
6488 s390_dump_pool (pool, 1);
6489 }
6490
6491
6492 /* Replace all literal pool references. */
6493
6494 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6495 {
6496 if (INSN_P (insn))
6497 replace_ltrel_base (&PATTERN (insn));
6498
6499 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6500 {
6501 rtx addr, pool_ref = NULL_RTX;
6502 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6503 if (pool_ref)
6504 {
6505 if (s390_execute_label (insn))
6506 addr = s390_find_execute (pool, insn);
6507 else
6508 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6509 get_pool_mode (pool_ref));
6510
6511 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6512 INSN_CODE (insn) = -1;
6513 }
6514 }
6515 }
6516
6517
6518 /* Free the pool. */
6519 s390_free_pool (pool);
6520 }
6521
6522 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6523 We have decided we cannot use this pool, so revert all changes
6524 to the current function that were done by s390_mainpool_start. */
6525 static void
6526 s390_mainpool_cancel (struct constant_pool *pool)
6527 {
6528 /* We didn't actually change the instruction stream, so simply
6529 free the pool memory. */
6530 s390_free_pool (pool);
6531 }
6532
6533
6534 /* Chunkify the literal pool. */
6535
6536 #define S390_POOL_CHUNK_MIN 0xc00
6537 #define S390_POOL_CHUNK_MAX 0xe00
6538
6539 static struct constant_pool *
6540 s390_chunkify_start (void)
6541 {
6542 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6543 int extra_size = 0;
6544 bitmap far_labels;
6545 rtx pending_ltrel = NULL_RTX;
6546 rtx insn;
6547
6548 rtx (*gen_reload_base) (rtx, rtx) =
6549 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6550
6551
6552 /* We need correct insn addresses. */
6553
6554 shorten_branches (get_insns ());
6555
6556 /* Scan all insns and move literals to pool chunks. */
6557
6558 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6559 {
6560 bool section_switch_p = false;
6561
6562 /* Check for pending LTREL_BASE. */
6563 if (INSN_P (insn))
6564 {
6565 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6566 if (ltrel_base)
6567 {
6568 gcc_assert (ltrel_base == pending_ltrel);
6569 pending_ltrel = NULL_RTX;
6570 }
6571 }
6572
6573 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6574 {
6575 if (!curr_pool)
6576 curr_pool = s390_start_pool (&pool_list, insn);
6577
6578 s390_add_execute (curr_pool, insn);
6579 s390_add_pool_insn (curr_pool, insn);
6580 }
6581 else if (GET_CODE (insn) == INSN || CALL_P (insn))
6582 {
6583 rtx pool_ref = NULL_RTX;
6584 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6585 if (pool_ref)
6586 {
6587 rtx constant = get_pool_constant (pool_ref);
6588 enum machine_mode mode = get_pool_mode (pool_ref);
6589
6590 if (!curr_pool)
6591 curr_pool = s390_start_pool (&pool_list, insn);
6592
6593 s390_add_constant (curr_pool, constant, mode);
6594 s390_add_pool_insn (curr_pool, insn);
6595
6596 /* Don't split the pool chunk between a LTREL_OFFSET load
6597 and the corresponding LTREL_BASE. */
6598 if (GET_CODE (constant) == CONST
6599 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6600 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6601 {
6602 gcc_assert (!pending_ltrel);
6603 pending_ltrel = pool_ref;
6604 }
6605 }
6606 /* Make sure we do not split between a call and its
6607 corresponding CALL_ARG_LOCATION note. */
6608 if (CALL_P (insn))
6609 {
6610 rtx next = NEXT_INSN (insn);
6611 if (next && NOTE_P (next)
6612 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
6613 continue;
6614 }
6615 }
6616
6617 if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CODE_LABEL)
6618 {
6619 if (curr_pool)
6620 s390_add_pool_insn (curr_pool, insn);
6621 /* An LTREL_BASE must follow within the same basic block. */
6622 gcc_assert (!pending_ltrel);
6623 }
6624
6625 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
6626 section_switch_p = true;
6627
6628 if (!curr_pool
6629 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6630 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6631 continue;
6632
6633 if (TARGET_CPU_ZARCH)
6634 {
6635 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6636 continue;
6637
6638 s390_end_pool (curr_pool, NULL_RTX);
6639 curr_pool = NULL;
6640 }
6641 else
6642 {
6643 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6644 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6645 + extra_size;
6646
6647 /* We will later have to insert base register reload insns.
6648 Those will have an effect on code size, which we need to
6649 consider here. This calculation makes rather pessimistic
6650 worst-case assumptions. */
6651 if (GET_CODE (insn) == CODE_LABEL)
6652 extra_size += 6;
6653
6654 if (chunk_size < S390_POOL_CHUNK_MIN
6655 && curr_pool->size < S390_POOL_CHUNK_MIN
6656 && !section_switch_p)
6657 continue;
6658
6659 /* Pool chunks can only be inserted after BARRIERs ... */
6660 if (GET_CODE (insn) == BARRIER)
6661 {
6662 s390_end_pool (curr_pool, insn);
6663 curr_pool = NULL;
6664 extra_size = 0;
6665 }
6666
6667 /* ... so if we don't find one in time, create one. */
6668 else if (chunk_size > S390_POOL_CHUNK_MAX
6669 || curr_pool->size > S390_POOL_CHUNK_MAX
6670 || section_switch_p)
6671 {
6672 rtx label, jump, barrier;
6673
6674 if (!section_switch_p)
6675 {
6676 /* We can insert the barrier only after a 'real' insn. */
6677 if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
6678 continue;
6679 if (get_attr_length (insn) == 0)
6680 continue;
6681 /* Don't separate LTREL_BASE from the corresponding
6682 LTREL_OFFSET load. */
6683 if (pending_ltrel)
6684 continue;
6685 }
6686 else
6687 {
6688 gcc_assert (!pending_ltrel);
6689
6690 /* The old pool has to end before the section switch
6691 note in order to make it part of the current
6692 section. */
6693 insn = PREV_INSN (insn);
6694 }
6695
6696 label = gen_label_rtx ();
6697 jump = emit_jump_insn_after (gen_jump (label), insn);
6698 barrier = emit_barrier_after (jump);
6699 insn = emit_label_after (label, barrier);
6700 JUMP_LABEL (jump) = label;
6701 LABEL_NUSES (label) = 1;
6702
6703 INSN_ADDRESSES_NEW (jump, -1);
6704 INSN_ADDRESSES_NEW (barrier, -1);
6705 INSN_ADDRESSES_NEW (insn, -1);
6706
6707 s390_end_pool (curr_pool, barrier);
6708 curr_pool = NULL;
6709 extra_size = 0;
6710 }
6711 }
6712 }
6713
6714 if (curr_pool)
6715 s390_end_pool (curr_pool, NULL_RTX);
6716 gcc_assert (!pending_ltrel);
6717
6718 /* Find all labels that are branched into
6719 from an insn belonging to a different chunk. */
6720
6721 far_labels = BITMAP_ALLOC (NULL);
6722
6723 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6724 {
6725 /* Labels marked with LABEL_PRESERVE_P can be the target
6726 of non-local jumps, so we have to mark them.
6727 The same holds for named labels.
6728
6729 Don't do that, however, if it is the label before
6730 a jump table. */
6731
6732 if (GET_CODE (insn) == CODE_LABEL
6733 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
6734 {
6735 rtx vec_insn = next_real_insn (insn);
6736 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6737 PATTERN (vec_insn) : NULL_RTX;
6738 if (!vec_pat
6739 || !(GET_CODE (vec_pat) == ADDR_VEC
6740 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6741 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
6742 }
6743
6744 /* If we have a direct jump (conditional or unconditional)
6745 or a casesi jump, check all potential targets. */
6746 else if (GET_CODE (insn) == JUMP_INSN)
6747 {
6748 rtx pat = PATTERN (insn);
6749 if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 2)
6750 pat = XVECEXP (pat, 0, 0);
6751
6752 if (GET_CODE (pat) == SET)
6753 {
6754 rtx label = JUMP_LABEL (insn);
6755 if (label)
6756 {
6757 if (s390_find_pool (pool_list, label)
6758 != s390_find_pool (pool_list, insn))
6759 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6760 }
6761 }
6762 else if (GET_CODE (pat) == PARALLEL
6763 && XVECLEN (pat, 0) == 2
6764 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
6765 && GET_CODE (XVECEXP (pat, 0, 1)) == USE
6766 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == LABEL_REF)
6767 {
6768 /* Find the jump table used by this casesi jump. */
6769 rtx vec_label = XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0);
6770 rtx vec_insn = next_real_insn (vec_label);
6771 rtx vec_pat = vec_insn && GET_CODE (vec_insn) == JUMP_INSN ?
6772 PATTERN (vec_insn) : NULL_RTX;
6773 if (vec_pat
6774 && (GET_CODE (vec_pat) == ADDR_VEC
6775 || GET_CODE (vec_pat) == ADDR_DIFF_VEC))
6776 {
6777 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
6778
6779 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
6780 {
6781 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
6782
6783 if (s390_find_pool (pool_list, label)
6784 != s390_find_pool (pool_list, insn))
6785 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
6786 }
6787 }
6788 }
6789 }
6790 }
6791
6792 /* Insert base register reload insns before every pool. */
6793
6794 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6795 {
6796 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6797 curr_pool->label);
6798 rtx insn = curr_pool->first_insn;
6799 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
6800 }
6801
6802 /* Insert base register reload insns at every far label. */
6803
6804 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6805 if (GET_CODE (insn) == CODE_LABEL
6806 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
6807 {
6808 struct constant_pool *pool = s390_find_pool (pool_list, insn);
6809 if (pool)
6810 {
6811 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
6812 pool->label);
6813 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
6814 }
6815 }
6816
6817
6818 BITMAP_FREE (far_labels);
6819
6820
6821 /* Recompute insn addresses. */
6822
6823 init_insn_lengths ();
6824 shorten_branches (get_insns ());
6825
6826 return pool_list;
6827 }
6828
6829 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6830 After we have decided to use this list, finish implementing
6831 all changes to the current function as required. */
6832
6833 static void
6834 s390_chunkify_finish (struct constant_pool *pool_list)
6835 {
6836 struct constant_pool *curr_pool = NULL;
6837 rtx insn;
6838
6839
6840 /* Replace all literal pool references. */
6841
6842 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6843 {
6844 if (INSN_P (insn))
6845 replace_ltrel_base (&PATTERN (insn));
6846
6847 curr_pool = s390_find_pool (pool_list, insn);
6848 if (!curr_pool)
6849 continue;
6850
6851 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
6852 {
6853 rtx addr, pool_ref = NULL_RTX;
6854 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6855 if (pool_ref)
6856 {
6857 if (s390_execute_label (insn))
6858 addr = s390_find_execute (curr_pool, insn);
6859 else
6860 addr = s390_find_constant (curr_pool,
6861 get_pool_constant (pool_ref),
6862 get_pool_mode (pool_ref));
6863
6864 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6865 INSN_CODE (insn) = -1;
6866 }
6867 }
6868 }
6869
6870 /* Dump out all literal pools. */
6871
6872 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6873 s390_dump_pool (curr_pool, 0);
6874
6875 /* Free pool list. */
6876
6877 while (pool_list)
6878 {
6879 struct constant_pool *next = pool_list->next;
6880 s390_free_pool (pool_list);
6881 pool_list = next;
6882 }
6883 }
6884
6885 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
6886 We have decided we cannot use this list, so revert all changes
6887 to the current function that were done by s390_chunkify_start. */
6888
6889 static void
6890 s390_chunkify_cancel (struct constant_pool *pool_list)
6891 {
6892 struct constant_pool *curr_pool = NULL;
6893 rtx insn;
6894
6895 /* Remove all pool placeholder insns. */
6896
6897 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
6898 {
6899 /* Did we insert an extra barrier? Remove it. */
6900 rtx barrier = PREV_INSN (curr_pool->pool_insn);
6901 rtx jump = barrier? PREV_INSN (barrier) : NULL_RTX;
6902 rtx label = NEXT_INSN (curr_pool->pool_insn);
6903
6904 if (jump && GET_CODE (jump) == JUMP_INSN
6905 && barrier && GET_CODE (barrier) == BARRIER
6906 && label && GET_CODE (label) == CODE_LABEL
6907 && GET_CODE (PATTERN (jump)) == SET
6908 && SET_DEST (PATTERN (jump)) == pc_rtx
6909 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
6910 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
6911 {
6912 remove_insn (jump);
6913 remove_insn (barrier);
6914 remove_insn (label);
6915 }
6916
6917 remove_insn (curr_pool->pool_insn);
6918 }
6919
6920 /* Remove all base register reload insns. */
6921
6922 for (insn = get_insns (); insn; )
6923 {
6924 rtx next_insn = NEXT_INSN (insn);
6925
6926 if (GET_CODE (insn) == INSN
6927 && GET_CODE (PATTERN (insn)) == SET
6928 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
6929 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
6930 remove_insn (insn);
6931
6932 insn = next_insn;
6933 }
6934
6935 /* Free pool list. */
6936
6937 while (pool_list)
6938 {
6939 struct constant_pool *next = pool_list->next;
6940 s390_free_pool (pool_list);
6941 pool_list = next;
6942 }
6943 }
6944
6945 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
6946
6947 void
6948 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
6949 {
6950 REAL_VALUE_TYPE r;
6951
6952 switch (GET_MODE_CLASS (mode))
6953 {
6954 case MODE_FLOAT:
6955 case MODE_DECIMAL_FLOAT:
6956 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
6957
6958 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
6959 assemble_real (r, mode, align);
6960 break;
6961
6962 case MODE_INT:
6963 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
6964 mark_symbol_refs_as_used (exp);
6965 break;
6966
6967 default:
6968 gcc_unreachable ();
6969 }
6970 }
6971
6972
6973 /* Return an RTL expression representing the value of the return address
6974 for the frame COUNT steps up from the current frame. FRAME is the
6975 frame pointer of that frame. */
6976
6977 rtx
6978 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
6979 {
6980 int offset;
6981 rtx addr;
6982
6983 /* Without backchain, we fail for all but the current frame. */
6984
6985 if (!TARGET_BACKCHAIN && count > 0)
6986 return NULL_RTX;
6987
6988 /* For the current frame, we need to make sure the initial
6989 value of RETURN_REGNUM is actually saved. */
6990
6991 if (count == 0)
6992 {
6993 /* On non-z architectures branch splitting could overwrite r14. */
6994 if (TARGET_CPU_ZARCH)
6995 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
6996 else
6997 {
6998 cfun_frame_layout.save_return_addr_p = true;
6999 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7000 }
7001 }
7002
7003 if (TARGET_PACKED_STACK)
7004 offset = -2 * UNITS_PER_LONG;
7005 else
7006 offset = RETURN_REGNUM * UNITS_PER_LONG;
7007
7008 addr = plus_constant (frame, offset);
7009 addr = memory_address (Pmode, addr);
7010 return gen_rtx_MEM (Pmode, addr);
7011 }
7012
7013 /* Return an RTL expression representing the back chain stored in
7014 the current stack frame. */
7015
7016 rtx
7017 s390_back_chain_rtx (void)
7018 {
7019 rtx chain;
7020
7021 gcc_assert (TARGET_BACKCHAIN);
7022
7023 if (TARGET_PACKED_STACK)
7024 chain = plus_constant (stack_pointer_rtx,
7025 STACK_POINTER_OFFSET - UNITS_PER_LONG);
7026 else
7027 chain = stack_pointer_rtx;
7028
7029 chain = gen_rtx_MEM (Pmode, chain);
7030 return chain;
7031 }
7032
7033 /* Find the first call-clobbered register unused in a function.
7034 This could be used as a base register in a leaf function
7035 or for holding the return address before the epilogue. */
7036
7037 static int
7038 find_unused_clobbered_reg (void)
7039 {
7040 int i;
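/* Only the call-clobbered GPRs 0 through 5 are considered here.  */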
7041 for (i = 0; i < 6; i++)
7042 if (!df_regs_ever_live_p (i))
7043 return i;
7044 return 0;
7045 }
7046
7047
7048 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7049 clobbered hard regs in SETREG. */
7050
7051 static void
7052 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7053 {
7054 int *regs_ever_clobbered = (int *)data;
7055 unsigned int i, regno;
7056 enum machine_mode mode = GET_MODE (setreg);
7057
7058 if (GET_CODE (setreg) == SUBREG)
7059 {
7060 rtx inner = SUBREG_REG (setreg);
7061 if (!GENERAL_REG_P (inner))
7062 return;
7063 regno = subreg_regno (setreg);
7064 }
7065 else if (GENERAL_REG_P (setreg))
7066 regno = REGNO (setreg);
7067 else
7068 return;
7069
7070 for (i = regno;
7071 i < regno + HARD_REGNO_NREGS (regno, mode);
7072 i++)
7073 regs_ever_clobbered[i] = 1;
7074 }
7075
7076 /* Walks through all basic blocks of the current function looking
7077 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
7078 of the passed integer array REGS_EVER_CLOBBERED are set to one for
7079 each of those regs. */
7080
7081 static void
7082 s390_regs_ever_clobbered (int *regs_ever_clobbered)
7083 {
7084 basic_block cur_bb;
7085 rtx cur_insn;
7086 unsigned int i;
7087
7088 memset (regs_ever_clobbered, 0, 16 * sizeof (int));
7089
7090 /* For non-leaf functions we have to consider all call clobbered regs to be
7091 clobbered. */
7092 if (!current_function_is_leaf)
7093 {
7094 for (i = 0; i < 16; i++)
7095 regs_ever_clobbered[i] = call_really_used_regs[i];
7096 }
7097
7098 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7099 this work is done by liveness analysis (mark_regs_live_at_end).
7100 Special care is needed for functions containing landing pads. Landing pads
7101 may use the eh registers, but the code which sets these registers is not
7102 contained in that function. Hence s390_regs_ever_clobbered is not able to
7103 deal with this automatically. */
7104 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7105 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7106 if (crtl->calls_eh_return
7107 || (cfun->machine->has_landing_pad_p
7108 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7109 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
7110
7111 /* For nonlocal gotos all call-saved registers have to be saved.
7112 This flag is also set for the unwinding code in libgcc.
7113 See expand_builtin_unwind_init. For regs_ever_live this is done by
7114 reload. */
7115 if (cfun->has_nonlocal_label)
7116 for (i = 0; i < 16; i++)
7117 if (!call_really_used_regs[i])
7118 regs_ever_clobbered[i] = 1;
7119
7120 FOR_EACH_BB (cur_bb)
7121 {
7122 FOR_BB_INSNS (cur_bb, cur_insn)
7123 {
7124 if (INSN_P (cur_insn))
7125 note_stores (PATTERN (cur_insn),
7126 s390_reg_clobbered_rtx,
7127 regs_ever_clobbered);
7128 }
7129 }
7130 }
7131
7132 /* Determine the frame area which actually has to be accessed
7133 in the function epilogue. The values are stored at the
7134 given pointers AREA_BOTTOM (address of the lowest used stack
7135 address) and AREA_TOP (address of the first item which does
7136 not belong to the stack frame). */
7137
7138 static void
7139 s390_frame_area (int *area_bottom, int *area_top)
7140 {
7141 int b, t;
7142 int i;
7143
7144 b = INT_MAX;
7145 t = INT_MIN;
7146
7147 if (cfun_frame_layout.first_restore_gpr != -1)
7148 {
7149 b = (cfun_frame_layout.gprs_offset
7150 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7151 t = b + (cfun_frame_layout.last_restore_gpr
7152 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7153 }
7154
7155 if (TARGET_64BIT && cfun_save_high_fprs_p)
7156 {
7157 b = MIN (b, cfun_frame_layout.f8_offset);
7158 t = MAX (t, (cfun_frame_layout.f8_offset
7159 + cfun_frame_layout.high_fprs * 8));
7160 }
7161
7162 if (!TARGET_64BIT)
7163 for (i = 2; i < 4; i++)
7164 if (cfun_fpr_bit_p (i))
7165 {
7166 b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
7167 t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
7168 }
7169
7170 *area_bottom = b;
7171 *area_top = t;
7172 }
7173
7174 /* Fill cfun->machine with info about register usage of current function.
7175 Return in CLOBBERED_REGS which GPRs are currently considered set. */
7176
7177 static void
7178 s390_register_info (int clobbered_regs[])
7179 {
7180 int i, j;
7181
7182 /* FPRs 8 - 15 are call-saved in the 64-bit ABI. */
7183 cfun_frame_layout.fpr_bitmap = 0;
7184 cfun_frame_layout.high_fprs = 0;
7185 if (TARGET_64BIT)
7186 for (i = 24; i < 32; i++)
7187 if (df_regs_ever_live_p (i) && !global_regs[i])
7188 {
7189 cfun_set_fpr_bit (i - 16);
7190 cfun_frame_layout.high_fprs++;
7191 }
7192
7193 /* Find first and last gpr to be saved. We trust regs_ever_live
7194 data, except that we don't save and restore global registers.
7195
7196 Also, all registers with special meaning to the compiler need
7197 to be handled separately. */
7198
7199 s390_regs_ever_clobbered (clobbered_regs);
7200
7201 for (i = 0; i < 16; i++)
7202 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i] && !fixed_regs[i];
7203
7204 if (frame_pointer_needed)
7205 clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
7206
7207 if (flag_pic)
7208 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7209 |= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7210
7211 clobbered_regs[BASE_REGNUM]
7212 |= (cfun->machine->base_reg
7213 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7214
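/* The return register must be treated as clobbered whenever anything beyond a plain leaf return might use it: calls, TPF profiling, pending branch splitting, an explicitly saved return address, EH returns, or varargs handling.  */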
7215 clobbered_regs[RETURN_REGNUM]
7216 |= (!current_function_is_leaf
7217 || TARGET_TPF_PROFILING
7218 || cfun->machine->split_branches_pending_p
7219 || cfun_frame_layout.save_return_addr_p
7220 || crtl->calls_eh_return
7221 || cfun->stdarg);
7222
7223 clobbered_regs[STACK_POINTER_REGNUM]
7224 |= (!current_function_is_leaf
7225 || TARGET_TPF_PROFILING
7226 || cfun_save_high_fprs_p
7227 || get_frame_size () > 0
7228 || cfun->calls_alloca
7229 || cfun->stdarg);
7230
7231 for (i = 6; i < 16; i++)
7232 if (df_regs_ever_live_p (i) || clobbered_regs[i])
7233 break;
7234 for (j = 15; j > i; j--)
7235 if (df_regs_ever_live_p (j) || clobbered_regs[j])
7236 break;
7237
7238 if (i == 16)
7239 {
7240 /* Nothing to save/restore. */
7241 cfun_frame_layout.first_save_gpr_slot = -1;
7242 cfun_frame_layout.last_save_gpr_slot = -1;
7243 cfun_frame_layout.first_save_gpr = -1;
7244 cfun_frame_layout.first_restore_gpr = -1;
7245 cfun_frame_layout.last_save_gpr = -1;
7246 cfun_frame_layout.last_restore_gpr = -1;
7247 }
7248 else
7249 {
7250 /* Save slots for gprs from i to j. */
7251 cfun_frame_layout.first_save_gpr_slot = i;
7252 cfun_frame_layout.last_save_gpr_slot = j;
7253
7254 for (i = cfun_frame_layout.first_save_gpr_slot;
7255 i < cfun_frame_layout.last_save_gpr_slot + 1;
7256 i++)
7257 if (clobbered_regs[i])
7258 break;
7259
7260 for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
7261 if (clobbered_regs[j])
7262 break;
7263
7264 if (i == cfun_frame_layout.last_save_gpr_slot + 1)
7265 {
7266 /* Nothing to save/restore. */
7267 cfun_frame_layout.first_save_gpr = -1;
7268 cfun_frame_layout.first_restore_gpr = -1;
7269 cfun_frame_layout.last_save_gpr = -1;
7270 cfun_frame_layout.last_restore_gpr = -1;
7271 }
7272 else
7273 {
7274 /* Save / Restore from gpr i to j. */
7275 cfun_frame_layout.first_save_gpr = i;
7276 cfun_frame_layout.first_restore_gpr = i;
7277 cfun_frame_layout.last_save_gpr = j;
7278 cfun_frame_layout.last_restore_gpr = j;
7279 }
7280 }
7281
7282 if (cfun->stdarg)
7283 {
7284 /* Varargs functions need to save gprs 2 to 6. */
7285 if (cfun->va_list_gpr_size
7286 && crtl->args.info.gprs < GP_ARG_NUM_REG)
7287 {
7288 int min_gpr = crtl->args.info.gprs;
7289 int max_gpr = min_gpr + cfun->va_list_gpr_size;
7290 if (max_gpr > GP_ARG_NUM_REG)
7291 max_gpr = GP_ARG_NUM_REG;
7292
7293 if (cfun_frame_layout.first_save_gpr == -1
7294 || cfun_frame_layout.first_save_gpr > 2 + min_gpr)
7295 {
7296 cfun_frame_layout.first_save_gpr = 2 + min_gpr;
7297 cfun_frame_layout.first_save_gpr_slot = 2 + min_gpr;
7298 }
7299
7300 if (cfun_frame_layout.last_save_gpr == -1
7301 || cfun_frame_layout.last_save_gpr < 2 + max_gpr - 1)
7302 {
7303 cfun_frame_layout.last_save_gpr = 2 + max_gpr - 1;
7304 cfun_frame_layout.last_save_gpr_slot = 2 + max_gpr - 1;
7305 }
7306 }
7307
7308 /* Mark f0, f2 for 31 bit and f0-f4 for 64 bit to be saved. */
7309 if (TARGET_HARD_FLOAT && cfun->va_list_fpr_size
7310 && crtl->args.info.fprs < FP_ARG_NUM_REG)
7311 {
7312 int min_fpr = crtl->args.info.fprs;
7313 int max_fpr = min_fpr + cfun->va_list_fpr_size;
7314 if (max_fpr > FP_ARG_NUM_REG)
7315 max_fpr = FP_ARG_NUM_REG;
7316
7317 /* ??? This is currently required to ensure proper location
7318 of the fpr save slots within the va_list save area. */
7319 if (TARGET_PACKED_STACK)
7320 min_fpr = 0;
7321
7322 for (i = min_fpr; i < max_fpr; i++)
7323 cfun_set_fpr_bit (i);
7324 }
7325 }
7326
7327 if (!TARGET_64BIT)
7328 for (i = 2; i < 4; i++)
7329 if (df_regs_ever_live_p (i + 16) && !global_regs[i + 16])
7330 cfun_set_fpr_bit (i);
7331 }
7332
7333 /* Fill cfun->machine with info about frame of current function. */
7334
7335 static void
7336 s390_frame_info (void)
7337 {
7338 int i;
7339
7340 cfun_frame_layout.frame_size = get_frame_size ();
7341 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7342 fatal_error ("total size of local variables exceeds architecture limit");
7343
7344 if (!TARGET_PACKED_STACK)
7345 {
7346 cfun_frame_layout.backchain_offset = 0;
7347 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7348 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7349 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
7350 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
7351 * UNITS_PER_LONG);
7352 }
7353 else if (TARGET_BACKCHAIN) /* kernel stack layout */
7354 {
7355 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
7356 - UNITS_PER_LONG);
7357 cfun_frame_layout.gprs_offset
7358 = (cfun_frame_layout.backchain_offset
7359 - (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
7360 * UNITS_PER_LONG);
7361
7362 if (TARGET_64BIT)
7363 {
7364 cfun_frame_layout.f4_offset
7365 = (cfun_frame_layout.gprs_offset
7366 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7367
7368 cfun_frame_layout.f0_offset
7369 = (cfun_frame_layout.f4_offset
7370 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7371 }
7372 else
7373 {
7374 /* On 31 bit we have to take care of the alignment of the
7375 floating point regs to provide the fastest access. */
7376 cfun_frame_layout.f0_offset
7377 = ((cfun_frame_layout.gprs_offset
7378 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
7379 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7380
7381 cfun_frame_layout.f4_offset
7382 = (cfun_frame_layout.f0_offset
7383 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7384 }
7385 }
7386 else /* no backchain */
7387 {
7388 cfun_frame_layout.f4_offset
7389 = (STACK_POINTER_OFFSET
7390 - 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
7391
7392 cfun_frame_layout.f0_offset
7393 = (cfun_frame_layout.f4_offset
7394 - 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
7395
7396 cfun_frame_layout.gprs_offset
7397 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7398 }
7399
7400 if (current_function_is_leaf
7401 && !TARGET_TPF_PROFILING
7402 && cfun_frame_layout.frame_size == 0
7403 && !cfun_save_high_fprs_p
7404 && !cfun->calls_alloca
7405 && !cfun->stdarg)
7406 return;
7407
7408 if (!TARGET_PACKED_STACK)
7409 cfun_frame_layout.frame_size += (STACK_POINTER_OFFSET
7410 + crtl->outgoing_args_size
7411 + cfun_frame_layout.high_fprs * 8);
7412 else
7413 {
7414 if (TARGET_BACKCHAIN)
7415 cfun_frame_layout.frame_size += UNITS_PER_LONG;
7416
7417 /* No alignment trouble here because f8-f15 are only saved under
7418 64 bit. */
7419 cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
7420 cfun_frame_layout.f4_offset),
7421 cfun_frame_layout.gprs_offset)
7422 - cfun_frame_layout.high_fprs * 8);
7423
7424 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7425
7426 for (i = 0; i < 8; i++)
7427 if (cfun_fpr_bit_p (i))
7428 cfun_frame_layout.frame_size += 8;
7429
7430 cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
7431
7432 /* If under 31 bit an odd number of gprs has to be saved, we have to adjust
7433 the frame size to sustain 8-byte alignment of stack frames. */
7434 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7435 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7436 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
7437
7438 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
7439 }
7440 }
7441
7442 /* Generate frame layout. Fills in register and frame data for the current
7443 function in cfun->machine. This routine can be called multiple times;
7444 it will re-do the complete frame layout every time. */
7445
7446 static void
7447 s390_init_frame_layout (void)
7448 {
7449 HOST_WIDE_INT frame_size;
7450 int base_used;
7451 int clobbered_regs[16];
7452
7453 /* On S/390 machines, we may need to perform branch splitting, which
7454 will require both base and return address register. We have no
7455 choice but to assume we're going to need them until right at the
7456 end of the machine dependent reorg phase. */
7457 if (!TARGET_CPU_ZARCH)
7458 cfun->machine->split_branches_pending_p = true;
7459
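/* The frame size influences whether the base register is needed, and the choice of base register in turn influences the frame size, so iterate until the layout reaches a fixed point.  */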
7460 do
7461 {
7462 frame_size = cfun_frame_layout.frame_size;
7463
7464 /* Try to predict whether we'll need the base register. */
7465 base_used = cfun->machine->split_branches_pending_p
7466 || crtl->uses_const_pool
7467 || (!DISP_IN_RANGE (frame_size)
7468 && !CONST_OK_FOR_K (frame_size));
7469
7470 /* Decide which register to use as literal pool base. In small
7471 leaf functions, try to use an unused call-clobbered register
7472 as base register to avoid save/restore overhead. */
7473 if (!base_used)
7474 cfun->machine->base_reg = NULL_RTX;
7475 else if (current_function_is_leaf && !df_regs_ever_live_p (5))
7476 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
7477 else
7478 cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7479
7480 s390_register_info (clobbered_regs);
7481 s390_frame_info ();
7482 }
7483 while (frame_size != cfun_frame_layout.frame_size);
7484 }
7485
7486 /* Update frame layout. Recompute actual register save data based on
7487 current info and update regs_ever_live for the special registers.
7488 May be called multiple times, but may never cause *more* registers
7489 to be saved than s390_init_frame_layout allocated room for. */
7490
7491 static void
7492 s390_update_frame_layout (void)
7493 {
7494 int clobbered_regs[16];
7495
7496 s390_register_info (clobbered_regs);
7497
7498 df_set_regs_ever_live (BASE_REGNUM,
7499 clobbered_regs[BASE_REGNUM] ? true : false);
7500 df_set_regs_ever_live (RETURN_REGNUM,
7501 clobbered_regs[RETURN_REGNUM] ? true : false);
7502 df_set_regs_ever_live (STACK_POINTER_REGNUM,
7503 clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
7504
7505 if (cfun->machine->base_reg)
7506 df_set_regs_ever_live (REGNO (cfun->machine->base_reg), true);
7507 }
7508
7509 /* Return true if it is legal to put a value with MODE into REGNO. */
7510
7511 bool
7512 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
7513 {
7514 switch (REGNO_REG_CLASS (regno))
7515 {
7516 case FP_REGS:
7517 if (REGNO_PAIR_OK (regno, mode))
7518 {
7519 if (mode == SImode || mode == DImode)
7520 return true;
7521
7522 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
7523 return true;
7524 }
7525 break;
7526 case ADDR_REGS:
7527 if (FRAME_REGNO_P (regno) && mode == Pmode)
7528 return true;
7529
7530 /* fallthrough */
7531 case GENERAL_REGS:
7532 if (REGNO_PAIR_OK (regno, mode))
7533 {
7534 if (TARGET_ZARCH
7535 || (mode != TFmode && mode != TCmode && mode != TDmode))
7536 return true;
7537 }
7538 break;
7539 case CC_REGS:
7540 if (GET_MODE_CLASS (mode) == MODE_CC)
7541 return true;
7542 break;
7543 case ACCESS_REGS:
7544 if (REGNO_PAIR_OK (regno, mode))
7545 {
7546 if (mode == SImode || mode == Pmode)
7547 return true;
7548 }
7549 break;
7550 default:
7551 return false;
7552 }
7553
7554 return false;
7555 }
7556
7557 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
7558
7559 bool
7560 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
7561 {
7562 /* Once we've decided upon a register to use as base register, it must
7563 no longer be used for any other purpose. */
7564 if (cfun->machine->base_reg)
7565 if (REGNO (cfun->machine->base_reg) == old_reg
7566 || REGNO (cfun->machine->base_reg) == new_reg)
7567 return false;
7568
7569 return true;
7570 }
7571
7572 /* Maximum number of registers to represent a value of mode MODE
7573 in a register of class RCLASS. */
7574
7575 int
7576 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
7577 {
7578 switch (rclass)
7579 {
7580 case FP_REGS:
7581 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
7582 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
7583 else
7584 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
7585 case ACCESS_REGS:
7586 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
7587 default:
7588 break;
7589 }
7590 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7591 }
7592
7593 /* Return true if register FROM can be eliminated via register TO. */
7594
7595 static bool
7596 s390_can_eliminate (const int from, const int to)
7597 {
7598 /* On zSeries machines, we have not marked the base register as fixed.
7599 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
7600 If a function requires the base register, we say here that this
7601 elimination cannot be performed. This will cause reload to free
7602 up the base register (as if it were fixed). On the other hand,
7603 if the current function does *not* require the base register, we
7604 say here the elimination succeeds, which in turn allows reload
7605 to allocate the base register for any other purpose. */
7606 if (from == BASE_REGNUM && to == BASE_REGNUM)
7607 {
7608 if (TARGET_CPU_ZARCH)
7609 {
7610 s390_init_frame_layout ();
7611 return cfun->machine->base_reg == NULL_RTX;
7612 }
7613
7614 return false;
7615 }
7616
7617 /* Everything else must point into the stack frame. */
7618 gcc_assert (to == STACK_POINTER_REGNUM
7619 || to == HARD_FRAME_POINTER_REGNUM);
7620
7621 gcc_assert (from == FRAME_POINTER_REGNUM
7622 || from == ARG_POINTER_REGNUM
7623 || from == RETURN_ADDRESS_POINTER_REGNUM);
7624
7625 /* Make sure we actually saved the return address. */
7626 if (from == RETURN_ADDRESS_POINTER_REGNUM)
7627 if (!crtl->calls_eh_return
7628 && !cfun->stdarg
7629 && !cfun_frame_layout.save_return_addr_p)
7630 return false;
7631
7632 return true;
7633 }
7634
7635 /* Return the offset between register FROM and register TO initially after the prologue. */
7636
7637 HOST_WIDE_INT
7638 s390_initial_elimination_offset (int from, int to)
7639 {
7640 HOST_WIDE_INT offset;
7641 int index;
7642
7643 /* ??? Why are we called for non-eliminable pairs? */
7644 if (!s390_can_eliminate (from, to))
7645 return 0;
7646
7647 switch (from)
7648 {
7649 case FRAME_POINTER_REGNUM:
7650 offset = (get_frame_size()
7651 + STACK_POINTER_OFFSET
7652 + crtl->outgoing_args_size);
7653 break;
7654
7655 case ARG_POINTER_REGNUM:
7656 s390_init_frame_layout ();
7657 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
7658 break;
7659
7660 case RETURN_ADDRESS_POINTER_REGNUM:
7661 s390_init_frame_layout ();
7662 index = RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot;
7663 gcc_assert (index >= 0);
7664 offset = cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset;
7665 offset += index * UNITS_PER_LONG;
7666 break;
7667
7668 case BASE_REGNUM:
7669 offset = 0;
7670 break;
7671
7672 default:
7673 gcc_unreachable ();
7674 }
7675
7676 return offset;
7677 }
7678
7679 /* Emit insn to save fpr REGNUM at offset OFFSET relative
7680 to register BASE. Return generated insn. */
7681
7682 static rtx
7683 save_fpr (rtx base, int offset, int regnum)
7684 {
7685 rtx addr;
7686 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7687
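/* Saves of the FP argument registers may belong to the va_list save area, so give them the varargs alias set; all other FPR saves belong to the frame.  */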
7688 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
7689 set_mem_alias_set (addr, get_varargs_alias_set ());
7690 else
7691 set_mem_alias_set (addr, get_frame_alias_set ());
7692
7693 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
7694 }
7695
7696 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
7697 to register BASE. Return generated insn. */
7698
7699 static rtx
7700 restore_fpr (rtx base, int offset, int regnum)
7701 {
7702 rtx addr;
7703 addr = gen_rtx_MEM (DFmode, plus_constant (base, offset));
7704 set_mem_alias_set (addr, get_frame_alias_set ());
7705
7706 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
7707 }
7708
7709 /* Return true if REGNO is a global register, but not one
7710 of the special ones that need to be saved/restored anyway. */
7711
7712 static inline bool
7713 global_not_special_regno_p (int regno)
7714 {
7715 return (global_regs[regno]
7716 /* These registers are special and need to be
7717 restored in any case. */
7718 && !(regno == STACK_POINTER_REGNUM
7719 || regno == RETURN_REGNUM
7720 || regno == BASE_REGNUM
7721 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
7722 }
7723
7724 /* Generate insn to save registers FIRST to LAST into
7725 the register save area located at offset OFFSET
7726 relative to register BASE. */
7727
7728 static rtx
7729 save_gprs (rtx base, int offset, int first, int last)
7730 {
7731 rtx addr, insn, note;
7732 int i;
7733
7734 addr = plus_constant (base, offset);
7735 addr = gen_rtx_MEM (Pmode, addr);
7736
7737 set_mem_alias_set (addr, get_frame_alias_set ());
7738
7739 /* Special-case single register. */
7740 if (first == last)
7741 {
7742 if (TARGET_64BIT)
7743 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
7744 else
7745 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
7746
7747 if (!global_not_special_regno_p (first))
7748 RTX_FRAME_RELATED_P (insn) = 1;
7749 return insn;
7750 }
7751
7752
7753 insn = gen_store_multiple (addr,
7754 gen_rtx_REG (Pmode, first),
7755 GEN_INT (last - first + 1));
7756
7757 if (first <= 6 && cfun->stdarg)
7758 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7759 {
7760 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
7761
7762 if (first + i <= 6)
7763 set_mem_alias_set (mem, get_varargs_alias_set ());
7764 }
7765
7766 /* We need to set the FRAME_RELATED flag on all SETs
7767 inside the store-multiple pattern.
7768
7769 However, we must not emit DWARF records for registers 2..5
7770 if they are stored for use by variable arguments ...
7771
7772 ??? Unfortunately, it is not enough to simply not set the
7773 FRAME_RELATED flags for those SETs, because the first SET
7774 of the PARALLEL is always treated as if it had the flag
7775 set, even if it does not. Therefore we emit a new pattern
7776 without those registers as REG_FRAME_RELATED_EXPR note. */
7777
7778 if (first >= 6 && !global_not_special_regno_p (first))
7779 {
7780 rtx pat = PATTERN (insn);
7781
7782 for (i = 0; i < XVECLEN (pat, 0); i++)
7783 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
7784 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
7785 0, i)))))
7786 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
7787
7788 RTX_FRAME_RELATED_P (insn) = 1;
7789 }
7790 else if (last >= 6)
7791 {
7792 int start;
7793
7794 for (start = first >= 6 ? first : 6; start <= last; start++)
7795 if (!global_not_special_regno_p (start))
7796 break;
7797
7798 if (start > last)
7799 return insn;
7800
7801 addr = plus_constant (base, offset + (start - first) * UNITS_PER_LONG);
7802 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
7803 gen_rtx_REG (Pmode, start),
7804 GEN_INT (last - start + 1));
7805 note = PATTERN (note);
7806
7807 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
7808
7809 for (i = 0; i < XVECLEN (note, 0); i++)
7810 if (GET_CODE (XVECEXP (note, 0, i)) == SET
7811 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
7812 0, i)))))
7813 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
7814
7815 RTX_FRAME_RELATED_P (insn) = 1;
7816 }
7817
7818 return insn;
7819 }
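
/* Illustration of the REG_FRAME_RELATED_EXPR handling above (an added
   explanatory note, not something this file emits): for a stdarg
   function whose prologue saves r2-r15 with a single store multiple,
   the insn itself still covers r2-r15, but the attached note describes
   only the stores of r6-r15, so the unwinder sees no records for the
   varargs slots r2-r5.  */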
7820
7821 /* Generate insn to restore registers FIRST to LAST from
7822 the register save area located at offset OFFSET
7823 relative to register BASE. */
7824
7825 static rtx
7826 restore_gprs (rtx base, int offset, int first, int last)
7827 {
7828 rtx addr, insn;
7829
7830 addr = plus_constant (base, offset);
7831 addr = gen_rtx_MEM (Pmode, addr);
7832 set_mem_alias_set (addr, get_frame_alias_set ());
7833
7834 /* Special-case single register. */
7835 if (first == last)
7836 {
7837 if (TARGET_64BIT)
7838 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
7839 else
7840 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
7841
7842 return insn;
7843 }
7844
7845 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
7846 addr,
7847 GEN_INT (last - first + 1));
7848 return insn;
7849 }
7850
7851 /* Return insn sequence to load the GOT register. */
7852
7853 static GTY(()) rtx got_symbol;
7854 rtx
7855 s390_load_got (void)
7856 {
7857 rtx insns;
7858
7859 if (!got_symbol)
7860 {
7861 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7862 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
7863 }
7864
7865 start_sequence ();
7866
7867 if (TARGET_CPU_ZARCH)
7868 {
7869 emit_move_insn (pic_offset_table_rtx, got_symbol);
7870 }
7871 else
7872 {
7873 rtx offset;
7874
7875 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
7876 UNSPEC_LTREL_OFFSET);
7877 offset = gen_rtx_CONST (Pmode, offset);
7878 offset = force_const_mem (Pmode, offset);
7879
7880 emit_move_insn (pic_offset_table_rtx, offset);
7881
7882 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
7883 UNSPEC_LTREL_BASE);
7884 offset = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, offset);
7885
7886 emit_move_insn (pic_offset_table_rtx, offset);
7887 }
7888
7889 insns = get_insns ();
7890 end_sequence ();
7891 return insns;
7892 }
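
/* Rough sketch of what the sequence built above expands to, assuming
   the usual GOT pointer register %r12: on z/Architecture CPUs the move
   of got_symbol typically becomes a single
       larl %r12,_GLOBAL_OFFSET_TABLE_
   while on ESA CPUs the GOT address is formed from a literal-pool
   entry holding the UNSPEC_LTREL_OFFSET of _GLOBAL_OFFSET_TABLE_,
   which is then added to the literal-pool base via UNSPEC_LTREL_BASE.  */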
7893
7894 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
7895 and the change to the stack pointer. */
7896
7897 static void
7898 s390_emit_stack_tie (void)
7899 {
7900 rtx mem = gen_frame_mem (BLKmode,
7901 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
7902
7903 emit_insn (gen_stack_tie (mem));
7904 }
7905
7906 /* Expand the prologue into a bunch of separate insns. */
7907
7908 void
7909 s390_emit_prologue (void)
7910 {
7911 rtx insn, addr;
7912 rtx temp_reg;
7913 int i;
7914 int offset;
7915 int next_fpr = 0;
7916
7917 /* Complete frame layout. */
7918
7919 s390_update_frame_layout ();
7920
7921 /* Annotate all constant pool references to let the scheduler know
7922 they implicitly use the base register. */
7923
7924 push_topmost_sequence ();
7925
7926 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7927 if (INSN_P (insn))
7928 {
7929 annotate_constant_pool_refs (&PATTERN (insn));
7930 df_insn_rescan (insn);
7931 }
7932
7933 pop_topmost_sequence ();
7934
7935 /* Choose the best register to use as a temporary within the prologue.
7936 See below for why TPF must use register 1. */
7937
7938 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
7939 && !current_function_is_leaf
7940 && !TARGET_TPF_PROFILING)
7941 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7942 else
7943 temp_reg = gen_rtx_REG (Pmode, 1);
7944
7945 /* Save call saved gprs. */
7946 if (cfun_frame_layout.first_save_gpr != -1)
7947 {
7948 insn = save_gprs (stack_pointer_rtx,
7949 cfun_frame_layout.gprs_offset +
7950 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
7951 - cfun_frame_layout.first_save_gpr_slot),
7952 cfun_frame_layout.first_save_gpr,
7953 cfun_frame_layout.last_save_gpr);
7954 emit_insn (insn);
7955 }
7956
7957 /* Dummy insn to mark literal pool slot. */
7958
7959 if (cfun->machine->base_reg)
7960 emit_insn (gen_main_pool (cfun->machine->base_reg));
7961
7962 offset = cfun_frame_layout.f0_offset;
7963
7964 /* Save f0 and f2. */
7965 for (i = 0; i < 2; i++)
7966 {
7967 if (cfun_fpr_bit_p (i))
7968 {
7969 save_fpr (stack_pointer_rtx, offset, i + 16);
7970 offset += 8;
7971 }
7972 else if (!TARGET_PACKED_STACK)
7973 offset += 8;
7974 }
7975
7976 /* Save f4 and f6. */
7977 offset = cfun_frame_layout.f4_offset;
7978 for (i = 2; i < 4; i++)
7979 {
7980 if (cfun_fpr_bit_p (i))
7981 {
7982 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
7983 offset += 8;
7984
7985 /* If f4 and f6 are call clobbered they are saved due to stdargs and
7986 therefore are not frame related. */
7987 if (!call_really_used_regs[i + 16])
7988 RTX_FRAME_RELATED_P (insn) = 1;
7989 }
7990 else if (!TARGET_PACKED_STACK)
7991 offset += 8;
7992 }
7993
7994 if (TARGET_PACKED_STACK
7995 && cfun_save_high_fprs_p
7996 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
7997 {
7998 offset = (cfun_frame_layout.f8_offset
7999 + (cfun_frame_layout.high_fprs - 1) * 8);
8000
8001 for (i = 15; i > 7 && offset >= 0; i--)
8002 if (cfun_fpr_bit_p (i))
8003 {
8004 insn = save_fpr (stack_pointer_rtx, offset, i + 16);
8005
8006 RTX_FRAME_RELATED_P (insn) = 1;
8007 offset -= 8;
8008 }
8009 if (offset >= cfun_frame_layout.f8_offset)
8010 next_fpr = i + 16;
8011 }
8012
8013 if (!TARGET_PACKED_STACK)
8014 next_fpr = cfun_save_high_fprs_p ? 31 : 0;
8015
8016 if (flag_stack_usage_info)
8017 current_function_static_stack_size = cfun_frame_layout.frame_size;
8018
8019 /* Decrement stack pointer. */
8020
8021 if (cfun_frame_layout.frame_size > 0)
8022 {
8023 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8024 rtx real_frame_off;
8025
8026 if (s390_stack_size)
8027 {
8028 HOST_WIDE_INT stack_guard;
8029
8030 if (s390_stack_guard)
8031 stack_guard = s390_stack_guard;
8032 else
8033 {
8034 /* If no value for the stack guard is provided, the smallest power of 2
8035 larger than the current frame size is chosen. */
8036 stack_guard = 1;
8037 while (stack_guard < cfun_frame_layout.frame_size)
8038 stack_guard <<= 1;
8039 }
8040
8041 if (cfun_frame_layout.frame_size >= s390_stack_size)
8042 {
8043 warning (0, "frame size of function %qs is %wd"
8044 " bytes exceeding user provided stack limit of "
8045 "%d bytes. "
8046 "An unconditional trap is added.",
8047 current_function_name(), cfun_frame_layout.frame_size,
8048 s390_stack_size);
8049 emit_insn (gen_trap ());
8050 }
8051 else
8052 {
8053 /* stack_guard has to be smaller than s390_stack_size.
8054 Otherwise we would emit an AND with zero which would
8055 not match the test under mask pattern. */
8056 if (stack_guard >= s390_stack_size)
8057 {
8058 warning (0, "frame size of function %qs is %wd"
8059 " bytes which is more than half the stack size. "
8060 "The dynamic check would not be reliable. "
8061 "No check emitted for this function.",
8062 current_function_name(),
8063 cfun_frame_layout.frame_size);
8064 }
8065 else
8066 {
8067 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8068 & ~(stack_guard - 1));
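
/* Worked example of the mask (illustrative values only): with
   -mstack-size=65536 and -mstack-guard=4096 this is
   (65536 - 1) & ~(4096 - 1) = 0xf000, so the conditional trap
   emitted below fires once the stack pointer lies within the
   lowest 4096 bytes of its 64 KB area.  */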
8069
8070 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8071 GEN_INT (stack_check_mask));
8072 if (TARGET_64BIT)
8073 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8074 t, const0_rtx),
8075 t, const0_rtx, const0_rtx));
8076 else
8077 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8078 t, const0_rtx),
8079 t, const0_rtx, const0_rtx));
8080 }
8081 }
8082 }
8083
8084 if (s390_warn_framesize > 0
8085 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8086 warning (0, "frame size of %qs is %wd bytes",
8087 current_function_name (), cfun_frame_layout.frame_size);
8088
8089 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8090 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8091
8092 /* Save incoming stack pointer into temp reg. */
8093 if (TARGET_BACKCHAIN || next_fpr)
8094 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8095
8096 /* Subtract frame size from stack pointer. */
8097
8098 if (DISP_IN_RANGE (INTVAL (frame_off)))
8099 {
8100 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8101 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8102 frame_off));
8103 insn = emit_insn (insn);
8104 }
8105 else
8106 {
8107 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8108 frame_off = force_const_mem (Pmode, frame_off);
8109
8110 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8111 annotate_constant_pool_refs (&PATTERN (insn));
8112 }
8113
8114 RTX_FRAME_RELATED_P (insn) = 1;
8115 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8116 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8117 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8118 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8119 real_frame_off)));
8120
8121 /* Set backchain. */
8122
8123 if (TARGET_BACKCHAIN)
8124 {
8125 if (cfun_frame_layout.backchain_offset)
8126 addr = gen_rtx_MEM (Pmode,
8127 plus_constant (stack_pointer_rtx,
8128 cfun_frame_layout.backchain_offset));
8129 else
8130 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8131 set_mem_alias_set (addr, get_frame_alias_set ());
8132 insn = emit_insn (gen_move_insn (addr, temp_reg));
8133 }
8134
8135 /* If we support non-call exceptions (e.g. for Java),
8136 we need to make sure the backchain pointer is set up
8137 before any possibly trapping memory access. */
8138 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8139 {
8140 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8141 emit_clobber (addr);
8142 }
8143 }
8144
8145 /* Save fprs 8 - 15 (64 bit ABI). */
8146
8147 if (cfun_save_high_fprs_p && next_fpr)
8148 {
8149 /* If the stack might be accessed through a different register
8150 we have to make sure that the stack pointer decrement is not
8151 moved below the use of the stack slots. */
8152 s390_emit_stack_tie ();
8153
8154 insn = emit_insn (gen_add2_insn (temp_reg,
8155 GEN_INT (cfun_frame_layout.f8_offset)));
8156
8157 offset = 0;
8158
8159 for (i = 24; i <= next_fpr; i++)
8160 if (cfun_fpr_bit_p (i - 16))
8161 {
8162 rtx addr = plus_constant (stack_pointer_rtx,
8163 cfun_frame_layout.frame_size
8164 + cfun_frame_layout.f8_offset
8165 + offset);
8166
8167 insn = save_fpr (temp_reg, offset, i);
8168 offset += 8;
8169 RTX_FRAME_RELATED_P (insn) = 1;
8170 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8171 gen_rtx_SET (VOIDmode,
8172 gen_rtx_MEM (DFmode, addr),
8173 gen_rtx_REG (DFmode, i)));
8174 }
8175 }
8176
8177 /* Set frame pointer, if needed. */
8178
8179 if (frame_pointer_needed)
8180 {
8181 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8182 RTX_FRAME_RELATED_P (insn) = 1;
8183 }
8184
8185 /* Set up got pointer, if needed. */
8186
8187 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8188 {
8189 rtx insns = s390_load_got ();
8190
8191 for (insn = insns; insn; insn = NEXT_INSN (insn))
8192 annotate_constant_pool_refs (&PATTERN (insn));
8193
8194 emit_insn (insns);
8195 }
8196
8197 if (TARGET_TPF_PROFILING)
8198 {
8199 /* Generate a BAS instruction to serve as a function
8200 entry intercept to facilitate the use of tracing
8201 algorithms located at the branch target. */
8202 emit_insn (gen_prologue_tpf ());
8203
8204 /* Emit a blockage here so that all code
8205 lies between the profiling mechanisms. */
8206 emit_insn (gen_blockage ());
8207 }
8208 }
8209
8210 /* Expand the epilogue into a bunch of separate insns. */
8211
8212 void
8213 s390_emit_epilogue (bool sibcall)
8214 {
8215 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8216 int area_bottom, area_top, offset = 0;
8217 int next_offset;
8218 rtvec p;
8219 int i;
8220
8221 if (TARGET_TPF_PROFILING)
8222 {
8223
8224 /* Generate a BAS instruction to serve as a function
8225 entry intercept to facilitate the use of tracing
8226 algorithms located at the branch target. */
8227
8228 /* Emit a blockage here so that all code
8229 lies between the profiling mechanisms. */
8230 emit_insn (gen_blockage ());
8231
8232 emit_insn (gen_epilogue_tpf ());
8233 }
8234
8235 /* Check whether to use frame or stack pointer for restore. */
8236
8237 frame_pointer = (frame_pointer_needed
8238 ? hard_frame_pointer_rtx : stack_pointer_rtx);
8239
8240 s390_frame_area (&area_bottom, &area_top);
8241
8242 /* Check whether we can access the register save area.
8243 If not, increment the frame pointer as required. */
8244
8245 if (area_top <= area_bottom)
8246 {
8247 /* Nothing to restore. */
8248 }
8249 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
8250 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8251 {
8252 /* Area is in range. */
8253 offset = cfun_frame_layout.frame_size;
8254 }
8255 else
8256 {
8257 rtx insn, frame_off, cfa;
8258
8259 offset = area_bottom < 0 ? -area_bottom : 0;
8260 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
8261
8262 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
8263 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8264 if (DISP_IN_RANGE (INTVAL (frame_off)))
8265 {
8266 insn = gen_rtx_SET (VOIDmode, frame_pointer,
8267 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
8268 insn = emit_insn (insn);
8269 }
8270 else
8271 {
8272 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8273 frame_off = force_const_mem (Pmode, frame_off);
8274
8275 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
8276 annotate_constant_pool_refs (&PATTERN (insn));
8277 }
8278 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
8279 RTX_FRAME_RELATED_P (insn) = 1;
8280 }
8281
8282 /* Restore call saved fprs. */
8283
8284 if (TARGET_64BIT)
8285 {
8286 if (cfun_save_high_fprs_p)
8287 {
8288 next_offset = cfun_frame_layout.f8_offset;
8289 for (i = 24; i < 32; i++)
8290 {
8291 if (cfun_fpr_bit_p (i - 16))
8292 {
8293 restore_fpr (frame_pointer,
8294 offset + next_offset, i);
8295 cfa_restores
8296 = alloc_reg_note (REG_CFA_RESTORE,
8297 gen_rtx_REG (DFmode, i), cfa_restores);
8298 next_offset += 8;
8299 }
8300 }
8301 }
8302
8303 }
8304 else
8305 {
8306 next_offset = cfun_frame_layout.f4_offset;
8307 for (i = 18; i < 20; i++)
8308 {
8309 if (cfun_fpr_bit_p (i - 16))
8310 {
8311 restore_fpr (frame_pointer,
8312 offset + next_offset, i);
8313 cfa_restores
8314 = alloc_reg_note (REG_CFA_RESTORE,
8315 gen_rtx_REG (DFmode, i), cfa_restores);
8316 next_offset += 8;
8317 }
8318 else if (!TARGET_PACKED_STACK)
8319 next_offset += 8;
8320 }
8321
8322 }
8323
8324 /* Return register. */
8325
8326 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8327
8328 /* Restore call saved gprs. */
8329
8330 if (cfun_frame_layout.first_restore_gpr != -1)
8331 {
8332 rtx insn, addr;
8333 int i;
8334
8335 /* Check for global registers and save them to the stack
8336 locations from which they get restored. */
8337
8338 for (i = cfun_frame_layout.first_restore_gpr;
8339 i <= cfun_frame_layout.last_restore_gpr;
8340 i++)
8341 {
8342 if (global_not_special_regno_p (i))
8343 {
8344 addr = plus_constant (frame_pointer,
8345 offset + cfun_frame_layout.gprs_offset
8346 + (i - cfun_frame_layout.first_save_gpr_slot)
8347 * UNITS_PER_LONG);
8348 addr = gen_rtx_MEM (Pmode, addr);
8349 set_mem_alias_set (addr, get_frame_alias_set ());
8350 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
8351 }
8352 else
8353 cfa_restores
8354 = alloc_reg_note (REG_CFA_RESTORE,
8355 gen_rtx_REG (Pmode, i), cfa_restores);
8356 }
8357
8358 if (! sibcall)
8359 {
8360 /* Fetch the return address from the stack before the load multiple;
8361 this helps scheduling. */
8362
8363 if (cfun_frame_layout.save_return_addr_p
8364 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
8365 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
8366 {
8367 int return_regnum = find_unused_clobbered_reg();
8368 if (!return_regnum)
8369 return_regnum = 4;
8370 return_reg = gen_rtx_REG (Pmode, return_regnum);
8371
8372 addr = plus_constant (frame_pointer,
8373 offset + cfun_frame_layout.gprs_offset
8374 + (RETURN_REGNUM
8375 - cfun_frame_layout.first_save_gpr_slot)
8376 * UNITS_PER_LONG);
8377 addr = gen_rtx_MEM (Pmode, addr);
8378 set_mem_alias_set (addr, get_frame_alias_set ());
8379 emit_move_insn (return_reg, addr);
8380 }
8381 }
8382
8383 insn = restore_gprs (frame_pointer,
8384 offset + cfun_frame_layout.gprs_offset
8385 + (cfun_frame_layout.first_restore_gpr
8386 - cfun_frame_layout.first_save_gpr_slot)
8387 * UNITS_PER_LONG,
8388 cfun_frame_layout.first_restore_gpr,
8389 cfun_frame_layout.last_restore_gpr);
8390 insn = emit_insn (insn);
8391 REG_NOTES (insn) = cfa_restores;
8392 add_reg_note (insn, REG_CFA_DEF_CFA,
8393 plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET));
8394 RTX_FRAME_RELATED_P (insn) = 1;
8395 }
8396
8397 if (! sibcall)
8398 {
8399
8400 /* Return to caller. */
8401
8402 p = rtvec_alloc (2);
8403
8404 RTVEC_ELT (p, 0) = ret_rtx;
8405 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
8406 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
8407 }
8408 }
8409
8410
8411 /* Return the size in bytes of a function argument of
8412 type TYPE and/or mode MODE. At least one of TYPE or
8413 MODE must be specified. */
8414
8415 static int
8416 s390_function_arg_size (enum machine_mode mode, const_tree type)
8417 {
8418 if (type)
8419 return int_size_in_bytes (type);
8420
8421 /* No type info available for some library calls ... */
8422 if (mode != BLKmode)
8423 return GET_MODE_SIZE (mode);
8424
8425 /* If we have neither type nor mode, abort. */
8426 gcc_unreachable ();
8427 }
8428
8429 /* Return true if a function argument of type TYPE and mode MODE
8430 is to be passed in a floating-point register, if available. */
8431
8432 static bool
8433 s390_function_arg_float (enum machine_mode mode, const_tree type)
8434 {
8435 int size = s390_function_arg_size (mode, type);
8436 if (size > 8)
8437 return false;
8438
8439 /* Soft-float changes the ABI: no floating-point registers are used. */
8440 if (TARGET_SOFT_FLOAT)
8441 return false;
8442
8443 /* No type info available for some library calls ... */
8444 if (!type)
8445 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
8446
8447 /* The ABI says that record types with a single member are treated
8448 just like that member would be. */
8449 while (TREE_CODE (type) == RECORD_TYPE)
8450 {
8451 tree field, single = NULL_TREE;
8452
8453 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
8454 {
8455 if (TREE_CODE (field) != FIELD_DECL)
8456 continue;
8457
8458 if (single == NULL_TREE)
8459 single = TREE_TYPE (field);
8460 else
8461 return false;
8462 }
8463
8464 if (single == NULL_TREE)
8465 return false;
8466 else
8467 type = single;
8468 }
8469
8470 return TREE_CODE (type) == REAL_TYPE;
8471 }
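
/* Example of the single-member record rule above (added for
   illustration; the struct names are hypothetical): a type such as
   struct wrap { double d; } -- or a struct containing only such a
   struct -- is passed exactly like a plain double, whereas
   struct pair { double d; int i; } is not passed in a floating-point
   register.  */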
8472
8473 /* Return true if a function argument of type TYPE and mode MODE
8474 is to be passed in an integer register, or a pair of integer
8475 registers, if available. */
8476
8477 static bool
8478 s390_function_arg_integer (enum machine_mode mode, const_tree type)
8479 {
8480 int size = s390_function_arg_size (mode, type);
8481 if (size > 8)
8482 return false;
8483
8484 /* No type info available for some library calls ... */
8485 if (!type)
8486 return GET_MODE_CLASS (mode) == MODE_INT
8487 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
8488
8489 /* We accept small integral (and similar) types. */
8490 if (INTEGRAL_TYPE_P (type)
8491 || POINTER_TYPE_P (type)
8492 || TREE_CODE (type) == NULLPTR_TYPE
8493 || TREE_CODE (type) == OFFSET_TYPE
8494 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
8495 return true;
8496
8497 /* We also accept structs of size 1, 2, 4, 8 that are not
8498 passed in floating-point registers. */
8499 if (AGGREGATE_TYPE_P (type)
8500 && exact_log2 (size) >= 0
8501 && !s390_function_arg_float (mode, type))
8502 return true;
8503
8504 return false;
8505 }
8506
8507 /* Return 1 if a function argument of type TYPE and mode MODE
8508 is to be passed by reference. The ABI specifies that only
8509 structures of size 1, 2, 4, or 8 bytes are passed by value,
8510 all other structures (and complex numbers) are passed by
8511 reference. */
8512
8513 static bool
8514 s390_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
8515 enum machine_mode mode, const_tree type,
8516 bool named ATTRIBUTE_UNUSED)
8517 {
8518 int size = s390_function_arg_size (mode, type);
8519 if (size > 8)
8520 return true;
8521
8522 if (type)
8523 {
8524 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
8525 return 1;
8526
8527 if (TREE_CODE (type) == COMPLEX_TYPE
8528 || TREE_CODE (type) == VECTOR_TYPE)
8529 return 1;
8530 }
8531
8532 return 0;
8533 }
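
/* Illustration of the rule above (hypothetical types, not from this
   file): struct { int i; } (size 4) is passed by value, while
   struct { char c[3]; } (size 3) and struct { char c[16]; } (size 16)
   are passed by reference, as are all complex and vector arguments.  */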
8534
8535 /* Update the data in CUM to advance over an argument of mode MODE and
8536 data type TYPE. (TYPE is null for libcalls where that information
8537 may not be available.) The boolean NAMED specifies whether the
8538 argument is a named argument (as opposed to an unnamed argument
8539 matching an ellipsis). */
8540
8541 static void
8542 s390_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8543 const_tree type, bool named ATTRIBUTE_UNUSED)
8544 {
8545 if (s390_function_arg_float (mode, type))
8546 {
8547 cum->fprs += 1;
8548 }
8549 else if (s390_function_arg_integer (mode, type))
8550 {
8551 int size = s390_function_arg_size (mode, type);
8552 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
8553 }
8554 else
8555 gcc_unreachable ();
8556 }
8557
8558 /* Define where to put the arguments to a function.
8559 Value is zero to push the argument on the stack,
8560 or a hard register in which to store the argument.
8561
8562 MODE is the argument's machine mode.
8563 TYPE is the data type of the argument (as a tree).
8564 This is null for libcalls where that information may
8565 not be available.
8566 CUM is a variable of type CUMULATIVE_ARGS which gives info about
8567 the preceding args and about the function being called.
8568 NAMED is nonzero if this argument is a named parameter
8569 (otherwise it is an extra parameter matching an ellipsis).
8570
8571 On S/390, we use general purpose registers 2 through 6 to
8572 pass integer, pointer, and certain structure arguments, and
8573 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
8574 to pass floating point arguments. All remaining arguments
8575 are pushed to the stack. */
8576
8577 static rtx
8578 s390_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8579 const_tree type, bool named ATTRIBUTE_UNUSED)
8580 {
8581 if (s390_function_arg_float (mode, type))
8582 {
8583 if (cum->fprs + 1 > FP_ARG_NUM_REG)
8584 return 0;
8585 else
8586 return gen_rtx_REG (mode, cum->fprs + 16);
8587 }
8588 else if (s390_function_arg_integer (mode, type))
8589 {
8590 int size = s390_function_arg_size (mode, type);
8591 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8592
8593 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
8594 return 0;
8595 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
8596 return gen_rtx_REG (mode, cum->gprs + 2);
8597 else if (n_gprs == 2)
8598 {
8599 rtvec p = rtvec_alloc (2);
8600
8601 RTVEC_ELT (p, 0)
8602 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
8603 const0_rtx);
8604 RTVEC_ELT (p, 1)
8605 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
8606 GEN_INT (4));
8607
8608 return gen_rtx_PARALLEL (mode, p);
8609 }
8610 }
8611
8612 /* After the real arguments, expand_call calls us once again
8613 with a void_type_node type. Whatever we return here is
8614 passed as operand 2 to the call expanders.
8615
8616 We don't need this feature ... */
8617 else if (type == void_type_node)
8618 return const0_rtx;
8619
8620 gcc_unreachable ();
8621 }
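
/* Hypothetical 64-bit example of the assignment above (illustration
   only): for f (int a, double b, long c), A is passed in %r2, B in
   %f0 and C in %r3; a second double would go to %f2, and arguments
   beyond the available GPRs/FPRs are pushed to the stack.  On 31-bit,
   a DImode argument spanning two GPRs is described by the PARALLEL of
   two SImode pieces built above.  */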
8622
8623 /* Return true if return values of type TYPE should be returned
8624 in a memory buffer whose address is passed by the caller as
8625 hidden first argument. */
8626
8627 static bool
8628 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
8629 {
8630 /* We accept small integral (and similar) types. */
8631 if (INTEGRAL_TYPE_P (type)
8632 || POINTER_TYPE_P (type)
8633 || TREE_CODE (type) == OFFSET_TYPE
8634 || TREE_CODE (type) == REAL_TYPE)
8635 return int_size_in_bytes (type) > 8;
8636
8637 /* Aggregates and similar constructs are always returned
8638 in memory. */
8639 if (AGGREGATE_TYPE_P (type)
8640 || TREE_CODE (type) == COMPLEX_TYPE
8641 || TREE_CODE (type) == VECTOR_TYPE)
8642 return true;
8643
8644 /* ??? We get called on all sorts of random stuff from
8645 aggregate_value_p. We can't abort, but it's not clear
8646 what's safe to return. Pretend it's a struct I guess. */
8647 return true;
8648 }
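
/* Consequences of the rules above (informational): an int, a pointer
   or a double is returned in registers, an integral or floating
   scalar wider than 8 bytes is returned in memory, and any struct,
   union, complex or vector value goes through the hidden buffer
   regardless of its size.  */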
8649
8650 /* Function arguments and return values are promoted to word size. */
8651
8652 static enum machine_mode
8653 s390_promote_function_mode (const_tree type, enum machine_mode mode,
8654 int *punsignedp,
8655 const_tree fntype ATTRIBUTE_UNUSED,
8656 int for_return ATTRIBUTE_UNUSED)
8657 {
8658 if (INTEGRAL_MODE_P (mode)
8659 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
8660 {
8661 if (type != NULL_TREE && POINTER_TYPE_P (type))
8662 *punsignedp = POINTERS_EXTEND_UNSIGNED;
8663 return Pmode;
8664 }
8665
8666 return mode;
8667 }
8668
8669 /* Define where to return a (scalar) value of type RET_TYPE.
8670 If RET_TYPE is null, define where to return a (scalar)
8671 value of mode MODE from a libcall. */
8672
8673 static rtx
8674 s390_function_and_libcall_value (enum machine_mode mode,
8675 const_tree ret_type,
8676 const_tree fntype_or_decl,
8677 bool outgoing ATTRIBUTE_UNUSED)
8678 {
8679 /* For normal functions perform the promotion as
8680 promote_function_mode would do. */
8681 if (ret_type)
8682 {
8683 int unsignedp = TYPE_UNSIGNED (ret_type);
8684 mode = promote_function_mode (ret_type, mode, &unsignedp,
8685 fntype_or_decl, 1);
8686 }
8687
8688 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
8689 gcc_assert (GET_MODE_SIZE (mode) <= 8);
8690
8691 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
8692 return gen_rtx_REG (mode, 16);
8693 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
8694 || UNITS_PER_LONG == UNITS_PER_WORD)
8695 return gen_rtx_REG (mode, 2);
8696 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
8697 {
8698 /* This case is triggered when returning a 64 bit value with
8699 -m31 -mzarch. Although the value would fit into a single
8700 register it has to be forced into a 32 bit register pair in
8701 order to match the ABI. */
8702 rtvec p = rtvec_alloc (2);
8703
8704 RTVEC_ELT (p, 0)
8705 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
8706 RTVEC_ELT (p, 1)
8707 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
8708
8709 return gen_rtx_PARALLEL (mode, p);
8710 }
8711
8712 gcc_unreachable ();
8713 }
8714
8715 /* Define where to return a scalar return value of type RET_TYPE. */
8716
8717 static rtx
8718 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
8719 bool outgoing)
8720 {
8721 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
8722 fn_decl_or_type, outgoing);
8723 }
8724
8725 /* Define where to return a scalar libcall return value of mode
8726 MODE. */
8727
8728 static rtx
8729 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8730 {
8731 return s390_function_and_libcall_value (mode, NULL_TREE,
8732 NULL_TREE, true);
8733 }
8734
8735
8736 /* Create and return the va_list datatype.
8737
8738 On S/390, va_list is an array type equivalent to
8739
8740 typedef struct __va_list_tag
8741 {
8742 long __gpr;
8743 long __fpr;
8744 void *__overflow_arg_area;
8745 void *__reg_save_area;
8746 } va_list[1];
8747
8748 where __gpr and __fpr hold the number of general purpose
8749 or floating point arguments used up to now, respectively,
8750 __overflow_arg_area points to the stack location of the
8751 next argument passed on the stack, and __reg_save_area
8752 always points to the start of the register area in the
8753 call frame of the current function. The function prologue
8754 saves all registers used for argument passing into this
8755 area if the function uses variable arguments. */
8756
8757 static tree
8758 s390_build_builtin_va_list (void)
8759 {
8760 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
8761
8762 record = lang_hooks.types.make_type (RECORD_TYPE);
8763
8764 type_decl =
8765 build_decl (BUILTINS_LOCATION,
8766 TYPE_DECL, get_identifier ("__va_list_tag"), record);
8767
8768 f_gpr = build_decl (BUILTINS_LOCATION,
8769 FIELD_DECL, get_identifier ("__gpr"),
8770 long_integer_type_node);
8771 f_fpr = build_decl (BUILTINS_LOCATION,
8772 FIELD_DECL, get_identifier ("__fpr"),
8773 long_integer_type_node);
8774 f_ovf = build_decl (BUILTINS_LOCATION,
8775 FIELD_DECL, get_identifier ("__overflow_arg_area"),
8776 ptr_type_node);
8777 f_sav = build_decl (BUILTINS_LOCATION,
8778 FIELD_DECL, get_identifier ("__reg_save_area"),
8779 ptr_type_node);
8780
8781 va_list_gpr_counter_field = f_gpr;
8782 va_list_fpr_counter_field = f_fpr;
8783
8784 DECL_FIELD_CONTEXT (f_gpr) = record;
8785 DECL_FIELD_CONTEXT (f_fpr) = record;
8786 DECL_FIELD_CONTEXT (f_ovf) = record;
8787 DECL_FIELD_CONTEXT (f_sav) = record;
8788
8789 TYPE_STUB_DECL (record) = type_decl;
8790 TYPE_NAME (record) = type_decl;
8791 TYPE_FIELDS (record) = f_gpr;
8792 DECL_CHAIN (f_gpr) = f_fpr;
8793 DECL_CHAIN (f_fpr) = f_ovf;
8794 DECL_CHAIN (f_ovf) = f_sav;
8795
8796 layout_type (record);
8797
8798 /* The correct type is an array type of one element. */
8799 return build_array_type (record, build_index_type (size_zero_node));
8800 }
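
/* With the usual type sizes this gives a 32-byte __va_list_tag on
   64-bit targets (two long counters plus two pointers) and a 16-byte
   one on 31-bit targets; since va_list is a one-element array of the
   tag, it decays to a pointer when passed between functions.  */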
8801
8802 /* Implement va_start by filling the va_list structure VALIST.
8803 STDARG_P is always true, and ignored.
8804 NEXTARG points to the first anonymous stack argument.
8805
8806 The following global variables are used to initialize
8807 the va_list structure:
8808
8809 crtl->args.info:
8810 holds number of gprs and fprs used for named arguments.
8811 crtl->args.arg_offset_rtx:
8812 holds the offset of the first anonymous stack argument
8813 (relative to the virtual arg pointer). */
8814
8815 static void
8816 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
8817 {
8818 HOST_WIDE_INT n_gpr, n_fpr;
8819 int off;
8820 tree f_gpr, f_fpr, f_ovf, f_sav;
8821 tree gpr, fpr, ovf, sav, t;
8822
8823 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8824 f_fpr = DECL_CHAIN (f_gpr);
8825 f_ovf = DECL_CHAIN (f_fpr);
8826 f_sav = DECL_CHAIN (f_ovf);
8827
8828 valist = build_simple_mem_ref (valist);
8829 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8830 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8831 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8832 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8833
8834 /* Count number of gp and fp argument registers used. */
8835
8836 n_gpr = crtl->args.info.gprs;
8837 n_fpr = crtl->args.info.fprs;
8838
8839 if (cfun->va_list_gpr_size)
8840 {
8841 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
8842 build_int_cst (NULL_TREE, n_gpr));
8843 TREE_SIDE_EFFECTS (t) = 1;
8844 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8845 }
8846
8847 if (cfun->va_list_fpr_size)
8848 {
8849 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
8850 build_int_cst (NULL_TREE, n_fpr));
8851 TREE_SIDE_EFFECTS (t) = 1;
8852 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8853 }
8854
8855 /* Find the overflow area. */
8856 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
8857 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
8858 {
8859 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
8860
8861 off = INTVAL (crtl->args.arg_offset_rtx);
8862 off = off < 0 ? 0 : off;
8863 if (TARGET_DEBUG_ARG)
8864 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
8865 (int)n_gpr, (int)n_fpr, off);
8866
8867 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t, size_int (off));
8868
8869 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
8870 TREE_SIDE_EFFECTS (t) = 1;
8871 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8872 }
8873
8874 /* Find the register save area. */
8875 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
8876 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
8877 {
8878 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
8879 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
8880 size_int (-RETURN_REGNUM * UNITS_PER_LONG));
8881
8882 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
8883 TREE_SIDE_EFFECTS (t) = 1;
8884 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
8885 }
8886 }
8887
8888 /* Implement va_arg by updating the va_list structure
8889 VALIST as required to retrieve an argument of type
8890 TYPE, and returning that argument.
8891
8892 Generates code equivalent to:
8893
8894 if (integral value) {
8895 if (size <= 4 && args.gpr < 5 ||
8896 size > 4 && args.gpr < 4 )
8897 ret = args.reg_save_area[args.gpr+8]
8898 else
8899 ret = *args.overflow_arg_area++;
8900 } else if (float value) {
8901 if (args.fgpr < 2)
8902 ret = args.reg_save_area[args.fpr+64]
8903 else
8904 ret = *args.overflow_arg_area++;
8905 } else if (aggregate value) {
8906 if (args.gpr < 5)
8907 ret = *args.reg_save_area[args.gpr]
8908 else
8909 ret = **args.overflow_arg_area++;
8910 } */
8911
8912 static tree
8913 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
8914 gimple_seq *post_p ATTRIBUTE_UNUSED)
8915 {
8916 tree f_gpr, f_fpr, f_ovf, f_sav;
8917 tree gpr, fpr, ovf, sav, reg, t, u;
8918 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
8919 tree lab_false, lab_over, addr;
8920
8921 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
8922 f_fpr = DECL_CHAIN (f_gpr);
8923 f_ovf = DECL_CHAIN (f_fpr);
8924 f_sav = DECL_CHAIN (f_ovf);
8925
8926 valist = build_va_arg_indirect_ref (valist);
8927 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
8928 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
8929 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
8930
8931 /* The tree for args* cannot be shared between gpr/fpr and ovf since
8932 both appear on a lhs. */
8933 valist = unshare_expr (valist);
8934 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
8935
8936 size = int_size_in_bytes (type);
8937
8938 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
8939 {
8940 if (TARGET_DEBUG_ARG)
8941 {
8942 fprintf (stderr, "va_arg: aggregate type");
8943 debug_tree (type);
8944 }
8945
8946 /* Aggregates are passed by reference. */
8947 indirect_p = 1;
8948 reg = gpr;
8949 n_reg = 1;
8950
8951 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8952 will be added by s390_frame_info because for va_args an even number
8953 of GPRs always has to be saved (r15-r2 = 14 regs). */
8954 sav_ofs = 2 * UNITS_PER_LONG;
8955 sav_scale = UNITS_PER_LONG;
8956 size = UNITS_PER_LONG;
8957 max_reg = GP_ARG_NUM_REG - n_reg;
8958 }
8959 else if (s390_function_arg_float (TYPE_MODE (type), type))
8960 {
8961 if (TARGET_DEBUG_ARG)
8962 {
8963 fprintf (stderr, "va_arg: float type");
8964 debug_tree (type);
8965 }
8966
8967 /* FP args go in FP registers, if present. */
8968 indirect_p = 0;
8969 reg = fpr;
8970 n_reg = 1;
8971 sav_ofs = 16 * UNITS_PER_LONG;
8972 sav_scale = 8;
8973 max_reg = FP_ARG_NUM_REG - n_reg;
8974 }
8975 else
8976 {
8977 if (TARGET_DEBUG_ARG)
8978 {
8979 fprintf (stderr, "va_arg: other type");
8980 debug_tree (type);
8981 }
8982
8983 /* Otherwise into GP registers. */
8984 indirect_p = 0;
8985 reg = gpr;
8986 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
8987
8988 /* Kernel stack layout on 31 bit: it is assumed here that no padding
8989 will be added by s390_frame_info because for va_args an even number
8990 of GPRs always has to be saved (r15-r2 = 14 regs). */
8991 sav_ofs = 2 * UNITS_PER_LONG;
8992
8993 if (size < UNITS_PER_LONG)
8994 sav_ofs += UNITS_PER_LONG - size;
8995
8996 sav_scale = UNITS_PER_LONG;
8997 max_reg = GP_ARG_NUM_REG - n_reg;
8998 }
8999
9000 /* Pull the value out of the saved registers ... */
9001
9002 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9003 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9004 addr = create_tmp_var (ptr_type_node, "addr");
9005
9006 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9007 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9008 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9009 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9010 gimplify_and_add (t, pre_p);
9011
9012 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
9013 size_int (sav_ofs));
9014 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9015 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9016 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
9017
9018 gimplify_assign (addr, t, pre_p);
9019
9020 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9021
9022 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9023
9024
9025 /* ... Otherwise out of the overflow area. */
9026
9027 t = ovf;
9028 if (size < UNITS_PER_LONG)
9029 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9030 size_int (UNITS_PER_LONG - size));
9031
9032 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9033
9034 gimplify_assign (addr, t, pre_p);
9035
9036 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
9037 size_int (size));
9038 gimplify_assign (ovf, t, pre_p);
9039
9040 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9041
9042
9043 /* Increment register save count. */
9044
9045 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9046 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9047 gimplify_and_add (u, pre_p);
9048
9049 if (indirect_p)
9050 {
9051 t = build_pointer_type_for_mode (build_pointer_type (type),
9052 ptr_mode, true);
9053 addr = fold_convert (t, addr);
9054 addr = build_va_arg_indirect_ref (addr);
9055 }
9056 else
9057 {
9058 t = build_pointer_type_for_mode (type, ptr_mode, true);
9059 addr = fold_convert (t, addr);
9060 }
9061
9062 return build_va_arg_indirect_ref (addr);
9063 }
9064
9065
9066 /* Builtins. */
9067
9068 enum s390_builtin
9069 {
9070 S390_BUILTIN_THREAD_POINTER,
9071 S390_BUILTIN_SET_THREAD_POINTER,
9072
9073 S390_BUILTIN_max
9074 };
9075
9076 static enum insn_code const code_for_builtin_64[S390_BUILTIN_max] = {
9077 CODE_FOR_get_tp_64,
9078 CODE_FOR_set_tp_64
9079 };
9080
9081 static enum insn_code const code_for_builtin_31[S390_BUILTIN_max] = {
9082 CODE_FOR_get_tp_31,
9083 CODE_FOR_set_tp_31
9084 };
9085
9086 static void
9087 s390_init_builtins (void)
9088 {
9089 tree ftype;
9090
9091 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
9092 add_builtin_function ("__builtin_thread_pointer", ftype,
9093 S390_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
9094 NULL, NULL_TREE);
9095
9096 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
9097 add_builtin_function ("__builtin_set_thread_pointer", ftype,
9098 S390_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
9099 NULL, NULL_TREE);
9100 }
9101
9102 /* Expand an expression EXP that calls a built-in function,
9103 with result going to TARGET if that's convenient
9104 (and in mode MODE if that's convenient).
9105 SUBTARGET may be used as the target for computing one of EXP's operands.
9106 IGNORE is nonzero if the value is to be ignored. */
9107
9108 static rtx
9109 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9110 enum machine_mode mode ATTRIBUTE_UNUSED,
9111 int ignore ATTRIBUTE_UNUSED)
9112 {
9113 #define MAX_ARGS 2
9114
9115 enum insn_code const *code_for_builtin =
9116 TARGET_64BIT ? code_for_builtin_64 : code_for_builtin_31;
9117
9118 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9119 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9120 enum insn_code icode;
9121 rtx op[MAX_ARGS], pat;
9122 int arity;
9123 bool nonvoid;
9124 tree arg;
9125 call_expr_arg_iterator iter;
9126
9127 if (fcode >= S390_BUILTIN_max)
9128 internal_error ("bad builtin fcode");
9129 icode = code_for_builtin[fcode];
9130 if (icode == 0)
9131 internal_error ("bad builtin fcode");
9132
9133 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9134
9135 arity = 0;
9136 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9137 {
9138 const struct insn_operand_data *insn_op;
9139
9140 if (arg == error_mark_node)
9141 return NULL_RTX;
9142 if (arity > MAX_ARGS)
9143 return NULL_RTX;
9144
9145 insn_op = &insn_data[icode].operand[arity + nonvoid];
9146
9147 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
9148
9149 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
9150 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
9151 arity++;
9152 }
9153
9154 if (nonvoid)
9155 {
9156 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9157 if (!target
9158 || GET_MODE (target) != tmode
9159 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
9160 target = gen_reg_rtx (tmode);
9161 }
9162
9163 switch (arity)
9164 {
9165 case 0:
9166 pat = GEN_FCN (icode) (target);
9167 break;
9168 case 1:
9169 if (nonvoid)
9170 pat = GEN_FCN (icode) (target, op[0]);
9171 else
9172 pat = GEN_FCN (icode) (op[0]);
9173 break;
9174 case 2:
9175 pat = GEN_FCN (icode) (target, op[0], op[1]);
9176 break;
9177 default:
9178 gcc_unreachable ();
9179 }
9180 if (!pat)
9181 return NULL_RTX;
9182 emit_insn (pat);
9183
9184 if (nonvoid)
9185 return target;
9186 else
9187 return const0_rtx;
9188 }
9189
9190
9191 /* Output assembly code for the trampoline template to
9192 stdio stream FILE.
9193
9194 On S/390, we use gpr 1 internally in the trampoline code;
9195 gpr 0 is used to hold the static chain. */
9196
9197 static void
9198 s390_asm_trampoline_template (FILE *file)
9199 {
9200 rtx op[2];
9201 op[0] = gen_rtx_REG (Pmode, 0);
9202 op[1] = gen_rtx_REG (Pmode, 1);
9203
9204 if (TARGET_64BIT)
9205 {
9206 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9207 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
9208 output_asm_insn ("br\t%1", op); /* 2 byte */
9209 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
9210 }
9211 else
9212 {
9213 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
9214 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
9215 output_asm_insn ("br\t%1", op); /* 2 byte */
9216 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
9217 }
9218 }
9219
9220 /* Emit RTL insns to initialize the variable parts of a trampoline.
9221 FNADDR is an RTX for the address of the function's pure code.
9222 CXT is an RTX for the static chain value for the function. */
9223
9224 static void
9225 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
9226 {
9227 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
9228 rtx mem;
9229
9230 emit_block_move (m_tramp, assemble_trampoline_template (),
9231 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
9232
9233 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
9234 emit_move_insn (mem, cxt);
9235 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
9236 emit_move_insn (mem, fnaddr);
9237 }
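
/* How the pieces fit together (derived from the template and offsets
   above): the copied code is followed by two pointer-sized data slots,
   the static chain at offset 2 * UNITS_PER_LONG and the target address
   at offset 3 * UNITS_PER_LONG.  At run time "basr %r1,0" makes %r1
   point just past itself, so "lmg %r0,%r1,14(%r1)" (or
   "lm %r0,%r1,6(%r1)" on 31-bit) loads exactly those two slots: the
   static chain ends up in %r0, the target address in %r1, and
   "br %r1" enters the nested function.  */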
9238
9239 /* Output assembler code to FILE to increment profiler label # LABELNO
9240 for profiling a function entry. */
9241
9242 void
9243 s390_function_profiler (FILE *file, int labelno)
9244 {
9245 rtx op[7];
9246
9247 char label[128];
9248 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
9249
9250 fprintf (file, "# function profiler \n");
9251
9252 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
9253 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
9254 op[1] = gen_rtx_MEM (Pmode, plus_constant (op[1], UNITS_PER_LONG));
9255
9256 op[2] = gen_rtx_REG (Pmode, 1);
9257 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
9258 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
9259
9260 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
9261 if (flag_pic)
9262 {
9263 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
9264 op[4] = gen_rtx_CONST (Pmode, op[4]);
9265 }
9266
9267 if (TARGET_64BIT)
9268 {
9269 output_asm_insn ("stg\t%0,%1", op);
9270 output_asm_insn ("larl\t%2,%3", op);
9271 output_asm_insn ("brasl\t%0,%4", op);
9272 output_asm_insn ("lg\t%0,%1", op);
9273 }
9274 else if (!flag_pic)
9275 {
9276 op[6] = gen_label_rtx ();
9277
9278 output_asm_insn ("st\t%0,%1", op);
9279 output_asm_insn ("bras\t%2,%l6", op);
9280 output_asm_insn (".long\t%4", op);
9281 output_asm_insn (".long\t%3", op);
9282 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9283 output_asm_insn ("l\t%0,0(%2)", op);
9284 output_asm_insn ("l\t%2,4(%2)", op);
9285 output_asm_insn ("basr\t%0,%0", op);
9286 output_asm_insn ("l\t%0,%1", op);
9287 }
9288 else
9289 {
9290 op[5] = gen_label_rtx ();
9291 op[6] = gen_label_rtx ();
9292
9293 output_asm_insn ("st\t%0,%1", op);
9294 output_asm_insn ("bras\t%2,%l6", op);
9295 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
9296 output_asm_insn (".long\t%4-%l5", op);
9297 output_asm_insn (".long\t%3-%l5", op);
9298 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
9299 output_asm_insn ("lr\t%0,%2", op);
9300 output_asm_insn ("a\t%0,0(%2)", op);
9301 output_asm_insn ("a\t%2,4(%2)", op);
9302 output_asm_insn ("basr\t%0,%0", op);
9303 output_asm_insn ("l\t%0,%1", op);
9304 }
9305 }
9306
9307 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
9308 into its SYMBOL_REF_FLAGS. */
9309
9310 static void
9311 s390_encode_section_info (tree decl, rtx rtl, int first)
9312 {
9313 default_encode_section_info (decl, rtl, first);
9314
9315 if (TREE_CODE (decl) == VAR_DECL)
9316 {
9317 /* If a variable has a forced alignment to < 2 bytes, mark it
9318 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as a LARL
9319 operand. */
9320 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
9321 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
9322 if (!DECL_SIZE (decl)
9323 || !DECL_ALIGN (decl)
9324 || !host_integerp (DECL_SIZE (decl), 0)
9325 || (DECL_ALIGN (decl) <= 64
9326 && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0)))
9327 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9328 }
9329
9330 /* Literal pool references don't have a decl so they are handled
9331 differently here. We rely on the information in the MEM_ALIGN
9332 entry to decide upon natural alignment. */
9333 if (MEM_P (rtl)
9334 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
9335 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
9336 && (MEM_ALIGN (rtl) == 0
9337 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
9338 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
9339 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
9340 }
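
/* Background for the flags above (informational): LARL computes a
   PC-relative address in halfword steps and thus can only address
   2-byte aligned objects, so e.g. a char variable explicitly declared
   with __attribute__ ((aligned (1))) gets SYMBOL_FLAG_ALIGN1; objects
   whose alignment falls short of their size are marked
   SYMBOL_FLAG_NOT_NATURALLY_ALIGNED so later code can avoid using
   relative-addressing memory instructions on them.  */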
9341
9342 /* Output thunk to FILE that implements a C++ virtual function call (with
9343 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
9344 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
9345 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
9346 relative to the resulting this pointer. */
9347
9348 static void
9349 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9350 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9351 tree function)
9352 {
9353 rtx op[10];
9354 int nonlocal = 0;
9355
9356 /* Make sure unwind info is emitted for the thunk if needed. */
9357 final_start_function (emit_barrier (), file, 1);
9358
9359 /* Operand 0 is the target function. */
9360 op[0] = XEXP (DECL_RTL (function), 0);
9361 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
9362 {
9363 nonlocal = 1;
9364 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
9365 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
9366 op[0] = gen_rtx_CONST (Pmode, op[0]);
9367 }
9368
9369 /* Operand 1 is the 'this' pointer. */
9370 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9371 op[1] = gen_rtx_REG (Pmode, 3);
9372 else
9373 op[1] = gen_rtx_REG (Pmode, 2);
9374
9375 /* Operand 2 is the delta. */
9376 op[2] = GEN_INT (delta);
9377
9378 /* Operand 3 is the vcall_offset. */
9379 op[3] = GEN_INT (vcall_offset);
9380
9381 /* Operand 4 is the temporary register. */
9382 op[4] = gen_rtx_REG (Pmode, 1);
9383
9384 /* Operands 5 to 8 can be used as labels. */
9385 op[5] = NULL_RTX;
9386 op[6] = NULL_RTX;
9387 op[7] = NULL_RTX;
9388 op[8] = NULL_RTX;
9389
9390 /* Operand 9 can be used for temporary register. */
9391 op[9] = NULL_RTX;
9392
9393 /* Generate code. */
9394 if (TARGET_64BIT)
9395 {
9396 /* Set up the literal pool pointer if required. */
9397 if ((!DISP_IN_RANGE (delta)
9398 && !CONST_OK_FOR_K (delta)
9399 && !CONST_OK_FOR_Os (delta))
9400 || (!DISP_IN_RANGE (vcall_offset)
9401 && !CONST_OK_FOR_K (vcall_offset)
9402 && !CONST_OK_FOR_Os (vcall_offset)))
9403 {
9404 op[5] = gen_label_rtx ();
9405 output_asm_insn ("larl\t%4,%5", op);
9406 }
9407
9408 /* Add DELTA to this pointer. */
9409 if (delta)
9410 {
9411 if (CONST_OK_FOR_J (delta))
9412 output_asm_insn ("la\t%1,%2(%1)", op);
9413 else if (DISP_IN_RANGE (delta))
9414 output_asm_insn ("lay\t%1,%2(%1)", op);
9415 else if (CONST_OK_FOR_K (delta))
9416 output_asm_insn ("aghi\t%1,%2", op);
9417 else if (CONST_OK_FOR_Os (delta))
9418 output_asm_insn ("agfi\t%1,%2", op);
9419 else
9420 {
9421 op[6] = gen_label_rtx ();
9422 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
9423 }
9424 }
9425
9426 /* Perform vcall adjustment. */
9427 if (vcall_offset)
9428 {
9429 if (DISP_IN_RANGE (vcall_offset))
9430 {
9431 output_asm_insn ("lg\t%4,0(%1)", op);
9432 output_asm_insn ("ag\t%1,%3(%4)", op);
9433 }
9434 else if (CONST_OK_FOR_K (vcall_offset))
9435 {
9436 output_asm_insn ("lghi\t%4,%3", op);
9437 output_asm_insn ("ag\t%4,0(%1)", op);
9438 output_asm_insn ("ag\t%1,0(%4)", op);
9439 }
9440 else if (CONST_OK_FOR_Os (vcall_offset))
9441 {
9442 output_asm_insn ("lgfi\t%4,%3", op);
9443 output_asm_insn ("ag\t%4,0(%1)", op);
9444 output_asm_insn ("ag\t%1,0(%4)", op);
9445 }
9446 else
9447 {
9448 op[7] = gen_label_rtx ();
9449 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
9450 output_asm_insn ("ag\t%4,0(%1)", op);
9451 output_asm_insn ("ag\t%1,0(%4)", op);
9452 }
9453 }
9454
9455 /* Jump to target. */
9456 output_asm_insn ("jg\t%0", op);
9457
9458 /* Output literal pool if required. */
9459 if (op[5])
9460 {
9461 output_asm_insn (".align\t4", op);
9462 targetm.asm_out.internal_label (file, "L",
9463 CODE_LABEL_NUMBER (op[5]));
9464 }
9465 if (op[6])
9466 {
9467 targetm.asm_out.internal_label (file, "L",
9468 CODE_LABEL_NUMBER (op[6]));
9469 output_asm_insn (".long\t%2", op);
9470 }
9471 if (op[7])
9472 {
9473 targetm.asm_out.internal_label (file, "L",
9474 CODE_LABEL_NUMBER (op[7]));
9475 output_asm_insn (".long\t%3", op);
9476 }
9477 }
9478 else
9479 {
9480 /* Set up the base pointer if required. */
9481 if (!vcall_offset
9482 || (!DISP_IN_RANGE (delta)
9483 && !CONST_OK_FOR_K (delta)
9484 && !CONST_OK_FOR_Os (delta))
9485 || (!DISP_IN_RANGE (delta)
9486 && !CONST_OK_FOR_K (vcall_offset)
9487 && !CONST_OK_FOR_Os (vcall_offset)))
9488 {
9489 op[5] = gen_label_rtx ();
9490 output_asm_insn ("basr\t%4,0", op);
9491 targetm.asm_out.internal_label (file, "L",
9492 CODE_LABEL_NUMBER (op[5]));
9493 }
9494
9495 /* Add DELTA to this pointer. */
9496 if (delta)
9497 {
9498 if (CONST_OK_FOR_J (delta))
9499 output_asm_insn ("la\t%1,%2(%1)", op);
9500 else if (DISP_IN_RANGE (delta))
9501 output_asm_insn ("lay\t%1,%2(%1)", op);
9502 else if (CONST_OK_FOR_K (delta))
9503 output_asm_insn ("ahi\t%1,%2", op);
9504 else if (CONST_OK_FOR_Os (delta))
9505 output_asm_insn ("afi\t%1,%2", op);
9506 else
9507 {
9508 op[6] = gen_label_rtx ();
9509 output_asm_insn ("a\t%1,%6-%5(%4)", op);
9510 }
9511 }
9512
9513 /* Perform vcall adjustment. */
9514 if (vcall_offset)
9515 {
9516 if (CONST_OK_FOR_J (vcall_offset))
9517 {
9518 output_asm_insn ("l\t%4,0(%1)", op);
9519 output_asm_insn ("a\t%1,%3(%4)", op);
9520 }
9521 else if (DISP_IN_RANGE (vcall_offset))
9522 {
9523 output_asm_insn ("l\t%4,0(%1)", op);
9524 output_asm_insn ("ay\t%1,%3(%4)", op);
9525 }
9526 else if (CONST_OK_FOR_K (vcall_offset))
9527 {
9528 output_asm_insn ("lhi\t%4,%3", op);
9529 output_asm_insn ("a\t%4,0(%1)", op);
9530 output_asm_insn ("a\t%1,0(%4)", op);
9531 }
9532 else if (CONST_OK_FOR_Os (vcall_offset))
9533 {
9534 output_asm_insn ("iilf\t%4,%3", op);
9535 output_asm_insn ("a\t%4,0(%1)", op);
9536 output_asm_insn ("a\t%1,0(%4)", op);
9537 }
9538 else
9539 {
9540 op[7] = gen_label_rtx ();
9541 output_asm_insn ("l\t%4,%7-%5(%4)", op);
9542 output_asm_insn ("a\t%4,0(%1)", op);
9543 output_asm_insn ("a\t%1,0(%4)", op);
9544 }
9545
9546 /* We had to clobber the base pointer register.
9547 Re-setup the base pointer (with a different base). */
9548 op[5] = gen_label_rtx ();
9549 output_asm_insn ("basr\t%4,0", op);
9550 targetm.asm_out.internal_label (file, "L",
9551 CODE_LABEL_NUMBER (op[5]));
9552 }
9553
9554 /* Jump to target. */
9555 op[8] = gen_label_rtx ();
9556
9557 if (!flag_pic)
9558 output_asm_insn ("l\t%4,%8-%5(%4)", op);
9559 else if (!nonlocal)
9560 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9561 /* We cannot call through .plt, since .plt requires %r12 loaded. */
9562 else if (flag_pic == 1)
9563 {
9564 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9565 output_asm_insn ("l\t%4,%0(%4)", op);
9566 }
9567 else if (flag_pic == 2)
9568 {
9569 op[9] = gen_rtx_REG (Pmode, 0);
9570 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
9571 output_asm_insn ("a\t%4,%8-%5(%4)", op);
9572 output_asm_insn ("ar\t%4,%9", op);
9573 output_asm_insn ("l\t%4,0(%4)", op);
9574 }
9575
9576 output_asm_insn ("br\t%4", op);
9577
9578 /* Output literal pool. */
9579 output_asm_insn (".align\t4", op);
9580
9581 if (nonlocal && flag_pic == 2)
9582 output_asm_insn (".long\t%0", op);
9583 if (nonlocal)
9584 {
9585 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9586 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
9587 }
9588
9589 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
9590 if (!flag_pic)
9591 output_asm_insn (".long\t%0", op);
9592 else
9593 output_asm_insn (".long\t%0-%5", op);
9594
9595 if (op[6])
9596 {
9597 targetm.asm_out.internal_label (file, "L",
9598 CODE_LABEL_NUMBER (op[6]));
9599 output_asm_insn (".long\t%2", op);
9600 }
9601 if (op[7])
9602 {
9603 targetm.asm_out.internal_label (file, "L",
9604 CODE_LABEL_NUMBER (op[7]));
9605 output_asm_insn (".long\t%3", op);
9606 }
9607 }
9608 final_end_function ();
9609 }
9610
9611 static bool
9612 s390_valid_pointer_mode (enum machine_mode mode)
9613 {
9614 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9615 }
9616
9617 /* Checks whether the given CALL_EXPR would use a call-saved
9618 register. This is used to decide whether sibling call
9619 optimization could be performed on the respective function
9620 call. */
9621
9622 static bool
9623 s390_call_saved_register_used (tree call_expr)
9624 {
9625 CUMULATIVE_ARGS cum;
9626 tree parameter;
9627 enum machine_mode mode;
9628 tree type;
9629 rtx parm_rtx;
9630 int reg, i;
9631
9632 INIT_CUMULATIVE_ARGS (cum, NULL, NULL, 0, 0);
9633
9634 for (i = 0; i < call_expr_nargs (call_expr); i++)
9635 {
9636 parameter = CALL_EXPR_ARG (call_expr, i);
9637 gcc_assert (parameter);
9638
9639 /* For an undeclared variable passed as parameter we will get
9640 an ERROR_MARK node here. */
9641 if (TREE_CODE (parameter) == ERROR_MARK)
9642 return true;
9643
9644 type = TREE_TYPE (parameter);
9645 gcc_assert (type);
9646
9647 mode = TYPE_MODE (type);
9648 gcc_assert (mode);
9649
9650 if (pass_by_reference (&cum, mode, type, true))
9651 {
9652 mode = Pmode;
9653 type = build_pointer_type (type);
9654 }
9655
9656 parm_rtx = s390_function_arg (&cum, mode, type, 0);
9657
9658 s390_function_arg_advance (&cum, mode, type, 0);
9659
9660 if (!parm_rtx)
9661 continue;
9662
9663 if (REG_P (parm_rtx))
9664 {
9665 for (reg = 0;
9666 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
9667 reg++)
9668 if (!call_used_regs[reg + REGNO (parm_rtx)])
9669 return true;
9670 }
9671
9672 if (GET_CODE (parm_rtx) == PARALLEL)
9673 {
9674 int i;
9675
9676 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
9677 {
9678 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
9679
9680 gcc_assert (REG_P (r));
9681
9682 for (reg = 0;
9683 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
9684 reg++)
9685 if (!call_used_regs[reg + REGNO (r)])
9686 return true;
9687 }
9688 }
9689
9690 }
9691 return false;
9692 }
9693
9694 /* Return true if the given call expression can be
9695 turned into a sibling call.
9696 DECL holds the declaration of the function to be called whereas
9697 EXP is the call expression itself. */
9698
9699 static bool
9700 s390_function_ok_for_sibcall (tree decl, tree exp)
9701 {
9702 /* The TPF epilogue uses register 1. */
9703 if (TARGET_TPF_PROFILING)
9704 return false;
9705
9706 /* The 31 bit PLT code uses register 12 (GOT pointer - call saved)
9707 which would have to be restored before the sibcall. */
9708 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
9709 return false;
9710
9711 /* Register 6 on s390 is available as an argument register but unfortunately
9712 "caller saved". This makes functions needing this register for arguments
9713 not suitable for sibcalls. */
9714 return !s390_call_saved_register_used (exp);
9715 }
9716
9717 /* Return the fixed registers used for condition codes. */
9718
9719 static bool
9720 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9721 {
9722 *p1 = CC_REGNUM;
9723 *p2 = INVALID_REGNUM;
9724
9725 return true;
9726 }
9727
9728 /* This function is used by the call expanders of the machine description.
9729 It emits the call insn itself together with the necessary operations
9730 to adjust the target address and returns the emitted insn.
9731 ADDR_LOCATION is the target address rtx
9732 TLS_CALL the location of the thread-local symbol
9733 RESULT_REG the register where the result of the call should be stored
9734 RETADDR_REG the register where the return address should be stored
9735 If this parameter is NULL_RTX the call is considered
9736 to be a sibling call. */
9737
9738 rtx
9739 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
9740 rtx retaddr_reg)
9741 {
9742 bool plt_call = false;
9743 rtx insn;
9744 rtx call;
9745 rtx clobber;
9746 rtvec vec;
9747
9748 /* Direct function calls need special treatment. */
9749 if (GET_CODE (addr_location) == SYMBOL_REF)
9750 {
9751 /* When calling a global routine in PIC mode, we must
9752 replace the symbol itself with the PLT stub. */
9753 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
9754 {
9755 if (retaddr_reg != NULL_RTX)
9756 {
9757 addr_location = gen_rtx_UNSPEC (Pmode,
9758 gen_rtvec (1, addr_location),
9759 UNSPEC_PLT);
9760 addr_location = gen_rtx_CONST (Pmode, addr_location);
9761 plt_call = true;
9762 }
9763 else
9764 /* For -fpic code the PLT entries might use r12 which is
9765 call-saved. Therefore we cannot do a sibcall when
9766 calling directly using a symbol ref. When reaching
9767 this point we decided (in s390_function_ok_for_sibcall)
9768 to do a sibcall for a function pointer but one of the
9769 optimizers was able to get rid of the function pointer
9770 by propagating the symbol ref into the call. This
9771 optimization is illegal for S/390 so we turn the direct
9772 call into an indirect call again. */
9773 addr_location = force_reg (Pmode, addr_location);
9774 }
9775
9776 /* Unless we can use the bras(l) insn, force the
9777 routine address into a register. */
9778 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
9779 {
9780 if (flag_pic)
9781 addr_location = legitimize_pic_address (addr_location, 0);
9782 else
9783 addr_location = force_reg (Pmode, addr_location);
9784 }
9785 }
9786
9787 /* If it is already an indirect call or the code above moved the
9788 SYMBOL_REF to somewhere else make sure the address can be found in
9789 register 1. */
9790 if (retaddr_reg == NULL_RTX
9791 && GET_CODE (addr_location) != SYMBOL_REF
9792 && !plt_call)
9793 {
9794 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
9795 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
9796 }
9797
9798 addr_location = gen_rtx_MEM (QImode, addr_location);
9799 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
9800
9801 if (result_reg != NULL_RTX)
9802 call = gen_rtx_SET (VOIDmode, result_reg, call);
9803
9804 if (retaddr_reg != NULL_RTX)
9805 {
9806 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
9807
9808 if (tls_call != NULL_RTX)
9809 vec = gen_rtvec (3, call, clobber,
9810 gen_rtx_USE (VOIDmode, tls_call));
9811 else
9812 vec = gen_rtvec (2, call, clobber);
9813
9814 call = gen_rtx_PARALLEL (VOIDmode, vec);
9815 }
9816
9817 insn = emit_call_insn (call);
9818
9819 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
9820 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
9821 {
9822 /* s390_function_ok_for_sibcall should
9823 have denied sibcalls in this case. */
9824 gcc_assert (retaddr_reg != NULL_RTX);
9825
9826 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
9827 }
9828 return insn;
9829 }
9830
9831 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
9832
9833 static void
9834 s390_conditional_register_usage (void)
9835 {
9836 int i;
9837
9838 if (flag_pic)
9839 {
9840 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9841 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9842 }
9843 if (TARGET_CPU_ZARCH)
9844 {
9845 fixed_regs[BASE_REGNUM] = 0;
9846 call_used_regs[BASE_REGNUM] = 0;
9847 fixed_regs[RETURN_REGNUM] = 0;
9848 call_used_regs[RETURN_REGNUM] = 0;
9849 }
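/* The ABI-defined set of call-saved FPRs differs: f8-f15 (hard regs
   24-31) under the 64-bit ABI, f4 and f6 (hard regs 18 and 19) under
   the 31-bit ABI.  */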
9850 if (TARGET_64BIT)
9851 {
9852 for (i = 24; i < 32; i++)
9853 call_used_regs[i] = call_really_used_regs[i] = 0;
9854 }
9855 else
9856 {
9857 for (i = 18; i < 20; i++)
9858 call_used_regs[i] = call_really_used_regs[i] = 0;
9859 }
9860
9861 if (TARGET_SOFT_FLOAT)
9862 {
9863 for (i = 16; i < 32; i++)
9864 call_used_regs[i] = fixed_regs[i] = 1;
9865 }
9866 }
9867
9868 /* Corresponding function to eh_return expander. */
9869
9870 static GTY(()) rtx s390_tpf_eh_return_symbol;
9871 void
9872 s390_emit_tpf_eh_return (rtx target)
9873 {
9874 rtx insn, reg;
9875
9876 if (!s390_tpf_eh_return_symbol)
9877 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
9878
9879 reg = gen_rtx_REG (Pmode, 2);
9880
9881 emit_move_insn (reg, target);
9882 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
9883 gen_rtx_REG (Pmode, RETURN_REGNUM));
9884 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
9885
9886 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
9887 }
9888
9889 /* Rework the prologue/epilogue to avoid saving/restoring
9890 registers unnecessarily. */
9891
9892 static void
9893 s390_optimize_prologue (void)
9894 {
9895 rtx insn, new_insn, next_insn;
9896
9897 /* Do a final recompute of the frame-related data. */
9898
9899 s390_update_frame_layout ();
9900
9901 /* If all special registers are in fact used, there's nothing we
9902 can do, so no point in walking the insn list. */
9903
9904 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
9905 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
9906 && (TARGET_CPU_ZARCH
9907 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
9908 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
9909 return;
9910
9911 /* Search for prologue/epilogue insns and replace them. */
9912
9913 for (insn = get_insns (); insn; insn = next_insn)
9914 {
9915 int first, last, off;
9916 rtx set, base, offset;
9917
9918 next_insn = NEXT_INSN (insn);
9919
9920 if (GET_CODE (insn) != INSN)
9921 continue;
9922
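/* A prologue store multiple: narrow it to the GPR range that actually
   needs saving, or remove it entirely if no GPRs need to be saved.  */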
9923 if (GET_CODE (PATTERN (insn)) == PARALLEL
9924 && store_multiple_operation (PATTERN (insn), VOIDmode))
9925 {
9926 set = XVECEXP (PATTERN (insn), 0, 0);
9927 first = REGNO (SET_SRC (set));
9928 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9929 offset = const0_rtx;
9930 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9931 off = INTVAL (offset);
9932
9933 if (GET_CODE (base) != REG || off < 0)
9934 continue;
9935 if (cfun_frame_layout.first_save_gpr != -1
9936 && (cfun_frame_layout.first_save_gpr < first
9937 || cfun_frame_layout.last_save_gpr > last))
9938 continue;
9939 if (REGNO (base) != STACK_POINTER_REGNUM
9940 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9941 continue;
9942 if (first > BASE_REGNUM || last < BASE_REGNUM)
9943 continue;
9944
9945 if (cfun_frame_layout.first_save_gpr != -1)
9946 {
9947 new_insn = save_gprs (base,
9948 off + (cfun_frame_layout.first_save_gpr
9949 - first) * UNITS_PER_LONG,
9950 cfun_frame_layout.first_save_gpr,
9951 cfun_frame_layout.last_save_gpr);
9952 new_insn = emit_insn_before (new_insn, insn);
9953 INSN_ADDRESSES_NEW (new_insn, -1);
9954 }
9955
9956 remove_insn (insn);
9957 continue;
9958 }
9959
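/* A single store of the base (or, on pre-z/Arch targets, the return
   address) register that turns out to be unnecessary: remove it.  */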
9960 if (cfun_frame_layout.first_save_gpr == -1
9961 && GET_CODE (PATTERN (insn)) == SET
9962 && GET_CODE (SET_SRC (PATTERN (insn))) == REG
9963 && (REGNO (SET_SRC (PATTERN (insn))) == BASE_REGNUM
9964 || (!TARGET_CPU_ZARCH
9965 && REGNO (SET_SRC (PATTERN (insn))) == RETURN_REGNUM))
9966 && GET_CODE (SET_DEST (PATTERN (insn))) == MEM)
9967 {
9968 set = PATTERN (insn);
9969 first = REGNO (SET_SRC (set));
9970 offset = const0_rtx;
9971 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
9972 off = INTVAL (offset);
9973
9974 if (GET_CODE (base) != REG || off < 0)
9975 continue;
9976 if (REGNO (base) != STACK_POINTER_REGNUM
9977 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
9978 continue;
9979
9980 remove_insn (insn);
9981 continue;
9982 }
9983
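/* Likewise for an epilogue load multiple: narrow it to the GPR range
   that actually needs restoring, or remove it.  */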
9984 if (GET_CODE (PATTERN (insn)) == PARALLEL
9985 && load_multiple_operation (PATTERN (insn), VOIDmode))
9986 {
9987 set = XVECEXP (PATTERN (insn), 0, 0);
9988 first = REGNO (SET_DEST (set));
9989 last = first + XVECLEN (PATTERN (insn), 0) - 1;
9990 offset = const0_rtx;
9991 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
9992 off = INTVAL (offset);
9993
9994 if (GET_CODE (base) != REG || off < 0)
9995 continue;
9996 if (cfun_frame_layout.first_restore_gpr != -1
9997 && (cfun_frame_layout.first_restore_gpr < first
9998 || cfun_frame_layout.last_restore_gpr > last))
9999 continue;
10000 if (REGNO (base) != STACK_POINTER_REGNUM
10001 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10002 continue;
10003 if (first > BASE_REGNUM || last < BASE_REGNUM)
10004 continue;
10005
10006 if (cfun_frame_layout.first_restore_gpr != -1)
10007 {
10008 new_insn = restore_gprs (base,
10009 off + (cfun_frame_layout.first_restore_gpr
10010 - first) * UNITS_PER_LONG,
10011 cfun_frame_layout.first_restore_gpr,
10012 cfun_frame_layout.last_restore_gpr);
10013 new_insn = emit_insn_before (new_insn, insn);
10014 INSN_ADDRESSES_NEW (new_insn, -1);
10015 }
10016
10017 remove_insn (insn);
10018 continue;
10019 }
10020
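/* A single reload of the base (or return address) register that is no
   longer needed.  */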
10021 if (cfun_frame_layout.first_restore_gpr == -1
10022 && GET_CODE (PATTERN (insn)) == SET
10023 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
10024 && (REGNO (SET_DEST (PATTERN (insn))) == BASE_REGNUM
10025 || (!TARGET_CPU_ZARCH
10026 && REGNO (SET_DEST (PATTERN (insn))) == RETURN_REGNUM))
10027 && GET_CODE (SET_SRC (PATTERN (insn))) == MEM)
10028 {
10029 set = PATTERN (insn);
10030 first = REGNO (SET_DEST (set));
10031 offset = const0_rtx;
10032 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
10033 off = INTVAL (offset);
10034
10035 if (GET_CODE (base) != REG || off < 0)
10036 continue;
10037 if (REGNO (base) != STACK_POINTER_REGNUM
10038 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10039 continue;
10040
10041 remove_insn (insn);
10042 continue;
10043 }
10044 }
10045 }
10046
10047 /* On z10 and later the dynamic branch prediction must see the
10048 backward jump within a certain window. If not, it falls back to
10049 the static prediction. This function rearranges the loop backward
10050 branch in a way which makes the static prediction always correct.
10051 The function returns true if it added an instruction. */
10052 static bool
10053 s390_fix_long_loop_prediction (rtx insn)
10054 {
10055 rtx set = single_set (insn);
10056 rtx code_label, label_ref, new_label;
10057 rtx uncond_jump;
10058 rtx cur_insn;
10059 rtx tmp;
10060 int distance;
10061
10062 /* This will exclude branch on count and branch on index patterns
10063 since these are correctly statically predicted. */
10064 if (!set
10065 || SET_DEST (set) != pc_rtx
10066 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
10067 return false;
10068
10069 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
10070 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
10071
10072 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
10073
10074 code_label = XEXP (label_ref, 0);
10075
10076 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
10077 || INSN_ADDRESSES (INSN_UID (insn)) == -1
10078 || (INSN_ADDRESSES (INSN_UID (insn))
10079 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
10080 return false;
10081
10082 for (distance = 0, cur_insn = PREV_INSN (insn);
10083 distance < PREDICT_DISTANCE - 6;
10084 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
10085 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
10086 return false;
10087
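/* Rewrite the long backward conditional branch as a short forward
   conditional branch around an unconditional backward jump to the
   original target: swap the arms of the IF_THEN_ELSE and retarget the
   condjump at a new label placed right after the added jump.  */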
10088 new_label = gen_label_rtx ();
10089 uncond_jump = emit_jump_insn_after (
10090 gen_rtx_SET (VOIDmode, pc_rtx,
10091 gen_rtx_LABEL_REF (VOIDmode, code_label)),
10092 insn);
10093 emit_label_after (new_label, uncond_jump);
10094
10095 tmp = XEXP (SET_SRC (set), 1);
10096 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
10097 XEXP (SET_SRC (set), 2) = tmp;
10098 INSN_CODE (insn) = -1;
10099
10100 XEXP (label_ref, 0) = new_label;
10101 JUMP_LABEL (insn) = new_label;
10102 JUMP_LABEL (uncond_jump) = code_label;
10103
10104 return true;
10105 }
10106
10107 /* Returns 1 if INSN reads the value of REG for purposes not related
10108 to addressing of memory, and 0 otherwise. */
10109 static int
10110 s390_non_addr_reg_read_p (rtx reg, rtx insn)
10111 {
10112 return reg_referenced_p (reg, PATTERN (insn))
10113 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
10114 }
10115
10116 /* Starting from INSN find_cond_jump looks downwards in the insn
10117 stream for a single jump insn which is the last user of the
10118 condition code set in INSN. */
10119 static rtx
10120 find_cond_jump (rtx insn)
10121 {
10122 for (; insn; insn = NEXT_INSN (insn))
10123 {
10124 rtx ite, cc;
10125
10126 if (LABEL_P (insn))
10127 break;
10128
10129 if (!JUMP_P (insn))
10130 {
10131 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
10132 break;
10133 continue;
10134 }
10135
10136 /* This will be triggered by a return. */
10137 if (GET_CODE (PATTERN (insn)) != SET)
10138 break;
10139
10140 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
10141 ite = SET_SRC (PATTERN (insn));
10142
10143 if (GET_CODE (ite) != IF_THEN_ELSE)
10144 break;
10145
10146 cc = XEXP (XEXP (ite, 0), 0);
10147 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
10148 break;
10149
10150 if (find_reg_note (insn, REG_DEAD, cc))
10151 return insn;
10152 break;
10153 }
10154
10155 return NULL_RTX;
10156 }
10157
10158 /* Swap the condition in COND and the operands in OP0 and OP1 so that
10159 the semantics does not change. If NULL_RTX is passed as COND the
10160 function tries to find the conditional jump starting with INSN. */
10161 static void
10162 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx insn)
10163 {
10164 rtx tmp = *op0;
10165
10166 if (cond == NULL_RTX)
10167 {
10168 rtx jump = find_cond_jump (NEXT_INSN (insn));
10169 jump = jump ? single_set (jump) : NULL_RTX;
10170
10171 if (jump == NULL_RTX)
10172 return;
10173
10174 cond = XEXP (XEXP (jump, 1), 0);
10175 }
10176
10177 *op0 = *op1;
10178 *op1 = tmp;
10179 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
10180 }
10181
10182 /* On z10, instructions of the compare-and-branch family have the
10183 property of accessing the register occurring as second operand with
10184 its bits complemented. If such a compare is grouped with a second
10185 instruction that accesses the same register non-complemented, and
10186 if that register's value is delivered via a bypass, then the
10187 pipeline recycles, thereby causing significant performance decline.
10188 This function locates such situations and exchanges the two
10189 operands of the compare. The function returns true whenever it
10190 added an insn. */
10191 static bool
10192 s390_z10_optimize_cmp (rtx insn)
10193 {
10194 rtx prev_insn, next_insn;
10195 bool insn_added_p = false;
10196 rtx cond, *op0, *op1;
10197
10198 if (GET_CODE (PATTERN (insn)) == PARALLEL)
10199 {
10200 /* Handle compare and branch and branch on count
10201 instructions. */
10202 rtx pattern = single_set (insn);
10203
10204 if (!pattern
10205 || SET_DEST (pattern) != pc_rtx
10206 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
10207 return false;
10208
10209 cond = XEXP (SET_SRC (pattern), 0);
10210 op0 = &XEXP (cond, 0);
10211 op1 = &XEXP (cond, 1);
10212 }
10213 else if (GET_CODE (PATTERN (insn)) == SET)
10214 {
10215 rtx src, dest;
10216
10217 /* Handle normal compare instructions. */
10218 src = SET_SRC (PATTERN (insn));
10219 dest = SET_DEST (PATTERN (insn));
10220
10221 if (!REG_P (dest)
10222 || !CC_REGNO_P (REGNO (dest))
10223 || GET_CODE (src) != COMPARE)
10224 return false;
10225
10226 /* s390_swap_cmp will try to find the conditional
10227 jump when passing NULL_RTX as condition. */
10228 cond = NULL_RTX;
10229 op0 = &XEXP (src, 0);
10230 op1 = &XEXP (src, 1);
10231 }
10232 else
10233 return false;
10234
10235 if (!REG_P (*op0) || !REG_P (*op1))
10236 return false;
10237
10238 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
10239 return false;
10240
10241 /* Swap the COMPARE arguments and its mask if there is a
10242 conflicting access in the previous insn. */
10243 prev_insn = prev_active_insn (insn);
10244 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10245 && reg_referenced_p (*op1, PATTERN (prev_insn)))
10246 s390_swap_cmp (cond, op0, op1, insn);
10247
10248 /* Check if there is a conflict with the next insn. If there
10249 was no conflict with the previous insn, then swap the
10250 COMPARE arguments and its mask. If we already swapped
10251 the operands, or if swapping them would cause a conflict
10252 with the previous insn, issue a NOP after the COMPARE in
10253 order to separate the two instructions. */
10254 next_insn = next_active_insn (insn);
10255 if (next_insn != NULL_RTX && INSN_P (next_insn)
10256 && s390_non_addr_reg_read_p (*op1, next_insn))
10257 {
10258 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
10259 && s390_non_addr_reg_read_p (*op0, prev_insn))
10260 {
10261 if (REGNO (*op1) == 0)
10262 emit_insn_after (gen_nop1 (), insn);
10263 else
10264 emit_insn_after (gen_nop (), insn);
10265 insn_added_p = true;
10266 }
10267 else
10268 s390_swap_cmp (cond, op0, op1, insn);
10269 }
10270 return insn_added_p;
10271 }
10272
10273 /* Perform machine-dependent processing. */
10274
10275 static void
10276 s390_reorg (void)
10277 {
10278 bool pool_overflow = false;
10279
10280 /* Make sure all splits have been performed; splits after
10281 machine_dependent_reorg might confuse insn length counts. */
10282 split_all_insns_noflow ();
10283
10284 /* Install the main literal pool and the associated base
10285 register load insns.
10286
10287 In addition, there are two problematic situations we need
10288 to correct:
10289
10290 - the literal pool might be > 4096 bytes in size, so that
10291 some of its elements cannot be directly accessed
10292
10293 - a branch target might be > 64K away from the branch, so that
10294 it is not possible to use a PC-relative instruction.
10295
10296 To fix those, we split the single literal pool into multiple
10297 pool chunks, reloading the pool base register at various
10298 points throughout the function to ensure it always points to
10299 the pool chunk the following code expects, and / or replace
10300 PC-relative branches by absolute branches.
10301
10302 However, the two problems are interdependent: splitting the
10303 literal pool can move a branch further away from its target,
10304 causing the 64K limit to overflow, and on the other hand,
10305 replacing a PC-relative branch by an absolute branch means
10306 we need to put the branch target address into the literal
10307 pool, possibly causing it to overflow.
10308
10309 So, we loop trying to fix up both problems until we manage
10310 to satisfy both conditions at the same time. Note that the
10311 loop is guaranteed to terminate as every pass of the loop
10312 strictly decreases the total number of PC-relative branches
10313 in the function. (This is not completely true as there
10314 might be branch-over-pool insns introduced by chunkify_start.
10315 Those never need to be split however.) */
10316
10317 for (;;)
10318 {
10319 struct constant_pool *pool = NULL;
10320
10321 /* Collect the literal pool. */
10322 if (!pool_overflow)
10323 {
10324 pool = s390_mainpool_start ();
10325 if (!pool)
10326 pool_overflow = true;
10327 }
10328
10329 /* If literal pool overflowed, start to chunkify it. */
10330 if (pool_overflow)
10331 pool = s390_chunkify_start ();
10332
10333 /* Split out-of-range branches. If this has created new
10334 literal pool entries, cancel current chunk list and
10335 recompute it. zSeries machines have large branch
10336 instructions, so we never need to split a branch. */
10337 if (!TARGET_CPU_ZARCH && s390_split_branches ())
10338 {
10339 if (pool_overflow)
10340 s390_chunkify_cancel (pool);
10341 else
10342 s390_mainpool_cancel (pool);
10343
10344 continue;
10345 }
10346
10347 /* If we made it up to here, both conditions are satisfied.
10348 Finish up literal pool related changes. */
10349 if (pool_overflow)
10350 s390_chunkify_finish (pool);
10351 else
10352 s390_mainpool_finish (pool);
10353
10354 /* We're done splitting branches. */
10355 cfun->machine->split_branches_pending_p = false;
10356 break;
10357 }
10358
10359 /* Generate out-of-pool execute target insns. */
10360 if (TARGET_CPU_ZARCH)
10361 {
10362 rtx insn, label, target;
10363
10364 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10365 {
10366 label = s390_execute_label (insn);
10367 if (!label)
10368 continue;
10369
10370 gcc_assert (label != const0_rtx);
10371
10372 target = emit_label (XEXP (label, 0));
10373 INSN_ADDRESSES_NEW (target, -1);
10374
10375 target = emit_insn (s390_execute_target (insn));
10376 INSN_ADDRESSES_NEW (target, -1);
10377 }
10378 }
10379
10380 /* Try to optimize prologue and epilogue further. */
10381 s390_optimize_prologue ();
10382
10383 /* Walk over the insns and do some >=z10 specific changes. */
10384 if (s390_tune == PROCESSOR_2097_Z10
10385 || s390_tune == PROCESSOR_2817_Z196)
10386 {
10387 rtx insn;
10388 bool insn_added_p = false;
10389
10390 /* The insn lengths and addresses have to be up to date for the
10391 following manipulations. */
10392 shorten_branches (get_insns ());
10393
10394 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10395 {
10396 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10397 continue;
10398
10399 if (JUMP_P (insn))
10400 insn_added_p |= s390_fix_long_loop_prediction (insn);
10401
10402 if ((GET_CODE (PATTERN (insn)) == PARALLEL
10403 || GET_CODE (PATTERN (insn)) == SET)
10404 && s390_tune == PROCESSOR_2097_Z10)
10405 insn_added_p |= s390_z10_optimize_cmp (insn);
10406 }
10407
10408 /* Adjust branches if we added new instructions. */
10409 if (insn_added_p)
10410 shorten_branches (get_insns ());
10411 }
10412 }
10413
10414 /* Return true if INSN is a fp load insn writing register REGNO. */
10415 static inline bool
10416 s390_fpload_toreg (rtx insn, unsigned int regno)
10417 {
10418 rtx set;
10419 enum attr_type flag = s390_safe_attr_type (insn);
10420
10421 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
10422 return false;
10423
10424 set = single_set (insn);
10425
10426 if (set == NULL_RTX)
10427 return false;
10428
10429 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
10430 return false;
10431
10432 if (REGNO (SET_DEST (set)) != regno)
10433 return false;
10434
10435 return true;
10436 }
10437
10438 /* This value describes the distance to be avoided between an
10439 arithmetic fp instruction and an fp load writing the same register.
10440 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
10441 fine, but the exact value has to be avoided. Otherwise the FP
10442 pipeline will throw an exception causing a major penalty. */
10443 #define Z10_EARLYLOAD_DISTANCE 7
10444
10445 /* Rearrange the ready list in order to avoid the situation described
10446 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
10447 moved to the very end of the ready list. */
10448 static void
10449 s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
10450 {
10451 unsigned int regno;
10452 int nready = *nready_p;
10453 rtx tmp;
10454 int i;
10455 rtx insn;
10456 rtx set;
10457 enum attr_type flag;
10458 int distance;
10459
10460 /* Skip DISTANCE - 1 active insns. */
10461 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
10462 distance > 0 && insn != NULL_RTX;
10463 distance--, insn = prev_active_insn (insn))
10464 if (CALL_P (insn) || JUMP_P (insn))
10465 return;
10466
10467 if (insn == NULL_RTX)
10468 return;
10469
10470 set = single_set (insn);
10471
10472 if (set == NULL_RTX || !REG_P (SET_DEST (set))
10473 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
10474 return;
10475
10476 flag = s390_safe_attr_type (insn);
10477
10478 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
10479 return;
10480
10481 regno = REGNO (SET_DEST (set));
10482 i = nready - 1;
10483
10484 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
10485 i--;
10486
10487 if (!i)
10488 return;
10489
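/* The scheduler issues insns from the end of the ready list, so placing
   the conflicting load in slot 0 delays it as long as possible.  */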
10490 tmp = ready[i];
10491 memmove (&ready[1], &ready[0], sizeof (rtx) * i);
10492 ready[0] = tmp;
10493 }
10494
10495 /* This function is called via hook TARGET_SCHED_REORDER before
10496 issuing one insn from list READY, which contains *NREADYP entries.
10497 For target z10 it reorders load instructions to avoid early load
10498 conflicts in the floating point pipeline. */
10499 static int
10500 s390_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10501 rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
10502 {
10503 if (s390_tune == PROCESSOR_2097_Z10)
10504 if (reload_completed && *nreadyp > 1)
10505 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
10506
10507 return s390_issue_rate ();
10508 }
10509
10510 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
10511 the scheduler has issued INSN. It stores the last issued insn into
10512 last_scheduled_insn in order to make it available for
10513 s390_sched_reorder. */
10514 static int
10515 s390_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
10516 int verbose ATTRIBUTE_UNUSED,
10517 rtx insn, int more)
10518 {
10519 last_scheduled_insn = insn;
10520
10521 if (GET_CODE (PATTERN (insn)) != USE
10522 && GET_CODE (PATTERN (insn)) != CLOBBER)
10523 return more - 1;
10524 else
10525 return more;
10526 }
10527
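/* This function is called via hook TARGET_SCHED_INIT at the start of
   scheduling a new block of insns; it just resets last_scheduled_insn.  */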
10528 static void
10529 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
10530 int verbose ATTRIBUTE_UNUSED,
10531 int max_ready ATTRIBUTE_UNUSED)
10532 {
10533 last_scheduled_insn = NULL_RTX;
10534 }
10535
10536 /* This function checks the whole of insn X for memory references. The
10537 function always returns zero because the framework it is called
10538 from would stop recursively analyzing the insn upon a return value
10539 other than zero. The real result of this function is updating
10540 counter variable MEM_COUNT. */
10541 static int
10542 check_dpu (rtx *x, unsigned *mem_count)
10543 {
10544 if (*x != NULL_RTX && MEM_P (*x))
10545 (*mem_count)++;
10546 return 0;
10547 }
10548
10549 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
10550 the number of times struct loop *loop should be unrolled if tuned for cpus with
10551 a built-in stride prefetcher.
10552 The loop is analyzed for memory accesses by calling check_dpu for
10553 each rtx of the loop. Depending on the loop_depth and the number of
10554 memory accesses, a new number <= nunroll is returned to improve the
10555 behaviour of the hardware prefetch unit. */
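/* For example, a loop at depth 1 whose body contains 7 memory references
   is unrolled at most MIN (nunroll, 28 / 7) = 4 times.  */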
10556 static unsigned
10557 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
10558 {
10559 basic_block *bbs;
10560 rtx insn;
10561 unsigned i;
10562 unsigned mem_count = 0;
10563
10564 if (s390_tune != PROCESSOR_2097_Z10 && s390_tune != PROCESSOR_2817_Z196)
10565 return nunroll;
10566
10567 /* Count the number of memory references within the loop body. */
10568 bbs = get_loop_body (loop);
10569 for (i = 0; i < loop->num_nodes; i++)
10570 {
10571 for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]); insn = NEXT_INSN (insn))
10572 if (INSN_P (insn) && INSN_CODE (insn) != -1)
10573 for_each_rtx (&insn, (rtx_function) check_dpu, &mem_count);
10574 }
10575 free (bbs);
10576
10577 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
10578 if (mem_count == 0)
10579 return nunroll;
10580
10581 switch (loop_depth(loop))
10582 {
10583 case 1:
10584 return MIN (nunroll, 28 / mem_count);
10585 case 2:
10586 return MIN (nunroll, 22 / mem_count);
10587 default:
10588 return MIN (nunroll, 16 / mem_count);
10589 }
10590 }
10591
10592 /* Initialize GCC target structure. */
10593
10594 #undef TARGET_ASM_ALIGNED_HI_OP
10595 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10596 #undef TARGET_ASM_ALIGNED_DI_OP
10597 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10598 #undef TARGET_ASM_INTEGER
10599 #define TARGET_ASM_INTEGER s390_assemble_integer
10600
10601 #undef TARGET_ASM_OPEN_PAREN
10602 #define TARGET_ASM_OPEN_PAREN ""
10603
10604 #undef TARGET_ASM_CLOSE_PAREN
10605 #define TARGET_ASM_CLOSE_PAREN ""
10606
10607 #undef TARGET_OPTION_OVERRIDE
10608 #define TARGET_OPTION_OVERRIDE s390_option_override
10609
10610 #undef TARGET_ENCODE_SECTION_INFO
10611 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
10612
10613 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10614 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10615
10616 #ifdef HAVE_AS_TLS
10617 #undef TARGET_HAVE_TLS
10618 #define TARGET_HAVE_TLS true
10619 #endif
10620 #undef TARGET_CANNOT_FORCE_CONST_MEM
10621 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
10622
10623 #undef TARGET_DELEGITIMIZE_ADDRESS
10624 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
10625
10626 #undef TARGET_LEGITIMIZE_ADDRESS
10627 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
10628
10629 #undef TARGET_RETURN_IN_MEMORY
10630 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
10631
10632 #undef TARGET_INIT_BUILTINS
10633 #define TARGET_INIT_BUILTINS s390_init_builtins
10634 #undef TARGET_EXPAND_BUILTIN
10635 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
10636
10637 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
10638 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
10639
10640 #undef TARGET_ASM_OUTPUT_MI_THUNK
10641 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
10642 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10643 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10644
10645 #undef TARGET_SCHED_ADJUST_PRIORITY
10646 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
10647 #undef TARGET_SCHED_ISSUE_RATE
10648 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
10649 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10650 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
10651
10652 #undef TARGET_SCHED_VARIABLE_ISSUE
10653 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
10654 #undef TARGET_SCHED_REORDER
10655 #define TARGET_SCHED_REORDER s390_sched_reorder
10656 #undef TARGET_SCHED_INIT
10657 #define TARGET_SCHED_INIT s390_sched_init
10658
10659 #undef TARGET_CANNOT_COPY_INSN_P
10660 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
10661 #undef TARGET_RTX_COSTS
10662 #define TARGET_RTX_COSTS s390_rtx_costs
10663 #undef TARGET_ADDRESS_COST
10664 #define TARGET_ADDRESS_COST s390_address_cost
10665 #undef TARGET_REGISTER_MOVE_COST
10666 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
10667 #undef TARGET_MEMORY_MOVE_COST
10668 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
10669
10670 #undef TARGET_MACHINE_DEPENDENT_REORG
10671 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
10672
10673 #undef TARGET_VALID_POINTER_MODE
10674 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
10675
10676 #undef TARGET_BUILD_BUILTIN_VA_LIST
10677 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
10678 #undef TARGET_EXPAND_BUILTIN_VA_START
10679 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
10680 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10681 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
10682
10683 #undef TARGET_PROMOTE_FUNCTION_MODE
10684 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
10685 #undef TARGET_PASS_BY_REFERENCE
10686 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
10687
10688 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10689 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
10690 #undef TARGET_FUNCTION_ARG
10691 #define TARGET_FUNCTION_ARG s390_function_arg
10692 #undef TARGET_FUNCTION_ARG_ADVANCE
10693 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
10694 #undef TARGET_FUNCTION_VALUE
10695 #define TARGET_FUNCTION_VALUE s390_function_value
10696 #undef TARGET_LIBCALL_VALUE
10697 #define TARGET_LIBCALL_VALUE s390_libcall_value
10698
10699 #undef TARGET_FIXED_CONDITION_CODE_REGS
10700 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
10701
10702 #undef TARGET_CC_MODES_COMPATIBLE
10703 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
10704
10705 #undef TARGET_INVALID_WITHIN_DOLOOP
10706 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
10707
10708 #ifdef HAVE_AS_TLS
10709 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
10710 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
10711 #endif
10712
10713 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10714 #undef TARGET_MANGLE_TYPE
10715 #define TARGET_MANGLE_TYPE s390_mangle_type
10716 #endif
10717
10718 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10719 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
10720
10721 #undef TARGET_PREFERRED_RELOAD_CLASS
10722 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
10723
10724 #undef TARGET_SECONDARY_RELOAD
10725 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
10726
10727 #undef TARGET_LIBGCC_CMP_RETURN_MODE
10728 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
10729
10730 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
10731 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
10732
10733 #undef TARGET_LEGITIMATE_ADDRESS_P
10734 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
10735
10736 #undef TARGET_LEGITIMATE_CONSTANT_P
10737 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
10738
10739 #undef TARGET_CAN_ELIMINATE
10740 #define TARGET_CAN_ELIMINATE s390_can_eliminate
10741
10742 #undef TARGET_CONDITIONAL_REGISTER_USAGE
10743 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
10744
10745 #undef TARGET_LOOP_UNROLL_ADJUST
10746 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
10747
10748 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
10749 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
10750 #undef TARGET_TRAMPOLINE_INIT
10751 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
10752
10753 #undef TARGET_UNWIND_WORD_MODE
10754 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
10755
10756 struct gcc_target targetm = TARGET_INITIALIZER;
10757
10758 #include "gt-s390.h"