1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "tm_p.h"
36 #include "stringpool.h"
37 #include "expmed.h"
38 #include "optabs.h"
39 #include "regs.h"
40 #include "emit-rtl.h"
41 #include "recog.h"
42 #include "cgraph.h"
43 #include "diagnostic-core.h"
44 #include "diagnostic.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "print-tree.h"
48 #include "stor-layout.h"
49 #include "varasm.h"
50 #include "calls.h"
51 #include "conditions.h"
52 #include "output.h"
53 #include "insn-attr.h"
54 #include "flags.h"
55 #include "except.h"
56 #include "dojump.h"
57 #include "explow.h"
58 #include "stmt.h"
59 #include "expr.h"
60 #include "reload.h"
61 #include "cfgrtl.h"
62 #include "cfganal.h"
63 #include "lcm.h"
64 #include "cfgbuild.h"
65 #include "cfgcleanup.h"
66 #include "debug.h"
67 #include "langhooks.h"
68 #include "internal-fn.h"
69 #include "gimple-fold.h"
70 #include "tree-eh.h"
71 #include "gimplify.h"
72 #include "params.h"
73 #include "opts.h"
74 #include "tree-pass.h"
75 #include "context.h"
76 #include "builtins.h"
77 #include "rtl-iter.h"
78 #include "intl.h"
79 #include "tm-constrs.h"
80
81 /* This file should be included last. */
82 #include "target-def.h"
83
84 /* Remember the last target of s390_set_current_function. */
85 static GTY(()) tree s390_previous_fndecl;
86
87 /* Define the specific costs for a given cpu. */
88
89 struct processor_costs
90 {
91 /* multiplication */
92 const int m; /* cost of an M instruction. */
93 const int mghi; /* cost of an MGHI instruction. */
94 const int mh; /* cost of an MH instruction. */
95 const int mhi; /* cost of an MHI instruction. */
96 const int ml; /* cost of an ML instruction. */
97 const int mr; /* cost of an MR instruction. */
98 const int ms; /* cost of an MS instruction. */
99 const int msg; /* cost of an MSG instruction. */
100 const int msgf; /* cost of an MSGF instruction. */
101 const int msgfr; /* cost of an MSGFR instruction. */
102 const int msgr; /* cost of an MSGR instruction. */
103 const int msr; /* cost of an MSR instruction. */
104 const int mult_df; /* cost of multiplication in DFmode. */
105 const int mxbr;
106 /* square root */
107 const int sqxbr; /* cost of square root in TFmode. */
108 const int sqdbr; /* cost of square root in DFmode. */
109 const int sqebr; /* cost of square root in SFmode. */
110 /* multiply and add */
111 const int madbr; /* cost of multiply and add in DFmode. */
112 const int maebr; /* cost of multiply and add in SFmode. */
113 /* division */
114 const int dxbr;
115 const int ddbr;
116 const int debr;
117 const int dlgr;
118 const int dlr;
119 const int dr;
120 const int dsgfr;
121 const int dsgr;
122 };
123
124 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
125
126 static const
127 struct processor_costs z900_cost =
128 {
129 COSTS_N_INSNS (5), /* M */
130 COSTS_N_INSNS (10), /* MGHI */
131 COSTS_N_INSNS (5), /* MH */
132 COSTS_N_INSNS (4), /* MHI */
133 COSTS_N_INSNS (5), /* ML */
134 COSTS_N_INSNS (5), /* MR */
135 COSTS_N_INSNS (4), /* MS */
136 COSTS_N_INSNS (15), /* MSG */
137 COSTS_N_INSNS (7), /* MSGF */
138 COSTS_N_INSNS (7), /* MSGFR */
139 COSTS_N_INSNS (10), /* MSGR */
140 COSTS_N_INSNS (4), /* MSR */
141 COSTS_N_INSNS (7), /* multiplication in DFmode */
142 COSTS_N_INSNS (13), /* MXBR */
143 COSTS_N_INSNS (136), /* SQXBR */
144 COSTS_N_INSNS (44), /* SQDBR */
145 COSTS_N_INSNS (35), /* SQEBR */
146 COSTS_N_INSNS (18), /* MADBR */
147 COSTS_N_INSNS (13), /* MAEBR */
148 COSTS_N_INSNS (134), /* DXBR */
149 COSTS_N_INSNS (30), /* DDBR */
150 COSTS_N_INSNS (27), /* DEBR */
151 COSTS_N_INSNS (220), /* DLGR */
152 COSTS_N_INSNS (34), /* DLR */
153 COSTS_N_INSNS (34), /* DR */
154 COSTS_N_INSNS (32), /* DSGFR */
155 COSTS_N_INSNS (32), /* DSGR */
156 };
157
158 static const
159 struct processor_costs z990_cost =
160 {
161 COSTS_N_INSNS (4), /* M */
162 COSTS_N_INSNS (2), /* MGHI */
163 COSTS_N_INSNS (2), /* MH */
164 COSTS_N_INSNS (2), /* MHI */
165 COSTS_N_INSNS (4), /* ML */
166 COSTS_N_INSNS (4), /* MR */
167 COSTS_N_INSNS (5), /* MS */
168 COSTS_N_INSNS (6), /* MSG */
169 COSTS_N_INSNS (4), /* MSGF */
170 COSTS_N_INSNS (4), /* MSGFR */
171 COSTS_N_INSNS (4), /* MSGR */
172 COSTS_N_INSNS (4), /* MSR */
173 COSTS_N_INSNS (1), /* multiplication in DFmode */
174 COSTS_N_INSNS (28), /* MXBR */
175 COSTS_N_INSNS (130), /* SQXBR */
176 COSTS_N_INSNS (66), /* SQDBR */
177 COSTS_N_INSNS (38), /* SQEBR */
178 COSTS_N_INSNS (1), /* MADBR */
179 COSTS_N_INSNS (1), /* MAEBR */
180 COSTS_N_INSNS (60), /* DXBR */
181 COSTS_N_INSNS (40), /* DDBR */
182 COSTS_N_INSNS (26), /* DEBR */
183 COSTS_N_INSNS (176), /* DLGR */
184 COSTS_N_INSNS (31), /* DLR */
185 COSTS_N_INSNS (31), /* DR */
186 COSTS_N_INSNS (31), /* DSGFR */
187 COSTS_N_INSNS (31), /* DSGR */
188 };
189
190 static const
191 struct processor_costs z9_109_cost =
192 {
193 COSTS_N_INSNS (4), /* M */
194 COSTS_N_INSNS (2), /* MGHI */
195 COSTS_N_INSNS (2), /* MH */
196 COSTS_N_INSNS (2), /* MHI */
197 COSTS_N_INSNS (4), /* ML */
198 COSTS_N_INSNS (4), /* MR */
199 COSTS_N_INSNS (5), /* MS */
200 COSTS_N_INSNS (6), /* MSG */
201 COSTS_N_INSNS (4), /* MSGF */
202 COSTS_N_INSNS (4), /* MSGFR */
203 COSTS_N_INSNS (4), /* MSGR */
204 COSTS_N_INSNS (4), /* MSR */
205 COSTS_N_INSNS (1), /* multiplication in DFmode */
206 COSTS_N_INSNS (28), /* MXBR */
207 COSTS_N_INSNS (130), /* SQXBR */
208 COSTS_N_INSNS (66), /* SQDBR */
209 COSTS_N_INSNS (38), /* SQEBR */
210 COSTS_N_INSNS (1), /* MADBR */
211 COSTS_N_INSNS (1), /* MAEBR */
212 COSTS_N_INSNS (60), /* DXBR */
213 COSTS_N_INSNS (40), /* DDBR */
214 COSTS_N_INSNS (26), /* DEBR */
215 COSTS_N_INSNS (30), /* DLGR */
216 COSTS_N_INSNS (23), /* DLR */
217 COSTS_N_INSNS (23), /* DR */
218 COSTS_N_INSNS (24), /* DSGFR */
219 COSTS_N_INSNS (24), /* DSGR */
220 };
221
222 static const
223 struct processor_costs z10_cost =
224 {
225 COSTS_N_INSNS (10), /* M */
226 COSTS_N_INSNS (10), /* MGHI */
227 COSTS_N_INSNS (10), /* MH */
228 COSTS_N_INSNS (10), /* MHI */
229 COSTS_N_INSNS (10), /* ML */
230 COSTS_N_INSNS (10), /* MR */
231 COSTS_N_INSNS (10), /* MS */
232 COSTS_N_INSNS (10), /* MSG */
233 COSTS_N_INSNS (10), /* MSGF */
234 COSTS_N_INSNS (10), /* MSGFR */
235 COSTS_N_INSNS (10), /* MSGR */
236 COSTS_N_INSNS (10), /* MSR */
237 COSTS_N_INSNS (1), /* multiplication in DFmode */
238 COSTS_N_INSNS (50), /* MXBR */
239 COSTS_N_INSNS (120), /* SQXBR */
240 COSTS_N_INSNS (52), /* SQDBR */
241 COSTS_N_INSNS (38), /* SQEBR */
242 COSTS_N_INSNS (1), /* MADBR */
243 COSTS_N_INSNS (1), /* MAEBR */
244 COSTS_N_INSNS (111), /* DXBR */
245 COSTS_N_INSNS (39), /* DDBR */
246 COSTS_N_INSNS (32), /* DEBR */
247 COSTS_N_INSNS (160), /* DLGR */
248 COSTS_N_INSNS (71), /* DLR */
249 COSTS_N_INSNS (71), /* DR */
250 COSTS_N_INSNS (71), /* DSGFR */
251 COSTS_N_INSNS (71), /* DSGR */
252 };
253
254 static const
255 struct processor_costs z196_cost =
256 {
257 COSTS_N_INSNS (7), /* M */
258 COSTS_N_INSNS (5), /* MGHI */
259 COSTS_N_INSNS (5), /* MH */
260 COSTS_N_INSNS (5), /* MHI */
261 COSTS_N_INSNS (7), /* ML */
262 COSTS_N_INSNS (7), /* MR */
263 COSTS_N_INSNS (6), /* MS */
264 COSTS_N_INSNS (8), /* MSG */
265 COSTS_N_INSNS (6), /* MSGF */
266 COSTS_N_INSNS (6), /* MSGFR */
267 COSTS_N_INSNS (8), /* MSGR */
268 COSTS_N_INSNS (6), /* MSR */
269 COSTS_N_INSNS (1), /* multiplication in DFmode */
270 COSTS_N_INSNS (40), /* MXBR B+40 */
271 COSTS_N_INSNS (100), /* SQXBR B+100 */
272 COSTS_N_INSNS (42), /* SQDBR B+42 */
273 COSTS_N_INSNS (28), /* SQEBR B+28 */
274 COSTS_N_INSNS (1), /* MADBR B */
275 COSTS_N_INSNS (1), /* MAEBR B */
276 COSTS_N_INSNS (101), /* DXBR B+101 */
277 COSTS_N_INSNS (29), /* DDBR */
278 COSTS_N_INSNS (22), /* DEBR */
279 COSTS_N_INSNS (160), /* DLGR cracked */
280 COSTS_N_INSNS (160), /* DLR cracked */
281 COSTS_N_INSNS (160), /* DR expanded */
282 COSTS_N_INSNS (160), /* DSGFR cracked */
283 COSTS_N_INSNS (160), /* DSGR cracked */
284 };
285
286 static const
287 struct processor_costs zEC12_cost =
288 {
289 COSTS_N_INSNS (7), /* M */
290 COSTS_N_INSNS (5), /* MGHI */
291 COSTS_N_INSNS (5), /* MH */
292 COSTS_N_INSNS (5), /* MHI */
293 COSTS_N_INSNS (7), /* ML */
294 COSTS_N_INSNS (7), /* MR */
295 COSTS_N_INSNS (6), /* MS */
296 COSTS_N_INSNS (8), /* MSG */
297 COSTS_N_INSNS (6), /* MSGF */
298 COSTS_N_INSNS (6), /* MSGFR */
299 COSTS_N_INSNS (8), /* MSGR */
300 COSTS_N_INSNS (6), /* MSR */
301 COSTS_N_INSNS (1), /* multiplication in DFmode */
302 COSTS_N_INSNS (40), /* MXBR B+40 */
303 COSTS_N_INSNS (100), /* SQXBR B+100 */
304 COSTS_N_INSNS (42), /* SQDBR B+42 */
305 COSTS_N_INSNS (28), /* SQEBR B+28 */
306 COSTS_N_INSNS (1), /* MADBR B */
307 COSTS_N_INSNS (1), /* MAEBR B */
308 COSTS_N_INSNS (131), /* DXBR B+131 */
309 COSTS_N_INSNS (29), /* DDBR */
310 COSTS_N_INSNS (22), /* DEBR */
311 COSTS_N_INSNS (160), /* DLGR cracked */
312 COSTS_N_INSNS (160), /* DLR cracked */
313 COSTS_N_INSNS (160), /* DR expanded */
314 COSTS_N_INSNS (160), /* DSGFR cracked */
315 COSTS_N_INSNS (160), /* DSGR cracked */
316 };
317
318 static struct
319 {
320 const char *const name;
321 const enum processor_type processor;
322 const struct processor_costs *cost;
323 }
324 const processor_table[] =
325 {
326 { "g5", PROCESSOR_9672_G5, &z900_cost },
327 { "g6", PROCESSOR_9672_G6, &z900_cost },
328 { "z900", PROCESSOR_2064_Z900, &z900_cost },
329 { "z990", PROCESSOR_2084_Z990, &z990_cost },
330 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
331 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
332 { "z10", PROCESSOR_2097_Z10, &z10_cost },
333 { "z196", PROCESSOR_2817_Z196, &z196_cost },
334 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
335 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
336 { "native", PROCESSOR_NATIVE, NULL }
337 };
338
339 extern int reload_completed;
340
341 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
342 static rtx_insn *last_scheduled_insn;
343
344 /* Structure used to hold the components of a S/390 memory
345 address. A legitimate address on S/390 is of the general
346 form
347 base + index + displacement
348 where any of the components is optional.
349
350 base and index are registers of the class ADDR_REGS,
351 displacement is an unsigned 12-bit immediate constant. */
352
353 struct s390_address
354 {
355 rtx base;
356 rtx indx;
357 rtx disp;
358 bool pointer;
359 bool literal_pool;
360 };
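
/* Illustrative example (not used by the code): a load such as
     l %r1,8(%r2,%r15)
   would be decomposed into base = (reg 15), indx = (reg 2) and
   disp = (const_int 8), while a plain stack slot access like 96(%r15)
   leaves indx as NULL_RTX.  */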
361
362 /* The following structure is embedded in the machine
363 specific part of struct function. */
364
365 struct GTY (()) s390_frame_layout
366 {
367 /* Offset within stack frame. */
368 HOST_WIDE_INT gprs_offset;
369 HOST_WIDE_INT f0_offset;
370 HOST_WIDE_INT f4_offset;
371 HOST_WIDE_INT f8_offset;
372 HOST_WIDE_INT backchain_offset;
373
374 /* Numbers of the first and last gpr for which slots in the
375 register save area are reserved. */
376 int first_save_gpr_slot;
377 int last_save_gpr_slot;
378
379 /* Location (FP register number) where GPRs (r0-r15) should
380 be saved to.
381 0 - does not need to be saved at all
382 -1 - stack slot */
383 signed char gpr_save_slots[16];
384
385 /* Number of first and last gpr to be saved, restored. */
386 int first_save_gpr;
387 int first_restore_gpr;
388 int last_save_gpr;
389 int last_restore_gpr;
390
391 /* Bits standing for floating point registers. Set, if the
392 respective register has to be saved. Starting with reg 16 (f0)
393 at the rightmost bit.
394 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
395 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
396 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
397 unsigned int fpr_bitmap;
398
399 /* Number of floating point registers f8-f15 which must be saved. */
400 int high_fprs;
401
402 /* Set if return address needs to be saved.
403 This flag is set by s390_return_addr_rtx if it could not use
404 the initial value of r14 and therefore depends on r14 saved
405 to the stack. */
406 bool save_return_addr_p;
407
408 /* Size of stack frame. */
409 HOST_WIDE_INT frame_size;
410 };
411
412 /* Define the structure for the machine field in struct function. */
413
414 struct GTY(()) machine_function
415 {
416 struct s390_frame_layout frame_layout;
417
418 /* Literal pool base register. */
419 rtx base_reg;
420
421 /* True if we may need to perform branch splitting. */
422 bool split_branches_pending_p;
423
424 bool has_landing_pad_p;
425
426 /* True if the current function may contain a tbegin clobbering
427 FPRs. */
428 bool tbegin_p;
429 };
430
431 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
432
433 #define cfun_frame_layout (cfun->machine->frame_layout)
434 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
435 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
436 ? cfun_frame_layout.fpr_bitmap & 0x0f \
437 : cfun_frame_layout.fpr_bitmap & 0x03))
438 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
439 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
440 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
441 (1 << (REGNO - FPR0_REGNUM)))
442 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
443 (1 << (REGNO - FPR0_REGNUM))))
444 #define cfun_gpr_save_slot(REGNO) \
445 cfun->machine->frame_layout.gpr_save_slots[REGNO]
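
/* For example (illustrative only), cfun_set_fpr_save (FPR0_REGNUM + 4)
   sets bit 4 of fpr_bitmap, and cfun_fpr_save_p (FPR0_REGNUM + 4)
   subsequently returns true for that register.  */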
446
447 /* Number of GPRs and FPRs used for argument passing. */
448 #define GP_ARG_NUM_REG 5
449 #define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
450 #define VEC_ARG_NUM_REG 8
451
452 /* A couple of shortcuts. */
453 #define CONST_OK_FOR_J(x) \
454 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
455 #define CONST_OK_FOR_K(x) \
456 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
457 #define CONST_OK_FOR_Os(x) \
458 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
459 #define CONST_OK_FOR_Op(x) \
460 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
461 #define CONST_OK_FOR_On(x) \
462 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
463
464 #define REGNO_PAIR_OK(REGNO, MODE) \
465 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
466
467 /* The read-ahead of the dynamic branch prediction unit in
468 bytes on a z10 (or higher) CPU. */
469 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
470
471
472 /* Indicate which ABI has been used for passing vector args.
473 0 - no vector type arguments have been passed where the ABI is relevant
474 1 - the old ABI has been used
475 2 - a vector type argument has been passed either in a vector register
476 or on the stack by value */
477 static int s390_vector_abi = 0;
478
479 /* Set the vector ABI marker if TYPE is subject to the vector ABI
480 switch. The vector ABI affects only vector data types. There are
481 two aspects of the vector ABI relevant here:
482
483 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
484 ABI and natural alignment with the old.
485
486 2. vectors <= 16 bytes are passed in VRs or by value on the stack
487 with the new ABI but by reference on the stack with the old.
488
489 If ARG_P is true TYPE is used for a function argument or return
490 value. The ABI marker then is set for all vector data types. If
491 ARG_P is false only type 1 vectors are being checked. */
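
/* Illustrative example of the above: for a 16-byte vector type such as
     typedef double v2df __attribute__ ((vector_size (16)));
   the old ABI used the natural 16-byte alignment and passed it by
   reference on the stack, while the new ABI aligns it to 8 bytes and
   passes it in a vector register or by value.  */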
492
493 static void
494 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
495 {
496 static hash_set<const_tree> visited_types_hash;
497
498 if (s390_vector_abi)
499 return;
500
501 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
502 return;
503
504 if (visited_types_hash.contains (type))
505 return;
506
507 visited_types_hash.add (type);
508
509 if (VECTOR_TYPE_P (type))
510 {
511 int type_size = int_size_in_bytes (type);
512
513 /* Outside of arguments only the alignment changes, and this
514 only happens for vector types >= 16 bytes. */
515 if (!arg_p && type_size < 16)
516 return;
517
518 /* In arguments, vector types > 16 bytes are passed as before (GCC
519 never enforced the bigger alignment for arguments which was
520 required by the old vector ABI). However, it might still be
521 ABI relevant due to the changed alignment if it is a struct
522 member. */
523 if (arg_p && type_size > 16 && !in_struct_p)
524 return;
525
526 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
527 }
528 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
529 {
530 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
531 natural alignment there will never be ABI dependent padding
532 in an array type. That's why we do not set in_struct_p to
533 true here. */
534 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
535 }
536 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
537 {
538 tree arg_chain;
539
540 /* Check the return type. */
541 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
542
543 for (arg_chain = TYPE_ARG_TYPES (type);
544 arg_chain;
545 arg_chain = TREE_CHAIN (arg_chain))
546 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
547 }
548 else if (RECORD_OR_UNION_TYPE_P (type))
549 {
550 tree field;
551
552 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
553 {
554 if (TREE_CODE (field) != FIELD_DECL)
555 continue;
556
557 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
558 }
559 }
560 }
561
562
563 /* System z builtins. */
564
565 #include "s390-builtins.h"
566
567 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
568 {
569 #undef B_DEF
570 #undef OB_DEF
571 #undef OB_DEF_VAR
572 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
573 #define OB_DEF(...)
574 #define OB_DEF_VAR(...)
575 #include "s390-builtins.def"
576 0
577 };
578
579 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
580 {
581 #undef B_DEF
582 #undef OB_DEF
583 #undef OB_DEF_VAR
584 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
585 #define OB_DEF(...)
586 #define OB_DEF_VAR(...)
587 #include "s390-builtins.def"
588 0
589 };
590
591 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
592 {
593 #undef B_DEF
594 #undef OB_DEF
595 #undef OB_DEF_VAR
596 #define B_DEF(...)
597 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
598 #define OB_DEF_VAR(...)
599 #include "s390-builtins.def"
600 0
601 };
602
603 const unsigned int
604 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
605 {
606 #undef B_DEF
607 #undef OB_DEF
608 #undef OB_DEF_VAR
609 #define B_DEF(...)
610 #define OB_DEF(...)
611 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
612 #include "s390-builtins.def"
613 0
614 };
615
616 tree s390_builtin_types[BT_MAX];
617 tree s390_builtin_fn_types[BT_FN_MAX];
618 tree s390_builtin_decls[S390_BUILTIN_MAX +
619 S390_OVERLOADED_BUILTIN_MAX +
620 S390_OVERLOADED_BUILTIN_VAR_MAX];
621
622 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
623 #undef B_DEF
624 #undef OB_DEF
625 #undef OB_DEF_VAR
626 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
627 #define OB_DEF(...)
628 #define OB_DEF_VAR(...)
629
630 #include "s390-builtins.def"
631 CODE_FOR_nothing
632 };
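
/* Sketch of how the generator macros above cooperate (the entry shown is
   hypothetical; the real ones live in s390-builtins.def): a line like
     B_DEF (s390_foo, foo_pattern, 0, B_VX, 0, BT_FN_INT)
   contributes B_VX to bflags_builtin, 0 to opflags_builtin and
   CODE_FOR_foo_pattern to code_for_builtin, all in the position of the
   S390_BUILTIN_s390_foo enumerator generated from the same file, and
   s390_init_builtins below registers it as __builtin_s390_foo.  */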
633
634 static void
635 s390_init_builtins (void)
636 {
637 /* These definitions are being used in s390-builtins.def. */
638 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
639 NULL, NULL);
640 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
641 tree c_uint64_type_node;
642
643 /* The uint64_type_node from tree.c is not compatible with the C99
644 uint64_t data type. What we want is c_uint64_type_node from
645 c-common.c. But since backend code is not supposed to interface
646 with the frontend we recreate it here. */
647 if (TARGET_64BIT)
648 c_uint64_type_node = long_unsigned_type_node;
649 else
650 c_uint64_type_node = long_long_unsigned_type_node;
651
652 #undef DEF_TYPE
653 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
654 if (s390_builtin_types[INDEX] == NULL) \
655 s390_builtin_types[INDEX] = (!CONST_P) ? \
656 (NODE) : build_type_variant ((NODE), 1, 0);
657
658 #undef DEF_POINTER_TYPE
659 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
660 if (s390_builtin_types[INDEX] == NULL) \
661 s390_builtin_types[INDEX] = \
662 build_pointer_type (s390_builtin_types[INDEX_BASE]);
663
664 #undef DEF_DISTINCT_TYPE
665 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
666 if (s390_builtin_types[INDEX] == NULL) \
667 s390_builtin_types[INDEX] = \
668 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
669
670 #undef DEF_VECTOR_TYPE
671 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
672 if (s390_builtin_types[INDEX] == NULL) \
673 s390_builtin_types[INDEX] = \
674 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
675
676 #undef DEF_OPAQUE_VECTOR_TYPE
677 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
678 if (s390_builtin_types[INDEX] == NULL) \
679 s390_builtin_types[INDEX] = \
680 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
681
682 #undef DEF_FN_TYPE
683 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
684 if (s390_builtin_fn_types[INDEX] == NULL) \
685 s390_builtin_fn_types[INDEX] = \
686 build_function_type_list (args, NULL_TREE);
687 #undef DEF_OV_TYPE
688 #define DEF_OV_TYPE(...)
689 #include "s390-builtin-types.def"
690
691 #undef B_DEF
692 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
693 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
694 s390_builtin_decls[S390_BUILTIN_##NAME] = \
695 add_builtin_function ("__builtin_" #NAME, \
696 s390_builtin_fn_types[FNTYPE], \
697 S390_BUILTIN_##NAME, \
698 BUILT_IN_MD, \
699 NULL, \
700 ATTRS);
701 #undef OB_DEF
702 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
703 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
704 == NULL) \
705 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
706 add_builtin_function ("__builtin_" #NAME, \
707 s390_builtin_fn_types[FNTYPE], \
708 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
709 BUILT_IN_MD, \
710 NULL, \
711 0);
712 #undef OB_DEF_VAR
713 #define OB_DEF_VAR(...)
714 #include "s390-builtins.def"
715
716 }
717
718 /* Return true if ARG is appropriate as argument number ARGNUM of
719 builtin DECL. The operand flags from s390-builtins.def have to
720 be passed as OP_FLAGS. */
721 bool
722 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
723 {
724 if (O_UIMM_P (op_flags))
725 {
726 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
727 int bitwidth = bitwidths[op_flags - O_U1];
728
729 if (!tree_fits_uhwi_p (arg)
730 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
731 {
732 error ("constant argument %d for builtin %qF is out of range (0.."
733 HOST_WIDE_INT_PRINT_UNSIGNED ")",
734 argnum, decl,
735 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
736 return false;
737 }
738 }
739
740 if (O_SIMM_P (op_flags))
741 {
742 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
743 int bitwidth = bitwidths[op_flags - O_S2];
744
745 if (!tree_fits_shwi_p (arg)
746 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
747 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
748 {
749 error ("constant argument %d for builtin %qF is out of range ("
750 HOST_WIDE_INT_PRINT_DEC ".."
751 HOST_WIDE_INT_PRINT_DEC ")",
752 argnum, decl,
753 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
754 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
755 return false;
756 }
757 }
758 return true;
759 }
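
/* Illustrative example: an operand flagged as O_U4 in s390-builtins.def
   maps to a bitwidth of 4 above, so the accepted range is 0..15; passing
   16 for such an argument triggers the "out of range" diagnostic and
   makes the function return false.  */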
760
761 /* Expand an expression EXP that calls a built-in function,
762 with result going to TARGET if that's convenient
763 (and in mode MODE if that's convenient).
764 SUBTARGET may be used as the target for computing one of EXP's operands.
765 IGNORE is nonzero if the value is to be ignored. */
766
767 static rtx
768 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
769 machine_mode mode ATTRIBUTE_UNUSED,
770 int ignore ATTRIBUTE_UNUSED)
771 {
772 #define MAX_ARGS 5
773
774 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
775 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
776 enum insn_code icode;
777 rtx op[MAX_ARGS], pat;
778 int arity;
779 bool nonvoid;
780 tree arg;
781 call_expr_arg_iterator iter;
782 unsigned int all_op_flags = opflags_for_builtin (fcode);
783 machine_mode last_vec_mode = VOIDmode;
784
785 if (TARGET_DEBUG_ARG)
786 {
787 fprintf (stderr,
788 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
789 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
790 bflags_for_builtin (fcode));
791 }
792
793 if (S390_USE_TARGET_ATTRIBUTE)
794 {
795 unsigned int bflags;
796
797 bflags = bflags_for_builtin (fcode);
798 if ((bflags & B_HTM) && !TARGET_HTM)
799 {
800 error ("Builtin %qF is not supported without -mhtm "
801 "(default with -march=zEC12 and higher).", fndecl);
802 return const0_rtx;
803 }
804 if ((bflags & B_VX) && !TARGET_VX)
805 {
806 error ("Builtin %qF is not supported without -mvx "
807 "(default with -march=z13 and higher).", fndecl);
808 return const0_rtx;
809 }
810 }
811 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
812 && fcode < S390_ALL_BUILTIN_MAX)
813 {
814 gcc_unreachable ();
815 }
816 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
817 {
818 icode = code_for_builtin[fcode];
819 /* Set a flag in the machine specific cfun part in order to support
820 saving/restoring of FPRs. */
821 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
822 cfun->machine->tbegin_p = true;
823 }
824 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
825 {
826 error ("Unresolved overloaded builtin");
827 return const0_rtx;
828 }
829 else
830 internal_error ("bad builtin fcode");
831
832 if (icode == 0)
833 internal_error ("bad builtin icode");
834
835 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
836
837 if (nonvoid)
838 {
839 machine_mode tmode = insn_data[icode].operand[0].mode;
840 if (!target
841 || GET_MODE (target) != tmode
842 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
843 target = gen_reg_rtx (tmode);
844
845 /* There are builtins (e.g. vec_promote) with no vector
846 arguments but an element selector. So we have to also look
847 at the vector return type when emitting the modulo
848 operation. */
849 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
850 last_vec_mode = insn_data[icode].operand[0].mode;
851 }
852
853 arity = 0;
854 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
855 {
856 const struct insn_operand_data *insn_op;
857 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
858
859 all_op_flags = all_op_flags >> O_SHIFT;
860
861 if (arg == error_mark_node)
862 return NULL_RTX;
863 if (arity >= MAX_ARGS)
864 return NULL_RTX;
865
866 if (O_IMM_P (op_flags)
867 && TREE_CODE (arg) != INTEGER_CST)
868 {
869 error ("constant value required for builtin %qF argument %d",
870 fndecl, arity + 1);
871 return const0_rtx;
872 }
873
874 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
875 return const0_rtx;
876
877 insn_op = &insn_data[icode].operand[arity + nonvoid];
878 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
879
880 /* expand_expr truncates constants to the target mode only if it
881 is "convenient". However, our checks below rely on this
882 being done. */
883 if (CONST_INT_P (op[arity])
884 && SCALAR_INT_MODE_P (insn_op->mode)
885 && GET_MODE (op[arity]) != insn_op->mode)
886 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
887 insn_op->mode));
888
889 /* Wrap the expanded RTX for pointer types into a MEM expr with
890 the proper mode. This allows us to use e.g. (match_operand
891 "memory_operand"..) in the insn patterns instead of (mem
892 (match_operand "address_operand")). This is helpful for
893 patterns not just accepting MEMs. */
894 if (POINTER_TYPE_P (TREE_TYPE (arg))
895 && insn_op->predicate != address_operand)
896 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
897
898 /* Expand the modulo operation required on element selectors. */
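/* For example, with a V4SI vector last_vec_mode has four elements, so
   the selector is ANDed with 3 below; an out-of-range selector such as
   5 silently wraps to element 1.  */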
899 if (op_flags == O_ELEM)
900 {
901 gcc_assert (last_vec_mode != VOIDmode);
902 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
903 op[arity],
904 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
905 NULL_RTX, 1, OPTAB_DIRECT);
906 }
907
908 /* Record the vector mode used for an element selector. This assumes:
909 1. There is no builtin with two different vector modes and an element selector
910 2. The element selector comes after the vector type it is referring to.
911 This is currently true for all the builtins, but FIXME: we
912 should check for that explicitly. */
913 if (VECTOR_MODE_P (insn_op->mode))
914 last_vec_mode = insn_op->mode;
915
916 if (insn_op->predicate (op[arity], insn_op->mode))
917 {
918 arity++;
919 continue;
920 }
921
922 if (MEM_P (op[arity])
923 && insn_op->predicate == memory_operand
924 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
925 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
926 {
927 op[arity] = replace_equiv_address (op[arity],
928 copy_to_mode_reg (Pmode,
929 XEXP (op[arity], 0)));
930 }
931 else if (GET_MODE (op[arity]) == insn_op->mode
932 || GET_MODE (op[arity]) == VOIDmode
933 || (insn_op->predicate == address_operand
934 && GET_MODE (op[arity]) == Pmode))
935 {
936 /* An address_operand usually has VOIDmode in the expander
937 so we cannot use this. */
938 machine_mode target_mode =
939 (insn_op->predicate == address_operand
940 ? Pmode : insn_op->mode);
941 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
942 }
943
944 if (!insn_op->predicate (op[arity], insn_op->mode))
945 {
946 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
947 return const0_rtx;
948 }
949 arity++;
950 }
951
952 switch (arity)
953 {
954 case 0:
955 pat = GEN_FCN (icode) (target);
956 break;
957 case 1:
958 if (nonvoid)
959 pat = GEN_FCN (icode) (target, op[0]);
960 else
961 pat = GEN_FCN (icode) (op[0]);
962 break;
963 case 2:
964 if (nonvoid)
965 pat = GEN_FCN (icode) (target, op[0], op[1]);
966 else
967 pat = GEN_FCN (icode) (op[0], op[1]);
968 break;
969 case 3:
970 if (nonvoid)
971 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
972 else
973 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
974 break;
975 case 4:
976 if (nonvoid)
977 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
978 else
979 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
980 break;
981 case 5:
982 if (nonvoid)
983 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
984 else
985 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
986 break;
987 case 6:
988 if (nonvoid)
989 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
990 else
991 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
992 break;
993 default:
994 gcc_unreachable ();
995 }
996 if (!pat)
997 return NULL_RTX;
998 emit_insn (pat);
999
1000 if (nonvoid)
1001 return target;
1002 else
1003 return const0_rtx;
1004 }
1005
1006
1007 static const int s390_hotpatch_hw_max = 1000000;
1008 static int s390_hotpatch_hw_before_label = 0;
1009 static int s390_hotpatch_hw_after_label = 0;
1010
1011 /* Check whether the hotpatch attribute is applied to a function and, if it has
1012 arguments, whether the arguments are valid. */
1013
1014 static tree
1015 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1016 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1017 {
1018 tree expr;
1019 tree expr2;
1020 int err;
1021
1022 if (TREE_CODE (*node) != FUNCTION_DECL)
1023 {
1024 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1025 name);
1026 *no_add_attrs = true;
1027 }
1028 if (args != NULL && TREE_CHAIN (args) != NULL)
1029 {
1030 expr = TREE_VALUE (args);
1031 expr2 = TREE_VALUE (TREE_CHAIN (args));
1032 }
1033 if (args == NULL || TREE_CHAIN (args) == NULL)
1034 err = 1;
1035 else if (TREE_CODE (expr) != INTEGER_CST
1036 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1037 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1038 err = 1;
1039 else if (TREE_CODE (expr2) != INTEGER_CST
1040 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1041 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1042 err = 1;
1043 else
1044 err = 0;
1045 if (err)
1046 {
1047 error ("requested %qE attribute is not a comma separated pair of"
1048 " non-negative integer constants or too large (max. %d)", name,
1049 s390_hotpatch_hw_max);
1050 *no_add_attrs = true;
1051 }
1052
1053 return NULL_TREE;
1054 }
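
/* Usage example (illustrative only):
     void foo (void) __attribute__ ((hotpatch (1, 2)));
   requests one halfword of hotpatching space before and two halfwords
   after the function label of foo; both values are checked against
   s390_hotpatch_hw_max above.  */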
1055
1056 /* Expand the s390_vector_bool type attribute. */
1057
1058 static tree
1059 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1060 tree args ATTRIBUTE_UNUSED,
1061 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1062 {
1063 tree type = *node, result = NULL_TREE;
1064 machine_mode mode;
1065
1066 while (POINTER_TYPE_P (type)
1067 || TREE_CODE (type) == FUNCTION_TYPE
1068 || TREE_CODE (type) == METHOD_TYPE
1069 || TREE_CODE (type) == ARRAY_TYPE)
1070 type = TREE_TYPE (type);
1071
1072 mode = TYPE_MODE (type);
1073 switch (mode)
1074 {
1075 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1076 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1077 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1078 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1079 default: break;
1080 }
1081
1082 *no_add_attrs = true; /* No need to hang on to the attribute. */
1083
1084 if (result)
1085 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1086
1087 return NULL_TREE;
1088 }
1089
1090 static const struct attribute_spec s390_attribute_table[] = {
1091 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1092 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1093 /* End element. */
1094 { NULL, 0, 0, false, false, false, NULL, false }
1095 };
1096
1097 /* Return the alignment for LABEL. We default to the -falign-labels
1098 value except for the literal pool base label. */
1099 int
1100 s390_label_align (rtx label)
1101 {
1102 rtx_insn *prev_insn = prev_active_insn (label);
1103 rtx set, src;
1104
1105 if (prev_insn == NULL_RTX)
1106 goto old;
1107
1108 set = single_set (prev_insn);
1109
1110 if (set == NULL_RTX)
1111 goto old;
1112
1113 src = SET_SRC (set);
1114
1115 /* Don't align literal pool base labels. */
1116 if (GET_CODE (src) == UNSPEC
1117 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1118 return 0;
1119
1120 old:
1121 return align_labels_log;
1122 }
1123
1124 static machine_mode
1125 s390_libgcc_cmp_return_mode (void)
1126 {
1127 return TARGET_64BIT ? DImode : SImode;
1128 }
1129
1130 static machine_mode
1131 s390_libgcc_shift_count_mode (void)
1132 {
1133 return TARGET_64BIT ? DImode : SImode;
1134 }
1135
1136 static machine_mode
1137 s390_unwind_word_mode (void)
1138 {
1139 return TARGET_64BIT ? DImode : SImode;
1140 }
1141
1142 /* Return true if the back end supports mode MODE. */
1143 static bool
1144 s390_scalar_mode_supported_p (machine_mode mode)
1145 {
1146 /* In contrast to the default implementation reject TImode constants on 31bit
1147 TARGET_ZARCH for ABI compliance. */
1148 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1149 return false;
1150
1151 if (DECIMAL_FLOAT_MODE_P (mode))
1152 return default_decimal_float_supported_p ();
1153
1154 return default_scalar_mode_supported_p (mode);
1155 }
1156
1157 /* Return true if the back end supports vector mode MODE. */
1158 static bool
1159 s390_vector_mode_supported_p (machine_mode mode)
1160 {
1161 machine_mode inner;
1162
1163 if (!VECTOR_MODE_P (mode)
1164 || !TARGET_VX
1165 || GET_MODE_SIZE (mode) > 16)
1166 return false;
1167
1168 inner = GET_MODE_INNER (mode);
1169
1170 switch (inner)
1171 {
1172 case QImode:
1173 case HImode:
1174 case SImode:
1175 case DImode:
1176 case TImode:
1177 case SFmode:
1178 case DFmode:
1179 case TFmode:
1180 return true;
1181 default:
1182 return false;
1183 }
1184 }
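
/* For instance, with -mvx a 16-byte mode such as V4SImode or V2DFmode is
   reported as supported, while a 32-byte mode like V4DImode is rejected
   by the size check above.  */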
1185
1186 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1187
1188 void
1189 s390_set_has_landing_pad_p (bool value)
1190 {
1191 cfun->machine->has_landing_pad_p = value;
1192 }
1193
1194 /* If two condition code modes are compatible, return a condition code
1195 mode which is compatible with both. Otherwise, return
1196 VOIDmode. */
1197
1198 static machine_mode
1199 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1200 {
1201 if (m1 == m2)
1202 return m1;
1203
1204 switch (m1)
1205 {
1206 case CCZmode:
1207 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1208 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1209 return m2;
1210 return VOIDmode;
1211
1212 case CCSmode:
1213 case CCUmode:
1214 case CCTmode:
1215 case CCSRmode:
1216 case CCURmode:
1217 case CCZ1mode:
1218 if (m2 == CCZmode)
1219 return m1;
1220
1221 return VOIDmode;
1222
1223 default:
1224 return VOIDmode;
1225 }
1226 return VOIDmode;
1227 }
1228
1229 /* Return true if SET either doesn't set the CC register, or else
1230 the source and destination have matching CC modes and that
1231 CC mode is at least as constrained as REQ_MODE. */
1232
1233 static bool
1234 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1235 {
1236 machine_mode set_mode;
1237
1238 gcc_assert (GET_CODE (set) == SET);
1239
1240 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1241 return 1;
1242
1243 set_mode = GET_MODE (SET_DEST (set));
1244 switch (set_mode)
1245 {
1246 case CCSmode:
1247 case CCSRmode:
1248 case CCUmode:
1249 case CCURmode:
1250 case CCLmode:
1251 case CCL1mode:
1252 case CCL2mode:
1253 case CCL3mode:
1254 case CCT1mode:
1255 case CCT2mode:
1256 case CCT3mode:
1257 case CCVEQmode:
1258 case CCVHmode:
1259 case CCVHUmode:
1260 case CCVFHmode:
1261 case CCVFHEmode:
1262 if (req_mode != set_mode)
1263 return 0;
1264 break;
1265
1266 case CCZmode:
1267 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1268 && req_mode != CCSRmode && req_mode != CCURmode)
1269 return 0;
1270 break;
1271
1272 case CCAPmode:
1273 case CCANmode:
1274 if (req_mode != CCAmode)
1275 return 0;
1276 break;
1277
1278 default:
1279 gcc_unreachable ();
1280 }
1281
1282 return (GET_MODE (SET_SRC (set)) == set_mode);
1283 }
1284
1285 /* Return true if every SET in INSN that sets the CC register
1286 has source and destination with matching CC modes and that
1287 CC mode is at least as constrained as REQ_MODE.
1288 If REQ_MODE is VOIDmode, always return false. */
1289
1290 bool
1291 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1292 {
1293 int i;
1294
1295 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1296 if (req_mode == VOIDmode)
1297 return false;
1298
1299 if (GET_CODE (PATTERN (insn)) == SET)
1300 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1301
1302 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1303 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1304 {
1305 rtx set = XVECEXP (PATTERN (insn), 0, i);
1306 if (GET_CODE (set) == SET)
1307 if (!s390_match_ccmode_set (set, req_mode))
1308 return false;
1309 }
1310
1311 return true;
1312 }
1313
1314 /* If a test-under-mask instruction can be used to implement
1315 (compare (and ... OP1) OP2), return the CC mode required
1316 to do that. Otherwise, return VOIDmode.
1317 MIXED is true if the instruction can distinguish between
1318 CC1 and CC2 for mixed selected bits (TMxx), it is false
1319 if the instruction cannot (TM). */
1320
1321 machine_mode
1322 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1323 {
1324 int bit0, bit1;
1325
1326 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1327 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1328 return VOIDmode;
1329
1330 /* Selected bits all zero: CC0.
1331 e.g.: int a; if ((a & (16 + 128)) == 0) */
1332 if (INTVAL (op2) == 0)
1333 return CCTmode;
1334
1335 /* Selected bits all one: CC3.
1336 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1337 if (INTVAL (op2) == INTVAL (op1))
1338 return CCT3mode;
1339
1340 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1341 int a;
1342 if ((a & (16 + 128)) == 16) -> CCT1
1343 if ((a & (16 + 128)) == 128) -> CCT2 */
1344 if (mixed)
1345 {
1346 bit1 = exact_log2 (INTVAL (op2));
1347 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1348 if (bit0 != -1 && bit1 != -1)
1349 return bit0 > bit1 ? CCT1mode : CCT2mode;
1350 }
1351
1352 return VOIDmode;
1353 }
1354
1355 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1356 OP0 and OP1 of a COMPARE, return the mode to be used for the
1357 comparison. */
1358
1359 machine_mode
1360 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1361 {
1362 if (TARGET_VX
1363 && register_operand (op0, DFmode)
1364 && register_operand (op1, DFmode))
1365 {
1366 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1367 s390_emit_compare or s390_canonicalize_comparison will take
1368 care of it. */
1369 switch (code)
1370 {
1371 case EQ:
1372 case NE:
1373 return CCVEQmode;
1374 case GT:
1375 case UNLE:
1376 return CCVFHmode;
1377 case GE:
1378 case UNLT:
1379 return CCVFHEmode;
1380 default:
1381 ;
1382 }
1383 }
1384
1385 switch (code)
1386 {
1387 case EQ:
1388 case NE:
1389 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1390 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1391 return CCAPmode;
1392 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1393 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1394 return CCAPmode;
1395 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1396 || GET_CODE (op1) == NEG)
1397 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1398 return CCLmode;
1399
1400 if (GET_CODE (op0) == AND)
1401 {
1402 /* Check whether we can potentially do it via TM. */
1403 machine_mode ccmode;
1404 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1405 if (ccmode != VOIDmode)
1406 {
1407 /* Relax CCTmode to CCZmode to allow fall-back to AND
1408 if that turns out to be beneficial. */
1409 return ccmode == CCTmode ? CCZmode : ccmode;
1410 }
1411 }
1412
1413 if (register_operand (op0, HImode)
1414 && GET_CODE (op1) == CONST_INT
1415 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1416 return CCT3mode;
1417 if (register_operand (op0, QImode)
1418 && GET_CODE (op1) == CONST_INT
1419 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1420 return CCT3mode;
1421
1422 return CCZmode;
1423
1424 case LE:
1425 case LT:
1426 case GE:
1427 case GT:
1428 /* The only overflow condition of NEG and ABS happens when
1429 the operand is INT_MIN (-INT_MAX - 1): the mathematically
1430 positive result cannot be represented and stays negative.
1431 Using CCAP mode the resulting cc can be used for comparisons. */
1432 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1433 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1434 return CCAPmode;
1435
1436 /* If constants are involved in an add instruction it is possible to use
1437 the resulting cc for comparisons with zero. Knowing the sign of the
1438 constant the overflow behavior gets predictable. e.g.:
1439 int a, b; if ((b = a + c) > 0)
1440 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1441 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1442 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1443 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1444 /* Avoid INT32_MIN on 32 bit. */
1445 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1446 {
1447 if (INTVAL (XEXP((op0), 1)) < 0)
1448 return CCANmode;
1449 else
1450 return CCAPmode;
1451 }
1452 /* Fall through. */
1453 case UNORDERED:
1454 case ORDERED:
1455 case UNEQ:
1456 case UNLE:
1457 case UNLT:
1458 case UNGE:
1459 case UNGT:
1460 case LTGT:
1461 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1462 && GET_CODE (op1) != CONST_INT)
1463 return CCSRmode;
1464 return CCSmode;
1465
1466 case LTU:
1467 case GEU:
1468 if (GET_CODE (op0) == PLUS
1469 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1470 return CCL1mode;
1471
1472 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1473 && GET_CODE (op1) != CONST_INT)
1474 return CCURmode;
1475 return CCUmode;
1476
1477 case LEU:
1478 case GTU:
1479 if (GET_CODE (op0) == MINUS
1480 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1481 return CCL2mode;
1482
1483 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1484 && GET_CODE (op1) != CONST_INT)
1485 return CCURmode;
1486 return CCUmode;
1487
1488 default:
1489 gcc_unreachable ();
1490 }
1491 }
1492
1493 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1494 that we can implement more efficiently. */
1495
1496 static void
1497 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1498 bool op0_preserve_value)
1499 {
1500 if (op0_preserve_value)
1501 return;
1502
1503 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
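/* E.g. a test like (zero_extract:SI (reg) (const_int 2) (const_int 4))
   compared against zero becomes (and:SI (reg) (const_int mask)) with a
   mask covering exactly those two bits, which the TM patterns can
   match.  */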
1504 if ((*code == EQ || *code == NE)
1505 && *op1 == const0_rtx
1506 && GET_CODE (*op0) == ZERO_EXTRACT
1507 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1508 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1509 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1510 {
1511 rtx inner = XEXP (*op0, 0);
1512 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1513 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1514 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1515
1516 if (len > 0 && len < modesize
1517 && pos >= 0 && pos + len <= modesize
1518 && modesize <= HOST_BITS_PER_WIDE_INT)
1519 {
1520 unsigned HOST_WIDE_INT block;
1521 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1522 block <<= modesize - pos - len;
1523
1524 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1525 gen_int_mode (block, GET_MODE (inner)));
1526 }
1527 }
1528
1529 /* Narrow AND of memory against immediate to enable TM. */
1530 if ((*code == EQ || *code == NE)
1531 && *op1 == const0_rtx
1532 && GET_CODE (*op0) == AND
1533 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1534 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1535 {
1536 rtx inner = XEXP (*op0, 0);
1537 rtx mask = XEXP (*op0, 1);
1538
1539 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1540 if (GET_CODE (inner) == SUBREG
1541 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1542 && (GET_MODE_SIZE (GET_MODE (inner))
1543 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1544 && ((INTVAL (mask)
1545 & GET_MODE_MASK (GET_MODE (inner))
1546 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1547 == 0))
1548 inner = SUBREG_REG (inner);
1549
1550 /* Do not change volatile MEMs. */
1551 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1552 {
1553 int part = s390_single_part (XEXP (*op0, 1),
1554 GET_MODE (inner), QImode, 0);
1555 if (part >= 0)
1556 {
1557 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1558 inner = adjust_address_nv (inner, QImode, part);
1559 *op0 = gen_rtx_AND (QImode, inner, mask);
1560 }
1561 }
1562 }
1563
1564 /* Narrow comparisons against 0xffff to HImode if possible. */
1565 if ((*code == EQ || *code == NE)
1566 && GET_CODE (*op1) == CONST_INT
1567 && INTVAL (*op1) == 0xffff
1568 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1569 && (nonzero_bits (*op0, GET_MODE (*op0))
1570 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1571 {
1572 *op0 = gen_lowpart (HImode, *op0);
1573 *op1 = constm1_rtx;
1574 }
1575
1576 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1577 if (GET_CODE (*op0) == UNSPEC
1578 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1579 && XVECLEN (*op0, 0) == 1
1580 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1581 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1582 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1583 && *op1 == const0_rtx)
1584 {
1585 enum rtx_code new_code = UNKNOWN;
1586 switch (*code)
1587 {
1588 case EQ: new_code = EQ; break;
1589 case NE: new_code = NE; break;
1590 case LT: new_code = GTU; break;
1591 case GT: new_code = LTU; break;
1592 case LE: new_code = GEU; break;
1593 case GE: new_code = LEU; break;
1594 default: break;
1595 }
1596
1597 if (new_code != UNKNOWN)
1598 {
1599 *op0 = XVECEXP (*op0, 0, 0);
1600 *code = new_code;
1601 }
1602 }
1603
1604 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1605 if (GET_CODE (*op0) == UNSPEC
1606 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1607 && XVECLEN (*op0, 0) == 1
1608 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1609 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1610 && CONST_INT_P (*op1))
1611 {
1612 enum rtx_code new_code = UNKNOWN;
1613 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1614 {
1615 case CCZmode:
1616 case CCRAWmode:
1617 switch (*code)
1618 {
1619 case EQ: new_code = EQ; break;
1620 case NE: new_code = NE; break;
1621 default: break;
1622 }
1623 break;
1624 default: break;
1625 }
1626
1627 if (new_code != UNKNOWN)
1628 {
1629 /* For CCRAWmode put the required cc mask into the second
1630 operand. */
1631 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1632 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1633 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1634 *op0 = XVECEXP (*op0, 0, 0);
1635 *code = new_code;
1636 }
1637 }
1638
1639 /* Simplify cascaded EQ, NE with const0_rtx. */
1640 if ((*code == NE || *code == EQ)
1641 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1642 && GET_MODE (*op0) == SImode
1643 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1644 && REG_P (XEXP (*op0, 0))
1645 && XEXP (*op0, 1) == const0_rtx
1646 && *op1 == const0_rtx)
1647 {
1648 if ((*code == EQ && GET_CODE (*op0) == NE)
1649 || (*code == NE && GET_CODE (*op0) == EQ))
1650 *code = EQ;
1651 else
1652 *code = NE;
1653 *op0 = XEXP (*op0, 0);
1654 }
1655
1656 /* Prefer register over memory as first operand. */
1657 if (MEM_P (*op0) && REG_P (*op1))
1658 {
1659 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1660 *code = (int)swap_condition ((enum rtx_code)*code);
1661 }
1662
1663 /* Using the scalar variants of vector instructions for 64 bit FP
1664 comparisons might require swapping the operands. */
1665 if (TARGET_VX
1666 && register_operand (*op0, DFmode)
1667 && register_operand (*op1, DFmode)
1668 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1669 {
1670 rtx tmp;
1671
1672 switch (*code)
1673 {
1674 case LT: *code = GT; break;
1675 case LE: *code = GE; break;
1676 case UNGT: *code = UNLE; break;
1677 case UNGE: *code = UNLT; break;
1678 default: ;
1679 }
1680 tmp = *op0; *op0 = *op1; *op1 = tmp;
1681 }
1682 }
1683
1684 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1685 FP compare using the single element variant of vector instructions.
1686 Replace CODE with the comparison code to be used in the CC reg
1687 compare and return the condition code register RTX in CC. */
1688
1689 static bool
1690 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1691 rtx *cc)
1692 {
1693 machine_mode cmp_mode;
1694 bool swap_p = false;
1695
1696 switch (*code)
1697 {
1698 case EQ: cmp_mode = CCVEQmode; break;
1699 case NE: cmp_mode = CCVEQmode; break;
1700 case GT: cmp_mode = CCVFHmode; break;
1701 case GE: cmp_mode = CCVFHEmode; break;
1702 case UNLE: cmp_mode = CCVFHmode; break;
1703 case UNLT: cmp_mode = CCVFHEmode; break;
1704 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1705 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1706 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1707 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1708 default: return false;
1709 }
1710
1711 if (swap_p)
1712 {
1713 rtx tmp = cmp2;
1714 cmp2 = cmp1;
1715 cmp1 = tmp;
1716 }
1717 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1718 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1719 gen_rtvec (2,
1720 gen_rtx_SET (*cc,
1721 gen_rtx_COMPARE (cmp_mode, cmp1,
1722 cmp2)),
1723 gen_rtx_CLOBBER (VOIDmode,
1724 gen_rtx_SCRATCH (V2DImode)))));
1725 return true;
1726 }
1727
1728
1729 /* Emit a compare instruction suitable to implement the comparison
1730 OP0 CODE OP1. Return the correct condition RTL to be placed in
1731 the IF_THEN_ELSE of the conditional branch testing the result. */
1732
1733 rtx
1734 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1735 {
1736 machine_mode mode = s390_select_ccmode (code, op0, op1);
1737 rtx cc;
1738
1739 if (TARGET_VX
1740 && register_operand (op0, DFmode)
1741 && register_operand (op1, DFmode)
1742 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1743 {
1744 /* Work has been done by s390_expand_vec_compare_scalar already. */
1745 }
1746 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1747 {
1748 /* Do not output a redundant compare instruction if a
1749 compare_and_swap pattern already computed the result and the
1750 machine modes are compatible. */
1751 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1752 == GET_MODE (op0));
1753 cc = op0;
1754 }
1755 else
1756 {
1757 cc = gen_rtx_REG (mode, CC_REGNUM);
1758 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1759 }
1760
1761 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1762 }
1763
1764 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1765 matches CMP.
1766 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1767 conditional branch testing the result. */
1768
1769 static rtx
1770 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1771 rtx cmp, rtx new_rtx)
1772 {
1773 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1774 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1775 const0_rtx);
1776 }
1777
1778 /* Emit a jump instruction to TARGET and return it. If COND is
1779 NULL_RTX, emit an unconditional jump, else a conditional jump under
1780 condition COND. */
1781
1782 rtx_insn *
1783 s390_emit_jump (rtx target, rtx cond)
1784 {
1785 rtx insn;
1786
1787 target = gen_rtx_LABEL_REF (VOIDmode, target);
1788 if (cond)
1789 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1790
1791 insn = gen_rtx_SET (pc_rtx, target);
1792 return emit_jump_insn (insn);
1793 }
1794
1795 /* Return branch condition mask to implement a branch
1796 specified by CODE. Return -1 for invalid comparisons. */
1797
1798 int
1799 s390_branch_condition_mask (rtx code)
1800 {
1801 const int CC0 = 1 << 3;
1802 const int CC1 = 1 << 2;
1803 const int CC2 = 1 << 1;
1804 const int CC3 = 1 << 0;
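
/* Example: in CCZmode an EQ test yields CC0, i.e. mask 8, while NE
   yields CC1 | CC2 | CC3 == 7; the value ends up as the condition mask
   of the branch instruction emitted for the jump.  */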
1805
1806 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1807 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1808 gcc_assert (XEXP (code, 1) == const0_rtx
1809 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1810 && CONST_INT_P (XEXP (code, 1))));
1811
1812
1813 switch (GET_MODE (XEXP (code, 0)))
1814 {
1815 case CCZmode:
1816 case CCZ1mode:
1817 switch (GET_CODE (code))
1818 {
1819 case EQ: return CC0;
1820 case NE: return CC1 | CC2 | CC3;
1821 default: return -1;
1822 }
1823 break;
1824
1825 case CCT1mode:
1826 switch (GET_CODE (code))
1827 {
1828 case EQ: return CC1;
1829 case NE: return CC0 | CC2 | CC3;
1830 default: return -1;
1831 }
1832 break;
1833
1834 case CCT2mode:
1835 switch (GET_CODE (code))
1836 {
1837 case EQ: return CC2;
1838 case NE: return CC0 | CC1 | CC3;
1839 default: return -1;
1840 }
1841 break;
1842
1843 case CCT3mode:
1844 switch (GET_CODE (code))
1845 {
1846 case EQ: return CC3;
1847 case NE: return CC0 | CC1 | CC2;
1848 default: return -1;
1849 }
1850 break;
1851
1852 case CCLmode:
1853 switch (GET_CODE (code))
1854 {
1855 case EQ: return CC0 | CC2;
1856 case NE: return CC1 | CC3;
1857 default: return -1;
1858 }
1859 break;
1860
1861 case CCL1mode:
1862 switch (GET_CODE (code))
1863 {
1864 case LTU: return CC2 | CC3; /* carry */
1865 case GEU: return CC0 | CC1; /* no carry */
1866 default: return -1;
1867 }
1868 break;
1869
1870 case CCL2mode:
1871 switch (GET_CODE (code))
1872 {
1873 case GTU: return CC0 | CC1; /* borrow */
1874 case LEU: return CC2 | CC3; /* no borrow */
1875 default: return -1;
1876 }
1877 break;
1878
1879 case CCL3mode:
1880 switch (GET_CODE (code))
1881 {
1882 case EQ: return CC0 | CC2;
1883 case NE: return CC1 | CC3;
1884 case LTU: return CC1;
1885 case GTU: return CC3;
1886 case LEU: return CC1 | CC2;
1887 case GEU: return CC2 | CC3;
1888 default: return -1;
1889 }
1890
1891 case CCUmode:
1892 switch (GET_CODE (code))
1893 {
1894 case EQ: return CC0;
1895 case NE: return CC1 | CC2 | CC3;
1896 case LTU: return CC1;
1897 case GTU: return CC2;
1898 case LEU: return CC0 | CC1;
1899 case GEU: return CC0 | CC2;
1900 default: return -1;
1901 }
1902 break;
1903
1904 case CCURmode:
1905 switch (GET_CODE (code))
1906 {
1907 case EQ: return CC0;
1908 case NE: return CC2 | CC1 | CC3;
1909 case LTU: return CC2;
1910 case GTU: return CC1;
1911 case LEU: return CC0 | CC2;
1912 case GEU: return CC0 | CC1;
1913 default: return -1;
1914 }
1915 break;
1916
1917 case CCAPmode:
1918 switch (GET_CODE (code))
1919 {
1920 case EQ: return CC0;
1921 case NE: return CC1 | CC2 | CC3;
1922 case LT: return CC1 | CC3;
1923 case GT: return CC2;
1924 case LE: return CC0 | CC1 | CC3;
1925 case GE: return CC0 | CC2;
1926 default: return -1;
1927 }
1928 break;
1929
1930 case CCANmode:
1931 switch (GET_CODE (code))
1932 {
1933 case EQ: return CC0;
1934 case NE: return CC1 | CC2 | CC3;
1935 case LT: return CC1;
1936 case GT: return CC2 | CC3;
1937 case LE: return CC0 | CC1;
1938 case GE: return CC0 | CC2 | CC3;
1939 default: return -1;
1940 }
1941 break;
1942
1943 case CCSmode:
1944 switch (GET_CODE (code))
1945 {
1946 case EQ: return CC0;
1947 case NE: return CC1 | CC2 | CC3;
1948 case LT: return CC1;
1949 case GT: return CC2;
1950 case LE: return CC0 | CC1;
1951 case GE: return CC0 | CC2;
1952 case UNORDERED: return CC3;
1953 case ORDERED: return CC0 | CC1 | CC2;
1954 case UNEQ: return CC0 | CC3;
1955 case UNLT: return CC1 | CC3;
1956 case UNGT: return CC2 | CC3;
1957 case UNLE: return CC0 | CC1 | CC3;
1958 case UNGE: return CC0 | CC2 | CC3;
1959 case LTGT: return CC1 | CC2;
1960 default: return -1;
1961 }
1962 break;
1963
1964 case CCSRmode:
1965 switch (GET_CODE (code))
1966 {
1967 case EQ: return CC0;
1968 case NE: return CC2 | CC1 | CC3;
1969 case LT: return CC2;
1970 case GT: return CC1;
1971 case LE: return CC0 | CC2;
1972 case GE: return CC0 | CC1;
1973 case UNORDERED: return CC3;
1974 case ORDERED: return CC0 | CC2 | CC1;
1975 case UNEQ: return CC0 | CC3;
1976 case UNLT: return CC2 | CC3;
1977 case UNGT: return CC1 | CC3;
1978 case UNLE: return CC0 | CC2 | CC3;
1979 case UNGE: return CC0 | CC1 | CC3;
1980 case LTGT: return CC2 | CC1;
1981 default: return -1;
1982 }
1983 break;
1984
1985 /* Vector comparison modes. */
1986
1987 case CCVEQmode:
1988 switch (GET_CODE (code))
1989 {
1990 case EQ: return CC0;
1991 case NE: return CC3;
1992 default: return -1;
1993 }
1994
1995 case CCVEQANYmode:
1996 switch (GET_CODE (code))
1997 {
1998 case EQ: return CC0 | CC1;
1999 case NE: return CC3 | CC1;
2000 default: return -1;
2001 }
2002
2003 /* Integer vector compare modes. */
2004
2005 case CCVHmode:
2006 switch (GET_CODE (code))
2007 {
2008 case GT: return CC0;
2009 case LE: return CC3;
2010 default: return -1;
2011 }
2012
2013 case CCVHANYmode:
2014 switch (GET_CODE (code))
2015 {
2016 case GT: return CC0 | CC1;
2017 case LE: return CC3 | CC1;
2018 default: return -1;
2019 }
2020
2021 case CCVHUmode:
2022 switch (GET_CODE (code))
2023 {
2024 case GTU: return CC0;
2025 case LEU: return CC3;
2026 default: return -1;
2027 }
2028
2029 case CCVHUANYmode:
2030 switch (GET_CODE (code))
2031 {
2032 case GTU: return CC0 | CC1;
2033 case LEU: return CC3 | CC1;
2034 default: return -1;
2035 }
2036
2037 /* FP vector compare modes. */
2038
2039 case CCVFHmode:
2040 switch (GET_CODE (code))
2041 {
2042 case GT: return CC0;
2043 case UNLE: return CC3;
2044 default: return -1;
2045 }
2046
2047 case CCVFHANYmode:
2048 switch (GET_CODE (code))
2049 {
2050 case GT: return CC0 | CC1;
2051 case UNLE: return CC3 | CC1;
2052 default: return -1;
2053 }
2054
2055 case CCVFHEmode:
2056 switch (GET_CODE (code))
2057 {
2058 case GE: return CC0;
2059 case UNLT: return CC3;
2060 default: return -1;
2061 }
2062
2063 case CCVFHEANYmode:
2064 switch (GET_CODE (code))
2065 {
2066 case GE: return CC0 | CC1;
2067 case UNLT: return CC3 | CC1;
2068 default: return -1;
2069 }
2070
2071
2072 case CCRAWmode:
2073 switch (GET_CODE (code))
2074 {
2075 case EQ:
2076 return INTVAL (XEXP (code, 1));
2077 case NE:
2078 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2079 default:
2080 gcc_unreachable ();
2081 }
2082
2083 default:
2084 return -1;
2085 }
2086 }
2087
2088
2089 /* Return branch condition mask to implement a compare and branch
2090 specified by CODE. Return -1 for invalid comparisons. */
2091
2092 int
2093 s390_compare_and_branch_condition_mask (rtx code)
2094 {
2095 const int CC0 = 1 << 3;
2096 const int CC1 = 1 << 2;
2097 const int CC2 = 1 << 1;
2098
2099 switch (GET_CODE (code))
2100 {
2101 case EQ:
2102 return CC0;
2103 case NE:
2104 return CC1 | CC2;
2105 case LT:
2106 case LTU:
2107 return CC1;
2108 case GT:
2109 case GTU:
2110 return CC2;
2111 case LE:
2112 case LEU:
2113 return CC0 | CC1;
2114 case GE:
2115 case GEU:
2116 return CC0 | CC2;
2117 default:
2118 gcc_unreachable ();
2119 }
2120 return -1;
2121 }
2122
2123 /* If INV is false, return assembler mnemonic string to implement
2124 a branch specified by CODE. If INV is true, return mnemonic
2125 for the corresponding inverted branch. */
2126
2127 static const char *
2128 s390_branch_condition_mnemonic (rtx code, int inv)
2129 {
2130 int mask;
2131
2132 static const char *const mnemonic[16] =
2133 {
2134 NULL, "o", "h", "nle",
2135 "l", "nhe", "lh", "ne",
2136 "e", "nlh", "he", "nl",
2137 "le", "nh", "no", NULL
2138 };
2139
2140 if (GET_CODE (XEXP (code, 0)) == REG
2141 && REGNO (XEXP (code, 0)) == CC_REGNUM
2142 && (XEXP (code, 1) == const0_rtx
2143 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2144 && CONST_INT_P (XEXP (code, 1)))))
2145 mask = s390_branch_condition_mask (code);
2146 else
2147 mask = s390_compare_and_branch_condition_mask (code);
2148
2149 gcc_assert (mask >= 0);
2150
2151 if (inv)
2152 mask ^= 15;
2153
2154 gcc_assert (mask >= 1 && mask <= 14);
2155
2156 return mnemonic[mask];
2157 }
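
/* Example (values follow from the mask and mnemonic tables above): an EQ
   test on a CCUmode CC register yields the mask CC0 (0x8), which maps to
   the mnemonic "e"; with INV set the mask becomes 0x7 and the mnemonic
   "ne".  */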
2158
2159 /* Return the part of OP which has a value different from DEF.
2160 The size of the part is determined by MODE.
2161 Use this function only if you already know that OP really
2162 contains such a part. */
2163
2164 unsigned HOST_WIDE_INT
2165 s390_extract_part (rtx op, machine_mode mode, int def)
2166 {
2167 unsigned HOST_WIDE_INT value = 0;
2168 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2169 int part_bits = GET_MODE_BITSIZE (mode);
2170 unsigned HOST_WIDE_INT part_mask
2171 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2172 int i;
2173
2174 for (i = 0; i < max_parts; i++)
2175 {
2176 if (i == 0)
2177 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2178 else
2179 value >>= part_bits;
2180
2181 if ((value & part_mask) != (def & part_mask))
2182 return value & part_mask;
2183 }
2184
2185 gcc_unreachable ();
2186 }
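
/* Example: s390_extract_part (GEN_INT (0x12345678), HImode, 0) scans the
   HImode parts starting at the least significant end and returns 0x5678,
   the first part that differs from 0.  */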
2187
2188 /* If OP is an integer constant of mode MODE with exactly one
2189 part of mode PART_MODE unequal to DEF, return the number of that
2190 part. Otherwise, return -1. */
2191
2192 int
2193 s390_single_part (rtx op,
2194 machine_mode mode,
2195 machine_mode part_mode,
2196 int def)
2197 {
2198 unsigned HOST_WIDE_INT value = 0;
2199 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2200 unsigned HOST_WIDE_INT part_mask
2201 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2202 int i, part = -1;
2203
2204 if (GET_CODE (op) != CONST_INT)
2205 return -1;
2206
2207 for (i = 0; i < n_parts; i++)
2208 {
2209 if (i == 0)
2210 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2211 else
2212 value >>= GET_MODE_BITSIZE (part_mode);
2213
2214 if ((value & part_mask) != (def & part_mask))
2215 {
2216 if (part != -1)
2217 return -1;
2218 else
2219 part = i;
2220 }
2221 }
2222 return part == -1 ? -1 : n_parts - 1 - part;
2223 }
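
/* Example: for the SImode constant 0x00120000,
   s390_single_part (op, SImode, HImode, 0) returns 0, because only the
   most significant halfword is nonzero and parts are numbered from the
   most significant part downwards; for 0x00000012 it returns 1.  */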
2224
2225 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2226 bits and no other bits are set in IN. POS and LENGTH can be used
2227 to obtain the start position and the length of the bitfield.
2228
2229 POS gives the position of the first bit of the bitfield counting
2230 from the lowest order bit starting with zero. In order to use this
2231 value for S/390 instructions this has to be converted to "bits big
2232 endian" style. */
2233
2234 bool
2235 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
2236 int *pos, int *length)
2237 {
2238 int tmp_pos = 0;
2239 int tmp_length = 0;
2240 int i;
2241 unsigned HOST_WIDE_INT mask = 1ULL;
2242 bool contiguous = false;
2243
2244 for (i = 0; i < size; mask <<= 1, i++)
2245 {
2246 if (contiguous)
2247 {
2248 if (mask & in)
2249 tmp_length++;
2250 else
2251 break;
2252 }
2253 else
2254 {
2255 if (mask & in)
2256 {
2257 contiguous = true;
2258 tmp_length++;
2259 }
2260 else
2261 tmp_pos++;
2262 }
2263 }
2264
2265 if (!tmp_length)
2266 return false;
2267
2268 /* Calculate a mask for all bits beyond the contiguous bits. */
2269 mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));
2270
2271 if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
2272 mask &= (HOST_WIDE_INT_1U << size) - 1;
2273
2274 if (mask & in)
2275 return false;
2276
2277 if (tmp_length + tmp_pos - 1 > size)
2278 return false;
2279
2280 if (length)
2281 *length = tmp_length;
2282
2283 if (pos)
2284 *pos = tmp_pos;
2285
2286 return true;
2287 }
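
/* Example: for IN = 0x0000ff00 and SIZE = 32 the function returns true
   with *POS = 8 and *LENGTH = 8.  IN = 0x00ff00ff is rejected because
   its set bits form two separate runs.  */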
2288
2289 /* Return true if OP contains the same contiguous bitfield in *all*
2290 its elements. START and END can be used to obtain the start and
2291 end position of the bitfield.
2292
2293 START/END give the position of the first/last bit of the bitfield
2294 counting from the lowest order bit starting with zero. In order to
2295 use these values for S/390 instructions this has to be converted to
2296 "bits big endian" style. */
2297
2298 bool
2299 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2300 {
2301 unsigned HOST_WIDE_INT mask;
2302 int length, size;
2303 rtx elt;
2304
2305 if (!const_vec_duplicate_p (op, &elt)
2306 || !CONST_INT_P (elt))
2307 return false;
2308
2309 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2310
2311 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2312 if (size > 64)
2313 return false;
2314
2315 mask = UINTVAL (elt);
2316 if (s390_contiguous_bitmask_p (mask, size, start,
2317 end != NULL ? &length : NULL))
2318 {
2319 if (end != NULL)
2320 *end = *start + length - 1;
2321 return true;
2322 }
2323 /* 0xff00000f style immediates can be covered by swapping start and
2324 end indices in vgm. */
2325 if (s390_contiguous_bitmask_p (~mask, size, start,
2326 end != NULL ? &length : NULL))
2327 {
2328 if (end != NULL)
2329 *end = *start - 1;
2330 if (start != NULL)
2331 *start = *start + length;
2332 return true;
2333 }
2334 return false;
2335 }
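
/* Example: a constant vector whose 32-bit elements all equal 0xff00000f
   is accepted with *START = 24 and *END = 3; the wrap-around bit range
   is expressed by the swapped indices, as vgm allows.  */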
2336
2337 /* Return true if OP consists only of byte chunks being either 0 or
2338 0xff. If MASK is != NULL, a byte mask is generated which is
2339 appropriate for the vector generate byte mask instruction. */
2340
2341 bool
2342 s390_bytemask_vector_p (rtx op, unsigned *mask)
2343 {
2344 int i;
2345 unsigned tmp_mask = 0;
2346 int nunit, unit_size;
2347
2348 if (!VECTOR_MODE_P (GET_MODE (op))
2349 || GET_CODE (op) != CONST_VECTOR
2350 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2351 return false;
2352
2353 nunit = GET_MODE_NUNITS (GET_MODE (op));
2354 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2355
2356 for (i = 0; i < nunit; i++)
2357 {
2358 unsigned HOST_WIDE_INT c;
2359 int j;
2360
2361 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2362 return false;
2363
2364 c = UINTVAL (XVECEXP (op, 0, i));
2365 for (j = 0; j < unit_size; j++)
2366 {
2367 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2368 return false;
2369 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2370 c = c >> BITS_PER_UNIT;
2371 }
2372 }
2373
2374 if (mask != NULL)
2375 *mask = tmp_mask;
2376
2377 return true;
2378 }
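
/* Example: a V16QImode constant vector with element 0 equal to 0xff and
   all other elements zero yields *MASK = 0x8000; element 0 maps to the
   most significant bit of the generated byte mask.  */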
2379
2380 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2381 equivalent to a shift followed by the AND. In particular, CONTIG
2382 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2383 for ROTL indicate a rotate to the right. */
2384
2385 bool
2386 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2387 {
2388 int pos, len;
2389 bool ok;
2390
2391 ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
2392 gcc_assert (ok);
2393
2394 return ((rotl >= 0 && rotl <= pos)
2395 || (rotl < 0 && -rotl <= bitsize - len - pos));
2396 }
2397
2398 /* Check whether we can (and want to) split a double-word
2399 move in mode MODE from SRC to DST into two single-word
2400 moves, moving the subword FIRST_SUBWORD first. */
2401
2402 bool
2403 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2404 {
2405 /* Floating point and vector registers cannot be split. */
2406 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2407 return false;
2408
2409 /* We don't need to split if operands are directly accessible. */
2410 if (s_operand (src, mode) || s_operand (dst, mode))
2411 return false;
2412
2413 /* Non-offsettable memory references cannot be split. */
2414 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2415 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2416 return false;
2417
2418 /* Moving the first subword must not clobber a register
2419 needed to move the second subword. */
2420 if (register_operand (dst, mode))
2421 {
2422 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2423 if (reg_overlap_mentioned_p (subreg, src))
2424 return false;
2425 }
2426
2427 return true;
2428 }
2429
2430 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2431 and [MEM2, MEM2 + SIZE] overlap, and false
2432 otherwise. */
2433
2434 bool
2435 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2436 {
2437 rtx addr1, addr2, addr_delta;
2438 HOST_WIDE_INT delta;
2439
2440 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2441 return true;
2442
2443 if (size == 0)
2444 return false;
2445
2446 addr1 = XEXP (mem1, 0);
2447 addr2 = XEXP (mem2, 0);
2448
2449 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2450
2451 /* This overlapping check is used by peepholes merging memory block operations.
2452 Overlapping operations would otherwise be recognized by the S/390 hardware
2453 and would fall back to a slower implementation. Allowing overlapping
2454 operations would lead to slow code but not to wrong code. Therefore we are
2455 somewhat optimistic if we cannot prove that the memory blocks are
2456 overlapping.
2457 That's why we return false here although this may accept operations on
2458 overlapping memory areas. */
2459 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2460 return false;
2461
2462 delta = INTVAL (addr_delta);
2463
2464 if (delta == 0
2465 || (delta > 0 && delta < size)
2466 || (delta < 0 && -delta < size))
2467 return true;
2468
2469 return false;
2470 }
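
/* Example: if MEM2's address is provably MEM1's address plus 16 and SIZE
   is 32, the blocks overlap and true is returned; if the address
   difference cannot be proven constant, false is returned as explained
   above.  */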
2471
2472 /* Check whether the address of memory reference MEM2 equals exactly
2473 the address of memory reference MEM1 plus DELTA. Return true if
2474 we can prove this to be the case, false otherwise. */
2475
2476 bool
2477 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2478 {
2479 rtx addr1, addr2, addr_delta;
2480
2481 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2482 return false;
2483
2484 addr1 = XEXP (mem1, 0);
2485 addr2 = XEXP (mem2, 0);
2486
2487 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2488 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2489 return false;
2490
2491 return true;
2492 }
2493
2494 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2495
2496 void
2497 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2498 rtx *operands)
2499 {
2500 machine_mode wmode = mode;
2501 rtx dst = operands[0];
2502 rtx src1 = operands[1];
2503 rtx src2 = operands[2];
2504 rtx op, clob, tem;
2505
2506 /* If we cannot handle the operation directly, use a temp register. */
2507 if (!s390_logical_operator_ok_p (operands))
2508 dst = gen_reg_rtx (mode);
2509
2510 /* QImode and HImode patterns make sense only if we have a destination
2511 in memory. Otherwise perform the operation in SImode. */
2512 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2513 wmode = SImode;
2514
2515 /* Widen operands if required. */
2516 if (mode != wmode)
2517 {
2518 if (GET_CODE (dst) == SUBREG
2519 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2520 dst = tem;
2521 else if (REG_P (dst))
2522 dst = gen_rtx_SUBREG (wmode, dst, 0);
2523 else
2524 dst = gen_reg_rtx (wmode);
2525
2526 if (GET_CODE (src1) == SUBREG
2527 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2528 src1 = tem;
2529 else if (GET_MODE (src1) != VOIDmode)
2530 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2531
2532 if (GET_CODE (src2) == SUBREG
2533 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2534 src2 = tem;
2535 else if (GET_MODE (src2) != VOIDmode)
2536 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2537 }
2538
2539 /* Emit the instruction. */
2540 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2541 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2542 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2543
2544 /* Fix up the destination if needed. */
2545 if (dst != operands[0])
2546 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2547 }
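
/* Example: a QImode XOR between two registers is widened to SImode; the
   operation is emitted on SImode subregs with CC clobbered, and the low
   part of the result is copied back into the original QImode
   destination.  */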
2548
2549 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2550
2551 bool
2552 s390_logical_operator_ok_p (rtx *operands)
2553 {
2554 /* If the destination operand is in memory, it needs to coincide
2555 with one of the source operands. After reload, it has to be
2556 the first source operand. */
2557 if (GET_CODE (operands[0]) == MEM)
2558 return rtx_equal_p (operands[0], operands[1])
2559 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2560
2561 return true;
2562 }
2563
2564 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2565 operand IMMOP to switch from SS to SI type instructions. */
2566
2567 void
2568 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2569 {
2570 int def = code == AND ? -1 : 0;
2571 HOST_WIDE_INT mask;
2572 int part;
2573
2574 gcc_assert (GET_CODE (*memop) == MEM);
2575 gcc_assert (!MEM_VOLATILE_P (*memop));
2576
2577 mask = s390_extract_part (*immop, QImode, def);
2578 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2579 gcc_assert (part >= 0);
2580
2581 *memop = adjust_address (*memop, QImode, part);
2582 *immop = gen_int_mode (mask, QImode);
2583 }
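
/* Example: an AND of an SImode memory operand with the immediate
   0xffffff00 only changes the least significant byte, so the operands
   are narrowed to a QImode access at byte offset 3 with immediate 0x00,
   suitable for a single NI instruction.  */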
2584
2585
2586 /* How to allocate a 'struct machine_function'. */
2587
2588 static struct machine_function *
2589 s390_init_machine_status (void)
2590 {
2591 return ggc_cleared_alloc<machine_function> ();
2592 }
2593
2594 /* Map for smallest class containing reg regno. */
2595
2596 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2597 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2598 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2599 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2600 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2601 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2602 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2603 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2604 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2605 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2606 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2607 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2608 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2609 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2610 VEC_REGS, VEC_REGS /* 52 */
2611 };
2612
2613 /* Return attribute type of insn. */
2614
2615 static enum attr_type
2616 s390_safe_attr_type (rtx_insn *insn)
2617 {
2618 if (recog_memoized (insn) >= 0)
2619 return get_attr_type (insn);
2620 else
2621 return TYPE_NONE;
2622 }
2623
2624 /* Return true if DISP is a valid short displacement. */
2625
2626 static bool
2627 s390_short_displacement (rtx disp)
2628 {
2629 /* No displacement is OK. */
2630 if (!disp)
2631 return true;
2632
2633 /* Without the long displacement facility we don't need to
2634 distinguish between long and short displacement. */
2635 if (!TARGET_LONG_DISPLACEMENT)
2636 return true;
2637
2638 /* Integer displacement in range. */
2639 if (GET_CODE (disp) == CONST_INT)
2640 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2641
2642 /* GOT offset is not OK, the GOT can be large. */
2643 if (GET_CODE (disp) == CONST
2644 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2645 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2646 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2647 return false;
2648
2649 /* All other symbolic constants are literal pool references,
2650 which are OK as the literal pool must be small. */
2651 if (GET_CODE (disp) == CONST)
2652 return true;
2653
2654 return false;
2655 }
2656
2657 /* Decompose an RTL expression ADDR for a memory address into
2658 its components, returned in OUT.
2659
2660 Returns false if ADDR is not a valid memory address, true
2661 otherwise. If OUT is NULL, don't return the components,
2662 but check for validity only.
2663
2664 Note: Only addresses in canonical form are recognized.
2665 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2666 canonical form so that they will be recognized. */
2667
2668 static int
2669 s390_decompose_address (rtx addr, struct s390_address *out)
2670 {
2671 HOST_WIDE_INT offset = 0;
2672 rtx base = NULL_RTX;
2673 rtx indx = NULL_RTX;
2674 rtx disp = NULL_RTX;
2675 rtx orig_disp;
2676 bool pointer = false;
2677 bool base_ptr = false;
2678 bool indx_ptr = false;
2679 bool literal_pool = false;
2680
2681 /* We may need to substitute the literal pool base register into the address
2682 below. However, at this point we do not know which register is going to
2683 be used as base, so we substitute the arg pointer register. This is going
2684 to be treated as holding a pointer below -- it shouldn't be used for any
2685 other purpose. */
2686 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2687
2688 /* Decompose address into base + index + displacement. */
2689
2690 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2691 base = addr;
2692
2693 else if (GET_CODE (addr) == PLUS)
2694 {
2695 rtx op0 = XEXP (addr, 0);
2696 rtx op1 = XEXP (addr, 1);
2697 enum rtx_code code0 = GET_CODE (op0);
2698 enum rtx_code code1 = GET_CODE (op1);
2699
2700 if (code0 == REG || code0 == UNSPEC)
2701 {
2702 if (code1 == REG || code1 == UNSPEC)
2703 {
2704 indx = op0; /* index + base */
2705 base = op1;
2706 }
2707
2708 else
2709 {
2710 base = op0; /* base + displacement */
2711 disp = op1;
2712 }
2713 }
2714
2715 else if (code0 == PLUS)
2716 {
2717 indx = XEXP (op0, 0); /* index + base + disp */
2718 base = XEXP (op0, 1);
2719 disp = op1;
2720 }
2721
2722 else
2723 {
2724 return false;
2725 }
2726 }
2727
2728 else
2729 disp = addr; /* displacement */
2730
2731 /* Extract integer part of displacement. */
2732 orig_disp = disp;
2733 if (disp)
2734 {
2735 if (GET_CODE (disp) == CONST_INT)
2736 {
2737 offset = INTVAL (disp);
2738 disp = NULL_RTX;
2739 }
2740 else if (GET_CODE (disp) == CONST
2741 && GET_CODE (XEXP (disp, 0)) == PLUS
2742 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2743 {
2744 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2745 disp = XEXP (XEXP (disp, 0), 0);
2746 }
2747 }
2748
2749 /* Strip off CONST here to avoid special case tests later. */
2750 if (disp && GET_CODE (disp) == CONST)
2751 disp = XEXP (disp, 0);
2752
2753 /* We can convert literal pool addresses to
2754 displacements by basing them off the base register. */
2755 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2756 {
2757 /* Either base or index must be free to hold the base register. */
2758 if (!base)
2759 base = fake_pool_base, literal_pool = true;
2760 else if (!indx)
2761 indx = fake_pool_base, literal_pool = true;
2762 else
2763 return false;
2764
2765 /* Mark up the displacement. */
2766 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2767 UNSPEC_LTREL_OFFSET);
2768 }
2769
2770 /* Validate base register. */
2771 if (base)
2772 {
2773 if (GET_CODE (base) == UNSPEC)
2774 switch (XINT (base, 1))
2775 {
2776 case UNSPEC_LTREF:
2777 if (!disp)
2778 disp = gen_rtx_UNSPEC (Pmode,
2779 gen_rtvec (1, XVECEXP (base, 0, 0)),
2780 UNSPEC_LTREL_OFFSET);
2781 else
2782 return false;
2783
2784 base = XVECEXP (base, 0, 1);
2785 break;
2786
2787 case UNSPEC_LTREL_BASE:
2788 if (XVECLEN (base, 0) == 1)
2789 base = fake_pool_base, literal_pool = true;
2790 else
2791 base = XVECEXP (base, 0, 1);
2792 break;
2793
2794 default:
2795 return false;
2796 }
2797
2798 if (!REG_P (base)
2799 || (GET_MODE (base) != SImode
2800 && GET_MODE (base) != Pmode))
2801 return false;
2802
2803 if (REGNO (base) == STACK_POINTER_REGNUM
2804 || REGNO (base) == FRAME_POINTER_REGNUM
2805 || ((reload_completed || reload_in_progress)
2806 && frame_pointer_needed
2807 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2808 || REGNO (base) == ARG_POINTER_REGNUM
2809 || (flag_pic
2810 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2811 pointer = base_ptr = true;
2812
2813 if ((reload_completed || reload_in_progress)
2814 && base == cfun->machine->base_reg)
2815 pointer = base_ptr = literal_pool = true;
2816 }
2817
2818 /* Validate index register. */
2819 if (indx)
2820 {
2821 if (GET_CODE (indx) == UNSPEC)
2822 switch (XINT (indx, 1))
2823 {
2824 case UNSPEC_LTREF:
2825 if (!disp)
2826 disp = gen_rtx_UNSPEC (Pmode,
2827 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2828 UNSPEC_LTREL_OFFSET);
2829 else
2830 return false;
2831
2832 indx = XVECEXP (indx, 0, 1);
2833 break;
2834
2835 case UNSPEC_LTREL_BASE:
2836 if (XVECLEN (indx, 0) == 1)
2837 indx = fake_pool_base, literal_pool = true;
2838 else
2839 indx = XVECEXP (indx, 0, 1);
2840 break;
2841
2842 default:
2843 return false;
2844 }
2845
2846 if (!REG_P (indx)
2847 || (GET_MODE (indx) != SImode
2848 && GET_MODE (indx) != Pmode))
2849 return false;
2850
2851 if (REGNO (indx) == STACK_POINTER_REGNUM
2852 || REGNO (indx) == FRAME_POINTER_REGNUM
2853 || ((reload_completed || reload_in_progress)
2854 && frame_pointer_needed
2855 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2856 || REGNO (indx) == ARG_POINTER_REGNUM
2857 || (flag_pic
2858 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2859 pointer = indx_ptr = true;
2860
2861 if ((reload_completed || reload_in_progress)
2862 && indx == cfun->machine->base_reg)
2863 pointer = indx_ptr = literal_pool = true;
2864 }
2865
2866 /* Prefer to use pointer as base, not index. */
2867 if (base && indx && !base_ptr
2868 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2869 {
2870 rtx tmp = base;
2871 base = indx;
2872 indx = tmp;
2873 }
2874
2875 /* Validate displacement. */
2876 if (!disp)
2877 {
2878 /* If virtual registers are involved, the displacement will change later
2879 anyway as the virtual registers get eliminated. This could make a
2880 valid displacement invalid, but it is more likely to make an invalid
2881 displacement valid, because we sometimes access the register save area
2882 via negative offsets to one of those registers.
2883 Thus we don't check the displacement for validity here. If after
2884 elimination the displacement turns out to be invalid after all,
2885 this is fixed up by reload in any case. */
2886 /* LRA always keeps displacements up to date, and we need to
2887 know that the displacement is valid throughout LRA, not only
2888 at the final elimination. */
2889 if (lra_in_progress
2890 || (base != arg_pointer_rtx
2891 && indx != arg_pointer_rtx
2892 && base != return_address_pointer_rtx
2893 && indx != return_address_pointer_rtx
2894 && base != frame_pointer_rtx
2895 && indx != frame_pointer_rtx
2896 && base != virtual_stack_vars_rtx
2897 && indx != virtual_stack_vars_rtx))
2898 if (!DISP_IN_RANGE (offset))
2899 return false;
2900 }
2901 else
2902 {
2903 /* All the special cases are pointers. */
2904 pointer = true;
2905
2906 /* In the small-PIC case, the linker converts @GOT
2907 and @GOTNTPOFF offsets to possible displacements. */
2908 if (GET_CODE (disp) == UNSPEC
2909 && (XINT (disp, 1) == UNSPEC_GOT
2910 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2911 && flag_pic == 1)
2912 {
2913 ;
2914 }
2915
2916 /* Accept pool label offsets. */
2917 else if (GET_CODE (disp) == UNSPEC
2918 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2919 ;
2920
2921 /* Accept literal pool references. */
2922 else if (GET_CODE (disp) == UNSPEC
2923 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2924 {
2925 /* In case CSE pulled a non literal pool reference out of
2926 the pool we have to reject the address. This is
2927 especially important when loading the GOT pointer on non
2928 zarch CPUs. In this case the literal pool contains an lt
2929 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2930 will most likely exceed the displacement. */
2931 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2932 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2933 return false;
2934
2935 orig_disp = gen_rtx_CONST (Pmode, disp);
2936 if (offset)
2937 {
2938 /* If we have an offset, make sure it does not
2939 exceed the size of the constant pool entry. */
2940 rtx sym = XVECEXP (disp, 0, 0);
2941 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2942 return false;
2943
2944 orig_disp = plus_constant (Pmode, orig_disp, offset);
2945 }
2946 }
2947
2948 else
2949 return false;
2950 }
2951
2952 if (!base && !indx)
2953 pointer = true;
2954
2955 if (out)
2956 {
2957 out->base = base;
2958 out->indx = indx;
2959 out->disp = orig_disp;
2960 out->pointer = pointer;
2961 out->literal_pool = literal_pool;
2962 }
2963
2964 return true;
2965 }
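
/* Example: the address (plus (reg R) (const_int 100)) decomposes into
   base = R, no index, and displacement 100, assuming R is a valid
   SImode/Pmode address register; a lone constant-pool SYMBOL_REF becomes
   a displacement based off the literal pool base register as described
   above.  */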
2966
2967 /* Decompose an RTL expression OP for a shift count into its components,
2968 and return the base register in BASE and the offset in OFFSET.
2969
2970 Return true if OP is a valid shift count, false if not. */
2971
2972 bool
2973 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2974 {
2975 HOST_WIDE_INT off = 0;
2976
2977 /* We can have an integer constant, an address register,
2978 or a sum of the two. */
2979 if (GET_CODE (op) == CONST_INT)
2980 {
2981 off = INTVAL (op);
2982 op = NULL_RTX;
2983 }
2984 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2985 {
2986 off = INTVAL (XEXP (op, 1));
2987 op = XEXP (op, 0);
2988 }
2989 while (op && GET_CODE (op) == SUBREG)
2990 op = SUBREG_REG (op);
2991
2992 if (op && GET_CODE (op) != REG)
2993 return false;
2994
2995 if (offset)
2996 *offset = off;
2997 if (base)
2998 *base = op;
2999
3000 return true;
3001 }
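
/* Example: the shift count (plus (reg R) (const_int 7)) yields *BASE = R
   and *OFFSET = 7; a plain (const_int 5) yields *BASE = NULL_RTX and
   *OFFSET = 5.  */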
3002
3003
3004 /* Return true if CODE is a valid address without index. */
3005
3006 bool
3007 s390_legitimate_address_without_index_p (rtx op)
3008 {
3009 struct s390_address addr;
3010
3011 if (!s390_decompose_address (XEXP (op, 0), &addr))
3012 return false;
3013 if (addr.indx)
3014 return false;
3015
3016 return true;
3017 }
3018
3019
3020 /* Return TRUE if ADDR is an operand valid for a load/store relative
3021 instruction. Be aware that the alignment of the operand needs to
3022 be checked separately.
3023 Valid addresses are single references or a sum of a reference and a
3024 constant integer. Return these parts in SYMREF and ADDEND. You can
3025 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3026 values. Literal pool references are *not* considered symbol
3027 references. */
3028
3029 static bool
3030 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3031 {
3032 HOST_WIDE_INT tmpaddend = 0;
3033
3034 if (GET_CODE (addr) == CONST)
3035 addr = XEXP (addr, 0);
3036
3037 if (GET_CODE (addr) == PLUS)
3038 {
3039 if (!CONST_INT_P (XEXP (addr, 1)))
3040 return false;
3041
3042 tmpaddend = INTVAL (XEXP (addr, 1));
3043 addr = XEXP (addr, 0);
3044 }
3045
3046 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3047 || (GET_CODE (addr) == UNSPEC
3048 && (XINT (addr, 1) == UNSPEC_GOTENT
3049 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3050 {
3051 if (symref)
3052 *symref = addr;
3053 if (addend)
3054 *addend = tmpaddend;
3055
3056 return true;
3057 }
3058 return false;
3059 }
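
/* Example: (const (plus (symbol_ref "x") (const_int 8))) is accepted with
   *SYMREF set to the symbol_ref and *ADDEND = 8, provided "x" is not a
   literal pool address.  */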
3060
3061 /* Return true if the address in OP is valid for constraint letter C
3062 if wrapped in a MEM rtx. Pass true in LIT_POOL_OK if literal
3063 pool MEMs should be accepted. Only the Q, R, S, T constraint
3064 letters are allowed for C. */
3065
3066 static int
3067 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3068 {
3069 struct s390_address addr;
3070 bool decomposed = false;
3071
3072 /* This check makes sure that no symbolic address (except literal
3073 pool references) are accepted by the R or T constraints. */
3074 if (s390_loadrelative_operand_p (op, NULL, NULL))
3075 return 0;
3076
3077 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3078 if (!lit_pool_ok)
3079 {
3080 if (!s390_decompose_address (op, &addr))
3081 return 0;
3082 if (addr.literal_pool)
3083 return 0;
3084 decomposed = true;
3085 }
3086
3087 switch (c)
3088 {
3089 case 'Q': /* no index short displacement */
3090 if (!decomposed && !s390_decompose_address (op, &addr))
3091 return 0;
3092 if (addr.indx)
3093 return 0;
3094 if (!s390_short_displacement (addr.disp))
3095 return 0;
3096 break;
3097
3098 case 'R': /* with index short displacement */
3099 if (TARGET_LONG_DISPLACEMENT)
3100 {
3101 if (!decomposed && !s390_decompose_address (op, &addr))
3102 return 0;
3103 if (!s390_short_displacement (addr.disp))
3104 return 0;
3105 }
3106 /* Any invalid address here will be fixed up by reload,
3107 so accept it for the most generic constraint. */
3108 break;
3109
3110 case 'S': /* no index long displacement */
3111 if (!TARGET_LONG_DISPLACEMENT)
3112 return 0;
3113 if (!decomposed && !s390_decompose_address (op, &addr))
3114 return 0;
3115 if (addr.indx)
3116 return 0;
3117 if (s390_short_displacement (addr.disp))
3118 return 0;
3119 break;
3120
3121 case 'T': /* with index long displacement */
3122 if (!TARGET_LONG_DISPLACEMENT)
3123 return 0;
3124 /* Any invalid address here will be fixed up by reload,
3125 so accept it for the most generic constraint. */
3126 if ((decomposed || s390_decompose_address (op, &addr))
3127 && s390_short_displacement (addr.disp))
3128 return 0;
3129 break;
3130 default:
3131 return 0;
3132 }
3133 return 1;
3134 }
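
/* Example: constraint letter 'Q' accepts a base register plus a short
   displacement (0..4095) without an index register, while 'S' requires
   the long-displacement facility and a displacement outside the short
   range.  */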
3135
3136
3137 /* Evaluates constraint strings described by the regular expression
3138 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3139 the constraint given in STR, and 0 otherwise. */
3140
3141 int
3142 s390_mem_constraint (const char *str, rtx op)
3143 {
3144 char c = str[0];
3145
3146 switch (c)
3147 {
3148 case 'A':
3149 /* Check for offsettable variants of memory constraints. */
3150 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3151 return 0;
3152 if ((reload_completed || reload_in_progress)
3153 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3154 return 0;
3155 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3156 case 'B':
3157 /* Check for non-literal-pool variants of memory constraints. */
3158 if (!MEM_P (op))
3159 return 0;
3160 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3161 case 'Q':
3162 case 'R':
3163 case 'S':
3164 case 'T':
3165 if (GET_CODE (op) != MEM)
3166 return 0;
3167 return s390_check_qrst_address (c, XEXP (op, 0), true);
3168 case 'U':
3169 return (s390_check_qrst_address ('Q', op, true)
3170 || s390_check_qrst_address ('R', op, true));
3171 case 'W':
3172 return (s390_check_qrst_address ('S', op, true)
3173 || s390_check_qrst_address ('T', op, true));
3174 case 'Y':
3175 /* Simply check for the basic form of a shift count. Reload will
3176 take care of making sure we have a proper base register. */
3177 if (!s390_decompose_shift_count (op, NULL, NULL))
3178 return 0;
3179 break;
3180 case 'Z':
3181 return s390_check_qrst_address (str[1], op, true);
3182 default:
3183 return 0;
3184 }
3185 return 1;
3186 }
3187
3188
3189 /* Evaluates constraint strings starting with letter O. Input
3190 parameter C is the letter following the "O" in the constraint
3191 string. Returns 1 if VALUE meets the respective constraint and 0
3192 otherwise. */
3193
3194 int
3195 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3196 {
3197 if (!TARGET_EXTIMM)
3198 return 0;
3199
3200 switch (c)
3201 {
3202 case 's':
3203 return trunc_int_for_mode (value, SImode) == value;
3204
3205 case 'p':
3206 return value == 0
3207 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3208
3209 case 'n':
3210 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3211
3212 default:
3213 gcc_unreachable ();
3214 }
3215 }
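
/* Example: constraint "Os" accepts any VALUE that is unchanged by
   truncation to SImode, i.e. any signed 32-bit immediate.  */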
3216
3217
3218 /* Evaluates constraint strings starting with letter N. Parameter STR
3219 contains the letters following letter "N" in the constraint string.
3220 Returns true if VALUE matches the constraint. */
3221
3222 int
3223 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3224 {
3225 machine_mode mode, part_mode;
3226 int def;
3227 int part, part_goal;
3228
3229
3230 if (str[0] == 'x')
3231 part_goal = -1;
3232 else
3233 part_goal = str[0] - '0';
3234
3235 switch (str[1])
3236 {
3237 case 'Q':
3238 part_mode = QImode;
3239 break;
3240 case 'H':
3241 part_mode = HImode;
3242 break;
3243 case 'S':
3244 part_mode = SImode;
3245 break;
3246 default:
3247 return 0;
3248 }
3249
3250 switch (str[2])
3251 {
3252 case 'H':
3253 mode = HImode;
3254 break;
3255 case 'S':
3256 mode = SImode;
3257 break;
3258 case 'D':
3259 mode = DImode;
3260 break;
3261 default:
3262 return 0;
3263 }
3264
3265 switch (str[3])
3266 {
3267 case '0':
3268 def = 0;
3269 break;
3270 case 'F':
3271 def = -1;
3272 break;
3273 default:
3274 return 0;
3275 }
3276
3277 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3278 return 0;
3279
3280 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3281 if (part < 0)
3282 return 0;
3283 if (part_goal != -1 && part_goal != part)
3284 return 0;
3285
3286 return 1;
3287 }
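
/* Example: the constraint string "xQD0" accepts a DImode VALUE in which
   exactly one QImode part is nonzero, at any position, while "0HD0"
   additionally requires the nonzero HImode part to be part 0, i.e. the
   most significant halfword.  */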
3288
3289
3290 /* Returns true if the input parameter VALUE is a float zero. */
3291
3292 int
3293 s390_float_const_zero_p (rtx value)
3294 {
3295 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3296 && value == CONST0_RTX (GET_MODE (value)));
3297 }
3298
3299 /* Implement TARGET_REGISTER_MOVE_COST. */
3300
3301 static int
3302 s390_register_move_cost (machine_mode mode,
3303 reg_class_t from, reg_class_t to)
3304 {
3305 /* On s390, copy between fprs and gprs is expensive. */
3306
3307 /* It becomes somewhat faster having ldgr/lgdr. */
3308 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3309 {
3310 /* ldgr is single cycle. */
3311 if (reg_classes_intersect_p (from, GENERAL_REGS)
3312 && reg_classes_intersect_p (to, FP_REGS))
3313 return 1;
3314 /* lgdr needs 3 cycles. */
3315 if (reg_classes_intersect_p (to, GENERAL_REGS)
3316 && reg_classes_intersect_p (from, FP_REGS))
3317 return 3;
3318 }
3319
3320 /* Otherwise copying is done via memory. */
3321 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3322 && reg_classes_intersect_p (to, FP_REGS))
3323 || (reg_classes_intersect_p (from, FP_REGS)
3324 && reg_classes_intersect_p (to, GENERAL_REGS)))
3325 return 10;
3326
3327 return 1;
3328 }
3329
3330 /* Implement TARGET_MEMORY_MOVE_COST. */
3331
3332 static int
3333 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3334 reg_class_t rclass ATTRIBUTE_UNUSED,
3335 bool in ATTRIBUTE_UNUSED)
3336 {
3337 return 2;
3338 }
3339
3340 /* Compute a (partial) cost for rtx X. Return true if the complete
3341 cost has been computed, and false if subexpressions should be
3342 scanned. In either case, *TOTAL contains the cost result.
3343 OUTER_CODE contains the code of the superexpression of x. */
3344
3345 static bool
3346 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3347 int opno ATTRIBUTE_UNUSED,
3348 int *total, bool speed ATTRIBUTE_UNUSED)
3349 {
3350 int code = GET_CODE (x);
3351 switch (code)
3352 {
3353 case CONST:
3354 case CONST_INT:
3355 case LABEL_REF:
3356 case SYMBOL_REF:
3357 case CONST_DOUBLE:
3358 case CONST_WIDE_INT:
3359 case MEM:
3360 *total = 0;
3361 return true;
3362
3363 case IOR:
3364 /* risbg */
3365 if (GET_CODE (XEXP (x, 0)) == AND
3366 && GET_CODE (XEXP (x, 1)) == ASHIFT
3367 && REG_P (XEXP (XEXP (x, 0), 0))
3368 && REG_P (XEXP (XEXP (x, 1), 0))
3369 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3370 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3371 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3372 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3373 {
3374 *total = COSTS_N_INSNS (2);
3375 return true;
3376 }
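/* fall through */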
3377 case ASHIFT:
3378 case ASHIFTRT:
3379 case LSHIFTRT:
3380 case ROTATE:
3381 case ROTATERT:
3382 case AND:
3383 case XOR:
3384 case NEG:
3385 case NOT:
3386 *total = COSTS_N_INSNS (1);
3387 return false;
3388
3389 case PLUS:
3390 case MINUS:
3391 *total = COSTS_N_INSNS (1);
3392 return false;
3393
3394 case MULT:
3395 switch (mode)
3396 {
3397 case SImode:
3398 {
3399 rtx left = XEXP (x, 0);
3400 rtx right = XEXP (x, 1);
3401 if (GET_CODE (right) == CONST_INT
3402 && CONST_OK_FOR_K (INTVAL (right)))
3403 *total = s390_cost->mhi;
3404 else if (GET_CODE (left) == SIGN_EXTEND)
3405 *total = s390_cost->mh;
3406 else
3407 *total = s390_cost->ms; /* msr, ms, msy */
3408 break;
3409 }
3410 case DImode:
3411 {
3412 rtx left = XEXP (x, 0);
3413 rtx right = XEXP (x, 1);
3414 if (TARGET_ZARCH)
3415 {
3416 if (GET_CODE (right) == CONST_INT
3417 && CONST_OK_FOR_K (INTVAL (right)))
3418 *total = s390_cost->mghi;
3419 else if (GET_CODE (left) == SIGN_EXTEND)
3420 *total = s390_cost->msgf;
3421 else
3422 *total = s390_cost->msg; /* msgr, msg */
3423 }
3424 else /* TARGET_31BIT */
3425 {
3426 if (GET_CODE (left) == SIGN_EXTEND
3427 && GET_CODE (right) == SIGN_EXTEND)
3428 /* mulsidi case: mr, m */
3429 *total = s390_cost->m;
3430 else if (GET_CODE (left) == ZERO_EXTEND
3431 && GET_CODE (right) == ZERO_EXTEND
3432 && TARGET_CPU_ZARCH)
3433 /* umulsidi case: ml, mlr */
3434 *total = s390_cost->ml;
3435 else
3436 /* Complex calculation is required. */
3437 *total = COSTS_N_INSNS (40);
3438 }
3439 break;
3440 }
3441 case SFmode:
3442 case DFmode:
3443 *total = s390_cost->mult_df;
3444 break;
3445 case TFmode:
3446 *total = s390_cost->mxbr;
3447 break;
3448 default:
3449 return false;
3450 }
3451 return false;
3452
3453 case FMA:
3454 switch (mode)
3455 {
3456 case DFmode:
3457 *total = s390_cost->madbr;
3458 break;
3459 case SFmode:
3460 *total = s390_cost->maebr;
3461 break;
3462 default:
3463 return false;
3464 }
3465 /* A negation in the third argument is free: FMSUB. */
3466 if (GET_CODE (XEXP (x, 2)) == NEG)
3467 {
3468 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3469 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3470 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3471 return true;
3472 }
3473 return false;
3474
3475 case UDIV:
3476 case UMOD:
3477 if (mode == TImode) /* 128 bit division */
3478 *total = s390_cost->dlgr;
3479 else if (mode == DImode)
3480 {
3481 rtx right = XEXP (x, 1);
3482 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3483 *total = s390_cost->dlr;
3484 else /* 64 by 64 bit division */
3485 *total = s390_cost->dlgr;
3486 }
3487 else if (mode == SImode) /* 32 bit division */
3488 *total = s390_cost->dlr;
3489 return false;
3490
3491 case DIV:
3492 case MOD:
3493 if (mode == DImode)
3494 {
3495 rtx right = XEXP (x, 1);
3496 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3497 if (TARGET_ZARCH)
3498 *total = s390_cost->dsgfr;
3499 else
3500 *total = s390_cost->dr;
3501 else /* 64 by 64 bit division */
3502 *total = s390_cost->dsgr;
3503 }
3504 else if (mode == SImode) /* 32 bit division */
3505 *total = s390_cost->dlr;
3506 else if (mode == SFmode)
3507 {
3508 *total = s390_cost->debr;
3509 }
3510 else if (mode == DFmode)
3511 {
3512 *total = s390_cost->ddbr;
3513 }
3514 else if (mode == TFmode)
3515 {
3516 *total = s390_cost->dxbr;
3517 }
3518 return false;
3519
3520 case SQRT:
3521 if (mode == SFmode)
3522 *total = s390_cost->sqebr;
3523 else if (mode == DFmode)
3524 *total = s390_cost->sqdbr;
3525 else /* TFmode */
3526 *total = s390_cost->sqxbr;
3527 return false;
3528
3529 case SIGN_EXTEND:
3530 case ZERO_EXTEND:
3531 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3532 || outer_code == PLUS || outer_code == MINUS
3533 || outer_code == COMPARE)
3534 *total = 0;
3535 return false;
3536
3537 case COMPARE:
3538 *total = COSTS_N_INSNS (1);
3539 if (GET_CODE (XEXP (x, 0)) == AND
3540 && GET_CODE (XEXP (x, 1)) == CONST_INT
3541 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3542 {
3543 rtx op0 = XEXP (XEXP (x, 0), 0);
3544 rtx op1 = XEXP (XEXP (x, 0), 1);
3545 rtx op2 = XEXP (x, 1);
3546
3547 if (memory_operand (op0, GET_MODE (op0))
3548 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3549 return true;
3550 if (register_operand (op0, GET_MODE (op0))
3551 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3552 return true;
3553 }
3554 return false;
3555
3556 default:
3557 return false;
3558 }
3559 }
3560
3561 /* Return the cost of an address rtx ADDR. */
3562
3563 static int
3564 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3565 addr_space_t as ATTRIBUTE_UNUSED,
3566 bool speed ATTRIBUTE_UNUSED)
3567 {
3568 struct s390_address ad;
3569 if (!s390_decompose_address (addr, &ad))
3570 return 1000;
3571
3572 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3573 }
3574
3575 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3576 otherwise return 0. */
3577
3578 int
3579 tls_symbolic_operand (rtx op)
3580 {
3581 if (GET_CODE (op) != SYMBOL_REF)
3582 return 0;
3583 return SYMBOL_REF_TLS_MODEL (op);
3584 }
3585 \f
3586 /* Split DImode access register reference REG (on 64-bit) into its constituent
3587 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3588 gen_highpart cannot be used as they assume all registers are word-sized,
3589 while our access registers have only half that size. */
3590
3591 void
3592 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3593 {
3594 gcc_assert (TARGET_64BIT);
3595 gcc_assert (ACCESS_REG_P (reg));
3596 gcc_assert (GET_MODE (reg) == DImode);
3597 gcc_assert (!(REGNO (reg) & 1));
3598
3599 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3600 *hi = gen_rtx_REG (SImode, REGNO (reg));
3601 }
3602
3603 /* Return true if OP contains a symbol reference. */
3604
3605 bool
3606 symbolic_reference_mentioned_p (rtx op)
3607 {
3608 const char *fmt;
3609 int i;
3610
3611 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3612 return 1;
3613
3614 fmt = GET_RTX_FORMAT (GET_CODE (op));
3615 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3616 {
3617 if (fmt[i] == 'E')
3618 {
3619 int j;
3620
3621 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3622 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3623 return 1;
3624 }
3625
3626 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3627 return 1;
3628 }
3629
3630 return 0;
3631 }
3632
3633 /* Return true if OP contains a reference to a thread-local symbol. */
3634
3635 bool
3636 tls_symbolic_reference_mentioned_p (rtx op)
3637 {
3638 const char *fmt;
3639 int i;
3640
3641 if (GET_CODE (op) == SYMBOL_REF)
3642 return tls_symbolic_operand (op);
3643
3644 fmt = GET_RTX_FORMAT (GET_CODE (op));
3645 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3646 {
3647 if (fmt[i] == 'E')
3648 {
3649 int j;
3650
3651 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3652 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3653 return true;
3654 }
3655
3656 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3657 return true;
3658 }
3659
3660 return false;
3661 }
3662
3663
3664 /* Return true if OP is a legitimate general operand when
3665 generating PIC code. It is given that flag_pic is on
3666 and that OP satisfies CONSTANT_P. */
3667
3668 int
3669 legitimate_pic_operand_p (rtx op)
3670 {
3671 /* Accept all non-symbolic constants. */
3672 if (!SYMBOLIC_CONST (op))
3673 return 1;
3674
3675 /* Reject everything else; must be handled
3676 via emit_symbolic_move. */
3677 return 0;
3678 }
3679
3680 /* Returns true if the constant value OP is a legitimate general operand.
3681 It is given that OP satisfies CONSTANT_P. */
3682
3683 static bool
3684 s390_legitimate_constant_p (machine_mode mode, rtx op)
3685 {
3686 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3687 {
3688 if (GET_MODE_SIZE (mode) != 16)
3689 return 0;
3690
3691 if (!satisfies_constraint_j00 (op)
3692 && !satisfies_constraint_jm1 (op)
3693 && !satisfies_constraint_jKK (op)
3694 && !satisfies_constraint_jxx (op)
3695 && !satisfies_constraint_jyy (op))
3696 return 0;
3697 }
3698
3699 /* Accept all non-symbolic constants. */
3700 if (!SYMBOLIC_CONST (op))
3701 return 1;
3702
3703 /* Accept immediate LARL operands. */
3704 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3705 return 1;
3706
3707 /* Thread-local symbols are never legal constants. This is
3708 so that emit_call knows that computing such addresses
3709 might require a function call. */
3710 if (TLS_SYMBOLIC_CONST (op))
3711 return 0;
3712
3713 /* In the PIC case, symbolic constants must *not* be
3714 forced into the literal pool. We accept them here,
3715 so that they will be handled by emit_symbolic_move. */
3716 if (flag_pic)
3717 return 1;
3718
3719 /* All remaining non-PIC symbolic constants are
3720 forced into the literal pool. */
3721 return 0;
3722 }
3723
3724 /* Determine if it's legal to put X into the constant pool. This
3725 is not possible if X contains the address of a symbol that is
3726 not constant (TLS) or not known at final link time (PIC). */
3727
3728 static bool
3729 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3730 {
3731 switch (GET_CODE (x))
3732 {
3733 case CONST_INT:
3734 case CONST_DOUBLE:
3735 case CONST_WIDE_INT:
3736 case CONST_VECTOR:
3737 /* Accept all non-symbolic constants. */
3738 return false;
3739
3740 case LABEL_REF:
3741 /* Labels are OK iff we are non-PIC. */
3742 return flag_pic != 0;
3743
3744 case SYMBOL_REF:
3745 /* 'Naked' TLS symbol references are never OK,
3746 non-TLS symbols are OK iff we are non-PIC. */
3747 if (tls_symbolic_operand (x))
3748 return true;
3749 else
3750 return flag_pic != 0;
3751
3752 case CONST:
3753 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3754 case PLUS:
3755 case MINUS:
3756 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3757 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3758
3759 case UNSPEC:
3760 switch (XINT (x, 1))
3761 {
3762 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3763 case UNSPEC_LTREL_OFFSET:
3764 case UNSPEC_GOT:
3765 case UNSPEC_GOTOFF:
3766 case UNSPEC_PLTOFF:
3767 case UNSPEC_TLSGD:
3768 case UNSPEC_TLSLDM:
3769 case UNSPEC_NTPOFF:
3770 case UNSPEC_DTPOFF:
3771 case UNSPEC_GOTNTPOFF:
3772 case UNSPEC_INDNTPOFF:
3773 return false;
3774
3775 /* If the literal pool shares the code section, execute
3776 template placeholders may be put into the pool as well. */
3777 case UNSPEC_INSN:
3778 return TARGET_CPU_ZARCH;
3779
3780 default:
3781 return true;
3782 }
3783 break;
3784
3785 default:
3786 gcc_unreachable ();
3787 }
3788 }
3789
3790 /* Returns true if the constant value OP is a legitimate general
3791 operand during and after reload. The difference to
3792 legitimate_constant_p is that this function will not accept
3793 a constant that would need to be forced to the literal pool
3794 before it can be used as operand.
3795 This function accepts all constants which can be loaded directly
3796 into a GPR. */
3797
3798 bool
3799 legitimate_reload_constant_p (rtx op)
3800 {
3801 /* Accept la(y) operands. */
3802 if (GET_CODE (op) == CONST_INT
3803 && DISP_IN_RANGE (INTVAL (op)))
3804 return true;
3805
3806 /* Accept l(g)hi/l(g)fi operands. */
3807 if (GET_CODE (op) == CONST_INT
3808 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3809 return true;
3810
3811 /* Accept lliXX operands. */
3812 if (TARGET_ZARCH
3813 && GET_CODE (op) == CONST_INT
3814 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3815 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3816 return true;
3817
3818 if (TARGET_EXTIMM
3819 && GET_CODE (op) == CONST_INT
3820 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3821 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3822 return true;
3823
3824 /* Accept larl operands. */
3825 if (TARGET_CPU_ZARCH
3826 && larl_operand (op, VOIDmode))
3827 return true;
3828
3829 /* Accept floating-point zero operands that fit into a single GPR. */
3830 if (GET_CODE (op) == CONST_DOUBLE
3831 && s390_float_const_zero_p (op)
3832 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3833 return true;
3834
3835 /* Accept double-word operands that can be split. */
3836 if (GET_CODE (op) == CONST_WIDE_INT
3837 || (GET_CODE (op) == CONST_INT
3838 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3839 {
3840 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3841 rtx hi = operand_subword (op, 0, 0, dword_mode);
3842 rtx lo = operand_subword (op, 1, 0, dword_mode);
3843 return legitimate_reload_constant_p (hi)
3844 && legitimate_reload_constant_p (lo);
3845 }
3846
3847 /* Everything else cannot be handled without reload. */
3848 return false;
3849 }
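
/* Example: with TARGET_ZARCH, the DImode constant 0x0000123400000000 is
   accepted via the lliXX path because exactly one of its HImode parts is
   nonzero, whereas a 64-bit constant with several nonzero halfwords that
   also fails the la/lhi/lgfi/larl checks above is rejected here and ends
   up in the literal pool.  */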
3850
3851 /* Returns true if the constant value OP is a legitimate fp operand
3852 during and after reload.
3853 This function accepts all constants which can be loaded directly
3854 into an FPR. */
3855
3856 static bool
3857 legitimate_reload_fp_constant_p (rtx op)
3858 {
3859 /* Accept floating-point zero operands if the load zero instruction
3860 can be used. Prior to z196 the load fp zero instruction caused a
3861 performance penalty if the result is used as a BFP number. */
3862 if (TARGET_Z196
3863 && GET_CODE (op) == CONST_DOUBLE
3864 && s390_float_const_zero_p (op))
3865 return true;
3866
3867 return false;
3868 }
3869
3870 /* Returns true if the constant value OP is a legitimate vector operand
3871 during and after reload.
3872 This function accepts all constants which can be loaded directly
3873 into a VR. */
3874
3875 static bool
3876 legitimate_reload_vector_constant_p (rtx op)
3877 {
3878 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3879 && (satisfies_constraint_j00 (op)
3880 || satisfies_constraint_jm1 (op)
3881 || satisfies_constraint_jKK (op)
3882 || satisfies_constraint_jxx (op)
3883 || satisfies_constraint_jyy (op)))
3884 return true;
3885
3886 return false;
3887 }
3888
3889 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3890 return the class of reg to actually use. */
3891
3892 static reg_class_t
3893 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3894 {
3895 switch (GET_CODE (op))
3896 {
3897 /* Constants we cannot reload into general registers
3898 must be forced into the literal pool. */
3899 case CONST_VECTOR:
3900 case CONST_DOUBLE:
3901 case CONST_INT:
3902 case CONST_WIDE_INT:
3903 if (reg_class_subset_p (GENERAL_REGS, rclass)
3904 && legitimate_reload_constant_p (op))
3905 return GENERAL_REGS;
3906 else if (reg_class_subset_p (ADDR_REGS, rclass)
3907 && legitimate_reload_constant_p (op))
3908 return ADDR_REGS;
3909 else if (reg_class_subset_p (FP_REGS, rclass)
3910 && legitimate_reload_fp_constant_p (op))
3911 return FP_REGS;
3912 else if (reg_class_subset_p (VEC_REGS, rclass)
3913 && legitimate_reload_vector_constant_p (op))
3914 return VEC_REGS;
3915
3916 return NO_REGS;
3917
3918 /* If a symbolic constant or a PLUS is reloaded,
3919 it is most likely being used as an address, so
3920 prefer ADDR_REGS. If 'class' is not a superset
3921 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3922 case CONST:
3923 /* Symrefs cannot be pushed into the literal pool with -fPIC
3924 so we *MUST NOT* return NO_REGS for these cases
3925 (s390_cannot_force_const_mem will return true).
3926
3927 On the other hand we MUST return NO_REGS for symrefs with
3928 invalid addend which might have been pushed to the literal
3929 pool (no -fPIC). Usually we would expect them to be
3930 handled via secondary reload but this does not happen if
3931 they are used as literal pool slot replacement in reload
3932 inheritance (see emit_input_reload_insns). */
3933 if (TARGET_CPU_ZARCH
3934 && GET_CODE (XEXP (op, 0)) == PLUS
3935 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3936 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3937 {
3938 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3939 return ADDR_REGS;
3940 else
3941 return NO_REGS;
3942 }
3943 /* fallthrough */
3944 case LABEL_REF:
3945 case SYMBOL_REF:
3946 if (!legitimate_reload_constant_p (op))
3947 return NO_REGS;
3948 /* fallthrough */
3949 case PLUS:
3950 /* load address will be used. */
3951 if (reg_class_subset_p (ADDR_REGS, rclass))
3952 return ADDR_REGS;
3953 else
3954 return NO_REGS;
3955
3956 default:
3957 break;
3958 }
3959
3960 return rclass;
3961 }
3962
3963 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3964 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3965 aligned. */
3966
3967 bool
3968 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3969 {
3970 HOST_WIDE_INT addend;
3971 rtx symref;
3972
3973 /* The "required alignment" might be 0 (e.g. for certain structs
3974 accessed via BLKmode). Early abort in this case, as well as when
3975 an alignment > 8 is required. */
3976 if (alignment < 2 || alignment > 8)
3977 return false;
3978
3979 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3980 return false;
3981
3982 if (addend & (alignment - 1))
3983 return false;
3984
3985 if (GET_CODE (symref) == SYMBOL_REF)
3986 {
3987 /* We have load-relative instructions for 2-byte, 4-byte, and
3988 8-byte alignment so allow only these. */
3989 switch (alignment)
3990 {
3991 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
3992 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
3993 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
3994 default: return false;
3995 }
3996 }
3997
3998 if (GET_CODE (symref) == UNSPEC
3999 && alignment <= UNITS_PER_LONG)
4000 return true;
4001
4002 return false;
4003 }
4004
4005 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4006 operand, SCRATCH is used to load the even part of the address, and
4007 one is then added to form the final address. */
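/* For illustration (a sketch of the typical expansion, not emitted
   literally): reloading the address SYM+5 into %r2 with %r3 as scratch
   roughly yields
       larl  %r3, SYM+4
       la    %r2, 1(%r3)
   since larl itself can only encode even offsets.  */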
4008
4009 void
4010 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4011 {
4012 HOST_WIDE_INT addend;
4013 rtx symref;
4014
4015 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4016 gcc_unreachable ();
4017
4018 if (!(addend & 1))
4019 /* Easy case. The addend is even so larl will do fine. */
4020 emit_move_insn (reg, addr);
4021 else
4022 {
4023 /* We can leave the scratch register untouched if the target
4024 register is a valid base register. */
4025 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4026 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4027 scratch = reg;
4028
4029 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4030 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4031
4032 if (addend != 1)
4033 emit_move_insn (scratch,
4034 gen_rtx_CONST (Pmode,
4035 gen_rtx_PLUS (Pmode, symref,
4036 GEN_INT (addend - 1))));
4037 else
4038 emit_move_insn (scratch, symref);
4039
4040 /* Increment the address using la in order to avoid clobbering cc. */
4041 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4042 }
4043 }
4044
4045 /* Generate what is necessary to move between REG and MEM using
4046 SCRATCH. The direction is given by TOMEM. */
4047
4048 void
4049 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4050 {
4051 /* Reload might have pulled a constant out of the literal pool.
4052 Force it back in. */
4053 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4054 || GET_CODE (mem) == CONST_WIDE_INT
4055 || GET_CODE (mem) == CONST_VECTOR
4056 || GET_CODE (mem) == CONST)
4057 mem = force_const_mem (GET_MODE (reg), mem);
4058
4059 gcc_assert (MEM_P (mem));
4060
4061 /* For a load from memory we can leave the scratch register
4062 untouched if the target register is a valid base register. */
4063 if (!tomem
4064 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4065 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4066 && GET_MODE (reg) == GET_MODE (scratch))
4067 scratch = reg;
4068
4069 /* Load address into scratch register. Since we can't have a
4070 secondary reload for a secondary reload we have to cover the case
4071 where larl would need a secondary reload here as well. */
4072 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4073
4074 /* Now we can use a standard load/store to do the move. */
4075 if (tomem)
4076 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4077 else
4078 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4079 }
4080
4081 /* Inform reload about cases where moving X with a mode MODE to a register in
4082 RCLASS requires an extra scratch or immediate register. Return the class
4083 needed for the immediate register. */
4084
4085 static reg_class_t
4086 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4087 machine_mode mode, secondary_reload_info *sri)
4088 {
4089 enum reg_class rclass = (enum reg_class) rclass_i;
4090
4091 /* Intermediate register needed. */
4092 if (reg_classes_intersect_p (CC_REGS, rclass))
4093 return GENERAL_REGS;
4094
4095 if (TARGET_VX)
4096 {
4097 /* The vst/vl vector move instructions allow only short
4098 displacements. */
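/* For illustration (assuming the usual 12-bit unsigned short-displacement
   field): a vector access like (mem (plus (reg) (const_int 4096))) is out
   of range, so the address is first computed into an address register via
   the reload*_la_* patterns selected below.  */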
4099 if (MEM_P (x)
4100 && GET_CODE (XEXP (x, 0)) == PLUS
4101 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4102 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4103 && reg_class_subset_p (rclass, VEC_REGS)
4104 && (!reg_class_subset_p (rclass, FP_REGS)
4105 || (GET_MODE_SIZE (mode) > 8
4106 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4107 {
4108 if (in_p)
4109 sri->icode = (TARGET_64BIT ?
4110 CODE_FOR_reloaddi_la_in :
4111 CODE_FOR_reloadsi_la_in);
4112 else
4113 sri->icode = (TARGET_64BIT ?
4114 CODE_FOR_reloaddi_la_out :
4115 CODE_FOR_reloadsi_la_out);
4116 }
4117 }
4118
4119 if (TARGET_Z10)
4120 {
4121 HOST_WIDE_INT offset;
4122 rtx symref;
4123
4124 /* On z10 several optimizer steps may generate larl operands with
4125 an odd addend. */
4126 if (in_p
4127 && s390_loadrelative_operand_p (x, &symref, &offset)
4128 && mode == Pmode
4129 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4130 && (offset & 1) == 1)
4131 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4132 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4133
4134 /* Handle all the (mem (symref)) accesses we cannot use the z10
4135 instructions for. */
4136 if (MEM_P (x)
4137 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4138 && (mode == QImode
4139 || !reg_class_subset_p (rclass, GENERAL_REGS)
4140 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4141 || !s390_check_symref_alignment (XEXP (x, 0),
4142 GET_MODE_SIZE (mode))))
4143 {
4144 #define __SECONDARY_RELOAD_CASE(M,m) \
4145 case M##mode: \
4146 if (TARGET_64BIT) \
4147 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4148 CODE_FOR_reload##m##di_tomem_z10; \
4149 else \
4150 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4151 CODE_FOR_reload##m##si_tomem_z10; \
4152 break;
4153
4154 switch (GET_MODE (x))
4155 {
4156 __SECONDARY_RELOAD_CASE (QI, qi);
4157 __SECONDARY_RELOAD_CASE (HI, hi);
4158 __SECONDARY_RELOAD_CASE (SI, si);
4159 __SECONDARY_RELOAD_CASE (DI, di);
4160 __SECONDARY_RELOAD_CASE (TI, ti);
4161 __SECONDARY_RELOAD_CASE (SF, sf);
4162 __SECONDARY_RELOAD_CASE (DF, df);
4163 __SECONDARY_RELOAD_CASE (TF, tf);
4164 __SECONDARY_RELOAD_CASE (SD, sd);
4165 __SECONDARY_RELOAD_CASE (DD, dd);
4166 __SECONDARY_RELOAD_CASE (TD, td);
4167 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4168 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4169 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4170 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4171 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4172 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4173 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4174 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4175 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4176 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4177 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4178 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4179 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4180 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4181 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4182 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4183 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4184 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4185 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4186 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4187 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4188 default:
4189 gcc_unreachable ();
4190 }
4191 #undef __SECONDARY_RELOAD_CASE
4192 }
4193 }
4194
4195 /* We need a scratch register when loading a PLUS expression which
4196 is not a legitimate operand of the LOAD ADDRESS instruction. */
4197 /* LRA can deal with the transformation of such PLUS operands itself,
4198 so we don't need to request a reload pattern in this case. */
4199 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4200 sri->icode = (TARGET_64BIT ?
4201 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4202
4203 /* When performing a multiword move from or to memory we have to make sure
4204 the second chunk in memory is addressable without causing a displacement
4205 overflow. If it is not, we calculate the address in
4206 a scratch register. */
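/* Worked example (assuming a target without long displacements): a
   16-byte value at base+4094 has its second doubleword at displacement
   4102, which no longer fits the 0..4095 field, so for the affected
   register classes the address is computed into a scratch register with
   LA first.  */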
4207 if (MEM_P (x)
4208 && GET_CODE (XEXP (x, 0)) == PLUS
4209 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4210 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4211 + GET_MODE_SIZE (mode) - 1))
4212 {
4213 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4214 in an s_operand address, since we may fall back to lm/stm. So we only
4215 have to care about overflows in the b+i+d case. */
4216 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4217 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4218 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4219 /* For FP_REGS no lm/stm is available so this check is triggered
4220 for displacement overflows in b+i+d and b+d like addresses. */
4221 || (reg_classes_intersect_p (FP_REGS, rclass)
4222 && s390_class_max_nregs (FP_REGS, mode) > 1))
4223 {
4224 if (in_p)
4225 sri->icode = (TARGET_64BIT ?
4226 CODE_FOR_reloaddi_la_in :
4227 CODE_FOR_reloadsi_la_in);
4228 else
4229 sri->icode = (TARGET_64BIT ?
4230 CODE_FOR_reloaddi_la_out :
4231 CODE_FOR_reloadsi_la_out);
4232 }
4233 }
4234
4235 /* A scratch address register is needed when a symbolic constant is
4236 copied to r0 when compiling with -fPIC. In other cases the target
4237 register might be used as a temporary (see legitimize_pic_address). */
4238 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4239 sri->icode = (TARGET_64BIT ?
4240 CODE_FOR_reloaddi_PIC_addr :
4241 CODE_FOR_reloadsi_PIC_addr);
4242
4243 /* Either scratch or no register needed. */
4244 return NO_REGS;
4245 }
4246
4247 /* Generate code to load SRC, which is a PLUS that is not a
4248 legitimate operand for the LA instruction, into TARGET.
4249 SCRATCH may be used as a scratch register. */
4250
4251 void
4252 s390_expand_plus_operand (rtx target, rtx src,
4253 rtx scratch)
4254 {
4255 rtx sum1, sum2;
4256 struct s390_address ad;
4257
4258 /* src must be a PLUS; get its two operands. */
4259 gcc_assert (GET_CODE (src) == PLUS);
4260 gcc_assert (GET_MODE (src) == Pmode);
4261
4262 /* Check if any of the two operands is already scheduled
4263 for replacement by reload. This can happen e.g. when
4264 float registers occur in an address. */
4265 sum1 = find_replacement (&XEXP (src, 0));
4266 sum2 = find_replacement (&XEXP (src, 1));
4267 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4268
4269 /* If the address is already strictly valid, there's nothing to do. */
4270 if (!s390_decompose_address (src, &ad)
4271 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4272 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4273 {
4274 /* Otherwise, one of the operands cannot be an address register;
4275 we reload its value into the scratch register. */
4276 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4277 {
4278 emit_move_insn (scratch, sum1);
4279 sum1 = scratch;
4280 }
4281 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4282 {
4283 emit_move_insn (scratch, sum2);
4284 sum2 = scratch;
4285 }
4286
4287 /* According to the way these invalid addresses are generated
4288 in reload.c, it should never happen (at least on s390) that
4289 *neither* of the PLUS components, after find_replacements
4290 was applied, is an address register. */
4291 if (sum1 == scratch && sum2 == scratch)
4292 {
4293 debug_rtx (src);
4294 gcc_unreachable ();
4295 }
4296
4297 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4298 }
4299
4300 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4301 is only ever performed on addresses, so we can mark the
4302 sum as legitimate for LA in any case. */
4303 s390_load_address (target, src);
4304 }
4305
4306
4307 /* Return true if ADDR is a valid memory address.
4308 STRICT specifies whether strict register checking applies. */
4309
4310 static bool
4311 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4312 {
4313 struct s390_address ad;
4314
4315 if (TARGET_Z10
4316 && larl_operand (addr, VOIDmode)
4317 && (mode == VOIDmode
4318 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4319 return true;
4320
4321 if (!s390_decompose_address (addr, &ad))
4322 return false;
4323
4324 if (strict)
4325 {
4326 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4327 return false;
4328
4329 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4330 return false;
4331 }
4332 else
4333 {
4334 if (ad.base
4335 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4336 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4337 return false;
4338
4339 if (ad.indx
4340 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4341 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4342 return false;
4343 }
4344 return true;
4345 }
4346
4347 /* Return true if OP is a valid operand for the LA instruction.
4348 In 31-bit, we need to prove that the result is used as an
4349 address, as LA performs only a 31-bit addition. */
4350
4351 bool
4352 legitimate_la_operand_p (rtx op)
4353 {
4354 struct s390_address addr;
4355 if (!s390_decompose_address (op, &addr))
4356 return false;
4357
4358 return (TARGET_64BIT || addr.pointer);
4359 }
4360
4361 /* Return true if it is valid *and* preferable to use LA to
4362 compute the sum of OP1 and OP2. */
4363
4364 bool
4365 preferred_la_operand_p (rtx op1, rtx op2)
4366 {
4367 struct s390_address addr;
4368
4369 if (op2 != const0_rtx)
4370 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4371
4372 if (!s390_decompose_address (op1, &addr))
4373 return false;
4374 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4375 return false;
4376 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4377 return false;
4378
4379 /* Avoid LA instructions with index register on z196; it is
4380 preferable to use regular add instructions when possible.
4381 Starting with zEC12 the la with index register is "uncracked"
4382 again. */
4383 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4384 return false;
4385
4386 if (!TARGET_64BIT && !addr.pointer)
4387 return false;
4388
4389 if (addr.pointer)
4390 return true;
4391
4392 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4393 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4394 return true;
4395
4396 return false;
4397 }
4398
4399 /* Emit a forced load-address operation to load SRC into DST.
4400 This will use the LOAD ADDRESS instruction even in situations
4401 where legitimate_la_operand_p (SRC) returns false. */
4402
4403 void
4404 s390_load_address (rtx dst, rtx src)
4405 {
4406 if (TARGET_64BIT)
4407 emit_move_insn (dst, src);
4408 else
4409 emit_insn (gen_force_la_31 (dst, src));
4410 }
4411
4412 /* Return a legitimate reference for ORIG (an address) using the
4413 register REG. If REG is 0, a new pseudo is generated.
4414
4415 There are two types of references that must be handled:
4416
4417 1. Global data references must load the address from the GOT, via
4418 the PIC reg. An insn is emitted to do this load, and the reg is
4419 returned.
4420
4421 2. Static data references, constant pool addresses, and code labels
4422 compute the address as an offset from the GOT, whose base is in
4423 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4424 differentiate them from global data objects. The returned
4425 address is the PIC reg + an unspec constant.
4426
4427 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4428 reg also appears in the address. */
4429
4430 rtx
4431 legitimize_pic_address (rtx orig, rtx reg)
4432 {
4433 rtx addr = orig;
4434 rtx addend = const0_rtx;
4435 rtx new_rtx = orig;
4436
4437 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4438
4439 if (GET_CODE (addr) == CONST)
4440 addr = XEXP (addr, 0);
4441
4442 if (GET_CODE (addr) == PLUS)
4443 {
4444 addend = XEXP (addr, 1);
4445 addr = XEXP (addr, 0);
4446 }
4447
4448 if ((GET_CODE (addr) == LABEL_REF
4449 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4450 || (GET_CODE (addr) == UNSPEC &&
4451 (XINT (addr, 1) == UNSPEC_GOTENT
4452 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4453 && GET_CODE (addend) == CONST_INT)
4454 {
4455 /* This can be locally addressed. */
4456
4457 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4458 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4459 gen_rtx_CONST (Pmode, addr) : addr);
4460
4461 if (TARGET_CPU_ZARCH
4462 && larl_operand (const_addr, VOIDmode)
4463 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4464 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4465 {
4466 if (INTVAL (addend) & 1)
4467 {
4468 /* LARL can't handle odd offsets, so emit a pair of LARL
4469 and LA. */
4470 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4471
4472 if (!DISP_IN_RANGE (INTVAL (addend)))
4473 {
4474 HOST_WIDE_INT even = INTVAL (addend) - 1;
4475 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4476 addr = gen_rtx_CONST (Pmode, addr);
4477 addend = const1_rtx;
4478 }
4479
4480 emit_move_insn (temp, addr);
4481 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4482
4483 if (reg != 0)
4484 {
4485 s390_load_address (reg, new_rtx);
4486 new_rtx = reg;
4487 }
4488 }
4489 else
4490 {
4491 /* If the offset is even, we can just use LARL. This
4492 will happen automatically. */
4493 }
4494 }
4495 else
4496 {
4497 /* No larl - Access local symbols relative to the GOT. */
4498
4499 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4500
4501 if (reload_in_progress || reload_completed)
4502 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4503
4504 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4505 if (addend != const0_rtx)
4506 addr = gen_rtx_PLUS (Pmode, addr, addend);
4507 addr = gen_rtx_CONST (Pmode, addr);
4508 addr = force_const_mem (Pmode, addr);
4509 emit_move_insn (temp, addr);
4510
4511 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4512 if (reg != 0)
4513 {
4514 s390_load_address (reg, new_rtx);
4515 new_rtx = reg;
4516 }
4517 }
4518 }
4519 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4520 {
4521 /* A non-local symbol reference without addend.
4522
4523 The symbol ref is wrapped into an UNSPEC to make sure the
4524 proper operand modifier (@GOT or @GOTENT) will be emitted.
4525 This will tell the linker to put the symbol into the GOT.
4526
4527 Additionally the code dereferencing the GOT slot is emitted here.
4528
4529 An addend to the symref needs to be added afterwards.
4530 legitimize_pic_address calls itself recursively to handle
4531 that case. So no need to do it here. */
4532
4533 if (reg == 0)
4534 reg = gen_reg_rtx (Pmode);
4535
4536 if (TARGET_Z10)
4537 {
4538 /* Use load relative if possible.
4539 lgrl <target>, sym@GOTENT */
4540 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4541 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4542 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4543
4544 emit_move_insn (reg, new_rtx);
4545 new_rtx = reg;
4546 }
4547 else if (flag_pic == 1)
4548 {
4549 /* Assume GOT offset is a valid displacement operand (< 4k
4550 or < 512k with z990). This is handled the same way in
4551 both 31- and 64-bit code (@GOT).
4552 lg <target>, sym@GOT(r12) */
4553
4554 if (reload_in_progress || reload_completed)
4555 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4556
4557 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4558 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4559 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4560 new_rtx = gen_const_mem (Pmode, new_rtx);
4561 emit_move_insn (reg, new_rtx);
4562 new_rtx = reg;
4563 }
4564 else if (TARGET_CPU_ZARCH)
4565 {
4566 /* If the GOT offset might be >= 4k, we determine the position
4567 of the GOT entry via a PC-relative LARL (@GOTENT).
4568 larl temp, sym@GOTENT
4569 lg <target>, 0(temp) */
4570
4571 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4572
4573 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4574 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4575
4576 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4577 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4578 emit_move_insn (temp, new_rtx);
4579
4580 new_rtx = gen_const_mem (Pmode, temp);
4581 emit_move_insn (reg, new_rtx);
4582
4583 new_rtx = reg;
4584 }
4585 else
4586 {
4587 /* If the GOT offset might be >= 4k, we have to load it
4588 from the literal pool (@GOT).
4589
4590 lg temp, lit-litbase(r13)
4591 lg <target>, 0(temp)
4592 lit: .long sym@GOT */
4593
4594 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4595
4596 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4597 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4598
4599 if (reload_in_progress || reload_completed)
4600 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4601
4602 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4603 addr = gen_rtx_CONST (Pmode, addr);
4604 addr = force_const_mem (Pmode, addr);
4605 emit_move_insn (temp, addr);
4606
4607 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4608 new_rtx = gen_const_mem (Pmode, new_rtx);
4609 emit_move_insn (reg, new_rtx);
4610 new_rtx = reg;
4611 }
4612 }
4613 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4614 {
4615 gcc_assert (XVECLEN (addr, 0) == 1);
4616 switch (XINT (addr, 1))
4617 {
4618 /* These address symbols (or PLT slots) relative to the GOT
4619 (not GOT slots!). In general this will exceed the
4620 displacement range, so these values belong in the literal
4621 pool. */
4622 case UNSPEC_GOTOFF:
4623 case UNSPEC_PLTOFF:
4624 new_rtx = force_const_mem (Pmode, orig);
4625 break;
4626
4627 /* For -fPIC the GOT size might exceed the displacement
4628 range so make sure the value is in the literal pool. */
4629 case UNSPEC_GOT:
4630 if (flag_pic == 2)
4631 new_rtx = force_const_mem (Pmode, orig);
4632 break;
4633
4634 /* For @GOTENT larl is used. This is handled like local
4635 symbol refs. */
4636 case UNSPEC_GOTENT:
4637 gcc_unreachable ();
4638 break;
4639
4640 /* @PLT is OK as is on 64-bit, must be converted to
4641 GOT-relative @PLTOFF on 31-bit. */
4642 case UNSPEC_PLT:
4643 if (!TARGET_CPU_ZARCH)
4644 {
4645 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4646
4647 if (reload_in_progress || reload_completed)
4648 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4649
4650 addr = XVECEXP (addr, 0, 0);
4651 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4652 UNSPEC_PLTOFF);
4653 if (addend != const0_rtx)
4654 addr = gen_rtx_PLUS (Pmode, addr, addend);
4655 addr = gen_rtx_CONST (Pmode, addr);
4656 addr = force_const_mem (Pmode, addr);
4657 emit_move_insn (temp, addr);
4658
4659 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4660 if (reg != 0)
4661 {
4662 s390_load_address (reg, new_rtx);
4663 new_rtx = reg;
4664 }
4665 }
4666 else
4667 /* On 64 bit larl can be used. This case is handled like
4668 local symbol refs. */
4669 gcc_unreachable ();
4670 break;
4671
4672 /* Everything else cannot happen. */
4673 default:
4674 gcc_unreachable ();
4675 }
4676 }
4677 else if (addend != const0_rtx)
4678 {
4679 /* Otherwise, compute the sum. */
4680
4681 rtx base = legitimize_pic_address (addr, reg);
4682 new_rtx = legitimize_pic_address (addend,
4683 base == reg ? NULL_RTX : reg);
4684 if (GET_CODE (new_rtx) == CONST_INT)
4685 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4686 else
4687 {
4688 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4689 {
4690 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4691 new_rtx = XEXP (new_rtx, 1);
4692 }
4693 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4694 }
4695
4696 if (GET_CODE (new_rtx) == CONST)
4697 new_rtx = XEXP (new_rtx, 0);
4698 new_rtx = force_operand (new_rtx, 0);
4699 }
4700
4701 return new_rtx;
4702 }
4703
4704 /* Load the thread pointer into a register. */
4705
4706 rtx
4707 s390_get_thread_pointer (void)
4708 {
4709 rtx tp = gen_reg_rtx (Pmode);
4710
4711 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4712 mark_reg_pointer (tp, BITS_PER_WORD);
4713
4714 return tp;
4715 }
4716
4717 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4718 in s390_tls_symbol which always refers to __tls_get_offset.
4719 The returned offset is written to RESULT_REG and a USE rtx is
4720 generated for TLS_CALL. */
4721
4722 static GTY(()) rtx s390_tls_symbol;
4723
4724 static void
4725 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4726 {
4727 rtx insn;
4728
4729 if (!flag_pic)
4730 emit_insn (s390_load_got ());
4731
4732 if (!s390_tls_symbol)
4733 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4734
4735 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4736 gen_rtx_REG (Pmode, RETURN_REGNUM));
4737
4738 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4739 RTL_CONST_CALL_P (insn) = 1;
4740 }
4741
4742 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4743 this (thread-local) address. REG may be used as a temporary. */
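/* As a rough guide to the cases below (see each case for the exact
   sequences): global-dynamic and local-dynamic call __tls_get_offset,
   initial-exec loads the offset from the GOT, and local-exec simply adds
   the sym@NTPOFF constant (loaded from the literal pool) to the thread
   pointer.  */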
4744
4745 static rtx
4746 legitimize_tls_address (rtx addr, rtx reg)
4747 {
4748 rtx new_rtx, tls_call, temp, base, r2, insn;
4749
4750 if (GET_CODE (addr) == SYMBOL_REF)
4751 switch (tls_symbolic_operand (addr))
4752 {
4753 case TLS_MODEL_GLOBAL_DYNAMIC:
4754 start_sequence ();
4755 r2 = gen_rtx_REG (Pmode, 2);
4756 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4757 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4758 new_rtx = force_const_mem (Pmode, new_rtx);
4759 emit_move_insn (r2, new_rtx);
4760 s390_emit_tls_call_insn (r2, tls_call);
4761 insn = get_insns ();
4762 end_sequence ();
4763
4764 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4765 temp = gen_reg_rtx (Pmode);
4766 emit_libcall_block (insn, temp, r2, new_rtx);
4767
4768 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4769 if (reg != 0)
4770 {
4771 s390_load_address (reg, new_rtx);
4772 new_rtx = reg;
4773 }
4774 break;
4775
4776 case TLS_MODEL_LOCAL_DYNAMIC:
4777 start_sequence ();
4778 r2 = gen_rtx_REG (Pmode, 2);
4779 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4780 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4781 new_rtx = force_const_mem (Pmode, new_rtx);
4782 emit_move_insn (r2, new_rtx);
4783 s390_emit_tls_call_insn (r2, tls_call);
4784 insn = get_insns ();
4785 end_sequence ();
4786
4787 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4788 temp = gen_reg_rtx (Pmode);
4789 emit_libcall_block (insn, temp, r2, new_rtx);
4790
4791 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4792 base = gen_reg_rtx (Pmode);
4793 s390_load_address (base, new_rtx);
4794
4795 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4796 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4797 new_rtx = force_const_mem (Pmode, new_rtx);
4798 temp = gen_reg_rtx (Pmode);
4799 emit_move_insn (temp, new_rtx);
4800
4801 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4802 if (reg != 0)
4803 {
4804 s390_load_address (reg, new_rtx);
4805 new_rtx = reg;
4806 }
4807 break;
4808
4809 case TLS_MODEL_INITIAL_EXEC:
4810 if (flag_pic == 1)
4811 {
4812 /* Assume GOT offset < 4k. This is handled the same way
4813 in both 31- and 64-bit code. */
4814
4815 if (reload_in_progress || reload_completed)
4816 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4817
4818 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4819 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4820 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4821 new_rtx = gen_const_mem (Pmode, new_rtx);
4822 temp = gen_reg_rtx (Pmode);
4823 emit_move_insn (temp, new_rtx);
4824 }
4825 else if (TARGET_CPU_ZARCH)
4826 {
4827 /* If the GOT offset might be >= 4k, we determine the position
4828 of the GOT entry via a PC-relative LARL. */
4829
4830 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4831 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4832 temp = gen_reg_rtx (Pmode);
4833 emit_move_insn (temp, new_rtx);
4834
4835 new_rtx = gen_const_mem (Pmode, temp);
4836 temp = gen_reg_rtx (Pmode);
4837 emit_move_insn (temp, new_rtx);
4838 }
4839 else if (flag_pic)
4840 {
4841 /* If the GOT offset might be >= 4k, we have to load it
4842 from the literal pool. */
4843
4844 if (reload_in_progress || reload_completed)
4845 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4846
4847 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4848 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4849 new_rtx = force_const_mem (Pmode, new_rtx);
4850 temp = gen_reg_rtx (Pmode);
4851 emit_move_insn (temp, new_rtx);
4852
4853 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4854 new_rtx = gen_const_mem (Pmode, new_rtx);
4855
4856 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4857 temp = gen_reg_rtx (Pmode);
4858 emit_insn (gen_rtx_SET (temp, new_rtx));
4859 }
4860 else
4861 {
4862 /* In position-dependent code, load the absolute address of
4863 the GOT entry from the literal pool. */
4864
4865 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4866 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4867 new_rtx = force_const_mem (Pmode, new_rtx);
4868 temp = gen_reg_rtx (Pmode);
4869 emit_move_insn (temp, new_rtx);
4870
4871 new_rtx = temp;
4872 new_rtx = gen_const_mem (Pmode, new_rtx);
4873 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4874 temp = gen_reg_rtx (Pmode);
4875 emit_insn (gen_rtx_SET (temp, new_rtx));
4876 }
4877
4878 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4879 if (reg != 0)
4880 {
4881 s390_load_address (reg, new_rtx);
4882 new_rtx = reg;
4883 }
4884 break;
4885
4886 case TLS_MODEL_LOCAL_EXEC:
4887 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4888 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4889 new_rtx = force_const_mem (Pmode, new_rtx);
4890 temp = gen_reg_rtx (Pmode);
4891 emit_move_insn (temp, new_rtx);
4892
4893 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4894 if (reg != 0)
4895 {
4896 s390_load_address (reg, new_rtx);
4897 new_rtx = reg;
4898 }
4899 break;
4900
4901 default:
4902 gcc_unreachable ();
4903 }
4904
4905 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4906 {
4907 switch (XINT (XEXP (addr, 0), 1))
4908 {
4909 case UNSPEC_INDNTPOFF:
4910 gcc_assert (TARGET_CPU_ZARCH);
4911 new_rtx = addr;
4912 break;
4913
4914 default:
4915 gcc_unreachable ();
4916 }
4917 }
4918
4919 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4920 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4921 {
4922 new_rtx = XEXP (XEXP (addr, 0), 0);
4923 if (GET_CODE (new_rtx) != SYMBOL_REF)
4924 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4925
4926 new_rtx = legitimize_tls_address (new_rtx, reg);
4927 new_rtx = plus_constant (Pmode, new_rtx,
4928 INTVAL (XEXP (XEXP (addr, 0), 1)));
4929 new_rtx = force_operand (new_rtx, 0);
4930 }
4931
4932 else
4933 gcc_unreachable (); /* for now ... */
4934
4935 return new_rtx;
4936 }
4937
4938 /* Emit insns making the address in operands[1] valid for a standard
4939 move to operands[0]. operands[1] is replaced by an address which
4940 should be used instead of the former RTX to emit the move
4941 pattern. */
4942
4943 void
4944 emit_symbolic_move (rtx *operands)
4945 {
4946 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4947
4948 if (GET_CODE (operands[0]) == MEM)
4949 operands[1] = force_reg (Pmode, operands[1]);
4950 else if (TLS_SYMBOLIC_CONST (operands[1]))
4951 operands[1] = legitimize_tls_address (operands[1], temp);
4952 else if (flag_pic)
4953 operands[1] = legitimize_pic_address (operands[1], temp);
4954 }
4955
4956 /* Try machine-dependent ways of modifying an illegitimate address X
4957 to be legitimate. If we find one, return the new, valid address.
4958
4959 OLDX is the address as it was before break_out_memory_refs was called.
4960 In some cases it is useful to look at this to decide what needs to be done.
4961
4962 MODE is the mode of the operand pointed to by X.
4963
4964 When -fpic is used, special handling is needed for symbolic references.
4965 See comments by legitimize_pic_address for details. */
4966
4967 static rtx
4968 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4969 machine_mode mode ATTRIBUTE_UNUSED)
4970 {
4971 rtx constant_term = const0_rtx;
4972
4973 if (TLS_SYMBOLIC_CONST (x))
4974 {
4975 x = legitimize_tls_address (x, 0);
4976
4977 if (s390_legitimate_address_p (mode, x, FALSE))
4978 return x;
4979 }
4980 else if (GET_CODE (x) == PLUS
4981 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
4982 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
4983 {
4984 return x;
4985 }
4986 else if (flag_pic)
4987 {
4988 if (SYMBOLIC_CONST (x)
4989 || (GET_CODE (x) == PLUS
4990 && (SYMBOLIC_CONST (XEXP (x, 0))
4991 || SYMBOLIC_CONST (XEXP (x, 1)))))
4992 x = legitimize_pic_address (x, 0);
4993
4994 if (s390_legitimate_address_p (mode, x, FALSE))
4995 return x;
4996 }
4997
4998 x = eliminate_constant_term (x, &constant_term);
4999
5000 /* Optimize loading of large displacements by splitting them
5001 into the multiple of 4K and the rest; this allows the
5002 former to be CSE'd if possible.
5003
5004 Don't do this if the displacement is added to a register
5005 pointing into the stack frame, as the offsets will
5006 change later anyway. */
5007
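/* Worked example: a displacement of 0x12345 is split into
   upper = 0x12000, loaded into a register and thus CSE-able across
   neighbouring accesses, and lower = 0x345, which fits into the 12-bit
   displacement field.  */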
5008 if (GET_CODE (constant_term) == CONST_INT
5009 && !TARGET_LONG_DISPLACEMENT
5010 && !DISP_IN_RANGE (INTVAL (constant_term))
5011 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5012 {
5013 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5014 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5015
5016 rtx temp = gen_reg_rtx (Pmode);
5017 rtx val = force_operand (GEN_INT (upper), temp);
5018 if (val != temp)
5019 emit_move_insn (temp, val);
5020
5021 x = gen_rtx_PLUS (Pmode, x, temp);
5022 constant_term = GEN_INT (lower);
5023 }
5024
5025 if (GET_CODE (x) == PLUS)
5026 {
5027 if (GET_CODE (XEXP (x, 0)) == REG)
5028 {
5029 rtx temp = gen_reg_rtx (Pmode);
5030 rtx val = force_operand (XEXP (x, 1), temp);
5031 if (val != temp)
5032 emit_move_insn (temp, val);
5033
5034 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5035 }
5036
5037 else if (GET_CODE (XEXP (x, 1)) == REG)
5038 {
5039 rtx temp = gen_reg_rtx (Pmode);
5040 rtx val = force_operand (XEXP (x, 0), temp);
5041 if (val != temp)
5042 emit_move_insn (temp, val);
5043
5044 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5045 }
5046 }
5047
5048 if (constant_term != const0_rtx)
5049 x = gen_rtx_PLUS (Pmode, x, constant_term);
5050
5051 return x;
5052 }
5053
5054 /* Try a machine-dependent way of reloading an illegitimate address AD
5055 operand. If we find one, push the reload and return the new address.
5056
5057 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5058 and TYPE is the reload type of the current reload. */
5059
5060 rtx
5061 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5062 int opnum, int type)
5063 {
5064 if (!optimize || TARGET_LONG_DISPLACEMENT)
5065 return NULL_RTX;
5066
5067 if (GET_CODE (ad) == PLUS)
5068 {
5069 rtx tem = simplify_binary_operation (PLUS, Pmode,
5070 XEXP (ad, 0), XEXP (ad, 1));
5071 if (tem)
5072 ad = tem;
5073 }
5074
5075 if (GET_CODE (ad) == PLUS
5076 && GET_CODE (XEXP (ad, 0)) == REG
5077 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5078 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5079 {
5080 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5081 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5082 rtx cst, tem, new_rtx;
5083
5084 cst = GEN_INT (upper);
5085 if (!legitimate_reload_constant_p (cst))
5086 cst = force_const_mem (Pmode, cst);
5087
5088 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5089 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5090
5091 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5092 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5093 opnum, (enum reload_type) type);
5094 return new_rtx;
5095 }
5096
5097 return NULL_RTX;
5098 }
5099
5100 /* Emit code to move LEN bytes from SRC to DST. */
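/* Roughly, three strategies are used below: a single MVC for constant
   lengths up to 256 bytes, MVCLE when available, and otherwise a loop of
   256-byte MVCs followed by a trailing MVC.  E.g. a constant 600-byte copy
   becomes two 256-byte MVCs plus an 88-byte MVC (a sketch; the emitted
   sequence also contains the loop control code).  */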
5101
5102 bool
5103 s390_expand_movmem (rtx dst, rtx src, rtx len)
5104 {
5105 /* When tuning for z10 or higher we rely on the Glibc functions to
5106 do the right thing. Only for constant lengths below 64k do we
5107 generate inline code. */
5108 if (s390_tune >= PROCESSOR_2097_Z10
5109 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5110 return false;
5111
5112 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5113 {
5114 if (INTVAL (len) > 0)
5115 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5116 }
5117
5118 else if (TARGET_MVCLE)
5119 {
5120 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5121 }
5122
5123 else
5124 {
5125 rtx dst_addr, src_addr, count, blocks, temp;
5126 rtx_code_label *loop_start_label = gen_label_rtx ();
5127 rtx_code_label *loop_end_label = gen_label_rtx ();
5128 rtx_code_label *end_label = gen_label_rtx ();
5129 machine_mode mode;
5130
5131 mode = GET_MODE (len);
5132 if (mode == VOIDmode)
5133 mode = Pmode;
5134
5135 dst_addr = gen_reg_rtx (Pmode);
5136 src_addr = gen_reg_rtx (Pmode);
5137 count = gen_reg_rtx (mode);
5138 blocks = gen_reg_rtx (mode);
5139
5140 convert_move (count, len, 1);
5141 emit_cmp_and_jump_insns (count, const0_rtx,
5142 EQ, NULL_RTX, mode, 1, end_label);
5143
5144 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5145 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5146 dst = change_address (dst, VOIDmode, dst_addr);
5147 src = change_address (src, VOIDmode, src_addr);
5148
5149 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5150 OPTAB_DIRECT);
5151 if (temp != count)
5152 emit_move_insn (count, temp);
5153
5154 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5155 OPTAB_DIRECT);
5156 if (temp != blocks)
5157 emit_move_insn (blocks, temp);
5158
5159 emit_cmp_and_jump_insns (blocks, const0_rtx,
5160 EQ, NULL_RTX, mode, 1, loop_end_label);
5161
5162 emit_label (loop_start_label);
5163
5164 if (TARGET_Z10
5165 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5166 {
5167 rtx prefetch;
5168
5169 /* Issue a read prefetch for the +3 cache line. */
5170 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5171 const0_rtx, const0_rtx);
5172 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5173 emit_insn (prefetch);
5174
5175 /* Issue a write prefetch for the +3 cache line. */
5176 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5177 const1_rtx, const0_rtx);
5178 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5179 emit_insn (prefetch);
5180 }
5181
5182 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5183 s390_load_address (dst_addr,
5184 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5185 s390_load_address (src_addr,
5186 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5187
5188 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5189 OPTAB_DIRECT);
5190 if (temp != blocks)
5191 emit_move_insn (blocks, temp);
5192
5193 emit_cmp_and_jump_insns (blocks, const0_rtx,
5194 EQ, NULL_RTX, mode, 1, loop_end_label);
5195
5196 emit_jump (loop_start_label);
5197 emit_label (loop_end_label);
5198
5199 emit_insn (gen_movmem_short (dst, src,
5200 convert_to_mode (Pmode, count, 1)));
5201 emit_label (end_label);
5202 }
5203 return true;
5204 }
5205
5206 /* Emit code to set LEN bytes at DST to VAL.
5207 Make use of clrmem if VAL is zero. */
5208
5209 void
5210 s390_expand_setmem (rtx dst, rtx len, rtx val)
5211 {
5212 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5213 return;
5214
5215 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5216
5217 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5218 {
5219 if (val == const0_rtx && INTVAL (len) <= 256)
5220 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5221 else
5222 {
5223 /* Initialize memory by storing the first byte. */
5224 emit_move_insn (adjust_address (dst, QImode, 0), val);
5225
5226 if (INTVAL (len) > 1)
5227 {
5228 /* Initiate a 1-byte overlap move.
5229 The first byte of DST is propagated through DSTP1.
5230 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5231 DST is set to size 1 so the rest of the memory location
5232 does not count as a source operand. */
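/* For illustration (not emitted literally): with LEN = 5 and VAL = 0x42
   the initial store sets DST[0] = 0x42, and the overlapping MVC of length
   4 then copies DST[0]->DST[1], DST[1]->DST[2], ... byte by byte, so all
   five bytes end up as 0x42.  */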
5233 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5234 set_mem_size (dst, 1);
5235
5236 emit_insn (gen_movmem_short (dstp1, dst,
5237 GEN_INT (INTVAL (len) - 2)));
5238 }
5239 }
5240 }
5241
5242 else if (TARGET_MVCLE)
5243 {
5244 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5245 if (TARGET_64BIT)
5246 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5247 val));
5248 else
5249 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5250 val));
5251 }
5252
5253 else
5254 {
5255 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5256 rtx_code_label *loop_start_label = gen_label_rtx ();
5257 rtx_code_label *loop_end_label = gen_label_rtx ();
5258 rtx_code_label *end_label = gen_label_rtx ();
5259 machine_mode mode;
5260
5261 mode = GET_MODE (len);
5262 if (mode == VOIDmode)
5263 mode = Pmode;
5264
5265 dst_addr = gen_reg_rtx (Pmode);
5266 count = gen_reg_rtx (mode);
5267 blocks = gen_reg_rtx (mode);
5268
5269 convert_move (count, len, 1);
5270 emit_cmp_and_jump_insns (count, const0_rtx,
5271 EQ, NULL_RTX, mode, 1, end_label);
5272
5273 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5274 dst = change_address (dst, VOIDmode, dst_addr);
5275
5276 if (val == const0_rtx)
5277 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5278 OPTAB_DIRECT);
5279 else
5280 {
5281 dstp1 = adjust_address (dst, VOIDmode, 1);
5282 set_mem_size (dst, 1);
5283
5284 /* Initialize memory by storing the first byte. */
5285 emit_move_insn (adjust_address (dst, QImode, 0), val);
5286
5287 /* If count is 1 we are done. */
5288 emit_cmp_and_jump_insns (count, const1_rtx,
5289 EQ, NULL_RTX, mode, 1, end_label);
5290
5291 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5292 OPTAB_DIRECT);
5293 }
5294 if (temp != count)
5295 emit_move_insn (count, temp);
5296
5297 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5298 OPTAB_DIRECT);
5299 if (temp != blocks)
5300 emit_move_insn (blocks, temp);
5301
5302 emit_cmp_and_jump_insns (blocks, const0_rtx,
5303 EQ, NULL_RTX, mode, 1, loop_end_label);
5304
5305 emit_label (loop_start_label);
5306
5307 if (TARGET_Z10
5308 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5309 {
5310 /* Issue a write prefetch for the +4 cache line. */
5311 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5312 GEN_INT (1024)),
5313 const1_rtx, const0_rtx);
5314 emit_insn (prefetch);
5315 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5316 }
5317
5318 if (val == const0_rtx)
5319 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5320 else
5321 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5322 s390_load_address (dst_addr,
5323 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5324
5325 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5326 OPTAB_DIRECT);
5327 if (temp != blocks)
5328 emit_move_insn (blocks, temp);
5329
5330 emit_cmp_and_jump_insns (blocks, const0_rtx,
5331 EQ, NULL_RTX, mode, 1, loop_end_label);
5332
5333 emit_jump (loop_start_label);
5334 emit_label (loop_end_label);
5335
5336 if (val == const0_rtx)
5337 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5338 else
5339 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5340 emit_label (end_label);
5341 }
5342 }
5343
5344 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5345 and return the result in TARGET. */
5346
5347 bool
5348 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5349 {
5350 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5351 rtx tmp;
5352
5353 /* When tuning for z10 or higher we rely on the Glibc functions to
5354 do the right thing. Only for constant lengths below 64k do we
5355 generate inline code. */
5356 if (s390_tune >= PROCESSOR_2097_Z10
5357 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5358 return false;
5359
5360 /* As the result of CMPINT is inverted compared to what we need,
5361 we have to swap the operands. */
5362 tmp = op0; op0 = op1; op1 = tmp;
5363
5364 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5365 {
5366 if (INTVAL (len) > 0)
5367 {
5368 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5369 emit_insn (gen_cmpint (target, ccreg));
5370 }
5371 else
5372 emit_move_insn (target, const0_rtx);
5373 }
5374 else if (TARGET_MVCLE)
5375 {
5376 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5377 emit_insn (gen_cmpint (target, ccreg));
5378 }
5379 else
5380 {
5381 rtx addr0, addr1, count, blocks, temp;
5382 rtx_code_label *loop_start_label = gen_label_rtx ();
5383 rtx_code_label *loop_end_label = gen_label_rtx ();
5384 rtx_code_label *end_label = gen_label_rtx ();
5385 machine_mode mode;
5386
5387 mode = GET_MODE (len);
5388 if (mode == VOIDmode)
5389 mode = Pmode;
5390
5391 addr0 = gen_reg_rtx (Pmode);
5392 addr1 = gen_reg_rtx (Pmode);
5393 count = gen_reg_rtx (mode);
5394 blocks = gen_reg_rtx (mode);
5395
5396 convert_move (count, len, 1);
5397 emit_cmp_and_jump_insns (count, const0_rtx,
5398 EQ, NULL_RTX, mode, 1, end_label);
5399
5400 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5401 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5402 op0 = change_address (op0, VOIDmode, addr0);
5403 op1 = change_address (op1, VOIDmode, addr1);
5404
5405 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5406 OPTAB_DIRECT);
5407 if (temp != count)
5408 emit_move_insn (count, temp);
5409
5410 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5411 OPTAB_DIRECT);
5412 if (temp != blocks)
5413 emit_move_insn (blocks, temp);
5414
5415 emit_cmp_and_jump_insns (blocks, const0_rtx,
5416 EQ, NULL_RTX, mode, 1, loop_end_label);
5417
5418 emit_label (loop_start_label);
5419
5420 if (TARGET_Z10
5421 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5422 {
5423 rtx prefetch;
5424
5425 /* Issue a read prefetch for the +2 cache line of operand 1. */
5426 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5427 const0_rtx, const0_rtx);
5428 emit_insn (prefetch);
5429 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5430
5431 /* Issue a read prefetch for the +2 cache line of operand 2. */
5432 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5433 const0_rtx, const0_rtx);
5434 emit_insn (prefetch);
5435 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5436 }
5437
5438 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5439 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5440 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5441 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5442 temp = gen_rtx_SET (pc_rtx, temp);
5443 emit_jump_insn (temp);
5444
5445 s390_load_address (addr0,
5446 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5447 s390_load_address (addr1,
5448 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5449
5450 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5451 OPTAB_DIRECT);
5452 if (temp != blocks)
5453 emit_move_insn (blocks, temp);
5454
5455 emit_cmp_and_jump_insns (blocks, const0_rtx,
5456 EQ, NULL_RTX, mode, 1, loop_end_label);
5457
5458 emit_jump (loop_start_label);
5459 emit_label (loop_end_label);
5460
5461 emit_insn (gen_cmpmem_short (op0, op1,
5462 convert_to_mode (Pmode, count, 1)));
5463 emit_label (end_label);
5464
5465 emit_insn (gen_cmpint (target, ccreg));
5466 }
5467 return true;
5468 }
5469
5470 /* Emit a conditional jump to LABEL for condition code mask MASK using
5471 comparison operator COMPARISON. Return the emitted jump insn. */
5472
5473 static rtx
5474 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5475 {
5476 rtx temp;
5477
5478 gcc_assert (comparison == EQ || comparison == NE);
5479 gcc_assert (mask > 0 && mask < 15);
5480
5481 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5482 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5483 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5484 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5485 temp = gen_rtx_SET (pc_rtx, temp);
5486 return emit_jump_insn (temp);
5487 }
5488
5489 /* Emit the instructions to implement strlen of STRING and store the
5490 result in TARGET. The string has the known ALIGNMENT. This
5491 version uses vector instructions and is therefore not appropriate
5492 for targets prior to z13. */
5493
5494 void
5495 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5496 {
5497 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5498 int very_likely = REG_BR_PROB_BASE - 1;
5499 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5500 rtx str_reg = gen_reg_rtx (V16QImode);
5501 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5502 rtx str_idx_reg = gen_reg_rtx (Pmode);
5503 rtx result_reg = gen_reg_rtx (V16QImode);
5504 rtx is_aligned_label = gen_label_rtx ();
5505 rtx into_loop_label = NULL_RTX;
5506 rtx loop_start_label = gen_label_rtx ();
5507 rtx temp;
5508 rtx len = gen_reg_rtx (QImode);
5509 rtx cond;
5510
5511 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5512 emit_move_insn (str_idx_reg, const0_rtx);
5513
5514 if (INTVAL (alignment) < 16)
5515 {
5516 /* Check whether the address happens to be aligned properly; if
5517 so, jump directly to the aligned loop. */
5518 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5519 str_addr_base_reg, GEN_INT (15)),
5520 const0_rtx, EQ, NULL_RTX,
5521 Pmode, 1, is_aligned_label);
5522
5523 temp = gen_reg_rtx (Pmode);
5524 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5525 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5526 gcc_assert (REG_P (temp));
5527 highest_index_to_load_reg =
5528 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5529 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5530 gcc_assert (REG_P (highest_index_to_load_reg));
5531 emit_insn (gen_vllv16qi (str_reg,
5532 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5533 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
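/* Example: if (addr & 15) == 12, highest_index_to_load_reg is 3 and vll
   loads only bytes 0..3, so this first, unaligned load never touches the
   next 16-byte block (which might be on an unmapped page); the remaining
   vector elements are zeroed by vll.  */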
5534
5535 into_loop_label = gen_label_rtx ();
5536 s390_emit_jump (into_loop_label, NULL_RTX);
5537 emit_barrier ();
5538 }
5539
5540 emit_label (is_aligned_label);
5541 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5542
5543 /* From this point on we only perform 16-byte aligned
5544 loads. */
5545 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5546
5547 emit_label (loop_start_label);
5548 LABEL_NUSES (loop_start_label) = 1;
5549
5550 /* Load 16 bytes of the string into VR. */
5551 emit_move_insn (str_reg,
5552 gen_rtx_MEM (V16QImode,
5553 gen_rtx_PLUS (Pmode, str_idx_reg,
5554 str_addr_base_reg)));
5555 if (into_loop_label != NULL_RTX)
5556 {
5557 emit_label (into_loop_label);
5558 LABEL_NUSES (into_loop_label) = 1;
5559 }
5560
5561 /* Increment string index by 16 bytes. */
5562 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5563 str_idx_reg, 1, OPTAB_DIRECT);
5564
5565 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5566 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5567
5568 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5569 REG_BR_PROB, very_likely);
5570 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5571
5572 /* If the string pointer wasn't aligned we have loaded fewer than 16
5573 bytes and the remaining bytes were filled with zeros (by vll).
5574 Now we have to check whether the resulting index lies within the
5575 bytes actually part of the string. */
5576
5577 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5578 highest_index_to_load_reg);
5579 s390_load_address (highest_index_to_load_reg,
5580 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5581 const1_rtx));
5582 if (TARGET_64BIT)
5583 emit_insn (gen_movdicc (str_idx_reg, cond,
5584 highest_index_to_load_reg, str_idx_reg));
5585 else
5586 emit_insn (gen_movsicc (str_idx_reg, cond,
5587 highest_index_to_load_reg, str_idx_reg));
5588
5589 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5590 very_unlikely);
5591
5592 expand_binop (Pmode, add_optab, str_idx_reg,
5593 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5594 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5595 here. */
5596 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5597 convert_to_mode (Pmode, len, 1),
5598 target, 1, OPTAB_DIRECT);
5599 if (temp != target)
5600 emit_move_insn (target, temp);
5601 }
5602
5603 /* Expand conditional increment or decrement using alc/slb instructions.
5604 Should generate code setting DST to either SRC or SRC + INCREMENT,
5605 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5606 Returns true if successful, false otherwise.
5607
5608 That makes it possible to implement some if-constructs without jumps e.g.:
5609 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5610 unsigned int a, b, c;
5611 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5612 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5613 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5614 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5615
5616 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5617 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5618 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5619 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5620 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5621
5622 bool
5623 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5624 rtx dst, rtx src, rtx increment)
5625 {
5626 machine_mode cmp_mode;
5627 machine_mode cc_mode;
5628 rtx op_res;
5629 rtx insn;
5630 rtvec p;
5631 int ret;
5632
5633 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5634 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5635 cmp_mode = SImode;
5636 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5637 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5638 cmp_mode = DImode;
5639 else
5640 return false;
5641
5642 /* Try ADD LOGICAL WITH CARRY. */
5643 if (increment == const1_rtx)
5644 {
5645 /* Determine CC mode to use. */
5646 if (cmp_code == EQ || cmp_code == NE)
5647 {
5648 if (cmp_op1 != const0_rtx)
5649 {
5650 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5651 NULL_RTX, 0, OPTAB_WIDEN);
5652 cmp_op1 = const0_rtx;
5653 }
5654
5655 cmp_code = cmp_code == EQ ? LEU : GTU;
5656 }
5657
5658 if (cmp_code == LTU || cmp_code == LEU)
5659 {
5660 rtx tem = cmp_op0;
5661 cmp_op0 = cmp_op1;
5662 cmp_op1 = tem;
5663 cmp_code = swap_condition (cmp_code);
5664 }
5665
5666 switch (cmp_code)
5667 {
5668 case GTU:
5669 cc_mode = CCUmode;
5670 break;
5671
5672 case GEU:
5673 cc_mode = CCL3mode;
5674 break;
5675
5676 default:
5677 return false;
5678 }
5679
5680 /* Emit comparison instruction pattern. */
5681 if (!register_operand (cmp_op0, cmp_mode))
5682 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5683
5684 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5685 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5686 /* We use insn_invalid_p here to add clobbers if required. */
5687 ret = insn_invalid_p (emit_insn (insn), false);
5688 gcc_assert (!ret);
5689
5690 /* Emit ALC instruction pattern. */
5691 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5692 gen_rtx_REG (cc_mode, CC_REGNUM),
5693 const0_rtx);
5694
5695 if (src != const0_rtx)
5696 {
5697 if (!register_operand (src, GET_MODE (dst)))
5698 src = force_reg (GET_MODE (dst), src);
5699
5700 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5701 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5702 }
5703
5704 p = rtvec_alloc (2);
5705 RTVEC_ELT (p, 0) =
5706 gen_rtx_SET (dst, op_res);
5707 RTVEC_ELT (p, 1) =
5708 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5709 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5710
5711 return true;
5712 }
5713
5714 /* Try SUBTRACT LOGICAL WITH BORROW. */
5715 if (increment == constm1_rtx)
5716 {
5717 /* Determine CC mode to use. */
5718 if (cmp_code == EQ || cmp_code == NE)
5719 {
5720 if (cmp_op1 != const0_rtx)
5721 {
5722 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5723 NULL_RTX, 0, OPTAB_WIDEN);
5724 cmp_op1 = const0_rtx;
5725 }
5726
5727 cmp_code = cmp_code == EQ ? LEU : GTU;
5728 }
5729
5730 if (cmp_code == GTU || cmp_code == GEU)
5731 {
5732 rtx tem = cmp_op0;
5733 cmp_op0 = cmp_op1;
5734 cmp_op1 = tem;
5735 cmp_code = swap_condition (cmp_code);
5736 }
5737
5738 switch (cmp_code)
5739 {
5740 case LEU:
5741 cc_mode = CCUmode;
5742 break;
5743
5744 case LTU:
5745 cc_mode = CCL3mode;
5746 break;
5747
5748 default:
5749 return false;
5750 }
5751
5752 /* Emit comparison instruction pattern. */
5753 if (!register_operand (cmp_op0, cmp_mode))
5754 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5755
5756 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5757 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5758 /* We use insn_invalid_p here to add clobbers if required. */
5759 ret = insn_invalid_p (emit_insn (insn), false);
5760 gcc_assert (!ret);
5761
5762 /* Emit SLB instruction pattern. */
5763 if (!register_operand (src, GET_MODE (dst)))
5764 src = force_reg (GET_MODE (dst), src);
5765
5766 op_res = gen_rtx_MINUS (GET_MODE (dst),
5767 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5768 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5769 gen_rtx_REG (cc_mode, CC_REGNUM),
5770 const0_rtx));
5771 p = rtvec_alloc (2);
5772 RTVEC_ELT (p, 0) =
5773 gen_rtx_SET (dst, op_res);
5774 RTVEC_ELT (p, 1) =
5775 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5776 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5777
5778 return true;
5779 }
5780
5781 return false;
5782 }
5783
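/* Illustrative sketch (not part of the original code): for the ALC case
   above, given unsigned int a, b, c and the source construct

     if (a < b)
       c++;

   the expander emits roughly a CCUmode compare of b against a followed by

     (parallel [(set (reg c)
                     (plus (plus (gtu (reg CC) (const_int 0)) (reg c))
                           (const_int 0)))
                (clobber (reg CC))])

   which matches an add-logical-with-carry (alc/alcr) insn pattern, so no
   conditional branch is needed.  */
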
5784 /* Expand code for the insv template. Return true if successful. */
5785
5786 bool
5787 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5788 {
5789 int bitsize = INTVAL (op1);
5790 int bitpos = INTVAL (op2);
5791 machine_mode mode = GET_MODE (dest);
5792 machine_mode smode;
5793 int smode_bsize, mode_bsize;
5794 rtx op, clobber;
5795
5796 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5797 return false;
5798
5799 /* Generate INSERT IMMEDIATE (IILL et al). */
5800 /* (set (ze (reg)) (const_int)). */
5801 if (TARGET_ZARCH
5802 && register_operand (dest, word_mode)
5803 && (bitpos % 16) == 0
5804 && (bitsize % 16) == 0
5805 && const_int_operand (src, VOIDmode))
5806 {
5807 HOST_WIDE_INT val = INTVAL (src);
5808 int regpos = bitpos + bitsize;
5809
5810 while (regpos > bitpos)
5811 {
5812 machine_mode putmode;
5813 int putsize;
5814
5815 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5816 putmode = SImode;
5817 else
5818 putmode = HImode;
5819
5820 putsize = GET_MODE_BITSIZE (putmode);
5821 regpos -= putsize;
5822 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5823 GEN_INT (putsize),
5824 GEN_INT (regpos)),
5825 gen_int_mode (val, putmode));
5826 val >>= putsize;
5827 }
5828 gcc_assert (regpos == bitpos);
5829 return true;
5830 }
5831
5832 smode = smallest_mode_for_size (bitsize, MODE_INT);
5833 smode_bsize = GET_MODE_BITSIZE (smode);
5834 mode_bsize = GET_MODE_BITSIZE (mode);
5835
5836 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5837 if (bitpos == 0
5838 && (bitsize % BITS_PER_UNIT) == 0
5839 && MEM_P (dest)
5840 && (register_operand (src, word_mode)
5841 || const_int_operand (src, VOIDmode)))
5842 {
5843 /* Emit standard pattern if possible. */
5844 if (smode_bsize == bitsize)
5845 {
5846 emit_move_insn (adjust_address (dest, smode, 0),
5847 gen_lowpart (smode, src));
5848 return true;
5849 }
5850
5851 /* (set (ze (mem)) (const_int)). */
5852 else if (const_int_operand (src, VOIDmode))
5853 {
5854 int size = bitsize / BITS_PER_UNIT;
5855 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
5856 BLKmode,
5857 UNITS_PER_WORD - size);
5858
5859 dest = adjust_address (dest, BLKmode, 0);
5860 set_mem_size (dest, size);
5861 s390_expand_movmem (dest, src_mem, GEN_INT (size));
5862 return true;
5863 }
5864
5865 /* (set (ze (mem)) (reg)). */
5866 else if (register_operand (src, word_mode))
5867 {
5868 if (bitsize <= 32)
5869 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
5870 const0_rtx), src);
5871 else
5872 {
5873 /* Emit st,stcmh sequence. */
5874 int stcmh_width = bitsize - 32;
5875 int size = stcmh_width / BITS_PER_UNIT;
5876
5877 emit_move_insn (adjust_address (dest, SImode, size),
5878 gen_lowpart (SImode, src));
5879 set_mem_size (dest, size);
5880 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5881 GEN_INT (stcmh_width),
5882 const0_rtx),
5883 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
5884 }
5885 return true;
5886 }
5887 }
5888
5889 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
5890 if ((bitpos % BITS_PER_UNIT) == 0
5891 && (bitsize % BITS_PER_UNIT) == 0
5892 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
5893 && MEM_P (src)
5894 && (mode == DImode || mode == SImode)
5895 && register_operand (dest, mode))
5896 {
5897 /* Emit a strict_low_part pattern if possible. */
5898 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
5899 {
5900 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
5901 op = gen_rtx_SET (op, gen_lowpart (smode, src));
5902 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5903 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
5904 return true;
5905 }
5906
5907 /* ??? There are more powerful versions of ICM that are not
5908 completely represented in the md file. */
5909 }
5910
5911 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
5912 if (TARGET_Z10 && (mode == DImode || mode == SImode))
5913 {
5914 machine_mode mode_s = GET_MODE (src);
5915
5916 if (mode_s == VOIDmode)
5917 {
5918 /* For constant zero values the representation with AND
5919 appears to be folded in more situations than the (set
5920 (zero_extract) ...).
5921 We only do this when the start and end of the bitfield
5922 remain in the same SImode chunk. That way nihf or nilf
5923 can be used.
5924 The AND patterns might still generate a risbg for this. */
5925 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
5926 return false;
5927 else
5928 src = force_reg (mode, src);
5929 }
5930 else if (mode_s != mode)
5931 {
5932 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
5933 src = force_reg (mode_s, src);
5934 src = gen_lowpart (mode, src);
5935 }
5936
5937 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
5938 op = gen_rtx_SET (op, src);
5939
5940 if (!TARGET_ZEC12)
5941 {
5942 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5943 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
5944 }
5945 emit_insn (op);
5946
5947 return true;
5948 }
5949
5950 return false;
5951 }
5952
5953 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
5954 register that holds VAL of mode MODE shifted by COUNT bits. */
5955
5956 static inline rtx
5957 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
5958 {
5959 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
5960 NULL_RTX, 1, OPTAB_DIRECT);
5961 return expand_simple_binop (SImode, ASHIFT, val, count,
5962 NULL_RTX, 1, OPTAB_DIRECT);
5963 }
5964
5965 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
5966 the result in TARGET. */
5967
5968 void
5969 s390_expand_vec_compare (rtx target, enum rtx_code cond,
5970 rtx cmp_op1, rtx cmp_op2)
5971 {
5972 machine_mode mode = GET_MODE (target);
5973 bool neg_p = false, swap_p = false;
5974 rtx tmp;
5975
5976 if (GET_MODE (cmp_op1) == V2DFmode)
5977 {
5978 switch (cond)
5979 {
5980 /* NE a != b -> !(a == b) */
5981 case NE: cond = EQ; neg_p = true; break;
5982 /* UNGT a u> b -> !(b >= a) */
5983 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
5984 /* UNGE a u>= b -> !(b > a) */
5985 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
5986 /* LE: a <= b -> b >= a */
5987 case LE: cond = GE; swap_p = true; break;
5988 /* UNLE: a u<= b -> !(a > b) */
5989 case UNLE: cond = GT; neg_p = true; break;
5990 /* LT: a < b -> b > a */
5991 case LT: cond = GT; swap_p = true; break;
5992 /* UNLT: a u< b -> !(a >= b) */
5993 case UNLT: cond = GE; neg_p = true; break;
5994 case UNEQ:
5995 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
5996 return;
5997 case LTGT:
5998 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
5999 return;
6000 case ORDERED:
6001 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6002 return;
6003 case UNORDERED:
6004 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6005 return;
6006 default: break;
6007 }
6008 }
6009 else
6010 {
6011 switch (cond)
6012 {
6013 /* NE: a != b -> !(a == b) */
6014 case NE: cond = EQ; neg_p = true; break;
6015 /* GE: a >= b -> !(b > a) */
6016 case GE: cond = GT; neg_p = true; swap_p = true; break;
6017 /* GEU: a >= b -> !(b > a) */
6018 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6019 /* LE: a <= b -> !(a > b) */
6020 case LE: cond = GT; neg_p = true; break;
6021 /* LEU: a <= b -> !(a > b) */
6022 case LEU: cond = GTU; neg_p = true; break;
6023 /* LT: a < b -> b > a */
6024 case LT: cond = GT; swap_p = true; break;
6025 /* LTU: a < b -> b > a */
6026 case LTU: cond = GTU; swap_p = true; break;
6027 default: break;
6028 }
6029 }
6030
6031 if (swap_p)
6032 {
6033 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6034 }
6035
6036 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6037 mode,
6038 cmp_op1, cmp_op2)));
6039 if (neg_p)
6040 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6041 }
6042
6043 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6044 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6045 elements in CMP1 and CMP2 fulfill the comparison. */
6046 void
6047 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6048 rtx cmp1, rtx cmp2, bool all_p)
6049 {
6050 enum rtx_code new_code = code;
6051 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6052 rtx tmp_reg = gen_reg_rtx (SImode);
6053 bool swap_p = false;
6054
6055 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6056 {
6057 switch (code)
6058 {
6059 case EQ: cmp_mode = CCVEQmode; break;
6060 case NE: cmp_mode = CCVEQmode; break;
6061 case GT: cmp_mode = CCVHmode; break;
6062 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6063 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6064 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6065 case GTU: cmp_mode = CCVHUmode; break;
6066 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6067 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6068 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6069 default: gcc_unreachable ();
6070 }
6071 scratch_mode = GET_MODE (cmp1);
6072 }
6073 else if (GET_MODE (cmp1) == V2DFmode)
6074 {
6075 switch (code)
6076 {
6077 case EQ: cmp_mode = CCVEQmode; break;
6078 case NE: cmp_mode = CCVEQmode; break;
6079 case GT: cmp_mode = CCVFHmode; break;
6080 case GE: cmp_mode = CCVFHEmode; break;
6081 case UNLE: cmp_mode = CCVFHmode; break;
6082 case UNLT: cmp_mode = CCVFHEmode; break;
6083 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6084 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6085 default: gcc_unreachable ();
6086 }
6087 scratch_mode = V2DImode;
6088 }
6089 else
6090 gcc_unreachable ();
6091
6092 if (!all_p)
6093 switch (cmp_mode)
6094 {
6095 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6096 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6097 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6098 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6099 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6100 default: gcc_unreachable ();
6101 }
6102 else
6103 /* The modes without ANY match the ALL modes. */
6104 full_cmp_mode = cmp_mode;
6105
6106 if (swap_p)
6107 {
6108 rtx tmp = cmp2;
6109 cmp2 = cmp1;
6110 cmp1 = tmp;
6111 }
6112
6113 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6114 gen_rtvec (2, gen_rtx_SET (
6115 gen_rtx_REG (cmp_mode, CC_REGNUM),
6116 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6117 gen_rtx_CLOBBER (VOIDmode,
6118 gen_rtx_SCRATCH (scratch_mode)))));
6119 emit_move_insn (target, const0_rtx);
6120 emit_move_insn (tmp_reg, const1_rtx);
6121
6122 emit_move_insn (target,
6123 gen_rtx_IF_THEN_ELSE (SImode,
6124 gen_rtx_fmt_ee (new_code, VOIDmode,
6125 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6126 const0_rtx),
6127 target, tmp_reg));
6128 }
6129
6130 /* Generate a vector comparison expression loading either elements of
6131 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6132 and CMP_OP2. */
6133
6134 void
6135 s390_expand_vcond (rtx target, rtx then, rtx els,
6136 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6137 {
6138 rtx tmp;
6139 machine_mode result_mode;
6140 rtx result_target;
6141
6142 machine_mode target_mode = GET_MODE (target);
6143 machine_mode cmp_mode = GET_MODE (cmp_op1);
6144 rtx op = (cond == LT) ? els : then;
6145
6146 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6147 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6148 for short and byte (x >> 15 and x >> 7 respectively). */
6149 if ((cond == LT || cond == GE)
6150 && target_mode == cmp_mode
6151 && cmp_op2 == CONST0_RTX (cmp_mode)
6152 && op == CONST0_RTX (target_mode)
6153 && s390_vector_mode_supported_p (target_mode)
6154 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6155 {
6156 rtx negop = (cond == LT) ? then : els;
6157
6158 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6159
6160 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6161 if (negop == CONST1_RTX (target_mode))
6162 {
6163 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6164 GEN_INT (shift), target,
6165 1, OPTAB_DIRECT);
6166 if (res != target)
6167 emit_move_insn (target, res);
6168 return;
6169 }
6170
6171 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6172 else if (all_ones_operand (negop, target_mode))
6173 {
6174 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6175 GEN_INT (shift), target,
6176 0, OPTAB_DIRECT);
6177 if (res != target)
6178 emit_move_insn (target, res);
6179 return;
6180 }
6181 }
6182
6183 /* We always use an integral type vector to hold the comparison
6184 result. */
6185 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6186 result_target = gen_reg_rtx (result_mode);
6187
6188 /* We allow vector immediates as comparison operands that
6189 can be handled by the optimization above but not by the
6190 following code. Hence, force them into registers here. */
6191 if (!REG_P (cmp_op1))
6192 cmp_op1 = force_reg (target_mode, cmp_op1);
6193
6194 if (!REG_P (cmp_op2))
6195 cmp_op2 = force_reg (target_mode, cmp_op2);
6196
6197 s390_expand_vec_compare (result_target, cond,
6198 cmp_op1, cmp_op2);
6199
6200 /* If the results are supposed to be either -1 or 0 we are done
6201 since this is what our compare instructions generate anyway. */
6202 if (all_ones_operand (then, GET_MODE (then))
6203 && const0_operand (els, GET_MODE (els)))
6204 {
6205 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6206 result_target, 0));
6207 return;
6208 }
6209
6210 /* Otherwise we will do a vsel afterwards. */
6211 /* This gets triggered e.g.
6212 with gcc.c-torture/compile/pr53410-1.c */
6213 if (!REG_P (then))
6214 then = force_reg (target_mode, then);
6215
6216 if (!REG_P (els))
6217 els = force_reg (target_mode, els);
6218
6219 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6220 result_target,
6221 CONST0_RTX (result_mode));
6222
6223 /* We compared the result against zero above so we have to swap then
6224 and els here. */
6225 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6226
6227 gcc_assert (target_mode == GET_MODE (then));
6228 emit_insn (gen_rtx_SET (target, tmp));
6229 }
6230
6231 /* Emit the RTX necessary to initialize the vector TARGET with values
6232 in VALS. */
6233 void
6234 s390_expand_vec_init (rtx target, rtx vals)
6235 {
6236 machine_mode mode = GET_MODE (target);
6237 machine_mode inner_mode = GET_MODE_INNER (mode);
6238 int n_elts = GET_MODE_NUNITS (mode);
6239 bool all_same = true, all_regs = true, all_const_int = true;
6240 rtx x;
6241 int i;
6242
6243 for (i = 0; i < n_elts; ++i)
6244 {
6245 x = XVECEXP (vals, 0, i);
6246
6247 if (!CONST_INT_P (x))
6248 all_const_int = false;
6249
6250 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6251 all_same = false;
6252
6253 if (!REG_P (x))
6254 all_regs = false;
6255 }
6256
6257 /* Use vector gen mask or vector gen byte mask if possible. */
6258 if (all_same && all_const_int
6259 && (XVECEXP (vals, 0, 0) == const0_rtx
6260 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6261 NULL, NULL)
6262 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6263 {
6264 emit_insn (gen_rtx_SET (target,
6265 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6266 return;
6267 }
6268
6269 if (all_same)
6270 {
6271 emit_insn (gen_rtx_SET (target,
6272 gen_rtx_VEC_DUPLICATE (mode,
6273 XVECEXP (vals, 0, 0))));
6274 return;
6275 }
6276
6277 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6278 {
6279 /* Use vector load pair. */
6280 emit_insn (gen_rtx_SET (target,
6281 gen_rtx_VEC_CONCAT (mode,
6282 XVECEXP (vals, 0, 0),
6283 XVECEXP (vals, 0, 1))));
6284 return;
6285 }
6286
6287 /* We are about to set the vector elements one by one. Zero out the
6288 full register first in order to help the data flow framework to
6289 detect it as a full VR set. */
6290 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6291
6292 /* Unfortunately the vec_init expander is not allowed to fail. So
6293 we have to implement the fallback ourselves. */
6294 for (i = 0; i < n_elts; i++)
6295 emit_insn (gen_rtx_SET (target,
6296 gen_rtx_UNSPEC (mode,
6297 gen_rtvec (3, XVECEXP (vals, 0, i),
6298 GEN_INT (i), target),
6299 UNSPEC_VEC_SET)));
6300 }
6301
6302 /* Structure to hold the initial parameters for a compare_and_swap operation
6303 in HImode and QImode. */
6304
6305 struct alignment_context
6306 {
6307 rtx memsi; /* SI aligned memory location. */
6308 rtx shift; /* Bit offset with regard to lsb. */
6309 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6310 rtx modemaski; /* ~modemask */
6311 bool aligned; /* True if memory is aligned, false otherwise. */
6312 };
6313
6314 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6315 structure AC, simplifying transparently when the memory alignment is known
6316 to be at least 32 bits. MEM is the memory location for the actual operation
6317 and MODE its mode. */
6318
6319 static void
6320 init_alignment_context (struct alignment_context *ac, rtx mem,
6321 machine_mode mode)
6322 {
6323 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6324 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6325
6326 if (ac->aligned)
6327 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6328 else
6329 {
6330 /* Alignment is unknown. */
6331 rtx byteoffset, addr, align;
6332
6333 /* Force the address into a register. */
6334 addr = force_reg (Pmode, XEXP (mem, 0));
6335
6336 /* Align it to SImode. */
6337 align = expand_simple_binop (Pmode, AND, addr,
6338 GEN_INT (-GET_MODE_SIZE (SImode)),
6339 NULL_RTX, 1, OPTAB_DIRECT);
6340 /* Generate MEM. */
6341 ac->memsi = gen_rtx_MEM (SImode, align);
6342 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6343 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6344 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6345
6346 /* Calculate shiftcount. */
6347 byteoffset = expand_simple_binop (Pmode, AND, addr,
6348 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6349 NULL_RTX, 1, OPTAB_DIRECT);
6350 /* As we already have some offset, evaluate the remaining distance. */
6351 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6352 NULL_RTX, 1, OPTAB_DIRECT);
6353 }
6354
6355 /* Shift is the byte count, but we need the bitcount. */
6356 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6357 NULL_RTX, 1, OPTAB_DIRECT);
6358
6359 /* Calculate masks. */
6360 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6361 GEN_INT (GET_MODE_MASK (mode)),
6362 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6363 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6364 NULL_RTX, 1);
6365 }
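
/* Worked example (illustrative): for a QImode access whose address is two
   bytes past an SImode-aligned word, the code above yields
     byteoffset = 2, shift = (3 - 2) * 8 = 8,
     modemask = 0xff << 8 = 0x0000ff00, modemaski = 0xffff00ff,
   i.e. on this big-endian target the byte lives in bits 8..15 of the
   aligned word MEMSI refers to.  */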
6366
6367 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6368 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6369 perform the merge in SEQ2. */
6370
6371 static rtx
6372 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6373 machine_mode mode, rtx val, rtx ins)
6374 {
6375 rtx tmp;
6376
6377 if (ac->aligned)
6378 {
6379 start_sequence ();
6380 tmp = copy_to_mode_reg (SImode, val);
6381 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6382 const0_rtx, ins))
6383 {
6384 *seq1 = NULL;
6385 *seq2 = get_insns ();
6386 end_sequence ();
6387 return tmp;
6388 }
6389 end_sequence ();
6390 }
6391
6392 /* Failed to use insv. Generate a two part shift and mask. */
6393 start_sequence ();
6394 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6395 *seq1 = get_insns ();
6396 end_sequence ();
6397
6398 start_sequence ();
6399 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6400 *seq2 = get_insns ();
6401 end_sequence ();
6402
6403 return tmp;
6404 }
6405
6406 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6407 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6408 value to set if CMP == MEM. */
6409
6410 void
6411 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6412 rtx cmp, rtx new_rtx, bool is_weak)
6413 {
6414 struct alignment_context ac;
6415 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6416 rtx res = gen_reg_rtx (SImode);
6417 rtx_code_label *csloop = NULL, *csend = NULL;
6418
6419 gcc_assert (MEM_P (mem));
6420
6421 init_alignment_context (&ac, mem, mode);
6422
6423 /* Load full word. Subsequent loads are performed by CS. */
6424 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6425 NULL_RTX, 1, OPTAB_DIRECT);
6426
6427 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6428 possible, we try to use insv to make this happen efficiently. If
6429 that fails we'll generate code both inside and outside the loop. */
6430 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6431 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6432
6433 if (seq0)
6434 emit_insn (seq0);
6435 if (seq1)
6436 emit_insn (seq1);
6437
6438 /* Start CS loop. */
6439 if (!is_weak)
6440 {
6441 /* Begin assuming success. */
6442 emit_move_insn (btarget, const1_rtx);
6443
6444 csloop = gen_label_rtx ();
6445 csend = gen_label_rtx ();
6446 emit_label (csloop);
6447 }
6448
6449 /* val = "<mem>00..0<mem>"
6450 * cmp = "00..0<cmp>00..0"
6451 * new = "00..0<new>00..0"
6452 */
6453
6454 emit_insn (seq2);
6455 emit_insn (seq3);
6456
6457 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6458 if (is_weak)
6459 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6460 else
6461 {
6462 rtx tmp;
6463
6464 /* Jump to end if we're done (likely?). */
6465 s390_emit_jump (csend, cc);
6466
6467 /* Check for changes outside mode, and loop internal if so.
6468 Arrange the moves so that the compare is adjacent to the
6469 branch so that we can generate CRJ. */
6470 tmp = copy_to_reg (val);
6471 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6472 1, OPTAB_DIRECT);
6473 cc = s390_emit_compare (NE, val, tmp);
6474 s390_emit_jump (csloop, cc);
6475
6476 /* Failed. */
6477 emit_move_insn (btarget, const0_rtx);
6478 emit_label (csend);
6479 }
6480
6481 /* Return the correct part of the bitfield. */
6482 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6483 NULL_RTX, 1, OPTAB_DIRECT), 1);
6484 }
6485
6486 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6487 and VAL the value to play with. If AFTER is true then store the value
6488 MEM holds after the operation, if AFTER is false then store the value MEM
6489 holds before the operation. If TARGET is zero then discard that value, else
6490 store it to TARGET. */
6491
6492 void
6493 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6494 rtx target, rtx mem, rtx val, bool after)
6495 {
6496 struct alignment_context ac;
6497 rtx cmp;
6498 rtx new_rtx = gen_reg_rtx (SImode);
6499 rtx orig = gen_reg_rtx (SImode);
6500 rtx_code_label *csloop = gen_label_rtx ();
6501
6502 gcc_assert (!target || register_operand (target, VOIDmode));
6503 gcc_assert (MEM_P (mem));
6504
6505 init_alignment_context (&ac, mem, mode);
6506
6507 /* Shift val to the correct bit positions.
6508 Preserve "icm", but prevent "ex icm". */
6509 if (!(ac.aligned && code == SET && MEM_P (val)))
6510 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6511
6512 /* Further preparation insns. */
6513 if (code == PLUS || code == MINUS)
6514 emit_move_insn (orig, val);
6515 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6516 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6517 NULL_RTX, 1, OPTAB_DIRECT);
6518
6519 /* Load full word. Subsequent loads are performed by CS. */
6520 cmp = force_reg (SImode, ac.memsi);
6521
6522 /* Start CS loop. */
6523 emit_label (csloop);
6524 emit_move_insn (new_rtx, cmp);
6525
6526 /* Patch new with val at correct position. */
6527 switch (code)
6528 {
6529 case PLUS:
6530 case MINUS:
6531 val = expand_simple_binop (SImode, code, new_rtx, orig,
6532 NULL_RTX, 1, OPTAB_DIRECT);
6533 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6534 NULL_RTX, 1, OPTAB_DIRECT);
6535 /* FALLTHRU */
6536 case SET:
6537 if (ac.aligned && MEM_P (val))
6538 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6539 0, 0, SImode, val, false);
6540 else
6541 {
6542 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6543 NULL_RTX, 1, OPTAB_DIRECT);
6544 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6545 NULL_RTX, 1, OPTAB_DIRECT);
6546 }
6547 break;
6548 case AND:
6549 case IOR:
6550 case XOR:
6551 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6552 NULL_RTX, 1, OPTAB_DIRECT);
6553 break;
6554 case MULT: /* NAND */
6555 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6556 NULL_RTX, 1, OPTAB_DIRECT);
6557 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6558 NULL_RTX, 1, OPTAB_DIRECT);
6559 break;
6560 default:
6561 gcc_unreachable ();
6562 }
6563
6564 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6565 ac.memsi, cmp, new_rtx));
6566
6567 /* Return the correct part of the bitfield. */
6568 if (target)
6569 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6570 after ? new_rtx : cmp, ac.shift,
6571 NULL_RTX, 1, OPTAB_DIRECT), 1);
6572 }
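
/* The loop emitted above corresponds roughly to the following pseudocode
   (illustrative sketch only):

     cmp = *memsi;                         // initial full-word load
     do
       {
         new = cmp;
         patch VAL into NEW at the byte/halfword position;
         // CS: if *memsi == cmp, store new; otherwise reload cmp
       }
     while (the compare-and-swap failed);
     if (target)
       *target = (after ? new : cmp) >> shift;   // truncated to MODE  */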
6573
6574 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6575 We need to emit DTP-relative relocations. */
6576
6577 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6578
6579 static void
6580 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6581 {
6582 switch (size)
6583 {
6584 case 4:
6585 fputs ("\t.long\t", file);
6586 break;
6587 case 8:
6588 fputs ("\t.quad\t", file);
6589 break;
6590 default:
6591 gcc_unreachable ();
6592 }
6593 output_addr_const (file, x);
6594 fputs ("@DTPOFF", file);
6595 }
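
/* For example, for SIZE == 8 and a (hypothetical) symbol foo this emits
       .quad   foo@DTPOFF
   and for SIZE == 4 the same line with .long.  */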
6596
6597 /* Return the proper mode for REGNO being represented in the dwarf
6598 unwind table. */
6599 machine_mode
6600 s390_dwarf_frame_reg_mode (int regno)
6601 {
6602 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6603
6604 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6605 if (GENERAL_REGNO_P (regno))
6606 save_mode = Pmode;
6607
6608 /* The rightmost 64 bits of vector registers are call-clobbered. */
6609 if (GET_MODE_SIZE (save_mode) > 8)
6610 save_mode = DImode;
6611
6612 return save_mode;
6613 }
6614
6615 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6616 /* Implement TARGET_MANGLE_TYPE. */
6617
6618 static const char *
6619 s390_mangle_type (const_tree type)
6620 {
6621 type = TYPE_MAIN_VARIANT (type);
6622
6623 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6624 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6625 return NULL;
6626
6627 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6628 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6629 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6630 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6631
6632 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6633 && TARGET_LONG_DOUBLE_128)
6634 return "g";
6635
6636 /* For all other types, use normal C++ mangling. */
6637 return NULL;
6638 }
6639 #endif
6640
6641 /* In the name of slightly smaller debug output, and to cater to
6642 general assembler lossage, recognize various UNSPEC sequences
6643 and turn them back into a direct symbol reference. */
6644
6645 static rtx
6646 s390_delegitimize_address (rtx orig_x)
6647 {
6648 rtx x, y;
6649
6650 orig_x = delegitimize_mem_from_attrs (orig_x);
6651 x = orig_x;
6652
6653 /* Extract the symbol ref from:
6654 (plus:SI (reg:SI 12 %r12)
6655 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6656 UNSPEC_GOTOFF/PLTOFF)))
6657 and
6658 (plus:SI (reg:SI 12 %r12)
6659 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6660 UNSPEC_GOTOFF/PLTOFF)
6661 (const_int 4 [0x4])))) */
6662 if (GET_CODE (x) == PLUS
6663 && REG_P (XEXP (x, 0))
6664 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6665 && GET_CODE (XEXP (x, 1)) == CONST)
6666 {
6667 HOST_WIDE_INT offset = 0;
6668
6669 /* The const operand. */
6670 y = XEXP (XEXP (x, 1), 0);
6671
6672 if (GET_CODE (y) == PLUS
6673 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6674 {
6675 offset = INTVAL (XEXP (y, 1));
6676 y = XEXP (y, 0);
6677 }
6678
6679 if (GET_CODE (y) == UNSPEC
6680 && (XINT (y, 1) == UNSPEC_GOTOFF
6681 || XINT (y, 1) == UNSPEC_PLTOFF))
6682 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6683 }
6684
6685 if (GET_CODE (x) != MEM)
6686 return orig_x;
6687
6688 x = XEXP (x, 0);
6689 if (GET_CODE (x) == PLUS
6690 && GET_CODE (XEXP (x, 1)) == CONST
6691 && GET_CODE (XEXP (x, 0)) == REG
6692 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6693 {
6694 y = XEXP (XEXP (x, 1), 0);
6695 if (GET_CODE (y) == UNSPEC
6696 && XINT (y, 1) == UNSPEC_GOT)
6697 y = XVECEXP (y, 0, 0);
6698 else
6699 return orig_x;
6700 }
6701 else if (GET_CODE (x) == CONST)
6702 {
6703 /* Extract the symbol ref from:
6704 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6705 UNSPEC_PLT/GOTENT))) */
6706
6707 y = XEXP (x, 0);
6708 if (GET_CODE (y) == UNSPEC
6709 && (XINT (y, 1) == UNSPEC_GOTENT
6710 || XINT (y, 1) == UNSPEC_PLT))
6711 y = XVECEXP (y, 0, 0);
6712 else
6713 return orig_x;
6714 }
6715 else
6716 return orig_x;
6717
6718 if (GET_MODE (orig_x) != Pmode)
6719 {
6720 if (GET_MODE (orig_x) == BLKmode)
6721 return orig_x;
6722 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6723 if (y == NULL_RTX)
6724 return orig_x;
6725 }
6726 return y;
6727 }
6728
6729 /* Output operand OP to stdio stream FILE.
6730 OP is an address (register + offset) which is not used to address data;
6731 instead the rightmost bits are interpreted as the value. */
6732
6733 static void
6734 print_shift_count_operand (FILE *file, rtx op)
6735 {
6736 HOST_WIDE_INT offset;
6737 rtx base;
6738
6739 /* Extract base register and offset. */
6740 if (!s390_decompose_shift_count (op, &base, &offset))
6741 gcc_unreachable ();
6742
6743 /* Sanity check. */
6744 if (base)
6745 {
6746 gcc_assert (GET_CODE (base) == REG);
6747 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6748 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6749 }
6750
6751 /* Offsets are restricted to twelve bits. */
6752 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6753 if (base)
6754 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6755 }
6756
6757 /* Assigns the number of NOP halfwords to be emitted before and after the
6758 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6759 If hotpatching is disabled for the function, the values are set to zero.
6760 */
6761
6762 static void
6763 s390_function_num_hotpatch_hw (tree decl,
6764 int *hw_before,
6765 int *hw_after)
6766 {
6767 tree attr;
6768
6769 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6770
6771 /* Handle the arguments of the hotpatch attribute. The values
6772 specified via attribute might override the cmdline argument
6773 values. */
6774 if (attr)
6775 {
6776 tree args = TREE_VALUE (attr);
6777
6778 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6779 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6780 }
6781 else
6782 {
6783 /* Use the values specified by the cmdline arguments. */
6784 *hw_before = s390_hotpatch_hw_before_label;
6785 *hw_after = s390_hotpatch_hw_after_label;
6786 }
6787 }
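
/* For illustration, a (hypothetical) declaration such as

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   yields *HW_BEFORE == 1 and *HW_AFTER == 2 for foo, overriding the values
   given on the command line.  */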
6788
6789 /* Write the current .machine and .machinemode specification to the assembler
6790 file. */
6791
6792 #ifdef HAVE_AS_MACHINE_MACHINEMODE
6793 static void
6794 s390_asm_output_machine_for_arch (FILE *asm_out_file)
6795 {
6796 fprintf (asm_out_file, "\t.machinemode %s\n",
6797 (TARGET_ZARCH) ? "zarch" : "esa");
6798 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
6799 if (S390_USE_ARCHITECTURE_MODIFIERS)
6800 {
6801 int cpu_flags;
6802
6803 cpu_flags = processor_flags_table[(int) s390_arch];
6804 if (TARGET_HTM && !(cpu_flags & PF_TX))
6805 fprintf (asm_out_file, "+htm");
6806 else if (!TARGET_HTM && (cpu_flags & PF_TX))
6807 fprintf (asm_out_file, "+nohtm");
6808 if (TARGET_VX && !(cpu_flags & PF_VX))
6809 fprintf (asm_out_file, "+vx");
6810 else if (!TARGET_VX && (cpu_flags & PF_VX))
6811 fprintf (asm_out_file, "+novx");
6812 }
6813 fprintf (asm_out_file, "\"\n");
6814 }
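
/* The output produced above looks roughly like (hypothetical example for
   -march=z13 with default facility settings):

       .machinemode zarch
       .machine "z13"

   with "+htm"/"+nohtm" or "+vx"/"+novx" appended only when a facility
   setting differs from what the selected architecture level implies.  */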
6815
6816 /* Write an extra function header before the very start of the function. */
6817
6818 void
6819 s390_asm_output_function_prefix (FILE *asm_out_file,
6820 const char *fnname ATTRIBUTE_UNUSED)
6821 {
6822 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
6823 return;
6824 /* Since only the function-specific options are saved, but not an indication of
6825 which options are set, it's too much work here to figure out which options
6826 have actually changed. Thus, generate .machine and .machinemode whenever a
6827 function has the target attribute or pragma. */
6828 fprintf (asm_out_file, "\t.machinemode push\n");
6829 fprintf (asm_out_file, "\t.machine push\n");
6830 s390_asm_output_machine_for_arch (asm_out_file);
6831 }
6832
6833 /* Write an extra function footer after the very end of the function. */
6834
6835 void
6836 s390_asm_declare_function_size (FILE *asm_out_file,
6837 const char *fnname ATTRIBUTE_UNUSED, tree decl)
6838 {
6839 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
6840 return;
6841 fprintf (asm_out_file, "\t.machine pop\n");
6842 fprintf (asm_out_file, "\t.machinemode pop\n");
6843 }
6844 #endif
6845
6846 /* Write the extra assembler code needed to declare a function properly. */
6847
6848 void
6849 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
6850 tree decl)
6851 {
6852 int hw_before, hw_after;
6853
6854 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
6855 if (hw_before > 0)
6856 {
6857 unsigned int function_alignment;
6858 int i;
6859
6860 /* Add a trampoline code area before the function label and initialize it
6861 with two-byte nop instructions. This area can be overwritten with code
6862 that jumps to a patched version of the function. */
6863 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
6864 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
6865 hw_before);
6866 for (i = 1; i < hw_before; i++)
6867 fputs ("\tnopr\t%r7\n", asm_out_file);
6868
6869 /* Note: The function label must be aligned so that (a) the bytes of the
6870 following nop do not cross a cacheline boundary, and (b) a jump address
6871 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
6872 stored directly before the label without crossing a cacheline
6873 boundary. All this is necessary to make sure the trampoline code can
6874 be changed atomically.
6875 This alignment is done automatically using the FUNCTION_BOUNDARY, but
6876 if there are NOPs before the function label, the alignment is placed
6877 before them. So it is necessary to duplicate the alignment after the
6878 NOPs. */
6879 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
6880 if (! DECL_USER_ALIGN (decl))
6881 function_alignment = MAX (function_alignment,
6882 (unsigned int) align_functions);
6883 fputs ("\t# alignment for hotpatch\n", asm_out_file);
6884 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
6885 }
6886
6887 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
6888 {
6889 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
6890 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
6891 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
6892 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
6893 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
6894 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
6895 s390_warn_framesize);
6896 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
6897 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
6898 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
6899 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
6900 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
6901 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
6902 TARGET_PACKED_STACK);
6903 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
6904 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
6905 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
6906 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
6907 s390_warn_dynamicstack_p);
6908 }
6909 ASM_OUTPUT_LABEL (asm_out_file, fname);
6910 if (hw_after > 0)
6911 asm_fprintf (asm_out_file,
6912 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
6913 hw_after);
6914 }
6915
6916 /* Output machine-dependent UNSPECs occurring in address constant X
6917 in assembler syntax to stdio stream FILE. Returns true if the
6918 constant X could be recognized, false otherwise. */
6919
6920 static bool
6921 s390_output_addr_const_extra (FILE *file, rtx x)
6922 {
6923 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6924 switch (XINT (x, 1))
6925 {
6926 case UNSPEC_GOTENT:
6927 output_addr_const (file, XVECEXP (x, 0, 0));
6928 fprintf (file, "@GOTENT");
6929 return true;
6930 case UNSPEC_GOT:
6931 output_addr_const (file, XVECEXP (x, 0, 0));
6932 fprintf (file, "@GOT");
6933 return true;
6934 case UNSPEC_GOTOFF:
6935 output_addr_const (file, XVECEXP (x, 0, 0));
6936 fprintf (file, "@GOTOFF");
6937 return true;
6938 case UNSPEC_PLT:
6939 output_addr_const (file, XVECEXP (x, 0, 0));
6940 fprintf (file, "@PLT");
6941 return true;
6942 case UNSPEC_PLTOFF:
6943 output_addr_const (file, XVECEXP (x, 0, 0));
6944 fprintf (file, "@PLTOFF");
6945 return true;
6946 case UNSPEC_TLSGD:
6947 output_addr_const (file, XVECEXP (x, 0, 0));
6948 fprintf (file, "@TLSGD");
6949 return true;
6950 case UNSPEC_TLSLDM:
6951 assemble_name (file, get_some_local_dynamic_name ());
6952 fprintf (file, "@TLSLDM");
6953 return true;
6954 case UNSPEC_DTPOFF:
6955 output_addr_const (file, XVECEXP (x, 0, 0));
6956 fprintf (file, "@DTPOFF");
6957 return true;
6958 case UNSPEC_NTPOFF:
6959 output_addr_const (file, XVECEXP (x, 0, 0));
6960 fprintf (file, "@NTPOFF");
6961 return true;
6962 case UNSPEC_GOTNTPOFF:
6963 output_addr_const (file, XVECEXP (x, 0, 0));
6964 fprintf (file, "@GOTNTPOFF");
6965 return true;
6966 case UNSPEC_INDNTPOFF:
6967 output_addr_const (file, XVECEXP (x, 0, 0));
6968 fprintf (file, "@INDNTPOFF");
6969 return true;
6970 }
6971
6972 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6973 switch (XINT (x, 1))
6974 {
6975 case UNSPEC_POOL_OFFSET:
6976 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6977 output_addr_const (file, x);
6978 return true;
6979 }
6980 return false;
6981 }
6982
6983 /* Output address operand ADDR in assembler syntax to
6984 stdio stream FILE. */
6985
6986 void
6987 print_operand_address (FILE *file, rtx addr)
6988 {
6989 struct s390_address ad;
6990
6991 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6992 {
6993 if (!TARGET_Z10)
6994 {
6995 output_operand_lossage ("symbolic memory references are "
6996 "only supported on z10 or later");
6997 return;
6998 }
6999 output_addr_const (file, addr);
7000 return;
7001 }
7002
7003 if (!s390_decompose_address (addr, &ad)
7004 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7005 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7006 output_operand_lossage ("cannot decompose address");
7007
7008 if (ad.disp)
7009 output_addr_const (file, ad.disp);
7010 else
7011 fprintf (file, "0");
7012
7013 if (ad.base && ad.indx)
7014 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7015 reg_names[REGNO (ad.base)]);
7016 else if (ad.base)
7017 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7018 }
7019
7020 /* Output operand X in assembler syntax to stdio stream FILE.
7021 CODE specified the format flag. The following format flags
7022 are recognized:
7023
7024 'C': print opcode suffix for branch condition.
7025 'D': print opcode suffix for inverse branch condition.
7026 'E': print opcode suffix for branch on index instruction.
7027 'G': print the size of the operand in bytes.
7028 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7029 'M': print the second word of a TImode operand.
7030 'N': print the second word of a DImode operand.
7031 'O': print only the displacement of a memory reference or address.
7032 'R': print only the base register of a memory reference or address.
7033 'S': print S-type memory reference (base+displacement).
7034 'Y': print shift count operand.
7035
7036 'b': print integer X as if it's an unsigned byte.
7037 'c': print integer X as if it's a signed byte.
7038 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7039 'f': "end" contiguous bitmask X in SImode.
7040 'h': print integer X as if it's a signed halfword.
7041 'i': print the first nonzero HImode part of X.
7042 'j': print the first HImode part unequal to -1 of X.
7043 'k': print the first nonzero SImode part of X.
7044 'm': print the first SImode part unequal to -1 of X.
7045 'o': print integer X as if it's an unsigned 32bit word.
7046 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7047 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7048 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7049 'x': print integer X as if it's an unsigned halfword.
7050 'v': print register number as vector register (v1 instead of f1).
7051 */
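
/* For illustration (hypothetical template, not quoted from s390.md): an
   output template such as "sll\t%0,%Y2" would print operand 2 through the
   'Y' case below, e.g. as "4(%r1)" or "12", while "%N1" would print the
   second word of a DImode operand 1.  */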
7052
7053 void
7054 print_operand (FILE *file, rtx x, int code)
7055 {
7056 HOST_WIDE_INT ival;
7057
7058 switch (code)
7059 {
7060 case 'C':
7061 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7062 return;
7063
7064 case 'D':
7065 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7066 return;
7067
7068 case 'E':
7069 if (GET_CODE (x) == LE)
7070 fprintf (file, "l");
7071 else if (GET_CODE (x) == GT)
7072 fprintf (file, "h");
7073 else
7074 output_operand_lossage ("invalid comparison operator "
7075 "for 'E' output modifier");
7076 return;
7077
7078 case 'J':
7079 if (GET_CODE (x) == SYMBOL_REF)
7080 {
7081 fprintf (file, "%s", ":tls_load:");
7082 output_addr_const (file, x);
7083 }
7084 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7085 {
7086 fprintf (file, "%s", ":tls_gdcall:");
7087 output_addr_const (file, XVECEXP (x, 0, 0));
7088 }
7089 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7090 {
7091 fprintf (file, "%s", ":tls_ldcall:");
7092 const char *name = get_some_local_dynamic_name ();
7093 gcc_assert (name);
7094 assemble_name (file, name);
7095 }
7096 else
7097 output_operand_lossage ("invalid reference for 'J' output modifier");
7098 return;
7099
7100 case 'G':
7101 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7102 return;
7103
7104 case 'O':
7105 {
7106 struct s390_address ad;
7107 int ret;
7108
7109 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7110
7111 if (!ret
7112 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7113 || ad.indx)
7114 {
7115 output_operand_lossage ("invalid address for 'O' output modifier");
7116 return;
7117 }
7118
7119 if (ad.disp)
7120 output_addr_const (file, ad.disp);
7121 else
7122 fprintf (file, "0");
7123 }
7124 return;
7125
7126 case 'R':
7127 {
7128 struct s390_address ad;
7129 int ret;
7130
7131 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7132
7133 if (!ret
7134 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7135 || ad.indx)
7136 {
7137 output_operand_lossage ("invalid address for 'R' output modifier");
7138 return;
7139 }
7140
7141 if (ad.base)
7142 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7143 else
7144 fprintf (file, "0");
7145 }
7146 return;
7147
7148 case 'S':
7149 {
7150 struct s390_address ad;
7151 int ret;
7152
7153 if (!MEM_P (x))
7154 {
7155 output_operand_lossage ("memory reference expected for "
7156 "'S' output modifier");
7157 return;
7158 }
7159 ret = s390_decompose_address (XEXP (x, 0), &ad);
7160
7161 if (!ret
7162 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7163 || ad.indx)
7164 {
7165 output_operand_lossage ("invalid address for 'S' output modifier");
7166 return;
7167 }
7168
7169 if (ad.disp)
7170 output_addr_const (file, ad.disp);
7171 else
7172 fprintf (file, "0");
7173
7174 if (ad.base)
7175 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7176 }
7177 return;
7178
7179 case 'N':
7180 if (GET_CODE (x) == REG)
7181 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7182 else if (GET_CODE (x) == MEM)
7183 x = change_address (x, VOIDmode,
7184 plus_constant (Pmode, XEXP (x, 0), 4));
7185 else
7186 output_operand_lossage ("register or memory expression expected "
7187 "for 'N' output modifier");
7188 break;
7189
7190 case 'M':
7191 if (GET_CODE (x) == REG)
7192 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7193 else if (GET_CODE (x) == MEM)
7194 x = change_address (x, VOIDmode,
7195 plus_constant (Pmode, XEXP (x, 0), 8));
7196 else
7197 output_operand_lossage ("register or memory expression expected "
7198 "for 'M' output modifier");
7199 break;
7200
7201 case 'Y':
7202 print_shift_count_operand (file, x);
7203 return;
7204 }
7205
7206 switch (GET_CODE (x))
7207 {
7208 case REG:
7209 /* Print FP regs as fx instead of vx when they are accessed
7210 through non-vector mode. */
7211 if (code == 'v'
7212 || VECTOR_NOFP_REG_P (x)
7213 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7214 || (VECTOR_REG_P (x)
7215 && (GET_MODE_SIZE (GET_MODE (x)) /
7216 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7217 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7218 else
7219 fprintf (file, "%s", reg_names[REGNO (x)]);
7220 break;
7221
7222 case MEM:
7223 output_address (GET_MODE (x), XEXP (x, 0));
7224 break;
7225
7226 case CONST:
7227 case CODE_LABEL:
7228 case LABEL_REF:
7229 case SYMBOL_REF:
7230 output_addr_const (file, x);
7231 break;
7232
7233 case CONST_INT:
7234 ival = INTVAL (x);
7235 switch (code)
7236 {
7237 case 0:
7238 break;
7239 case 'b':
7240 ival &= 0xff;
7241 break;
7242 case 'c':
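/* Sign-extend the low 8 bits of IVAL.  */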
7243 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7244 break;
7245 case 'x':
7246 ival &= 0xffff;
7247 break;
7248 case 'h':
7249 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7250 break;
7251 case 'i':
7252 ival = s390_extract_part (x, HImode, 0);
7253 break;
7254 case 'j':
7255 ival = s390_extract_part (x, HImode, -1);
7256 break;
7257 case 'k':
7258 ival = s390_extract_part (x, SImode, 0);
7259 break;
7260 case 'm':
7261 ival = s390_extract_part (x, SImode, -1);
7262 break;
7263 case 'o':
7264 ival &= 0xffffffff;
7265 break;
7266 case 'e': case 'f':
7267 case 's': case 't':
7268 {
7269 int pos, len;
7270 bool ok;
7271
7272 len = (code == 's' || code == 'e' ? 64 : 32);
7273 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7274 gcc_assert (ok);
7275 if (code == 's' || code == 't')
7276 ival = 64 - pos - len;
7277 else
7278 ival = 64 - 1 - pos;
7279 }
7280 break;
7281 default:
7282 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7283 }
7284 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7285 break;
7286
7287 case CONST_WIDE_INT:
7288 if (code == 'b')
7289 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7290 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7291 else if (code == 'x')
7292 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7293 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7294 else if (code == 'h')
7295 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7296 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7297 else
7298 {
7299 if (code == 0)
7300 output_operand_lossage ("invalid constant - try using "
7301 "an output modifier");
7302 else
7303 output_operand_lossage ("invalid constant for output modifier '%c'",
7304 code);
7305 }
7306 break;
7307 case CONST_VECTOR:
7308 switch (code)
7309 {
7310 case 'h':
7311 gcc_assert (const_vec_duplicate_p (x));
7312 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7313 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7314 break;
7315 case 'e':
7316 case 's':
7317 {
7318 int start, stop, inner_len;
7319 bool ok;
7320
7321 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7322 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7323 gcc_assert (ok);
7324 if (code == 's' || code == 't')
7325 ival = inner_len - stop - 1;
7326 else
7327 ival = inner_len - start - 1;
7328 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7329 }
7330 break;
7331 case 't':
7332 {
7333 unsigned mask;
7334 bool ok = s390_bytemask_vector_p (x, &mask);
7335 gcc_assert (ok);
7336 fprintf (file, "%u", mask);
7337 }
7338 break;
7339
7340 default:
7341 output_operand_lossage ("invalid constant vector for output "
7342 "modifier '%c'", code);
7343 }
7344 break;
7345
7346 default:
7347 if (code == 0)
7348 output_operand_lossage ("invalid expression - try using "
7349 "an output modifier");
7350 else
7351 output_operand_lossage ("invalid expression for output "
7352 "modifier '%c'", code);
7353 break;
7354 }
7355 }
7356
7357 /* Target hook for assembling integer objects. We need to define it
7358 here to work a round a bug in some versions of GAS, which couldn't
7359 handle values smaller than INT_MIN when printed in decimal. */
7360
7361 static bool
7362 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7363 {
7364 if (size == 8 && aligned_p
7365 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7366 {
7367 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7368 INTVAL (x));
7369 return true;
7370 }
7371 return default_assemble_integer (x, size, aligned_p);
7372 }
7373
7374 /* Returns true if register REGNO is used for forming
7375 a memory address in expression X. */
7376
7377 static bool
7378 reg_used_in_mem_p (int regno, rtx x)
7379 {
7380 enum rtx_code code = GET_CODE (x);
7381 int i, j;
7382 const char *fmt;
7383
7384 if (code == MEM)
7385 {
7386 if (refers_to_regno_p (regno, XEXP (x, 0)))
7387 return true;
7388 }
7389 else if (code == SET
7390 && GET_CODE (SET_DEST (x)) == PC)
7391 {
7392 if (refers_to_regno_p (regno, SET_SRC (x)))
7393 return true;
7394 }
7395
7396 fmt = GET_RTX_FORMAT (code);
7397 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7398 {
7399 if (fmt[i] == 'e'
7400 && reg_used_in_mem_p (regno, XEXP (x, i)))
7401 return true;
7402
7403 else if (fmt[i] == 'E')
7404 for (j = 0; j < XVECLEN (x, i); j++)
7405 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7406 return true;
7407 }
7408 return false;
7409 }
7410
7411 /* Returns true if expression DEP_RTX sets an address register
7412 used by instruction INSN to address memory. */
7413
7414 static bool
7415 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7416 {
7417 rtx target, pat;
7418
7419 if (NONJUMP_INSN_P (dep_rtx))
7420 dep_rtx = PATTERN (dep_rtx);
7421
7422 if (GET_CODE (dep_rtx) == SET)
7423 {
7424 target = SET_DEST (dep_rtx);
7425 if (GET_CODE (target) == STRICT_LOW_PART)
7426 target = XEXP (target, 0);
7427 while (GET_CODE (target) == SUBREG)
7428 target = SUBREG_REG (target);
7429
7430 if (GET_CODE (target) == REG)
7431 {
7432 int regno = REGNO (target);
7433
7434 if (s390_safe_attr_type (insn) == TYPE_LA)
7435 {
7436 pat = PATTERN (insn);
7437 if (GET_CODE (pat) == PARALLEL)
7438 {
7439 gcc_assert (XVECLEN (pat, 0) == 2);
7440 pat = XVECEXP (pat, 0, 0);
7441 }
7442 gcc_assert (GET_CODE (pat) == SET);
7443 return refers_to_regno_p (regno, SET_SRC (pat));
7444 }
7445 else if (get_attr_atype (insn) == ATYPE_AGEN)
7446 return reg_used_in_mem_p (regno, PATTERN (insn));
7447 }
7448 }
7449 return false;
7450 }
7451
7452 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
7453
7454 int
7455 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7456 {
7457 rtx dep_rtx = PATTERN (dep_insn);
7458 int i;
7459
7460 if (GET_CODE (dep_rtx) == SET
7461 && addr_generation_dependency_p (dep_rtx, insn))
7462 return 1;
7463 else if (GET_CODE (dep_rtx) == PARALLEL)
7464 {
7465 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7466 {
7467 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7468 return 1;
7469 }
7470 }
7471 return 0;
7472 }
7473
7474
7475 /* A C statement (sans semicolon) to update the integer scheduling priority
7476 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7477 reduce the priority to execute INSN later. Do not define this macro if
7478 you do not need to adjust the scheduling priorities of insns.
7479
7480 A STD instruction should be scheduled earlier,
7481 in order to use the bypass. */
7482 static int
7483 s390_adjust_priority (rtx_insn *insn, int priority)
7484 {
7485 if (! INSN_P (insn))
7486 return priority;
7487
7488 if (s390_tune <= PROCESSOR_2064_Z900)
7489 return priority;
7490
7491 switch (s390_safe_attr_type (insn))
7492 {
7493 case TYPE_FSTOREDF:
7494 case TYPE_FSTORESF:
7495 priority = priority << 3;
7496 break;
7497 case TYPE_STORE:
7498 case TYPE_STM:
7499 priority = priority << 1;
7500 break;
7501 default:
7502 break;
7503 }
7504 return priority;
7505 }
7506
7507
7508 /* The number of instructions that can be issued per cycle. */
7509
7510 static int
7511 s390_issue_rate (void)
7512 {
7513 switch (s390_tune)
7514 {
7515 case PROCESSOR_2084_Z990:
7516 case PROCESSOR_2094_Z9_109:
7517 case PROCESSOR_2094_Z9_EC:
7518 case PROCESSOR_2817_Z196:
7519 return 3;
7520 case PROCESSOR_2097_Z10:
7521 return 2;
7522 case PROCESSOR_9672_G5:
7523 case PROCESSOR_9672_G6:
7524 case PROCESSOR_2064_Z900:
7525 /* Starting with EC12 we use the sched_reorder hook to take care
7526 of instruction dispatch constraints. The algorithm only
7527 picks the best instruction and assumes only a single
7528 instruction gets issued per cycle. */
7529 case PROCESSOR_2827_ZEC12:
7530 case PROCESSOR_2964_Z13:
7531 default:
7532 return 1;
7533 }
7534 }
7535
7536 static int
7537 s390_first_cycle_multipass_dfa_lookahead (void)
7538 {
7539 return 4;
7540 }
7541
7542 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7543 Fix up MEMs as required. */
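
/* For example (illustrative), a reference such as

     (mem (symbol_ref))          ;; SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set

   is rewritten below into

     (mem (unspec [(symbol_ref) (base_reg)] UNSPEC_LTREF))

   making the use of the literal pool base register explicit in the RTL.  */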
7544
7545 static void
7546 annotate_constant_pool_refs (rtx *x)
7547 {
7548 int i, j;
7549 const char *fmt;
7550
7551 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7552 || !CONSTANT_POOL_ADDRESS_P (*x));
7553
7554 /* Literal pool references can only occur inside a MEM ... */
7555 if (GET_CODE (*x) == MEM)
7556 {
7557 rtx memref = XEXP (*x, 0);
7558
7559 if (GET_CODE (memref) == SYMBOL_REF
7560 && CONSTANT_POOL_ADDRESS_P (memref))
7561 {
7562 rtx base = cfun->machine->base_reg;
7563 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7564 UNSPEC_LTREF);
7565
7566 *x = replace_equiv_address (*x, addr);
7567 return;
7568 }
7569
7570 if (GET_CODE (memref) == CONST
7571 && GET_CODE (XEXP (memref, 0)) == PLUS
7572 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7573 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7574 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7575 {
7576 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7577 rtx sym = XEXP (XEXP (memref, 0), 0);
7578 rtx base = cfun->machine->base_reg;
7579 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7580 UNSPEC_LTREF);
7581
7582 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7583 return;
7584 }
7585 }
7586
7587 /* ... or a load-address type pattern. */
7588 if (GET_CODE (*x) == SET)
7589 {
7590 rtx addrref = SET_SRC (*x);
7591
7592 if (GET_CODE (addrref) == SYMBOL_REF
7593 && CONSTANT_POOL_ADDRESS_P (addrref))
7594 {
7595 rtx base = cfun->machine->base_reg;
7596 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7597 UNSPEC_LTREF);
7598
7599 SET_SRC (*x) = addr;
7600 return;
7601 }
7602
7603 if (GET_CODE (addrref) == CONST
7604 && GET_CODE (XEXP (addrref, 0)) == PLUS
7605 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7606 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7607 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7608 {
7609 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7610 rtx sym = XEXP (XEXP (addrref, 0), 0);
7611 rtx base = cfun->machine->base_reg;
7612 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7613 UNSPEC_LTREF);
7614
7615 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7616 return;
7617 }
7618 }
7619
7620 /* Annotate LTREL_BASE as well. */
7621 if (GET_CODE (*x) == UNSPEC
7622 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7623 {
7624 rtx base = cfun->machine->base_reg;
7625 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7626 UNSPEC_LTREL_BASE);
7627 return;
7628 }
7629
7630 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7631 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7632 {
7633 if (fmt[i] == 'e')
7634 {
7635 annotate_constant_pool_refs (&XEXP (*x, i));
7636 }
7637 else if (fmt[i] == 'E')
7638 {
7639 for (j = 0; j < XVECLEN (*x, i); j++)
7640 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7641 }
7642 }
7643 }
7644
7645 /* Split all branches that exceed the maximum distance.
7646 Returns true if this created a new literal pool entry. */
7647
7648 static int
7649 s390_split_branches (void)
7650 {
7651 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7652 int new_literal = 0, ret;
7653 rtx_insn *insn;
7654 rtx pat, target;
7655 rtx *label;
7656
7657 /* We need correct insn addresses. */
7658
7659 shorten_branches (get_insns ());
7660
7661 /* Find all branches that exceed 64KB, and split them. */
7662
7663 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7664 {
7665 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7666 continue;
7667
7668 pat = PATTERN (insn);
7669 if (GET_CODE (pat) == PARALLEL)
7670 pat = XVECEXP (pat, 0, 0);
7671 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7672 continue;
7673
7674 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7675 {
7676 label = &SET_SRC (pat);
7677 }
7678 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7679 {
7680 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7681 label = &XEXP (SET_SRC (pat), 1);
7682 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7683 label = &XEXP (SET_SRC (pat), 2);
7684 else
7685 continue;
7686 }
7687 else
7688 continue;
7689
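/* A length of at most 4 bytes means the branch still fits into the
   16-bit relative form and thus reaches its target; such branches
   need no splitting.  */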
7690 if (get_attr_length (insn) <= 4)
7691 continue;
7692
7693 /* We are going to use the return register as a scratch register;
7694 make sure it will be saved/restored by the prologue/epilogue. */
7695 cfun_frame_layout.save_return_addr_p = 1;
7696
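/* Rewrite the branch to go through TEMP_REG.  Without PIC the label
   address itself is placed in the literal pool; with PIC a pool-relative
   offset is loaded and the pool base is added at run time.  */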
7697 if (!flag_pic)
7698 {
7699 new_literal = 1;
7700 rtx mem = force_const_mem (Pmode, *label);
7701 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7702 insn);
7703 INSN_ADDRESSES_NEW (set_insn, -1);
7704 annotate_constant_pool_refs (&PATTERN (set_insn));
7705
7706 target = temp_reg;
7707 }
7708 else
7709 {
7710 new_literal = 1;
7711 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7712 UNSPEC_LTREL_OFFSET);
7713 target = gen_rtx_CONST (Pmode, target);
7714 target = force_const_mem (Pmode, target);
7715 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7716 insn);
7717 INSN_ADDRESSES_NEW (set_insn, -1);
7718 annotate_constant_pool_refs (&PATTERN (set_insn));
7719
7720 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7721 cfun->machine->base_reg),
7722 UNSPEC_LTREL_BASE);
7723 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7724 }
7725
7726 ret = validate_change (insn, label, target, 0);
7727 gcc_assert (ret);
7728 }
7729
7730 return new_literal;
7731 }
7732
7733
7734 /* Find an annotated literal pool symbol referenced in RTX X,
7735 and store it at REF. Will abort if X contains references to
7736 more than one such pool symbol; multiple references to the same
7737 symbol are allowed, however.
7738
7739 The rtx pointed to by REF must be initialized to NULL_RTX
7740 by the caller before calling this routine. */
7741
7742 static void
7743 find_constant_pool_ref (rtx x, rtx *ref)
7744 {
7745 int i, j;
7746 const char *fmt;
7747
7748 /* Ignore LTREL_BASE references. */
7749 if (GET_CODE (x) == UNSPEC
7750 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7751 return;
7752 /* Likewise POOL_ENTRY insns. */
7753 if (GET_CODE (x) == UNSPEC_VOLATILE
7754 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7755 return;
7756
7757 gcc_assert (GET_CODE (x) != SYMBOL_REF
7758 || !CONSTANT_POOL_ADDRESS_P (x));
7759
7760 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7761 {
7762 rtx sym = XVECEXP (x, 0, 0);
7763 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7764 && CONSTANT_POOL_ADDRESS_P (sym));
7765
7766 if (*ref == NULL_RTX)
7767 *ref = sym;
7768 else
7769 gcc_assert (*ref == sym);
7770
7771 return;
7772 }
7773
7774 fmt = GET_RTX_FORMAT (GET_CODE (x));
7775 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7776 {
7777 if (fmt[i] == 'e')
7778 {
7779 find_constant_pool_ref (XEXP (x, i), ref);
7780 }
7781 else if (fmt[i] == 'E')
7782 {
7783 for (j = 0; j < XVECLEN (x, i); j++)
7784 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7785 }
7786 }
7787 }
7788
7789 /* Replace every reference to the annotated literal pool
7790 symbol REF in X by its base plus OFFSET. */
7791
7792 static void
7793 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7794 {
7795 int i, j;
7796 const char *fmt;
7797
7798 gcc_assert (*x != ref);
7799
7800 if (GET_CODE (*x) == UNSPEC
7801 && XINT (*x, 1) == UNSPEC_LTREF
7802 && XVECEXP (*x, 0, 0) == ref)
7803 {
7804 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7805 return;
7806 }
7807
7808 if (GET_CODE (*x) == PLUS
7809 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7810 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7811 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7812 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7813 {
7814 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7815 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7816 return;
7817 }
7818
7819 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7820 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7821 {
7822 if (fmt[i] == 'e')
7823 {
7824 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7825 }
7826 else if (fmt[i] == 'E')
7827 {
7828 for (j = 0; j < XVECLEN (*x, i); j++)
7829 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7830 }
7831 }
7832 }
7833
7834 /* Check whether X contains an UNSPEC_LTREL_BASE.
7835 Return its constant pool symbol if found, NULL_RTX otherwise. */
7836
7837 static rtx
7838 find_ltrel_base (rtx x)
7839 {
7840 int i, j;
7841 const char *fmt;
7842
7843 if (GET_CODE (x) == UNSPEC
7844 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7845 return XVECEXP (x, 0, 0);
7846
7847 fmt = GET_RTX_FORMAT (GET_CODE (x));
7848 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7849 {
7850 if (fmt[i] == 'e')
7851 {
7852 rtx fnd = find_ltrel_base (XEXP (x, i));
7853 if (fnd)
7854 return fnd;
7855 }
7856 else if (fmt[i] == 'E')
7857 {
7858 for (j = 0; j < XVECLEN (x, i); j++)
7859 {
7860 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7861 if (fnd)
7862 return fnd;
7863 }
7864 }
7865 }
7866
7867 return NULL_RTX;
7868 }
7869
7870 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7871
7872 static void
7873 replace_ltrel_base (rtx *x)
7874 {
7875 int i, j;
7876 const char *fmt;
7877
7878 if (GET_CODE (*x) == UNSPEC
7879 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7880 {
7881 *x = XVECEXP (*x, 0, 1);
7882 return;
7883 }
7884
7885 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7886 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7887 {
7888 if (fmt[i] == 'e')
7889 {
7890 replace_ltrel_base (&XEXP (*x, i));
7891 }
7892 else if (fmt[i] == 'E')
7893 {
7894 for (j = 0; j < XVECLEN (*x, i); j++)
7895 replace_ltrel_base (&XVECEXP (*x, i, j));
7896 }
7897 }
7898 }
7899
7900
7901 /* We keep a list of constants which we have to add to internal
7902 constant tables in the middle of large functions. */
7903
7904 #define NR_C_MODES 32
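/* The modes below are listed in order of decreasing size (and hence
   decreasing alignment requirement); s390_dump_pool relies on this
   ordering when emitting the pool entries.  */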
7905 machine_mode constant_modes[NR_C_MODES] =
7906 {
7907 TFmode, TImode, TDmode,
7908 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
7909 V4SFmode, V2DFmode, V1TFmode,
7910 DFmode, DImode, DDmode,
7911 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7912 SFmode, SImode, SDmode,
7913 V4QImode, V2HImode, V1SImode, V1SFmode,
7914 HImode,
7915 V2QImode, V1HImode,
7916 QImode,
7917 V1QImode
7918 };
7919
7920 struct constant
7921 {
7922 struct constant *next;
7923 rtx value;
7924 rtx_code_label *label;
7925 };
7926
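/* A (chunk of the) literal pool.  FIRST_INSN is the first insn covered
   by the chunk, POOL_INSN the placeholder insn marking where the pool
   will eventually be dumped, INSNS the set of UIDs of covered insns,
   LABEL the pool base label, and SIZE the accumulated size in bytes of
   all entries collected so far.  */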
7927 struct constant_pool
7928 {
7929 struct constant_pool *next;
7930 rtx_insn *first_insn;
7931 rtx_insn *pool_insn;
7932 bitmap insns;
7933 rtx_insn *emit_pool_after;
7934
7935 struct constant *constants[NR_C_MODES];
7936 struct constant *execute;
7937 rtx_code_label *label;
7938 int size;
7939 };
7940
7941 /* Allocate new constant_pool structure. */
7942
7943 static struct constant_pool *
7944 s390_alloc_pool (void)
7945 {
7946 struct constant_pool *pool;
7947 int i;
7948
7949 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7950 pool->next = NULL;
7951 for (i = 0; i < NR_C_MODES; i++)
7952 pool->constants[i] = NULL;
7953
7954 pool->execute = NULL;
7955 pool->label = gen_label_rtx ();
7956 pool->first_insn = NULL;
7957 pool->pool_insn = NULL;
7958 pool->insns = BITMAP_ALLOC (NULL);
7959 pool->size = 0;
7960 pool->emit_pool_after = NULL;
7961
7962 return pool;
7963 }
7964
7965 /* Create new constant pool covering instructions starting at INSN
7966 and chain it to the end of POOL_LIST. */
7967
7968 static struct constant_pool *
7969 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7970 {
7971 struct constant_pool *pool, **prev;
7972
7973 pool = s390_alloc_pool ();
7974 pool->first_insn = insn;
7975
7976 for (prev = pool_list; *prev; prev = &(*prev)->next)
7977 ;
7978 *prev = pool;
7979
7980 return pool;
7981 }
7982
7983 /* End range of instructions covered by POOL at INSN and emit
7984 placeholder insn representing the pool. */
7985
7986 static void
7987 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7988 {
7989 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7990
7991 if (!insn)
7992 insn = get_last_insn ();
7993
7994 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7995 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
7996 }
7997
7998 /* Add INSN to the list of insns covered by POOL. */
7999
8000 static void
8001 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8002 {
8003 bitmap_set_bit (pool->insns, INSN_UID (insn));
8004 }
8005
8006 /* Return pool out of POOL_LIST that covers INSN. */
8007
8008 static struct constant_pool *
8009 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8010 {
8011 struct constant_pool *pool;
8012
8013 for (pool = pool_list; pool; pool = pool->next)
8014 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8015 break;
8016
8017 return pool;
8018 }
8019
8020 /* Add constant VAL of mode MODE to the constant pool POOL. */
8021
8022 static void
8023 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8024 {
8025 struct constant *c;
8026 int i;
8027
8028 for (i = 0; i < NR_C_MODES; i++)
8029 if (constant_modes[i] == mode)
8030 break;
8031 gcc_assert (i != NR_C_MODES);
8032
8033 for (c = pool->constants[i]; c != NULL; c = c->next)
8034 if (rtx_equal_p (val, c->value))
8035 break;
8036
8037 if (c == NULL)
8038 {
8039 c = (struct constant *) xmalloc (sizeof *c);
8040 c->value = val;
8041 c->label = gen_label_rtx ();
8042 c->next = pool->constants[i];
8043 pool->constants[i] = c;
8044 pool->size += GET_MODE_SIZE (mode);
8045 }
8046 }
8047
8048 /* Return an rtx that represents the offset of X from the start of
8049 pool POOL. */
8050
8051 static rtx
8052 s390_pool_offset (struct constant_pool *pool, rtx x)
8053 {
8054 rtx label;
8055
8056 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8057 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8058 UNSPEC_POOL_OFFSET);
8059 return gen_rtx_CONST (GET_MODE (x), x);
8060 }
8061
8062 /* Find constant VAL of mode MODE in the constant pool POOL.
8063 Return an RTX describing the distance from the start of
8064 the pool to the location of the constant. */
8065
8066 static rtx
8067 s390_find_constant (struct constant_pool *pool, rtx val,
8068 machine_mode mode)
8069 {
8070 struct constant *c;
8071 int i;
8072
8073 for (i = 0; i < NR_C_MODES; i++)
8074 if (constant_modes[i] == mode)
8075 break;
8076 gcc_assert (i != NR_C_MODES);
8077
8078 for (c = pool->constants[i]; c != NULL; c = c->next)
8079 if (rtx_equal_p (val, c->value))
8080 break;
8081
8082 gcc_assert (c);
8083
8084 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8085 }
8086
8087 /* Check whether INSN is an execute. Return the label_ref to its
8088 execute target template if so, NULL_RTX otherwise. */
8089
8090 static rtx
8091 s390_execute_label (rtx insn)
8092 {
8093 if (NONJUMP_INSN_P (insn)
8094 && GET_CODE (PATTERN (insn)) == PARALLEL
8095 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8096 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8097 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8098
8099 return NULL_RTX;
8100 }
8101
8102 /* Add execute target for INSN to the constant pool POOL. */
8103
8104 static void
8105 s390_add_execute (struct constant_pool *pool, rtx insn)
8106 {
8107 struct constant *c;
8108
8109 for (c = pool->execute; c != NULL; c = c->next)
8110 if (INSN_UID (insn) == INSN_UID (c->value))
8111 break;
8112
8113 if (c == NULL)
8114 {
8115 c = (struct constant *) xmalloc (sizeof *c);
8116 c->value = insn;
8117 c->label = gen_label_rtx ();
8118 c->next = pool->execute;
8119 pool->execute = c;
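/* An execute target template is a single insn and therefore at most
   6 bytes long.  */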
8120 pool->size += 6;
8121 }
8122 }
8123
8124 /* Find execute target for INSN in the constant pool POOL.
8125 Return an RTX describing the distance from the start of
8126 the pool to the location of the execute target. */
8127
8128 static rtx
8129 s390_find_execute (struct constant_pool *pool, rtx insn)
8130 {
8131 struct constant *c;
8132
8133 for (c = pool->execute; c != NULL; c = c->next)
8134 if (INSN_UID (insn) == INSN_UID (c->value))
8135 break;
8136
8137 gcc_assert (c);
8138
8139 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8140 }
8141
8142 /* For an execute INSN, extract the execute target template. */
8143
8144 static rtx
8145 s390_execute_target (rtx insn)
8146 {
8147 rtx pattern = PATTERN (insn);
8148 gcc_assert (s390_execute_label (insn));
8149
8150 if (XVECLEN (pattern, 0) == 2)
8151 {
8152 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8153 }
8154 else
8155 {
8156 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8157 int i;
8158
8159 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8160 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8161
8162 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8163 }
8164
8165 return pattern;
8166 }
8167
8168 /* Indicate that INSN cannot be duplicated. This is the case for
8169 execute insns that carry a unique label. */
8170
8171 static bool
8172 s390_cannot_copy_insn_p (rtx_insn *insn)
8173 {
8174 rtx label = s390_execute_label (insn);
8175 return label && label != const0_rtx;
8176 }
8177
8178 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8179 do not emit the pool base label. */
8180
8181 static void
8182 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8183 {
8184 struct constant *c;
8185 rtx_insn *insn = pool->pool_insn;
8186 int i;
8187
8188 /* Switch to rodata section. */
8189 if (TARGET_CPU_ZARCH)
8190 {
8191 insn = emit_insn_after (gen_pool_section_start (), insn);
8192 INSN_ADDRESSES_NEW (insn, -1);
8193 }
8194
8195 /* Ensure minimum pool alignment. */
8196 if (TARGET_CPU_ZARCH)
8197 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8198 else
8199 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8200 INSN_ADDRESSES_NEW (insn, -1);
8201
8202 /* Emit pool base label. */
8203 if (!remote_label)
8204 {
8205 insn = emit_label_after (pool->label, insn);
8206 INSN_ADDRESSES_NEW (insn, -1);
8207 }
8208
8209 /* Dump constants in descending alignment requirement order,
8210 ensuring proper alignment for every constant. */
8211 for (i = 0; i < NR_C_MODES; i++)
8212 for (c = pool->constants[i]; c; c = c->next)
8213 {
8214 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8215 rtx value = copy_rtx (c->value);
8216 if (GET_CODE (value) == CONST
8217 && GET_CODE (XEXP (value, 0)) == UNSPEC
8218 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8219 && XVECLEN (XEXP (value, 0), 0) == 1)
8220 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8221
8222 insn = emit_label_after (c->label, insn);
8223 INSN_ADDRESSES_NEW (insn, -1);
8224
8225 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8226 gen_rtvec (1, value),
8227 UNSPECV_POOL_ENTRY);
8228 insn = emit_insn_after (value, insn);
8229 INSN_ADDRESSES_NEW (insn, -1);
8230 }
8231
8232 /* Ensure minimum alignment for instructions. */
8233 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8234 INSN_ADDRESSES_NEW (insn, -1);
8235
8236 /* Output in-pool execute template insns. */
8237 for (c = pool->execute; c; c = c->next)
8238 {
8239 insn = emit_label_after (c->label, insn);
8240 INSN_ADDRESSES_NEW (insn, -1);
8241
8242 insn = emit_insn_after (s390_execute_target (c->value), insn);
8243 INSN_ADDRESSES_NEW (insn, -1);
8244 }
8245
8246 /* Switch back to previous section. */
8247 if (TARGET_CPU_ZARCH)
8248 {
8249 insn = emit_insn_after (gen_pool_section_end (), insn);
8250 INSN_ADDRESSES_NEW (insn, -1);
8251 }
8252
8253 insn = emit_barrier_after (insn);
8254 INSN_ADDRESSES_NEW (insn, -1);
8255
8256 /* Remove placeholder insn. */
8257 remove_insn (pool->pool_insn);
8258 }
8259
8260 /* Free all memory used by POOL. */
8261
8262 static void
8263 s390_free_pool (struct constant_pool *pool)
8264 {
8265 struct constant *c, *next;
8266 int i;
8267
8268 for (i = 0; i < NR_C_MODES; i++)
8269 for (c = pool->constants[i]; c; c = next)
8270 {
8271 next = c->next;
8272 free (c);
8273 }
8274
8275 for (c = pool->execute; c; c = next)
8276 {
8277 next = c->next;
8278 free (c);
8279 }
8280
8281 BITMAP_FREE (pool->insns);
8282 free (pool);
8283 }
8284
8285
8286 /* Collect main literal pool. Return NULL on overflow. */
8287
8288 static struct constant_pool *
8289 s390_mainpool_start (void)
8290 {
8291 struct constant_pool *pool;
8292 rtx_insn *insn;
8293
8294 pool = s390_alloc_pool ();
8295
8296 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8297 {
8298 if (NONJUMP_INSN_P (insn)
8299 && GET_CODE (PATTERN (insn)) == SET
8300 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8301 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8302 {
8303 /* There might be two main_pool instructions if base_reg
8304 is call-clobbered; one for shrink-wrapped code and one
8305 for the rest. We want to keep the first. */
8306 if (pool->pool_insn)
8307 {
8308 insn = PREV_INSN (insn);
8309 delete_insn (NEXT_INSN (insn));
8310 continue;
8311 }
8312 pool->pool_insn = insn;
8313 }
8314
8315 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8316 {
8317 s390_add_execute (pool, insn);
8318 }
8319 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8320 {
8321 rtx pool_ref = NULL_RTX;
8322 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8323 if (pool_ref)
8324 {
8325 rtx constant = get_pool_constant (pool_ref);
8326 machine_mode mode = get_pool_mode (pool_ref);
8327 s390_add_constant (pool, constant, mode);
8328 }
8329 }
8330
8331 /* If hot/cold partitioning is enabled, we have to make sure that
8332 the literal pool is emitted in the same section where the
8333 initialization of the literal pool base pointer takes place.
8334 emit_pool_after is only used in the non-overflow case on
8335 non-zarch CPUs, where we can emit the literal pool at the end
8336 of the function body within the text section. */
8337 if (NOTE_P (insn)
8338 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8339 && !pool->emit_pool_after)
8340 pool->emit_pool_after = PREV_INSN (insn);
8341 }
8342
8343 gcc_assert (pool->pool_insn || pool->size == 0);
8344
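/* A single literal pool is addressed with 12-bit displacements from the
   base register and therefore cannot exceed 4096 bytes; larger pools
   have to be split into chunks.  */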
8345 if (pool->size >= 4096)
8346 {
8347 /* We're going to chunkify the pool, so remove the main
8348 pool placeholder insn. */
8349 remove_insn (pool->pool_insn);
8350
8351 s390_free_pool (pool);
8352 pool = NULL;
8353 }
8354
8355 /* If the function ends with the section where the literal pool
8356 should be emitted, set the marker to its end. */
8357 if (pool && !pool->emit_pool_after)
8358 pool->emit_pool_after = get_last_insn ();
8359
8360 return pool;
8361 }
8362
8363 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8364 Modify the current function to output the pool constants as well as
8365 the pool register setup instruction. */
8366
8367 static void
8368 s390_mainpool_finish (struct constant_pool *pool)
8369 {
8370 rtx base_reg = cfun->machine->base_reg;
8371
8372 /* If the pool is empty, we're done. */
8373 if (pool->size == 0)
8374 {
8375 /* We don't actually need a base register after all. */
8376 cfun->machine->base_reg = NULL_RTX;
8377
8378 if (pool->pool_insn)
8379 remove_insn (pool->pool_insn);
8380 s390_free_pool (pool);
8381 return;
8382 }
8383
8384 /* We need correct insn addresses. */
8385 shorten_branches (get_insns ());
8386
8387 /* On zSeries, we use a LARL to load the pool register. The pool is
8388 located in the .rodata section, so we emit it after the function. */
8389 if (TARGET_CPU_ZARCH)
8390 {
8391 rtx set = gen_main_base_64 (base_reg, pool->label);
8392 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8393 INSN_ADDRESSES_NEW (insn, -1);
8394 remove_insn (pool->pool_insn);
8395
8396 insn = get_last_insn ();
8397 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8398 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8399
8400 s390_dump_pool (pool, 0);
8401 }
8402
8403 /* On S/390, if the total size of the function's code plus literal pool
8404 does not exceed 4096 bytes, we use BASR to set up a function base
8405 pointer, and emit the literal pool at the end of the function. */
8406 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8407 + pool->size + 8 /* alignment slop */ < 4096)
8408 {
8409 rtx set = gen_main_base_31_small (base_reg, pool->label);
8410 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8411 INSN_ADDRESSES_NEW (insn, -1);
8412 remove_insn (pool->pool_insn);
8413
8414 insn = emit_label_after (pool->label, insn);
8415 INSN_ADDRESSES_NEW (insn, -1);
8416
8417 /* emit_pool_after will be set by s390_mainpool_start to the
8418 last insn of the section where the literal pool should be
8419 emitted. */
8420 insn = pool->emit_pool_after;
8421
8422 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8423 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8424
8425 s390_dump_pool (pool, 1);
8426 }
8427
8428 /* Otherwise, we emit an inline literal pool and use BASR to branch
8429 over it, setting up the pool register at the same time. */
8430 else
8431 {
8432 rtx_code_label *pool_end = gen_label_rtx ();
8433
8434 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8435 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8436 JUMP_LABEL (insn) = pool_end;
8437 INSN_ADDRESSES_NEW (insn, -1);
8438 remove_insn (pool->pool_insn);
8439
8440 insn = emit_label_after (pool->label, insn);
8441 INSN_ADDRESSES_NEW (insn, -1);
8442
8443 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8444 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8445
8446 insn = emit_label_after (pool_end, pool->pool_insn);
8447 INSN_ADDRESSES_NEW (insn, -1);
8448
8449 s390_dump_pool (pool, 1);
8450 }
8451
8452
8453 /* Replace all literal pool references. */
8454
8455 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8456 {
8457 if (INSN_P (insn))
8458 replace_ltrel_base (&PATTERN (insn));
8459
8460 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8461 {
8462 rtx addr, pool_ref = NULL_RTX;
8463 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8464 if (pool_ref)
8465 {
8466 if (s390_execute_label (insn))
8467 addr = s390_find_execute (pool, insn);
8468 else
8469 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8470 get_pool_mode (pool_ref));
8471
8472 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8473 INSN_CODE (insn) = -1;
8474 }
8475 }
8476 }
8477
8478
8479 /* Free the pool. */
8480 s390_free_pool (pool);
8481 }
8482
8483 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8484 We have decided we cannot use this pool, so revert all changes
8485 to the current function that were done by s390_mainpool_start. */
8486 static void
8487 s390_mainpool_cancel (struct constant_pool *pool)
8488 {
8489 /* We didn't actually change the instruction stream, so simply
8490 free the pool memory. */
8491 s390_free_pool (pool);
8492 }
8493
8494
8495 /* Chunkify the literal pool. */
8496
8497 #define S390_POOL_CHUNK_MIN 0xc00
8498 #define S390_POOL_CHUNK_MAX 0xe00
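/* Both limits stay well below the 4096-byte range reachable with a
   12-bit displacement from the pool base register.  */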
8499
8500 static struct constant_pool *
8501 s390_chunkify_start (void)
8502 {
8503 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8504 int extra_size = 0;
8505 bitmap far_labels;
8506 rtx pending_ltrel = NULL_RTX;
8507 rtx_insn *insn;
8508
8509 rtx (*gen_reload_base) (rtx, rtx) =
8510 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8511
8512
8513 /* We need correct insn addresses. */
8514
8515 shorten_branches (get_insns ());
8516
8517 /* Scan all insns and move literals to pool chunks. */
8518
8519 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8520 {
8521 bool section_switch_p = false;
8522
8523 /* Check for pending LTREL_BASE. */
8524 if (INSN_P (insn))
8525 {
8526 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8527 if (ltrel_base)
8528 {
8529 gcc_assert (ltrel_base == pending_ltrel);
8530 pending_ltrel = NULL_RTX;
8531 }
8532 }
8533
8534 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8535 {
8536 if (!curr_pool)
8537 curr_pool = s390_start_pool (&pool_list, insn);
8538
8539 s390_add_execute (curr_pool, insn);
8540 s390_add_pool_insn (curr_pool, insn);
8541 }
8542 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8543 {
8544 rtx pool_ref = NULL_RTX;
8545 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8546 if (pool_ref)
8547 {
8548 rtx constant = get_pool_constant (pool_ref);
8549 machine_mode mode = get_pool_mode (pool_ref);
8550
8551 if (!curr_pool)
8552 curr_pool = s390_start_pool (&pool_list, insn);
8553
8554 s390_add_constant (curr_pool, constant, mode);
8555 s390_add_pool_insn (curr_pool, insn);
8556
8557 /* Don't split the pool chunk between a LTREL_OFFSET load
8558 and the corresponding LTREL_BASE. */
8559 if (GET_CODE (constant) == CONST
8560 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8561 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8562 {
8563 gcc_assert (!pending_ltrel);
8564 pending_ltrel = pool_ref;
8565 }
8566 }
8567 }
8568
8569 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8570 {
8571 if (curr_pool)
8572 s390_add_pool_insn (curr_pool, insn);
8573 /* An LTREL_BASE must follow within the same basic block. */
8574 gcc_assert (!pending_ltrel);
8575 }
8576
8577 if (NOTE_P (insn))
8578 switch (NOTE_KIND (insn))
8579 {
8580 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8581 section_switch_p = true;
8582 break;
8583 case NOTE_INSN_VAR_LOCATION:
8584 case NOTE_INSN_CALL_ARG_LOCATION:
8585 continue;
8586 default:
8587 break;
8588 }
8589
8590 if (!curr_pool
8591 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8592 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8593 continue;
8594
8595 if (TARGET_CPU_ZARCH)
8596 {
8597 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8598 continue;
8599
8600 s390_end_pool (curr_pool, NULL);
8601 curr_pool = NULL;
8602 }
8603 else
8604 {
8605 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8606 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8607 + extra_size;
8608
8609 /* We will later have to insert base register reload insns.
8610 Those will have an effect on code size, which we need to
8611 consider here. This calculation makes rather pessimistic
8612 worst-case assumptions. */
8613 if (LABEL_P (insn))
8614 extra_size += 6;
8615
8616 if (chunk_size < S390_POOL_CHUNK_MIN
8617 && curr_pool->size < S390_POOL_CHUNK_MIN
8618 && !section_switch_p)
8619 continue;
8620
8621 /* Pool chunks can only be inserted after BARRIERs ... */
8622 if (BARRIER_P (insn))
8623 {
8624 s390_end_pool (curr_pool, insn);
8625 curr_pool = NULL;
8626 extra_size = 0;
8627 }
8628
8629 /* ... so if we don't find one in time, create one. */
8630 else if (chunk_size > S390_POOL_CHUNK_MAX
8631 || curr_pool->size > S390_POOL_CHUNK_MAX
8632 || section_switch_p)
8633 {
8634 rtx_insn *label, *jump, *barrier, *next, *prev;
8635
8636 if (!section_switch_p)
8637 {
8638 /* We can insert the barrier only after a 'real' insn. */
8639 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8640 continue;
8641 if (get_attr_length (insn) == 0)
8642 continue;
8643 /* Don't separate LTREL_BASE from the corresponding
8644 LTREL_OFFSET load. */
8645 if (pending_ltrel)
8646 continue;
8647 next = insn;
8648 do
8649 {
8650 insn = next;
8651 next = NEXT_INSN (insn);
8652 }
8653 while (next
8654 && NOTE_P (next)
8655 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8656 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8657 }
8658 else
8659 {
8660 gcc_assert (!pending_ltrel);
8661
8662 /* The old pool has to end before the section switch
8663 note in order to make it part of the current
8664 section. */
8665 insn = PREV_INSN (insn);
8666 }
8667
8668 label = gen_label_rtx ();
8669 prev = insn;
8670 if (prev && NOTE_P (prev))
8671 prev = prev_nonnote_insn (prev);
8672 if (prev)
8673 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8674 INSN_LOCATION (prev));
8675 else
8676 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8677 barrier = emit_barrier_after (jump);
8678 insn = emit_label_after (label, barrier);
8679 JUMP_LABEL (jump) = label;
8680 LABEL_NUSES (label) = 1;
8681
8682 INSN_ADDRESSES_NEW (jump, -1);
8683 INSN_ADDRESSES_NEW (barrier, -1);
8684 INSN_ADDRESSES_NEW (insn, -1);
8685
8686 s390_end_pool (curr_pool, barrier);
8687 curr_pool = NULL;
8688 extra_size = 0;
8689 }
8690 }
8691 }
8692
8693 if (curr_pool)
8694 s390_end_pool (curr_pool, NULL);
8695 gcc_assert (!pending_ltrel);
8696
8697 /* Find all labels that are branched into
8698 from an insn belonging to a different chunk. */
8699
8700 far_labels = BITMAP_ALLOC (NULL);
8701
8702 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8703 {
8704 rtx_jump_table_data *table;
8705
8706 /* Labels marked with LABEL_PRESERVE_P can be the target
8707 of non-local jumps, so we have to mark them.
8708 The same holds for named labels.
8709
8710 Don't do that, however, if it is the label before
8711 a jump table. */
8712
8713 if (LABEL_P (insn)
8714 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8715 {
8716 rtx_insn *vec_insn = NEXT_INSN (insn);
8717 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8718 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8719 }
8720 /* Check potential targets in a table jump (casesi_jump). */
8721 else if (tablejump_p (insn, NULL, &table))
8722 {
8723 rtx vec_pat = PATTERN (table);
8724 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8725
8726 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8727 {
8728 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8729
8730 if (s390_find_pool (pool_list, label)
8731 != s390_find_pool (pool_list, insn))
8732 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8733 }
8734 }
8735 /* If we have a direct jump (conditional or unconditional),
8736 check all potential targets. */
8737 else if (JUMP_P (insn))
8738 {
8739 rtx pat = PATTERN (insn);
8740
8741 if (GET_CODE (pat) == PARALLEL)
8742 pat = XVECEXP (pat, 0, 0);
8743
8744 if (GET_CODE (pat) == SET)
8745 {
8746 rtx label = JUMP_LABEL (insn);
8747 if (label && !ANY_RETURN_P (label))
8748 {
8749 if (s390_find_pool (pool_list, label)
8750 != s390_find_pool (pool_list, insn))
8751 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8752 }
8753 }
8754 }
8755 }
8756
8757 /* Insert base register reload insns before every pool. */
8758
8759 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8760 {
8761 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8762 curr_pool->label);
8763 rtx_insn *insn = curr_pool->first_insn;
8764 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8765 }
8766
8767 /* Insert base register reload insns at every far label. */
8768
8769 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8770 if (LABEL_P (insn)
8771 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8772 {
8773 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8774 if (pool)
8775 {
8776 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8777 pool->label);
8778 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8779 }
8780 }
8781
8782
8783 BITMAP_FREE (far_labels);
8784
8785
8786 /* Recompute insn addresses. */
8787
8788 init_insn_lengths ();
8789 shorten_branches (get_insns ());
8790
8791 return pool_list;
8792 }
8793
8794 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8795 After we have decided to use this list, finish implementing
8796 all changes to the current function as required. */
8797
8798 static void
8799 s390_chunkify_finish (struct constant_pool *pool_list)
8800 {
8801 struct constant_pool *curr_pool = NULL;
8802 rtx_insn *insn;
8803
8804
8805 /* Replace all literal pool references. */
8806
8807 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8808 {
8809 if (INSN_P (insn))
8810 replace_ltrel_base (&PATTERN (insn));
8811
8812 curr_pool = s390_find_pool (pool_list, insn);
8813 if (!curr_pool)
8814 continue;
8815
8816 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8817 {
8818 rtx addr, pool_ref = NULL_RTX;
8819 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8820 if (pool_ref)
8821 {
8822 if (s390_execute_label (insn))
8823 addr = s390_find_execute (curr_pool, insn);
8824 else
8825 addr = s390_find_constant (curr_pool,
8826 get_pool_constant (pool_ref),
8827 get_pool_mode (pool_ref));
8828
8829 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8830 INSN_CODE (insn) = -1;
8831 }
8832 }
8833 }
8834
8835 /* Dump out all literal pools. */
8836
8837 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8838 s390_dump_pool (curr_pool, 0);
8839
8840 /* Free pool list. */
8841
8842 while (pool_list)
8843 {
8844 struct constant_pool *next = pool_list->next;
8845 s390_free_pool (pool_list);
8846 pool_list = next;
8847 }
8848 }
8849
8850 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8851 We have decided we cannot use this list, so revert all changes
8852 to the current function that were done by s390_chunkify_start. */
8853
8854 static void
8855 s390_chunkify_cancel (struct constant_pool *pool_list)
8856 {
8857 struct constant_pool *curr_pool = NULL;
8858 rtx_insn *insn;
8859
8860 /* Remove all pool placeholder insns. */
8861
8862 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8863 {
8864 /* Did we insert an extra barrier? Remove it. */
8865 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8866 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
8867 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8868
8869 if (jump && JUMP_P (jump)
8870 && barrier && BARRIER_P (barrier)
8871 && label && LABEL_P (label)
8872 && GET_CODE (PATTERN (jump)) == SET
8873 && SET_DEST (PATTERN (jump)) == pc_rtx
8874 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8875 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8876 {
8877 remove_insn (jump);
8878 remove_insn (barrier);
8879 remove_insn (label);
8880 }
8881
8882 remove_insn (curr_pool->pool_insn);
8883 }
8884
8885 /* Remove all base register reload insns. */
8886
8887 for (insn = get_insns (); insn; )
8888 {
8889 rtx_insn *next_insn = NEXT_INSN (insn);
8890
8891 if (NONJUMP_INSN_P (insn)
8892 && GET_CODE (PATTERN (insn)) == SET
8893 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8894 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8895 remove_insn (insn);
8896
8897 insn = next_insn;
8898 }
8899
8900 /* Free pool list. */
8901
8902 while (pool_list)
8903 {
8904 struct constant_pool *next = pool_list->next;
8905 s390_free_pool (pool_list);
8906 pool_list = next;
8907 }
8908 }
8909
8910 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8911
8912 void
8913 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8914 {
8915 switch (GET_MODE_CLASS (mode))
8916 {
8917 case MODE_FLOAT:
8918 case MODE_DECIMAL_FLOAT:
8919 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8920
8921 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
8922 break;
8923
8924 case MODE_INT:
8925 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8926 mark_symbol_refs_as_used (exp);
8927 break;
8928
8929 case MODE_VECTOR_INT:
8930 case MODE_VECTOR_FLOAT:
8931 {
8932 int i;
8933 machine_mode inner_mode;
8934 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8935
8936 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8937 for (i = 0; i < XVECLEN (exp, 0); i++)
8938 s390_output_pool_entry (XVECEXP (exp, 0, i),
8939 inner_mode,
8940 i == 0
8941 ? align
8942 : GET_MODE_BITSIZE (inner_mode));
8943 }
8944 break;
8945
8946 default:
8947 gcc_unreachable ();
8948 }
8949 }
8950
8951
8952 /* Return an RTL expression representing the value of the return address
8953 for the frame COUNT steps up from the current frame. FRAME is the
8954 frame pointer of that frame. */
8955
8956 rtx
8957 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8958 {
8959 int offset;
8960 rtx addr;
8961
8962 /* Without backchain, we fail for all but the current frame. */
8963
8964 if (!TARGET_BACKCHAIN && count > 0)
8965 return NULL_RTX;
8966
8967 /* For the current frame, we need to make sure the initial
8968 value of RETURN_REGNUM is actually saved. */
8969
8970 if (count == 0)
8971 {
8972 /* On non-z architectures branch splitting could overwrite r14. */
8973 if (TARGET_CPU_ZARCH)
8974 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8975 else
8976 {
8977 cfun_frame_layout.save_return_addr_p = true;
8978 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8979 }
8980 }
8981
8982 if (TARGET_PACKED_STACK)
8983 offset = -2 * UNITS_PER_LONG;
8984 else
8985 offset = RETURN_REGNUM * UNITS_PER_LONG;
8986
8987 addr = plus_constant (Pmode, frame, offset);
8988 addr = memory_address (Pmode, addr);
8989 return gen_rtx_MEM (Pmode, addr);
8990 }
8991
8992 /* Return an RTL expression representing the back chain stored in
8993 the current stack frame. */
8994
8995 rtx
8996 s390_back_chain_rtx (void)
8997 {
8998 rtx chain;
8999
9000 gcc_assert (TARGET_BACKCHAIN);
9001
9002 if (TARGET_PACKED_STACK)
9003 chain = plus_constant (Pmode, stack_pointer_rtx,
9004 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9005 else
9006 chain = stack_pointer_rtx;
9007
9008 chain = gen_rtx_MEM (Pmode, chain);
9009 return chain;
9010 }
9011
9012 /* Find the first call-clobbered register unused in the current function.
9013 This could be used as a base register in a leaf function
9014 or for holding the return address before the epilogue. */
9015
9016 static int
9017 find_unused_clobbered_reg (void)
9018 {
9019 int i;
9020 for (i = 0; i < 6; i++)
9021 if (!df_regs_ever_live_p (i))
9022 return i;
9023 return 0;
9024 }
9025
9026
9027 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9028 clobbered hard regs in SETREG. */
9029
9030 static void
9031 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9032 {
9033 char *regs_ever_clobbered = (char *)data;
9034 unsigned int i, regno;
9035 machine_mode mode = GET_MODE (setreg);
9036
9037 if (GET_CODE (setreg) == SUBREG)
9038 {
9039 rtx inner = SUBREG_REG (setreg);
9040 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9041 return;
9042 regno = subreg_regno (setreg);
9043 }
9044 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9045 regno = REGNO (setreg);
9046 else
9047 return;
9048
9049 for (i = regno;
9050 i < regno + HARD_REGNO_NREGS (regno, mode);
9051 i++)
9052 regs_ever_clobbered[i] = 1;
9053 }
9054
9055 /* Walk through all basic blocks of the current function looking
9056 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9057 of the passed char array REGS_EVER_CLOBBERED are set to one for
9058 each of those regs. */
9059
9060 static void
9061 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9062 {
9063 basic_block cur_bb;
9064 rtx_insn *cur_insn;
9065 unsigned int i;
9066
9067 memset (regs_ever_clobbered, 0, 32);
9068
9069 /* For non-leaf functions we have to consider all call clobbered regs to be
9070 clobbered. */
9071 if (!crtl->is_leaf)
9072 {
9073 for (i = 0; i < 32; i++)
9074 regs_ever_clobbered[i] = call_really_used_regs[i];
9075 }
9076
9077 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9078 this work is done by liveness analysis (mark_regs_live_at_end).
9079 Special care is needed for functions containing landing pads. Landing pads
9080 may use the eh registers, but the code which sets these registers is not
9081 contained in that function. Hence s390_regs_ever_clobbered is not able to
9082 deal with this automatically. */
9083 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9084 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9085 if (crtl->calls_eh_return
9086 || (cfun->machine->has_landing_pad_p
9087 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9088 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9089
9090 /* For nonlocal gotos all call-saved registers have to be saved.
9091 This flag is also set for the unwinding code in libgcc.
9092 See expand_builtin_unwind_init. For regs_ever_live this is done by
9093 reload. */
9094 if (crtl->saves_all_registers)
9095 for (i = 0; i < 32; i++)
9096 if (!call_really_used_regs[i])
9097 regs_ever_clobbered[i] = 1;
9098
9099 FOR_EACH_BB_FN (cur_bb, cfun)
9100 {
9101 FOR_BB_INSNS (cur_bb, cur_insn)
9102 {
9103 rtx pat;
9104
9105 if (!INSN_P (cur_insn))
9106 continue;
9107
9108 pat = PATTERN (cur_insn);
9109
9110 /* Ignore GPR restore insns. */
9111 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9112 {
9113 if (GET_CODE (pat) == SET
9114 && GENERAL_REG_P (SET_DEST (pat)))
9115 {
9116 /* lgdr */
9117 if (GET_MODE (SET_SRC (pat)) == DImode
9118 && FP_REG_P (SET_SRC (pat)))
9119 continue;
9120
9121 /* l / lg */
9122 if (GET_CODE (SET_SRC (pat)) == MEM)
9123 continue;
9124 }
9125
9126 /* lm / lmg */
9127 if (GET_CODE (pat) == PARALLEL
9128 && load_multiple_operation (pat, VOIDmode))
9129 continue;
9130 }
9131
9132 note_stores (pat,
9133 s390_reg_clobbered_rtx,
9134 regs_ever_clobbered);
9135 }
9136 }
9137 }
9138
9139 /* Determine the frame area which actually has to be accessed
9140 in the function epilogue. The values are stored at the
9141 given pointers AREA_BOTTOM (address of the lowest used stack
9142 address) and AREA_TOP (address of the first item which does
9143 not belong to the stack frame). */
9144
9145 static void
9146 s390_frame_area (int *area_bottom, int *area_top)
9147 {
9148 int b, t;
9149
9150 b = INT_MAX;
9151 t = INT_MIN;
9152
9153 if (cfun_frame_layout.first_restore_gpr != -1)
9154 {
9155 b = (cfun_frame_layout.gprs_offset
9156 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9157 t = b + (cfun_frame_layout.last_restore_gpr
9158 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9159 }
9160
9161 if (TARGET_64BIT && cfun_save_high_fprs_p)
9162 {
9163 b = MIN (b, cfun_frame_layout.f8_offset);
9164 t = MAX (t, (cfun_frame_layout.f8_offset
9165 + cfun_frame_layout.high_fprs * 8));
9166 }
9167
9168 if (!TARGET_64BIT)
9169 {
9170 if (cfun_fpr_save_p (FPR4_REGNUM))
9171 {
9172 b = MIN (b, cfun_frame_layout.f4_offset);
9173 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9174 }
9175 if (cfun_fpr_save_p (FPR6_REGNUM))
9176 {
9177 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9178 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9179 }
9180 }
9181 *area_bottom = b;
9182 *area_top = t;
9183 }
9184 /* Update gpr_save_slots in the frame layout trying to make use of
9185 FPRs as GPR save slots.
9186 This is a helper routine of s390_register_info. */
9187
9188 static void
9189 s390_register_info_gprtofpr ()
9190 {
9191 int save_reg_slot = FPR0_REGNUM;
9192 int i, j;
9193
9194 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9195 return;
9196
9197 for (i = 15; i >= 6; i--)
9198 {
9199 if (cfun_gpr_save_slot (i) == 0)
9200 continue;
9201
9202 /* Advance to the next FP register which can be used as a
9203 GPR save slot. */
9204 while ((!call_really_used_regs[save_reg_slot]
9205 || df_regs_ever_live_p (save_reg_slot)
9206 || cfun_fpr_save_p (save_reg_slot))
9207 && FP_REGNO_P (save_reg_slot))
9208 save_reg_slot++;
9209 if (!FP_REGNO_P (save_reg_slot))
9210 {
9211 /* We only want to use ldgr/lgdr if we can get rid of
9212 stm/lm entirely. So undo the gpr slot allocation in
9213 case we ran out of FPR save slots. */
9214 for (j = 6; j <= 15; j++)
9215 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9216 cfun_gpr_save_slot (j) = -1;
9217 break;
9218 }
9219 cfun_gpr_save_slot (i) = save_reg_slot++;
9220 }
9221 }
9222
9223 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9224 stdarg.
9225 This is a helper routine for s390_register_info. */
9226
9227 static void
9228 s390_register_info_stdarg_fpr ()
9229 {
9230 int i;
9231 int min_fpr;
9232 int max_fpr;
9233
9234 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9235 f0, f2, f4, f6 for 64 bit. */
9236 if (!cfun->stdarg
9237 || !TARGET_HARD_FLOAT
9238 || !cfun->va_list_fpr_size
9239 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9240 return;
9241
9242 min_fpr = crtl->args.info.fprs;
9243 max_fpr = min_fpr + cfun->va_list_fpr_size;
9244 if (max_fpr > FP_ARG_NUM_REG)
9245 max_fpr = FP_ARG_NUM_REG;
9246
9247 for (i = min_fpr; i < max_fpr; i++)
9248 cfun_set_fpr_save (i + FPR0_REGNUM);
9249 }
9250
9251 /* Reserve the GPR save slots for GPRs which need to be saved due to
9252 stdarg.
9253 This is a helper routine for s390_register_info. */
9254
9255 static void
9256 s390_register_info_stdarg_gpr ()
9257 {
9258 int i;
9259 int min_gpr;
9260 int max_gpr;
9261
9262 if (!cfun->stdarg
9263 || !cfun->va_list_gpr_size
9264 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9265 return;
9266
9267 min_gpr = crtl->args.info.gprs;
9268 max_gpr = min_gpr + cfun->va_list_gpr_size;
9269 if (max_gpr > GP_ARG_NUM_REG)
9270 max_gpr = GP_ARG_NUM_REG;
9271
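/* GPR arguments are passed in r2-r6, so argument register I corresponds
   to hard register 2 + I.  */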
9272 for (i = min_gpr; i < max_gpr; i++)
9273 cfun_gpr_save_slot (2 + i) = -1;
9274 }
9275
9276 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9277 for registers which need to be saved in function prologue.
9278 This function can be used until the insns emitted for save/restore
9279 of the regs are visible in the RTL stream. */
9280
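/* Convention used by the routines below: a GPR save slot value of -1
   requests a save to the stack, 0 means no save is needed, and an FPR
   register number requests the GPR to be saved in that FPR (see
   s390_register_info_gprtofpr).  */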
9281 static void
9282 s390_register_info ()
9283 {
9284 int i, j;
9285 char clobbered_regs[32];
9286
9287 gcc_assert (!epilogue_completed);
9288
9289 if (reload_completed)
9290 /* After reload we rely on our own routine to determine which
9291 registers need saving. */
9292 s390_regs_ever_clobbered (clobbered_regs);
9293 else
9294 /* During reload we use regs_ever_live as a base since reload
9295 does changes in there which we otherwise would not be aware
9296 of. */
9297 for (i = 0; i < 32; i++)
9298 clobbered_regs[i] = df_regs_ever_live_p (i);
9299
9300 for (i = 0; i < 32; i++)
9301 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9302
9303 /* Mark the call-saved FPRs which need to be saved.
9304 This needs to be done before checking the special GPRs since the
9305 stack pointer usage depends on whether high FPRs have to be saved
9306 or not. */
9307 cfun_frame_layout.fpr_bitmap = 0;
9308 cfun_frame_layout.high_fprs = 0;
9309 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9310 if (clobbered_regs[i] && !call_really_used_regs[i])
9311 {
9312 cfun_set_fpr_save (i);
9313 if (i >= FPR8_REGNUM)
9314 cfun_frame_layout.high_fprs++;
9315 }
9316
9317 if (flag_pic)
9318 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9319 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9320
9321 clobbered_regs[BASE_REGNUM]
9322 |= (cfun->machine->base_reg
9323 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9324
9325 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9326 |= !!frame_pointer_needed;
9327
9328 /* On pre-z900 machines this decision might not be made until the
9329 machine-dependent reorg pass.
9330 save_return_addr_p will only be set on non-zarch machines, so
9331 there is no risk that r14 goes into an FPR instead of a stack
9332 slot. */
9333 clobbered_regs[RETURN_REGNUM]
9334 |= (!crtl->is_leaf
9335 || TARGET_TPF_PROFILING
9336 || cfun->machine->split_branches_pending_p
9337 || cfun_frame_layout.save_return_addr_p
9338 || crtl->calls_eh_return);
9339
9340 clobbered_regs[STACK_POINTER_REGNUM]
9341 |= (!crtl->is_leaf
9342 || TARGET_TPF_PROFILING
9343 || cfun_save_high_fprs_p
9344 || get_frame_size () > 0
9345 || (reload_completed && cfun_frame_layout.frame_size > 0)
9346 || cfun->calls_alloca);
9347
9348 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
9349
9350 for (i = 6; i < 16; i++)
9351 if (clobbered_regs[i])
9352 cfun_gpr_save_slot (i) = -1;
9353
9354 s390_register_info_stdarg_fpr ();
9355 s390_register_info_gprtofpr ();
9356
9357 /* First find the range of GPRs to be restored. Vararg regs don't
9358 need to be restored so we do it before assigning slots to the
9359 vararg GPRs. */
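/* The two empty-body loops below find the first and the last GPR whose
   save slot is on the stack (slot value -1).  */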
9360 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9361 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9362 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9363 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9364
9365 /* stdarg functions might need to save GPRs 2 to 6. This might
9366 override the GPR->FPR save decision made above for r6 since
9367 vararg regs must go to the stack. */
9368 s390_register_info_stdarg_gpr ();
9369
9370 /* Now the range of GPRs which need saving. */
9371 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9372 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9373 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9374 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9375 }
9376
9377 /* This function is called by s390_optimize_prologue in order to get
9378 rid of unnecessary GPR save/restore instructions. The register info
9379 for the GPRs is re-computed and the ranges are re-calculated. */
9380
9381 static void
9382 s390_optimize_register_info ()
9383 {
9384 char clobbered_regs[32];
9385 int i, j;
9386
9387 gcc_assert (epilogue_completed);
9388 gcc_assert (!cfun->machine->split_branches_pending_p);
9389
9390 s390_regs_ever_clobbered (clobbered_regs);
9391
9392 for (i = 0; i < 32; i++)
9393 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9394
9395 /* There is still special treatment needed for cases invisible to
9396 s390_regs_ever_clobbered. */
9397 clobbered_regs[RETURN_REGNUM]
9398 |= (TARGET_TPF_PROFILING
9399 /* When expanding builtin_return_addr in ESA mode we do not
9400 know whether r14 will later be needed as scratch reg when
9401 doing branch splitting. So the builtin always accesses the
9402 r14 save slot and we need to stick to the save/restore
9403 decision for r14 even if it turns out that it didn't get
9404 clobbered. */
9405 || cfun_frame_layout.save_return_addr_p
9406 || crtl->calls_eh_return);
9407
9408 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
9409
9410 for (i = 6; i < 16; i++)
9411 if (!clobbered_regs[i])
9412 cfun_gpr_save_slot (i) = 0;
9413
9414 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9415 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9416 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9417 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9418
9419 s390_register_info_stdarg_gpr ();
9420
9421 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9422 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9423 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9424 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9425 }
9426
9427 /* Fill cfun->machine with info about frame of current function. */
9428
9429 static void
9430 s390_frame_info (void)
9431 {
9432 HOST_WIDE_INT lowest_offset;
9433
9434 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9435 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9436
9437 /* The va_arg builtin uses a constant distance of 16 *
9438 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9439 pointer. So even if we are going to save the stack pointer in an
9440 FPR we need the stack space in order to keep the offsets
9441 correct. */
9442 if (cfun->stdarg && cfun_save_arg_fprs_p)
9443 {
9444 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9445
9446 if (cfun_frame_layout.first_save_gpr_slot == -1)
9447 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9448 }
9449
9450 cfun_frame_layout.frame_size = get_frame_size ();
9451 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9452 fatal_error (input_location,
9453 "total size of local variables exceeds architecture limit");
9454
9455 if (!TARGET_PACKED_STACK)
9456 {
9457 /* Fixed stack layout. */
9458 cfun_frame_layout.backchain_offset = 0;
9459 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9460 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9461 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9462 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9463 * UNITS_PER_LONG);
9464 }
9465 else if (TARGET_BACKCHAIN)
9466 {
9467 /* Kernel stack layout - packed stack, backchain, no float */
9468 gcc_assert (TARGET_SOFT_FLOAT);
9469 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9470 - UNITS_PER_LONG);
9471
9472 /* The distance between the backchain and the return address
9473 save slot must not change. So we always need a slot for the
9474 stack pointer which resides in between. */
9475 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9476
9477 cfun_frame_layout.gprs_offset
9478 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9479
9480 /* FPRs will not be saved. Nevertheless pick sane values to
9481 keep area calculations valid. */
9482 cfun_frame_layout.f0_offset =
9483 cfun_frame_layout.f4_offset =
9484 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9485 }
9486 else
9487 {
9488 int num_fprs;
9489
9490 /* Packed stack layout without backchain. */
9491
9492 /* With stdarg FPRs need their dedicated slots. */
9493 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9494 : (cfun_fpr_save_p (FPR4_REGNUM) +
9495 cfun_fpr_save_p (FPR6_REGNUM)));
9496 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9497
9498 num_fprs = (cfun->stdarg ? 2
9499 : (cfun_fpr_save_p (FPR0_REGNUM)
9500 + cfun_fpr_save_p (FPR2_REGNUM)));
9501 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9502
9503 cfun_frame_layout.gprs_offset
9504 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9505
9506 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9507 - cfun_frame_layout.high_fprs * 8);
9508 }
9509
9510 if (cfun_save_high_fprs_p)
9511 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9512
9513 if (!crtl->is_leaf)
9514 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9515
9516 /* We have to allocate a STACK_POINTER_OFFSET sized area at the
9517 bottom of the stack, also for leaf functions, since GCC will
9518 always add STACK_POINTER_OFFSET to local stack references. Only
9519 in the case checked below can this be omitted. */
9520 if (crtl->is_leaf
9521 && !TARGET_TPF_PROFILING
9522 && cfun_frame_layout.frame_size == 0
9523 && !cfun->calls_alloca)
9524 return;
9525
9526 /* Calculate the number of bytes we have used in our own register
9527 save area. With the packed stack layout we can re-use the
9528 remaining bytes for normal stack elements. */
9529
9530 if (TARGET_PACKED_STACK)
9531 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9532 cfun_frame_layout.f4_offset),
9533 cfun_frame_layout.gprs_offset);
9534 else
9535 lowest_offset = 0;
9536
9537 if (TARGET_BACKCHAIN)
9538 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9539
9540 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9541
9542 /* Under 31 bit, if an odd number of GPRs has to be saved, we have to
9543 adjust the frame size to sustain 8-byte alignment of stack
9544 frames. */
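/* For example, with STACK_BOUNDARY == 64 the rounding below aligns the
frame size to a multiple of 8 bytes, so a raw size of 92 would
become 96. */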
9545 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9546 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9547 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9548 }
9549
9550 /* Generate frame layout. Fills in register and frame data for the current
9551 function in cfun->machine. This routine can be called multiple times;
9552 it will re-do the complete frame layout every time. */
9553
9554 static void
9555 s390_init_frame_layout (void)
9556 {
9557 HOST_WIDE_INT frame_size;
9558 int base_used;
9559
9560 /* After LRA the frame layout is supposed to be read-only and should
9561 not be re-computed. */
9562 if (reload_completed)
9563 return;
9564
9565 /* On S/390 machines, we may need to perform branch splitting, which
9566 will require both the base and the return address register. We have no
9567 choice but to assume we're going to need them until right at the
9568 end of the machine dependent reorg phase. */
9569 if (!TARGET_CPU_ZARCH)
9570 cfun->machine->split_branches_pending_p = true;
9571
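/* The loop below iterates until the frame size reaches a fixed point:
whether the literal pool base register is needed depends on the frame
size, and reserving it in turn changes the register save area and
therefore the frame size again. */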
9572 do
9573 {
9574 frame_size = cfun_frame_layout.frame_size;
9575
9576 /* Try to predict whether we'll need the base register. */
9577 base_used = cfun->machine->split_branches_pending_p
9578 || crtl->uses_const_pool
9579 || (!DISP_IN_RANGE (frame_size)
9580 && !CONST_OK_FOR_K (frame_size));
9581
9582 /* Decide which register to use as literal pool base. In small
9583 leaf functions, try to use an unused call-clobbered register
9584 as base register to avoid save/restore overhead. */
9585 if (!base_used)
9586 cfun->machine->base_reg = NULL_RTX;
9587 else
9588 {
9589 int br = 0;
9590
9591 if (crtl->is_leaf)
9592 /* Prefer r5 (most likely to be free). */
9593 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9594 ;
9595 cfun->machine->base_reg =
9596 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9597 }
9598
9599 s390_register_info ();
9600 s390_frame_info ();
9601 }
9602 while (frame_size != cfun_frame_layout.frame_size);
9603 }
9604
9605 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9606 the TX is nonescaping. A transaction is considered escaping if
9607 there is at least one path from tbegin returning CC0 to the
9608 function exit block without a tend.
9609
9610 The check so far has some limitations:
9611 - only single tbegin/tend BBs are supported
9612 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9613 - when CC is copied to a GPR and the CC0 check is done with the GPR
9614 this is not supported
9615 */
9616
9617 static void
9618 s390_optimize_nonescaping_tx (void)
9619 {
9620 const unsigned int CC0 = 1 << 3;
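/* As used here, the CC value is encoded as a 4 bit mask with one bit
per condition code; CC0 corresponds to the most significant bit
(1 << 3), matching the mask operands of the CCRAWmode compares
inspected below. */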
9621 basic_block tbegin_bb = NULL;
9622 basic_block tend_bb = NULL;
9623 basic_block bb;
9624 rtx_insn *insn;
9625 bool result = true;
9626 int bb_index;
9627 rtx_insn *tbegin_insn = NULL;
9628
9629 if (!cfun->machine->tbegin_p)
9630 return;
9631
9632 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9633 {
9634 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9635
9636 if (!bb)
9637 continue;
9638
9639 FOR_BB_INSNS (bb, insn)
9640 {
9641 rtx ite, cc, pat, target;
9642 unsigned HOST_WIDE_INT mask;
9643
9644 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9645 continue;
9646
9647 pat = PATTERN (insn);
9648
9649 if (GET_CODE (pat) == PARALLEL)
9650 pat = XVECEXP (pat, 0, 0);
9651
9652 if (GET_CODE (pat) != SET
9653 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9654 continue;
9655
9656 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9657 {
9658 rtx_insn *tmp;
9659
9660 tbegin_insn = insn;
9661
9662 /* Just return if the tbegin doesn't have clobbers. */
9663 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9664 return;
9665
9666 if (tbegin_bb != NULL)
9667 return;
9668
9669 /* Find the next conditional jump. */
9670 for (tmp = NEXT_INSN (insn);
9671 tmp != NULL_RTX;
9672 tmp = NEXT_INSN (tmp))
9673 {
9674 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9675 return;
9676 if (!JUMP_P (tmp))
9677 continue;
9678
9679 ite = SET_SRC (PATTERN (tmp));
9680 if (GET_CODE (ite) != IF_THEN_ELSE)
9681 continue;
9682
9683 cc = XEXP (XEXP (ite, 0), 0);
9684 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9685 || GET_MODE (cc) != CCRAWmode
9686 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9687 return;
9688
9689 if (bb->succs->length () != 2)
9690 return;
9691
9692 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9693 if (GET_CODE (XEXP (ite, 0)) == NE)
9694 mask ^= 0xf;
9695
9696 if (mask == CC0)
9697 target = XEXP (ite, 1);
9698 else if (mask == (CC0 ^ 0xf))
9699 target = XEXP (ite, 2);
9700 else
9701 return;
9702
9703 {
9704 edge_iterator ei;
9705 edge e1, e2;
9706
9707 ei = ei_start (bb->succs);
9708 e1 = ei_safe_edge (ei);
9709 ei_next (&ei);
9710 e2 = ei_safe_edge (ei);
9711
9712 if (e2->flags & EDGE_FALLTHRU)
9713 {
9714 e2 = e1;
9715 e1 = ei_safe_edge (ei);
9716 }
9717
9718 if (!(e1->flags & EDGE_FALLTHRU))
9719 return;
9720
9721 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9722 }
9723 if (tmp == BB_END (bb))
9724 break;
9725 }
9726 }
9727
9728 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9729 {
9730 if (tend_bb != NULL)
9731 return;
9732 tend_bb = bb;
9733 }
9734 }
9735 }
9736
9737 /* Either we successfully remove the FPR clobbers here or we are not
9738 able to do anything for this TX. Both cases don't qualify for
9739 another look. */
9740 cfun->machine->tbegin_p = false;
9741
9742 if (tbegin_bb == NULL || tend_bb == NULL)
9743 return;
9744
9745 calculate_dominance_info (CDI_POST_DOMINATORS);
9746 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9747 free_dominance_info (CDI_POST_DOMINATORS);
9748
9749 if (!result)
9750 return;
9751
9752 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9753 gen_rtvec (2,
9754 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9755 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9756 INSN_CODE (tbegin_insn) = -1;
9757 df_insn_rescan (tbegin_insn);
9758
9759 return;
9760 }
9761
9762 /* Return true if it is legal to put a value with MODE into REGNO. */
9763
9764 bool
9765 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9766 {
9767 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9768 return false;
9769
9770 switch (REGNO_REG_CLASS (regno))
9771 {
9772 case VEC_REGS:
9773 return ((GET_MODE_CLASS (mode) == MODE_INT
9774 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9775 || mode == DFmode
9776 || s390_vector_mode_supported_p (mode));
9777 break;
9778 case FP_REGS:
9779 if (TARGET_VX
9780 && ((GET_MODE_CLASS (mode) == MODE_INT
9781 && s390_class_max_nregs (FP_REGS, mode) == 1)
9782 || mode == DFmode
9783 || s390_vector_mode_supported_p (mode)))
9784 return true;
9785
9786 if (REGNO_PAIR_OK (regno, mode))
9787 {
9788 if (mode == SImode || mode == DImode)
9789 return true;
9790
9791 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9792 return true;
9793 }
9794 break;
9795 case ADDR_REGS:
9796 if (FRAME_REGNO_P (regno) && mode == Pmode)
9797 return true;
9798
9799 /* fallthrough */
9800 case GENERAL_REGS:
9801 if (REGNO_PAIR_OK (regno, mode))
9802 {
9803 if (TARGET_ZARCH
9804 || (mode != TFmode && mode != TCmode && mode != TDmode))
9805 return true;
9806 }
9807 break;
9808 case CC_REGS:
9809 if (GET_MODE_CLASS (mode) == MODE_CC)
9810 return true;
9811 break;
9812 case ACCESS_REGS:
9813 if (REGNO_PAIR_OK (regno, mode))
9814 {
9815 if (mode == SImode || mode == Pmode)
9816 return true;
9817 }
9818 break;
9819 default:
9820 return false;
9821 }
9822
9823 return false;
9824 }
9825
9826 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
9827
9828 bool
9829 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
9830 {
9831 /* Once we've decided upon a register to use as base register, it must
9832 no longer be used for any other purpose. */
9833 if (cfun->machine->base_reg)
9834 if (REGNO (cfun->machine->base_reg) == old_reg
9835 || REGNO (cfun->machine->base_reg) == new_reg)
9836 return false;
9837
9838 /* Prevent regrename from using call-saved regs which haven't
9839 actually been saved. This is necessary since regrename assumes
9840 the backend save/restore decisions are based on
9841 df_regs_ever_live. Since we have our own routine we have to tell
9842 regrename manually about it. */
9843 if (GENERAL_REGNO_P (new_reg)
9844 && !call_really_used_regs[new_reg]
9845 && cfun_gpr_save_slot (new_reg) == 0)
9846 return false;
9847
9848 return true;
9849 }
9850
9851 /* Return nonzero if register REGNO can be used as a scratch register
9852 in peephole2. */
9853
9854 static bool
9855 s390_hard_regno_scratch_ok (unsigned int regno)
9856 {
9857 /* See s390_hard_regno_rename_ok. */
9858 if (GENERAL_REGNO_P (regno)
9859 && !call_really_used_regs[regno]
9860 && cfun_gpr_save_slot (regno) == 0)
9861 return false;
9862
9863 return true;
9864 }
9865
9866 /* Maximum number of registers to represent a value of mode MODE
9867 in a register of class RCLASS. */
9868
9869 int
9870 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
9871 {
9872 int reg_size;
9873 bool reg_pair_required_p = false;
9874
9875 switch (rclass)
9876 {
9877 case FP_REGS:
9878 case VEC_REGS:
9879 reg_size = TARGET_VX ? 16 : 8;
9880
9881 /* TF and TD modes would fit into a VR but we put them into a
9882 register pair since we do not have 128-bit FP instructions on
9883 full VRs. */
9884 if (TARGET_VX
9885 && SCALAR_FLOAT_MODE_P (mode)
9886 && GET_MODE_SIZE (mode) >= 16)
9887 reg_pair_required_p = true;
9888
9889 /* Even if complex types would fit into a single FPR/VR we force
9890 them into a register pair to deal with the parts more easily.
9891 (FIXME: What about complex ints?) */
9892 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9893 reg_pair_required_p = true;
9894 break;
9895 case ACCESS_REGS:
9896 reg_size = 4;
9897 break;
9898 default:
9899 reg_size = UNITS_PER_WORD;
9900 break;
9901 }
9902
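/* For instance, for TFmode (16 bytes): with TARGET_VX reg_size is 16
and reg_pair_required_p is set, giving 2 * ((16/2 + 15) / 16) = 2;
without the vector facility reg_size is 8 and the plain rounding
below yields (16 + 7) / 8 = 2 FPRs as well. */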
9903 if (reg_pair_required_p)
9904 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
9905
9906 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
9907 }
9908
9909 /* Return TRUE if changing mode from FROM to TO should not be allowed
9910 for register class CLASS. */
9911
9912 int
9913 s390_cannot_change_mode_class (machine_mode from_mode,
9914 machine_mode to_mode,
9915 enum reg_class rclass)
9916 {
9917 machine_mode small_mode;
9918 machine_mode big_mode;
9919
9920 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
9921 return 0;
9922
9923 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
9924 {
9925 small_mode = from_mode;
9926 big_mode = to_mode;
9927 }
9928 else
9929 {
9930 small_mode = to_mode;
9931 big_mode = from_mode;
9932 }
9933
9934 /* Values residing in VRs are little-endian style. All modes are
9935 placed left-aligned in a VR. This means that we cannot allow
9936 switching between modes with differing sizes. Also if the vector
9937 facility is available we still place TFmode values in VR register
9938 pairs, since the only instructions we have operating on TFmode
9939 deal with register pairs. Therefore we have to allow DFmode
9940 subregs of TFmodes to enable the TFmode splitters. */
9941 if (reg_classes_intersect_p (VEC_REGS, rclass)
9942 && (GET_MODE_SIZE (small_mode) < 8
9943 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
9944 return 1;
9945
9946 /* Likewise for access registers, since they have only half the
9947 word size on 64-bit. */
9948 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
9949 return 1;
9950
9951 return 0;
9952 }
9953
9954 /* Return true if we use LRA instead of reload pass. */
9955 static bool
9956 s390_lra_p (void)
9957 {
9958 return s390_lra_flag;
9959 }
9960
9961 /* Return true if register FROM can be eliminated via register TO. */
9962
9963 static bool
9964 s390_can_eliminate (const int from, const int to)
9965 {
9966 /* On zSeries machines, we have not marked the base register as fixed.
9967 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
9968 If a function requires the base register, we say here that this
9969 elimination cannot be performed. This will cause reload to free
9970 up the base register (as if it were fixed). On the other hand,
9971 if the current function does *not* require the base register, we
9972 say here the elimination succeeds, which in turn allows reload
9973 to allocate the base register for any other purpose. */
9974 if (from == BASE_REGNUM && to == BASE_REGNUM)
9975 {
9976 if (TARGET_CPU_ZARCH)
9977 {
9978 s390_init_frame_layout ();
9979 return cfun->machine->base_reg == NULL_RTX;
9980 }
9981
9982 return false;
9983 }
9984
9985 /* Everything else must point into the stack frame. */
9986 gcc_assert (to == STACK_POINTER_REGNUM
9987 || to == HARD_FRAME_POINTER_REGNUM);
9988
9989 gcc_assert (from == FRAME_POINTER_REGNUM
9990 || from == ARG_POINTER_REGNUM
9991 || from == RETURN_ADDRESS_POINTER_REGNUM);
9992
9993 /* Make sure we actually saved the return address. */
9994 if (from == RETURN_ADDRESS_POINTER_REGNUM)
9995 if (!crtl->calls_eh_return
9996 && !cfun->stdarg
9997 && !cfun_frame_layout.save_return_addr_p)
9998 return false;
9999
10000 return true;
10001 }
10002
10003 /* Return offset between register FROM and TO initially after prolog. */
10004
10005 HOST_WIDE_INT
10006 s390_initial_elimination_offset (int from, int to)
10007 {
10008 HOST_WIDE_INT offset;
10009
10010 /* ??? Why are we called for non-eliminable pairs? */
10011 if (!s390_can_eliminate (from, to))
10012 return 0;
10013
10014 switch (from)
10015 {
10016 case FRAME_POINTER_REGNUM:
10017 offset = (get_frame_size()
10018 + STACK_POINTER_OFFSET
10019 + crtl->outgoing_args_size);
10020 break;
10021
10022 case ARG_POINTER_REGNUM:
10023 s390_init_frame_layout ();
10024 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10025 break;
10026
10027 case RETURN_ADDRESS_POINTER_REGNUM:
10028 s390_init_frame_layout ();
10029
10030 if (cfun_frame_layout.first_save_gpr_slot == -1)
10031 {
10032 /* If it turns out that for stdarg nothing went into the reg
10033 save area we also do not need the return address
10034 pointer. */
10035 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10036 return 0;
10037
10038 gcc_unreachable ();
10039 }
10040
10041 /* In order to make the following work it is not necessary for
10042 r14 to have a save slot. It is sufficient if one other GPR
10043 got one. Since the GPRs are always stored without gaps we
10044 are able to calculate where the r14 save slot would
10045 reside. */
10046 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10047 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10048 UNITS_PER_LONG);
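/* For example, in the fixed 64-bit layout with first_save_gpr_slot == 6
the r14 slot is at gprs_offset + (14 - 6) * 8 = 48 + 64 = 112 bytes
above the frame, i.e. the offset returned is frame_size + 112. */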
10049 break;
10050
10051 case BASE_REGNUM:
10052 offset = 0;
10053 break;
10054
10055 default:
10056 gcc_unreachable ();
10057 }
10058
10059 return offset;
10060 }
10061
10062 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10063 to register BASE. Return generated insn. */
10064
10065 static rtx
10066 save_fpr (rtx base, int offset, int regnum)
10067 {
10068 rtx addr;
10069 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10070
10071 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10072 set_mem_alias_set (addr, get_varargs_alias_set ());
10073 else
10074 set_mem_alias_set (addr, get_frame_alias_set ());
10075
10076 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10077 }
10078
10079 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10080 to register BASE. Return generated insn. */
10081
10082 static rtx
10083 restore_fpr (rtx base, int offset, int regnum)
10084 {
10085 rtx addr;
10086 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10087 set_mem_alias_set (addr, get_frame_alias_set ());
10088
10089 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10090 }
10091
10092 /* Return true if REGNO is a global register, but not one
10093 of the special ones that need to be saved/restored anyway. */
10094
10095 static inline bool
10096 global_not_special_regno_p (int regno)
10097 {
10098 return (global_regs[regno]
10099 /* These registers are special and need to be
10100 restored in any case. */
10101 && !(regno == STACK_POINTER_REGNUM
10102 || regno == RETURN_REGNUM
10103 || regno == BASE_REGNUM
10104 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10105 }
10106
10107 /* Generate insn to save registers FIRST to LAST into
10108 the register save area located at offset OFFSET
10109 relative to register BASE. */
10110
10111 static rtx
10112 save_gprs (rtx base, int offset, int first, int last)
10113 {
10114 rtx addr, insn, note;
10115 int i;
10116
10117 addr = plus_constant (Pmode, base, offset);
10118 addr = gen_rtx_MEM (Pmode, addr);
10119
10120 set_mem_alias_set (addr, get_frame_alias_set ());
10121
10122 /* Special-case single register. */
10123 if (first == last)
10124 {
10125 if (TARGET_64BIT)
10126 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10127 else
10128 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10129
10130 if (!global_not_special_regno_p (first))
10131 RTX_FRAME_RELATED_P (insn) = 1;
10132 return insn;
10133 }
10134
10135
10136 insn = gen_store_multiple (addr,
10137 gen_rtx_REG (Pmode, first),
10138 GEN_INT (last - first + 1));
10139
10140 if (first <= 6 && cfun->stdarg)
10141 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10142 {
10143 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10144
10145 if (first + i <= 6)
10146 set_mem_alias_set (mem, get_varargs_alias_set ());
10147 }
10148
10149 /* We need to set the FRAME_RELATED flag on all SETs
10150 inside the store-multiple pattern.
10151
10152 However, we must not emit DWARF records for registers 2..5
10153 if they are stored for use by variable arguments ...
10154
10155 ??? Unfortunately, it is not enough to simply not set the
10156 FRAME_RELATED flags for those SETs, because the first SET
10157 of the PARALLEL is always treated as if it had the flag
10158 set, even if it does not. Therefore we emit a new pattern
10159 without those registers as REG_FRAME_RELATED_EXPR note. */
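/* For instance, in a stdarg function storing r2..r15 only the SETs for
r6..r15 (assuming none of them is a global register) end up with
DWARF records, via the REG_FRAME_RELATED_EXPR note built below. */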
10160
10161 if (first >= 6 && !global_not_special_regno_p (first))
10162 {
10163 rtx pat = PATTERN (insn);
10164
10165 for (i = 0; i < XVECLEN (pat, 0); i++)
10166 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10167 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10168 0, i)))))
10169 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10170
10171 RTX_FRAME_RELATED_P (insn) = 1;
10172 }
10173 else if (last >= 6)
10174 {
10175 int start;
10176
10177 for (start = first >= 6 ? first : 6; start <= last; start++)
10178 if (!global_not_special_regno_p (start))
10179 break;
10180
10181 if (start > last)
10182 return insn;
10183
10184 addr = plus_constant (Pmode, base,
10185 offset + (start - first) * UNITS_PER_LONG);
10186
10187 if (start == last)
10188 {
10189 if (TARGET_64BIT)
10190 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10191 gen_rtx_REG (Pmode, start));
10192 else
10193 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10194 gen_rtx_REG (Pmode, start));
10195 note = PATTERN (note);
10196
10197 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10198 RTX_FRAME_RELATED_P (insn) = 1;
10199
10200 return insn;
10201 }
10202
10203 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10204 gen_rtx_REG (Pmode, start),
10205 GEN_INT (last - start + 1));
10206 note = PATTERN (note);
10207
10208 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10209
10210 for (i = 0; i < XVECLEN (note, 0); i++)
10211 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10212 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10213 0, i)))))
10214 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10215
10216 RTX_FRAME_RELATED_P (insn) = 1;
10217 }
10218
10219 return insn;
10220 }
10221
10222 /* Generate insn to restore registers FIRST to LAST from
10223 the register save area located at offset OFFSET
10224 relative to register BASE. */
10225
10226 static rtx
10227 restore_gprs (rtx base, int offset, int first, int last)
10228 {
10229 rtx addr, insn;
10230
10231 addr = plus_constant (Pmode, base, offset);
10232 addr = gen_rtx_MEM (Pmode, addr);
10233 set_mem_alias_set (addr, get_frame_alias_set ());
10234
10235 /* Special-case single register. */
10236 if (first == last)
10237 {
10238 if (TARGET_64BIT)
10239 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10240 else
10241 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10242
10243 RTX_FRAME_RELATED_P (insn) = 1;
10244 return insn;
10245 }
10246
10247 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10248 addr,
10249 GEN_INT (last - first + 1));
10250 RTX_FRAME_RELATED_P (insn) = 1;
10251 return insn;
10252 }
10253
10254 /* Return insn sequence to load the GOT register. */
10255
10256 static GTY(()) rtx got_symbol;
10257 rtx_insn *
10258 s390_load_got (void)
10259 {
10260 rtx_insn *insns;
10261
10262 /* We cannot use pic_offset_table_rtx here since we use this
10263 function also for non-pic if __tls_get_offset is called and in
10264 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10265 aren't usable. */
10266 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10267
10268 if (!got_symbol)
10269 {
10270 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10271 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10272 }
10273
10274 start_sequence ();
10275
10276 if (TARGET_CPU_ZARCH)
10277 {
10278 emit_move_insn (got_rtx, got_symbol);
10279 }
10280 else
10281 {
10282 rtx offset;
10283
10284 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10285 UNSPEC_LTREL_OFFSET);
10286 offset = gen_rtx_CONST (Pmode, offset);
10287 offset = force_const_mem (Pmode, offset);
10288
10289 emit_move_insn (got_rtx, offset);
10290
10291 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10292 UNSPEC_LTREL_BASE);
10293 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10294
10295 emit_move_insn (got_rtx, offset);
10296 }
10297
10298 insns = get_insns ();
10299 end_sequence ();
10300 return insns;
10301 }
10302
10303 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10304 and the change to the stack pointer. */
10305
10306 static void
10307 s390_emit_stack_tie (void)
10308 {
10309 rtx mem = gen_frame_mem (BLKmode,
10310 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10311
10312 emit_insn (gen_stack_tie (mem));
10313 }
10314
10315 /* Copy GPRs into FPR save slots. */
10316
10317 static void
10318 s390_save_gprs_to_fprs (void)
10319 {
10320 int i;
10321
10322 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10323 return;
10324
10325 for (i = 6; i < 16; i++)
10326 {
10327 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10328 {
10329 rtx_insn *insn =
10330 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10331 gen_rtx_REG (DImode, i));
10332 RTX_FRAME_RELATED_P (insn) = 1;
10333 /* This prevents dwarf2cfi from interpreting the set. Otherwise
10334 it might emit def_cfa_register notes making an FPR the new
10335 CFA. */
10336 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10337 }
10338 }
10339 }
10340
10341 /* Restore GPRs from FPR save slots. */
10342
10343 static void
10344 s390_restore_gprs_from_fprs (void)
10345 {
10346 int i;
10347
10348 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10349 return;
10350
10351 for (i = 6; i < 16; i++)
10352 {
10353 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10354 {
10355 rtx_insn *insn =
10356 emit_move_insn (gen_rtx_REG (DImode, i),
10357 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10358 df_set_regs_ever_live (i, true);
10359 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10360 if (i == STACK_POINTER_REGNUM)
10361 add_reg_note (insn, REG_CFA_DEF_CFA,
10362 plus_constant (Pmode, stack_pointer_rtx,
10363 STACK_POINTER_OFFSET));
10364 RTX_FRAME_RELATED_P (insn) = 1;
10365 }
10366 }
10367 }
10368
10369
10370 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10371 generation. */
10372
10373 namespace {
10374
10375 const pass_data pass_data_s390_early_mach =
10376 {
10377 RTL_PASS, /* type */
10378 "early_mach", /* name */
10379 OPTGROUP_NONE, /* optinfo_flags */
10380 TV_MACH_DEP, /* tv_id */
10381 0, /* properties_required */
10382 0, /* properties_provided */
10383 0, /* properties_destroyed */
10384 0, /* todo_flags_start */
10385 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10386 };
10387
10388 class pass_s390_early_mach : public rtl_opt_pass
10389 {
10390 public:
10391 pass_s390_early_mach (gcc::context *ctxt)
10392 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10393 {}
10394
10395 /* opt_pass methods: */
10396 virtual unsigned int execute (function *);
10397
10398 }; // class pass_s390_early_mach
10399
10400 unsigned int
10401 pass_s390_early_mach::execute (function *fun)
10402 {
10403 rtx_insn *insn;
10404
10405 /* Try to get rid of the FPR clobbers. */
10406 s390_optimize_nonescaping_tx ();
10407
10408 /* Re-compute register info. */
10409 s390_register_info ();
10410
10411 /* If we're using a base register, ensure that it is always valid for
10412 the first non-prologue instruction. */
10413 if (fun->machine->base_reg)
10414 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10415
10416 /* Annotate all constant pool references to let the scheduler know
10417 they implicitly use the base register. */
10418 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10419 if (INSN_P (insn))
10420 {
10421 annotate_constant_pool_refs (&PATTERN (insn));
10422 df_insn_rescan (insn);
10423 }
10424 return 0;
10425 }
10426
10427 } // anon namespace
10428
10429 /* Expand the prologue into a bunch of separate insns. */
10430
10431 void
10432 s390_emit_prologue (void)
10433 {
10434 rtx insn, addr;
10435 rtx temp_reg;
10436 int i;
10437 int offset;
10438 int next_fpr = 0;
10439
10440 /* Choose the best register to use as a temporary within the prologue.
10441 See below for why TPF must use register 1. */
10442
10443 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10444 && !crtl->is_leaf
10445 && !TARGET_TPF_PROFILING)
10446 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10447 else
10448 temp_reg = gen_rtx_REG (Pmode, 1);
10449
10450 s390_save_gprs_to_fprs ();
10451
10452 /* Save call saved gprs. */
10453 if (cfun_frame_layout.first_save_gpr != -1)
10454 {
10455 insn = save_gprs (stack_pointer_rtx,
10456 cfun_frame_layout.gprs_offset +
10457 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10458 - cfun_frame_layout.first_save_gpr_slot),
10459 cfun_frame_layout.first_save_gpr,
10460 cfun_frame_layout.last_save_gpr);
10461 emit_insn (insn);
10462 }
10463
10464 /* Dummy insn to mark literal pool slot. */
10465
10466 if (cfun->machine->base_reg)
10467 emit_insn (gen_main_pool (cfun->machine->base_reg));
10468
10469 offset = cfun_frame_layout.f0_offset;
10470
10471 /* Save f0 and f2. */
10472 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10473 {
10474 if (cfun_fpr_save_p (i))
10475 {
10476 save_fpr (stack_pointer_rtx, offset, i);
10477 offset += 8;
10478 }
10479 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10480 offset += 8;
10481 }
10482
10483 /* Save f4 and f6. */
10484 offset = cfun_frame_layout.f4_offset;
10485 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10486 {
10487 if (cfun_fpr_save_p (i))
10488 {
10489 insn = save_fpr (stack_pointer_rtx, offset, i);
10490 offset += 8;
10491
10492 /* If f4 and f6 are call clobbered they are only saved because of
10493 stdarg and are therefore not frame related. */
10494 if (!call_really_used_regs[i])
10495 RTX_FRAME_RELATED_P (insn) = 1;
10496 }
10497 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10498 offset += 8;
10499 }
10500
10501 if (TARGET_PACKED_STACK
10502 && cfun_save_high_fprs_p
10503 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10504 {
10505 offset = (cfun_frame_layout.f8_offset
10506 + (cfun_frame_layout.high_fprs - 1) * 8);
10507
10508 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10509 if (cfun_fpr_save_p (i))
10510 {
10511 insn = save_fpr (stack_pointer_rtx, offset, i);
10512
10513 RTX_FRAME_RELATED_P (insn) = 1;
10514 offset -= 8;
10515 }
10516 if (offset >= cfun_frame_layout.f8_offset)
10517 next_fpr = i;
10518 }
10519
10520 if (!TARGET_PACKED_STACK)
10521 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10522
10523 if (flag_stack_usage_info)
10524 current_function_static_stack_size = cfun_frame_layout.frame_size;
10525
10526 /* Decrement stack pointer. */
10527
10528 if (cfun_frame_layout.frame_size > 0)
10529 {
10530 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10531 rtx real_frame_off;
10532
10533 if (s390_stack_size)
10534 {
10535 HOST_WIDE_INT stack_guard;
10536
10537 if (s390_stack_guard)
10538 stack_guard = s390_stack_guard;
10539 else
10540 {
10541 /* If no value for the stack guard is provided, the smallest power of 2
10542 that is at least as large as the current frame size is chosen. */
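/* E.g. a frame size of 0x9000 results in a stack_guard of 0x10000. */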
10543 stack_guard = 1;
10544 while (stack_guard < cfun_frame_layout.frame_size)
10545 stack_guard <<= 1;
10546 }
10547
10548 if (cfun_frame_layout.frame_size >= s390_stack_size)
10549 {
10550 warning (0, "frame size of function %qs is %wd"
10551 " bytes exceeding user provided stack limit of "
10552 "%d bytes. "
10553 "An unconditional trap is added.",
10554 current_function_name(), cfun_frame_layout.frame_size,
10555 s390_stack_size);
10556 emit_insn (gen_trap ());
10557 emit_barrier ();
10558 }
10559 else
10560 {
10561 /* stack_guard has to be smaller than s390_stack_size.
10562 Otherwise we would emit an AND with zero which would
10563 not match the test under mask pattern. */
10564 if (stack_guard >= s390_stack_size)
10565 {
10566 warning (0, "frame size of function %qs is %wd"
10567 " bytes which is more than half the stack size. "
10568 "The dynamic check would not be reliable. "
10569 "No check emitted for this function.",
10570 current_function_name(),
10571 cfun_frame_layout.frame_size);
10572 }
10573 else
10574 {
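/* With s390_stack_size == 65536 and stack_guard == 4096 the mask
computed below is 0xf000; the compare-and-trap then traps once all
of the selected stack pointer bits are zero. */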
10575 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10576 & ~(stack_guard - 1));
10577
10578 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10579 GEN_INT (stack_check_mask));
10580 if (TARGET_64BIT)
10581 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10582 t, const0_rtx),
10583 t, const0_rtx, const0_rtx));
10584 else
10585 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10586 t, const0_rtx),
10587 t, const0_rtx, const0_rtx));
10588 }
10589 }
10590 }
10591
10592 if (s390_warn_framesize > 0
10593 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10594 warning (0, "frame size of %qs is %wd bytes",
10595 current_function_name (), cfun_frame_layout.frame_size);
10596
10597 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10598 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10599
10600 /* Save incoming stack pointer into temp reg. */
10601 if (TARGET_BACKCHAIN || next_fpr)
10602 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10603
10604 /* Subtract frame size from stack pointer. */
10605
10606 if (DISP_IN_RANGE (INTVAL (frame_off)))
10607 {
10608 insn = gen_rtx_SET (stack_pointer_rtx,
10609 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10610 frame_off));
10611 insn = emit_insn (insn);
10612 }
10613 else
10614 {
10615 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10616 frame_off = force_const_mem (Pmode, frame_off);
10617
10618 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10619 annotate_constant_pool_refs (&PATTERN (insn));
10620 }
10621
10622 RTX_FRAME_RELATED_P (insn) = 1;
10623 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10624 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10625 gen_rtx_SET (stack_pointer_rtx,
10626 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10627 real_frame_off)));
10628
10629 /* Set backchain. */
10630
10631 if (TARGET_BACKCHAIN)
10632 {
10633 if (cfun_frame_layout.backchain_offset)
10634 addr = gen_rtx_MEM (Pmode,
10635 plus_constant (Pmode, stack_pointer_rtx,
10636 cfun_frame_layout.backchain_offset));
10637 else
10638 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10639 set_mem_alias_set (addr, get_frame_alias_set ());
10640 insn = emit_insn (gen_move_insn (addr, temp_reg));
10641 }
10642
10643 /* If we support non-call exceptions (e.g. for Java),
10644 we need to make sure the backchain pointer is set up
10645 before any possibly trapping memory access. */
10646 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10647 {
10648 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10649 emit_clobber (addr);
10650 }
10651 }
10652
10653 /* Save fprs 8 - 15 (64 bit ABI). */
10654
10655 if (cfun_save_high_fprs_p && next_fpr)
10656 {
10657 /* If the stack might be accessed through a different register
10658 we have to make sure that the stack pointer decrement is not
10659 moved below the use of the stack slots. */
10660 s390_emit_stack_tie ();
10661
10662 insn = emit_insn (gen_add2_insn (temp_reg,
10663 GEN_INT (cfun_frame_layout.f8_offset)));
10664
10665 offset = 0;
10666
10667 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10668 if (cfun_fpr_save_p (i))
10669 {
10670 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10671 cfun_frame_layout.frame_size
10672 + cfun_frame_layout.f8_offset
10673 + offset);
10674
10675 insn = save_fpr (temp_reg, offset, i);
10676 offset += 8;
10677 RTX_FRAME_RELATED_P (insn) = 1;
10678 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10679 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10680 gen_rtx_REG (DFmode, i)));
10681 }
10682 }
10683
10684 /* Set frame pointer, if needed. */
10685
10686 if (frame_pointer_needed)
10687 {
10688 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10689 RTX_FRAME_RELATED_P (insn) = 1;
10690 }
10691
10692 /* Set up got pointer, if needed. */
10693
10694 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10695 {
10696 rtx_insn *insns = s390_load_got ();
10697
10698 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10699 annotate_constant_pool_refs (&PATTERN (insn));
10700
10701 emit_insn (insns);
10702 }
10703
10704 if (TARGET_TPF_PROFILING)
10705 {
10706 /* Generate a BAS instruction to serve as a function
10707 entry intercept to facilitate the use of tracing
10708 algorithms located at the branch target. */
10709 emit_insn (gen_prologue_tpf ());
10710
10711 /* Emit a blockage here so that all code
10712 lies between the profiling mechanisms. */
10713 emit_insn (gen_blockage ());
10714 }
10715 }
10716
10717 /* Expand the epilogue into a bunch of separate insns. */
10718
10719 void
10720 s390_emit_epilogue (bool sibcall)
10721 {
10722 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10723 int area_bottom, area_top, offset = 0;
10724 int next_offset;
10725 rtvec p;
10726 int i;
10727
10728 if (TARGET_TPF_PROFILING)
10729 {
10730
10731 /* Generate a BAS instruction to serve as a function
10732 entry intercept to facilitate the use of tracing
10733 algorithms located at the branch target. */
10734
10735 /* Emit a blockage here so that all code
10736 lies between the profiling mechanisms. */
10737 emit_insn (gen_blockage ());
10738
10739 emit_insn (gen_epilogue_tpf ());
10740 }
10741
10742 /* Check whether to use frame or stack pointer for restore. */
10743
10744 frame_pointer = (frame_pointer_needed
10745 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10746
10747 s390_frame_area (&area_bottom, &area_top);
10748
10749 /* Check whether we can access the register save area.
10750 If not, increment the frame pointer as required. */
10751
10752 if (area_top <= area_bottom)
10753 {
10754 /* Nothing to restore. */
10755 }
10756 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10757 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10758 {
10759 /* Area is in range. */
10760 offset = cfun_frame_layout.frame_size;
10761 }
10762 else
10763 {
10764 rtx insn, frame_off, cfa;
10765
10766 offset = area_bottom < 0 ? -area_bottom : 0;
10767 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10768
10769 cfa = gen_rtx_SET (frame_pointer,
10770 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10771 if (DISP_IN_RANGE (INTVAL (frame_off)))
10772 {
10773 insn = gen_rtx_SET (frame_pointer,
10774 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10775 insn = emit_insn (insn);
10776 }
10777 else
10778 {
10779 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10780 frame_off = force_const_mem (Pmode, frame_off);
10781
10782 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10783 annotate_constant_pool_refs (&PATTERN (insn));
10784 }
10785 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10786 RTX_FRAME_RELATED_P (insn) = 1;
10787 }
10788
10789 /* Restore call saved fprs. */
10790
10791 if (TARGET_64BIT)
10792 {
10793 if (cfun_save_high_fprs_p)
10794 {
10795 next_offset = cfun_frame_layout.f8_offset;
10796 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10797 {
10798 if (cfun_fpr_save_p (i))
10799 {
10800 restore_fpr (frame_pointer,
10801 offset + next_offset, i);
10802 cfa_restores
10803 = alloc_reg_note (REG_CFA_RESTORE,
10804 gen_rtx_REG (DFmode, i), cfa_restores);
10805 next_offset += 8;
10806 }
10807 }
10808 }
10809
10810 }
10811 else
10812 {
10813 next_offset = cfun_frame_layout.f4_offset;
10814 /* f4, f6 */
10815 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10816 {
10817 if (cfun_fpr_save_p (i))
10818 {
10819 restore_fpr (frame_pointer,
10820 offset + next_offset, i);
10821 cfa_restores
10822 = alloc_reg_note (REG_CFA_RESTORE,
10823 gen_rtx_REG (DFmode, i), cfa_restores);
10824 next_offset += 8;
10825 }
10826 else if (!TARGET_PACKED_STACK)
10827 next_offset += 8;
10828 }
10829
10830 }
10831
10832 /* Return register. */
10833
10834 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10835
10836 /* Restore call saved gprs. */
10837
10838 if (cfun_frame_layout.first_restore_gpr != -1)
10839 {
10840 rtx insn, addr;
10841 int i;
10842
10843 /* Check for global registers and save them
10844 to the stack locations from which they get restored. */
10845
10846 for (i = cfun_frame_layout.first_restore_gpr;
10847 i <= cfun_frame_layout.last_restore_gpr;
10848 i++)
10849 {
10850 if (global_not_special_regno_p (i))
10851 {
10852 addr = plus_constant (Pmode, frame_pointer,
10853 offset + cfun_frame_layout.gprs_offset
10854 + (i - cfun_frame_layout.first_save_gpr_slot)
10855 * UNITS_PER_LONG);
10856 addr = gen_rtx_MEM (Pmode, addr);
10857 set_mem_alias_set (addr, get_frame_alias_set ());
10858 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
10859 }
10860 else
10861 cfa_restores
10862 = alloc_reg_note (REG_CFA_RESTORE,
10863 gen_rtx_REG (Pmode, i), cfa_restores);
10864 }
10865
10866 if (! sibcall)
10867 {
10868 /* Fetch the return address from the stack before the load multiple;
10869 this helps scheduling.
10870
10871 Only do this if we already decided that r14 needs to be
10872 saved to a stack slot. (And not just because r14 happens to
10873 be in between two GPRs which need saving.) Otherwise it
10874 would be difficult to take that decision back in
10875 s390_optimize_prologue. */
10876 if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
10877 {
10878 int return_regnum = find_unused_clobbered_reg();
10879 if (!return_regnum)
10880 return_regnum = 4;
10881 return_reg = gen_rtx_REG (Pmode, return_regnum);
10882
10883 addr = plus_constant (Pmode, frame_pointer,
10884 offset + cfun_frame_layout.gprs_offset
10885 + (RETURN_REGNUM
10886 - cfun_frame_layout.first_save_gpr_slot)
10887 * UNITS_PER_LONG);
10888 addr = gen_rtx_MEM (Pmode, addr);
10889 set_mem_alias_set (addr, get_frame_alias_set ());
10890 emit_move_insn (return_reg, addr);
10891
10892 /* Once we did that optimization we have to make sure
10893 s390_optimize_prologue does not try to remove the
10894 store of r14 since we will not be able to find the
10895 load issued here. */
10896 cfun_frame_layout.save_return_addr_p = true;
10897 }
10898 }
10899
10900 insn = restore_gprs (frame_pointer,
10901 offset + cfun_frame_layout.gprs_offset
10902 + (cfun_frame_layout.first_restore_gpr
10903 - cfun_frame_layout.first_save_gpr_slot)
10904 * UNITS_PER_LONG,
10905 cfun_frame_layout.first_restore_gpr,
10906 cfun_frame_layout.last_restore_gpr);
10907 insn = emit_insn (insn);
10908 REG_NOTES (insn) = cfa_restores;
10909 add_reg_note (insn, REG_CFA_DEF_CFA,
10910 plus_constant (Pmode, stack_pointer_rtx,
10911 STACK_POINTER_OFFSET));
10912 RTX_FRAME_RELATED_P (insn) = 1;
10913 }
10914
10915 s390_restore_gprs_from_fprs ();
10916
10917 if (! sibcall)
10918 {
10919
10920 /* Return to caller. */
10921
10922 p = rtvec_alloc (2);
10923
10924 RTVEC_ELT (p, 0) = ret_rtx;
10925 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
10926 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
10927 }
10928 }
10929
10930 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
10931
10932 static void
10933 s300_set_up_by_prologue (hard_reg_set_container *regs)
10934 {
10935 if (cfun->machine->base_reg
10936 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10937 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
10938 }
10939
10940 /* Return true if the function can use simple_return to return outside
10941 of a shrink-wrapped region. At present shrink-wrapping is supported
10942 in all cases. */
10943
10944 bool
10945 s390_can_use_simple_return_insn (void)
10946 {
10947 return true;
10948 }
10949
10950 /* Return true if the epilogue is guaranteed to contain only a return
10951 instruction and if a direct return can therefore be used instead.
10952 One of the main advantages of using direct return instructions
10953 is that we can then use conditional returns. */
10954
10955 bool
10956 s390_can_use_return_insn (void)
10957 {
10958 int i;
10959
10960 if (!reload_completed)
10961 return false;
10962
10963 if (crtl->profile)
10964 return false;
10965
10966 if (TARGET_TPF_PROFILING)
10967 return false;
10968
10969 for (i = 0; i < 16; i++)
10970 if (cfun_gpr_save_slot (i))
10971 return false;
10972
10973 /* For 31 bit this is not covered by the frame_size check below
10974 since f4, f6 are saved in the register save area without needing
10975 additional stack space. */
10976 if (!TARGET_64BIT
10977 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
10978 return false;
10979
10980 if (cfun->machine->base_reg
10981 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10982 return false;
10983
10984 return cfun_frame_layout.frame_size == 0;
10985 }
10986
10987 /* The VX ABI differs for vararg functions. Therefore we need the
10988 prototype of the callee to be available when passing vector type
10989 values. */
10990 static const char *
10991 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
10992 {
10993 return ((TARGET_VX_ABI
10994 && typelist == 0
10995 && VECTOR_TYPE_P (TREE_TYPE (val))
10996 && (funcdecl == NULL_TREE
10997 || (TREE_CODE (funcdecl) == FUNCTION_DECL
10998 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
10999 ? N_("Vector argument passed to unprototyped function")
11000 : NULL);
11001 }
11002
11003
11004 /* Return the size in bytes of a function argument of
11005 type TYPE and/or mode MODE. At least one of TYPE or
11006 MODE must be specified. */
11007
11008 static int
11009 s390_function_arg_size (machine_mode mode, const_tree type)
11010 {
11011 if (type)
11012 return int_size_in_bytes (type);
11013
11014 /* No type info available for some library calls ... */
11015 if (mode != BLKmode)
11016 return GET_MODE_SIZE (mode);
11017
11018 /* If we have neither type nor mode, abort */
11019 gcc_unreachable ();
11020 }
11021
11022 /* Return true if a function argument of type TYPE and mode MODE
11023 is to be passed in a vector register, if available. */
11024
11025 bool
11026 s390_function_arg_vector (machine_mode mode, const_tree type)
11027 {
11028 if (!TARGET_VX_ABI)
11029 return false;
11030
11031 if (s390_function_arg_size (mode, type) > 16)
11032 return false;
11033
11034 /* No type info available for some library calls ... */
11035 if (!type)
11036 return VECTOR_MODE_P (mode);
11037
11038 /* The ABI says that record types with a single member are treated
11039 just like that member would be. */
11040 while (TREE_CODE (type) == RECORD_TYPE)
11041 {
11042 tree field, single = NULL_TREE;
11043
11044 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11045 {
11046 if (TREE_CODE (field) != FIELD_DECL)
11047 continue;
11048
11049 if (single == NULL_TREE)
11050 single = TREE_TYPE (field);
11051 else
11052 return false;
11053 }
11054
11055 if (single == NULL_TREE)
11056 return false;
11057 else
11058 {
11059 /* If the field declaration adds extra bytes due to
11060 e.g. padding, it is not accepted as a vector type. */
11061 if (int_size_in_bytes (single) <= 0
11062 || int_size_in_bytes (single) != int_size_in_bytes (type))
11063 return false;
11064 type = single;
11065 }
11066 }
11067
11068 return VECTOR_TYPE_P (type);
11069 }
11070
11071 /* Return true if a function argument of type TYPE and mode MODE
11072 is to be passed in a floating-point register, if available. */
11073
11074 static bool
11075 s390_function_arg_float (machine_mode mode, const_tree type)
11076 {
11077 if (s390_function_arg_size (mode, type) > 8)
11078 return false;
11079
11080 /* Soft-float changes the ABI: no floating-point registers are used. */
11081 if (TARGET_SOFT_FLOAT)
11082 return false;
11083
11084 /* No type info available for some library calls ... */
11085 if (!type)
11086 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11087
11088 /* The ABI says that record types with a single member are treated
11089 just like that member would be. */
11090 while (TREE_CODE (type) == RECORD_TYPE)
11091 {
11092 tree field, single = NULL_TREE;
11093
11094 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11095 {
11096 if (TREE_CODE (field) != FIELD_DECL)
11097 continue;
11098
11099 if (single == NULL_TREE)
11100 single = TREE_TYPE (field);
11101 else
11102 return false;
11103 }
11104
11105 if (single == NULL_TREE)
11106 return false;
11107 else
11108 type = single;
11109 }
11110
11111 return TREE_CODE (type) == REAL_TYPE;
11112 }
11113
11114 /* Return true if a function argument of type TYPE and mode MODE
11115 is to be passed in an integer register, or a pair of integer
11116 registers, if available. */
11117
11118 static bool
11119 s390_function_arg_integer (machine_mode mode, const_tree type)
11120 {
11121 int size = s390_function_arg_size (mode, type);
11122 if (size > 8)
11123 return false;
11124
11125 /* No type info available for some library calls ... */
11126 if (!type)
11127 return GET_MODE_CLASS (mode) == MODE_INT
11128 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11129
11130 /* We accept small integral (and similar) types. */
11131 if (INTEGRAL_TYPE_P (type)
11132 || POINTER_TYPE_P (type)
11133 || TREE_CODE (type) == NULLPTR_TYPE
11134 || TREE_CODE (type) == OFFSET_TYPE
11135 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11136 return true;
11137
11138 /* We also accept structs of size 1, 2, 4, 8 that are not
11139 passed in floating-point registers. */
11140 if (AGGREGATE_TYPE_P (type)
11141 && exact_log2 (size) >= 0
11142 && !s390_function_arg_float (mode, type))
11143 return true;
11144
11145 return false;
11146 }
11147
11148 /* Return 1 if a function argument of type TYPE and mode MODE
11149 is to be passed by reference. The ABI specifies that only
11150 structures of size 1, 2, 4, or 8 bytes are passed by value,
11151 all other structures (and complex numbers) are passed by
11152 reference. */
11153
11154 static bool
11155 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11156 machine_mode mode, const_tree type,
11157 bool named ATTRIBUTE_UNUSED)
11158 {
11159 int size = s390_function_arg_size (mode, type);
11160
11161 if (s390_function_arg_vector (mode, type))
11162 return false;
11163
11164 if (size > 8)
11165 return true;
11166
11167 if (type)
11168 {
11169 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11170 return true;
11171
11172 if (TREE_CODE (type) == COMPLEX_TYPE
11173 || TREE_CODE (type) == VECTOR_TYPE)
11174 return true;
11175 }
11176
11177 return false;
11178 }
11179
11180 /* Update the data in CUM to advance over an argument of mode MODE and
11181 data type TYPE. (TYPE is null for libcalls where that information
11182 may not be available.). The boolean NAMED specifies whether the
11183 argument is a named argument (as opposed to an unnamed argument
11184 matching an ellipsis). */
11185
11186 static void
11187 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11188 const_tree type, bool named)
11189 {
11190 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11191
11192 if (s390_function_arg_vector (mode, type))
11193 {
11194 /* We are called for unnamed vector stdarg arguments which are
11195 passed on the stack. In this case this hook does not have to
11196 do anything since stack arguments are tracked by common
11197 code. */
11198 if (!named)
11199 return;
11200 cum->vrs += 1;
11201 }
11202 else if (s390_function_arg_float (mode, type))
11203 {
11204 cum->fprs += 1;
11205 }
11206 else if (s390_function_arg_integer (mode, type))
11207 {
11208 int size = s390_function_arg_size (mode, type);
11209 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11210 }
11211 else
11212 gcc_unreachable ();
11213 }
11214
11215 /* Define where to put the arguments to a function.
11216 Value is zero to push the argument on the stack,
11217 or a hard register in which to store the argument.
11218
11219 MODE is the argument's machine mode.
11220 TYPE is the data type of the argument (as a tree).
11221 This is null for libcalls where that information may
11222 not be available.
11223 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11224 the preceding args and about the function being called.
11225 NAMED is nonzero if this argument is a named parameter
11226 (otherwise it is an extra parameter matching an ellipsis).
11227
11228 On S/390, we use general purpose registers 2 through 6 to
11229 pass integer, pointer, and certain structure arguments, and
11230 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11231 to pass floating point arguments. All remaining arguments
11232 are pushed to the stack. */
11233
11234 static rtx
11235 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11236 const_tree type, bool named)
11237 {
11238 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11239
11240 if (!named)
11241 s390_check_type_for_vector_abi (type, true, false);
11242
11243 if (s390_function_arg_vector (mode, type))
11244 {
11245 /* Vector arguments being part of the ellipsis are passed on the
11246 stack. */
11247 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11248 return NULL_RTX;
11249
11250 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11251 }
11252 else if (s390_function_arg_float (mode, type))
11253 {
11254 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11255 return NULL_RTX;
11256 else
11257 return gen_rtx_REG (mode, cum->fprs + 16);
11258 }
11259 else if (s390_function_arg_integer (mode, type))
11260 {
11261 int size = s390_function_arg_size (mode, type);
11262 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11263
11264 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11265 return NULL_RTX;
11266 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11267 return gen_rtx_REG (mode, cum->gprs + 2);
11268 else if (n_gprs == 2)
11269 {
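/* E.g. a DImode argument with -m31 occupies two consecutive GPRs; the
PARALLEL built here describes the two SImode halves at byte offsets
0 and 4. */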
11270 rtvec p = rtvec_alloc (2);
11271
11272 RTVEC_ELT (p, 0)
11273 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11274 const0_rtx);
11275 RTVEC_ELT (p, 1)
11276 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11277 GEN_INT (4));
11278
11279 return gen_rtx_PARALLEL (mode, p);
11280 }
11281 }
11282
11283 /* After the real arguments, expand_call calls us once again
11284 with a void_type_node type. Whatever we return here is
11285 passed as operand 2 to the call expanders.
11286
11287 We don't need this feature ... */
11288 else if (type == void_type_node)
11289 return const0_rtx;
11290
11291 gcc_unreachable ();
11292 }
11293
11294 /* Return true if return values of type TYPE should be returned
11295 in a memory buffer whose address is passed by the caller as
11296 hidden first argument. */
11297
11298 static bool
11299 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11300 {
11301 /* We accept small integral (and similar) types. */
11302 if (INTEGRAL_TYPE_P (type)
11303 || POINTER_TYPE_P (type)
11304 || TREE_CODE (type) == OFFSET_TYPE
11305 || TREE_CODE (type) == REAL_TYPE)
11306 return int_size_in_bytes (type) > 8;
11307
11308 /* vector types which fit into a VR. */
11309 if (TARGET_VX_ABI
11310 && VECTOR_TYPE_P (type)
11311 && int_size_in_bytes (type) <= 16)
11312 return false;
11313
11314 /* Aggregates and similar constructs are always returned
11315 in memory. */
11316 if (AGGREGATE_TYPE_P (type)
11317 || TREE_CODE (type) == COMPLEX_TYPE
11318 || VECTOR_TYPE_P (type))
11319 return true;
11320
11321 /* ??? We get called on all sorts of random stuff from
11322 aggregate_value_p. We can't abort, but it's not clear
11323 what's safe to return. Pretend it's a struct I guess. */
11324 return true;
11325 }
11326
11327 /* Function arguments and return values are promoted to word size. */
11328
11329 static machine_mode
11330 s390_promote_function_mode (const_tree type, machine_mode mode,
11331 int *punsignedp,
11332 const_tree fntype ATTRIBUTE_UNUSED,
11333 int for_return ATTRIBUTE_UNUSED)
11334 {
11335 if (INTEGRAL_MODE_P (mode)
11336 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11337 {
11338 if (type != NULL_TREE && POINTER_TYPE_P (type))
11339 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11340 return Pmode;
11341 }
11342
11343 return mode;
11344 }
11345
11346 /* Define where to return a (scalar) value of type RET_TYPE.
11347 If RET_TYPE is null, define where to return a (scalar)
11348 value of mode MODE from a libcall. */
11349
11350 static rtx
11351 s390_function_and_libcall_value (machine_mode mode,
11352 const_tree ret_type,
11353 const_tree fntype_or_decl,
11354 bool outgoing ATTRIBUTE_UNUSED)
11355 {
11356 /* For vector return types it is important to use the RET_TYPE
11357 argument whenever available since the middle-end might have
11358 changed the mode to a scalar mode. */
11359 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11360 || (!ret_type && VECTOR_MODE_P (mode)));
11361
11362 /* For normal functions perform the promotion as
11363 promote_function_mode would do. */
11364 if (ret_type)
11365 {
11366 int unsignedp = TYPE_UNSIGNED (ret_type);
11367 mode = promote_function_mode (ret_type, mode, &unsignedp,
11368 fntype_or_decl, 1);
11369 }
11370
11371 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11372 || SCALAR_FLOAT_MODE_P (mode)
11373 || (TARGET_VX_ABI && vector_ret_type_p));
11374 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11375
11376 if (TARGET_VX_ABI && vector_ret_type_p)
11377 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11378 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11379 return gen_rtx_REG (mode, 16);
11380 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11381 || UNITS_PER_LONG == UNITS_PER_WORD)
11382 return gen_rtx_REG (mode, 2);
11383 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11384 {
11385 /* This case is triggered when returning a 64 bit value with
11386 -m31 -mzarch. Although the value would fit into a single
11387 register it has to be forced into a 32 bit register pair in
11388 order to match the ABI. */
11389 rtvec p = rtvec_alloc (2);
11390
11391 RTVEC_ELT (p, 0)
11392 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11393 RTVEC_ELT (p, 1)
11394 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11395
11396 return gen_rtx_PARALLEL (mode, p);
11397 }
11398
11399 gcc_unreachable ();
11400 }
11401
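/* Illustrative summary (not compiler code) of where the hook above places
   return values:

     integers / pointers up to UNITS_PER_LONG   -> GPR %r2
     float / double with hard float             -> FPR %f0 (hard reg 16)
     vector types under the vector ABI          -> FIRST_VEC_ARG_REGNO
     64-bit value with -m31 -mzarch             -> register pair %r2/%r3  */
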
11402 /* Define where to return a scalar return value of type RET_TYPE. */
11403
11404 static rtx
11405 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11406 bool outgoing)
11407 {
11408 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11409 fn_decl_or_type, outgoing);
11410 }
11411
11412 /* Define where to return a scalar libcall return value of mode
11413 MODE. */
11414
11415 static rtx
11416 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11417 {
11418 return s390_function_and_libcall_value (mode, NULL_TREE,
11419 NULL_TREE, true);
11420 }
11421
11422
11423 /* Create and return the va_list datatype.
11424
11425 On S/390, va_list is an array type equivalent to
11426
11427 typedef struct __va_list_tag
11428 {
11429 long __gpr;
11430 long __fpr;
11431 void *__overflow_arg_area;
11432 void *__reg_save_area;
11433 } va_list[1];
11434
11435 where __gpr and __fpr hold the number of general purpose
11436 or floating point arguments used up to now, respectively,
11437 __overflow_arg_area points to the stack location of the
11438 next argument passed on the stack, and __reg_save_area
11439 always points to the start of the register area in the
11440 call frame of the current function. The function prologue
11441 saves all registers used for argument passing into this
11442 area if the function uses variable arguments. */
11443
11444 static tree
11445 s390_build_builtin_va_list (void)
11446 {
11447 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11448
11449 record = lang_hooks.types.make_type (RECORD_TYPE);
11450
11451 type_decl =
11452 build_decl (BUILTINS_LOCATION,
11453 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11454
11455 f_gpr = build_decl (BUILTINS_LOCATION,
11456 FIELD_DECL, get_identifier ("__gpr"),
11457 long_integer_type_node);
11458 f_fpr = build_decl (BUILTINS_LOCATION,
11459 FIELD_DECL, get_identifier ("__fpr"),
11460 long_integer_type_node);
11461 f_ovf = build_decl (BUILTINS_LOCATION,
11462 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11463 ptr_type_node);
11464 f_sav = build_decl (BUILTINS_LOCATION,
11465 FIELD_DECL, get_identifier ("__reg_save_area"),
11466 ptr_type_node);
11467
11468 va_list_gpr_counter_field = f_gpr;
11469 va_list_fpr_counter_field = f_fpr;
11470
11471 DECL_FIELD_CONTEXT (f_gpr) = record;
11472 DECL_FIELD_CONTEXT (f_fpr) = record;
11473 DECL_FIELD_CONTEXT (f_ovf) = record;
11474 DECL_FIELD_CONTEXT (f_sav) = record;
11475
11476 TYPE_STUB_DECL (record) = type_decl;
11477 TYPE_NAME (record) = type_decl;
11478 TYPE_FIELDS (record) = f_gpr;
11479 DECL_CHAIN (f_gpr) = f_fpr;
11480 DECL_CHAIN (f_fpr) = f_ovf;
11481 DECL_CHAIN (f_ovf) = f_sav;
11482
11483 layout_type (record);
11484
11485 /* The correct type is an array type of one element. */
11486 return build_array_type (record, build_index_type (size_zero_node));
11487 }
11488
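/* A minimal user-level sketch (not compiler code) of how the type built
   above is used; the va_start/va_arg expansions below operate on exactly
   these four fields:

     #include <stdarg.h>

     long
     sum (int n, ...)
     {
       va_list ap;         // array of one __va_list_tag
       long s = 0;

       va_start (ap, n);   // fills __gpr, __fpr and both pointers
       while (n-- > 0)
         s += va_arg (ap, long);
       va_end (ap);
       return s;
     }  */
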
11489 /* Implement va_start by filling the va_list structure VALIST.
11490 STDARG_P is always true, and ignored.
11491 NEXTARG points to the first anonymous stack argument.
11492
11493 The following global variables are used to initialize
11494 the va_list structure:
11495
11496 crtl->args.info:
11497 holds number of gprs and fprs used for named arguments.
11498 crtl->args.arg_offset_rtx:
11499 holds the offset of the first anonymous stack argument
11500 (relative to the virtual arg pointer). */
11501
11502 static void
11503 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11504 {
11505 HOST_WIDE_INT n_gpr, n_fpr;
11506 int off;
11507 tree f_gpr, f_fpr, f_ovf, f_sav;
11508 tree gpr, fpr, ovf, sav, t;
11509
11510 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11511 f_fpr = DECL_CHAIN (f_gpr);
11512 f_ovf = DECL_CHAIN (f_fpr);
11513 f_sav = DECL_CHAIN (f_ovf);
11514
11515 valist = build_simple_mem_ref (valist);
11516 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11517 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11518 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11519 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11520
11521 /* Count number of gp and fp argument registers used. */
11522
11523 n_gpr = crtl->args.info.gprs;
11524 n_fpr = crtl->args.info.fprs;
11525
11526 if (cfun->va_list_gpr_size)
11527 {
11528 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11529 build_int_cst (NULL_TREE, n_gpr));
11530 TREE_SIDE_EFFECTS (t) = 1;
11531 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11532 }
11533
11534 if (cfun->va_list_fpr_size)
11535 {
11536 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11537 build_int_cst (NULL_TREE, n_fpr));
11538 TREE_SIDE_EFFECTS (t) = 1;
11539 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11540 }
11541
11542 /* Find the overflow area.
11543 FIXME: This currently is too pessimistic when the vector ABI is
11544 enabled. In that case we *always* set up the overflow area
11545 pointer. */
11546 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11547 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11548 || TARGET_VX_ABI)
11549 {
11550 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11551
11552 off = INTVAL (crtl->args.arg_offset_rtx);
11553 off = off < 0 ? 0 : off;
11554 if (TARGET_DEBUG_ARG)
11555 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11556 (int)n_gpr, (int)n_fpr, off);
11557
11558 t = fold_build_pointer_plus_hwi (t, off);
11559
11560 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11561 TREE_SIDE_EFFECTS (t) = 1;
11562 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11563 }
11564
11565 /* Find the register save area. */
11566 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11567 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11568 {
11569 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11570 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11571
11572 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11573 TREE_SIDE_EFFECTS (t) = 1;
11574 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11575 }
11576 }
11577
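/* Illustrative values (hypothetical, not compiler code): for the variadic
   function "long sum (int n, ...)" above, the single named argument uses
   one GPR, so the statements generated here amount to

     __gpr = 1;  __fpr = 0;
     __overflow_arg_area = <virtual incoming args + arg offset>;
     __reg_save_area     = <start of the register save area>;

   with the last two assignments only emitted when the corresponding
   conditions above are met.  */
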
11578 /* Implement va_arg by updating the va_list structure
11579 VALIST as required to retrieve an argument of type
11580 TYPE, and returning that argument.
11581
11582 Generates code equivalent to:
11583
11584 if (integral value) {
11585 if (size <= 4 && args.gpr < 5 ||
11586 size > 4 && args.gpr < 4 )
11587 ret = args.reg_save_area[args.gpr+8]
11588 else
11589 ret = *args.overflow_arg_area++;
11590 } else if (vector value) {
11591 ret = *args.overflow_arg_area;
11592 args.overflow_arg_area += size / 8;
11593 } else if (float value) {
11594 if (args.fgpr < 2)
11595 ret = args.reg_save_area[args.fpr+64]
11596 else
11597 ret = *args.overflow_arg_area++;
11598 } else if (aggregate value) {
11599 if (args.gpr < 5)
11600 ret = *args.reg_save_area[args.gpr]
11601 else
11602 ret = **args.overflow_arg_area++;
11603 } */
11604
11605 static tree
11606 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11607 gimple_seq *post_p ATTRIBUTE_UNUSED)
11608 {
11609 tree f_gpr, f_fpr, f_ovf, f_sav;
11610 tree gpr, fpr, ovf, sav, reg, t, u;
11611 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11612 tree lab_false, lab_over;
11613 tree addr = create_tmp_var (ptr_type_node, "addr");
11614 bool left_align_p; /* Whether a value < UNITS_PER_LONG is left-aligned
11615 within its stack slot. */
11616
11617 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11618 f_fpr = DECL_CHAIN (f_gpr);
11619 f_ovf = DECL_CHAIN (f_fpr);
11620 f_sav = DECL_CHAIN (f_ovf);
11621
11622 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11623 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11624 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11625
11626 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11627 both appear on a lhs. */
11628 valist = unshare_expr (valist);
11629 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11630
11631 size = int_size_in_bytes (type);
11632
11633 s390_check_type_for_vector_abi (type, true, false);
11634
11635 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11636 {
11637 if (TARGET_DEBUG_ARG)
11638 {
11639 fprintf (stderr, "va_arg: aggregate type");
11640 debug_tree (type);
11641 }
11642
11643 /* Aggregates are passed by reference. */
11644 indirect_p = 1;
11645 reg = gpr;
11646 n_reg = 1;
11647
11648 /* Kernel stack layout on 31 bit: It is assumed here that no padding
11649 will be added by s390_frame_info because for va_args an even number
11650 of GPRs always has to be saved (r15-r2 = 14 regs). */
11651 sav_ofs = 2 * UNITS_PER_LONG;
11652 sav_scale = UNITS_PER_LONG;
11653 size = UNITS_PER_LONG;
11654 max_reg = GP_ARG_NUM_REG - n_reg;
11655 left_align_p = false;
11656 }
11657 else if (s390_function_arg_vector (TYPE_MODE (type), type))
11658 {
11659 if (TARGET_DEBUG_ARG)
11660 {
11661 fprintf (stderr, "va_arg: vector type");
11662 debug_tree (type);
11663 }
11664
11665 indirect_p = 0;
11666 reg = NULL_TREE;
11667 n_reg = 0;
11668 sav_ofs = 0;
11669 sav_scale = 8;
11670 max_reg = 0;
11671 left_align_p = true;
11672 }
11673 else if (s390_function_arg_float (TYPE_MODE (type), type))
11674 {
11675 if (TARGET_DEBUG_ARG)
11676 {
11677 fprintf (stderr, "va_arg: float type");
11678 debug_tree (type);
11679 }
11680
11681 /* FP args go in FP registers, if present. */
11682 indirect_p = 0;
11683 reg = fpr;
11684 n_reg = 1;
11685 sav_ofs = 16 * UNITS_PER_LONG;
11686 sav_scale = 8;
11687 max_reg = FP_ARG_NUM_REG - n_reg;
11688 left_align_p = false;
11689 }
11690 else
11691 {
11692 if (TARGET_DEBUG_ARG)
11693 {
11694 fprintf (stderr, "va_arg: other type");
11695 debug_tree (type);
11696 }
11697
11698 /* Otherwise into GP registers. */
11699 indirect_p = 0;
11700 reg = gpr;
11701 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11702
11703 /* Kernel stack layout on 31 bit: It is assumed here that no padding
11704 will be added by s390_frame_info because for va_args an even number
11705 of GPRs always has to be saved (r15-r2 = 14 regs). */
11706 sav_ofs = 2 * UNITS_PER_LONG;
11707
11708 if (size < UNITS_PER_LONG)
11709 sav_ofs += UNITS_PER_LONG - size;
11710
11711 sav_scale = UNITS_PER_LONG;
11712 max_reg = GP_ARG_NUM_REG - n_reg;
11713 left_align_p = false;
11714 }
11715
11716 /* Pull the value out of the saved registers ... */
11717
11718 if (reg != NULL_TREE)
11719 {
11720 /*
11721 if (reg > ((typeof (reg))max_reg))
11722 goto lab_false;
11723
11724 addr = sav + sav_ofs + reg * sav_scale;
11725
11726 goto lab_over;
11727
11728 lab_false:
11729 */
11730
11731 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11732 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11733
11734 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11735 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11736 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11737 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11738 gimplify_and_add (t, pre_p);
11739
11740 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11741 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11742 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11743 t = fold_build_pointer_plus (t, u);
11744
11745 gimplify_assign (addr, t, pre_p);
11746
11747 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11748
11749 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
11750 }
11751
11752 /* ... Otherwise out of the overflow area. */
11753
11754 t = ovf;
11755 if (size < UNITS_PER_LONG && !left_align_p)
11756 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11757
11758 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11759
11760 gimplify_assign (addr, t, pre_p);
11761
11762 if (size < UNITS_PER_LONG && left_align_p)
11763 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
11764 else
11765 t = fold_build_pointer_plus_hwi (t, size);
11766
11767 gimplify_assign (ovf, t, pre_p);
11768
11769 if (reg != NULL_TREE)
11770 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11771
11772
11773 /* Increment register save count. */
11774
11775 if (n_reg > 0)
11776 {
11777 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
11778 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
11779 gimplify_and_add (u, pre_p);
11780 }
11781
11782 if (indirect_p)
11783 {
11784 t = build_pointer_type_for_mode (build_pointer_type (type),
11785 ptr_mode, true);
11786 addr = fold_convert (t, addr);
11787 addr = build_va_arg_indirect_ref (addr);
11788 }
11789 else
11790 {
11791 t = build_pointer_type_for_mode (type, ptr_mode, true);
11792 addr = fold_convert (t, addr);
11793 }
11794
11795 return build_va_arg_indirect_ref (addr);
11796 }
11797
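/* Worked example (illustrative, 64-bit target): fetching a "long" from
   the register save area.  With sav_ofs = 2 * UNITS_PER_LONG = 16 and
   sav_scale = 8, the gimple built above computes

     addr = __reg_save_area + 16 + __gpr * 8;

   i.e. the save slot of GPR (2 + __gpr), and then increments __gpr.
   Once __gpr exceeds GP_ARG_NUM_REG - 1 the value is taken from
   __overflow_arg_area instead, which is advanced by 8 bytes.  */
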
11798 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
11799 expanders.
11800 DEST - Register location where CC will be stored.
11801 TDB - Pointer to a 256-byte area where to store the transaction
11802 diagnostic block. NULL if TDB is not needed.
11803 RETRY - Retry count value. If non-NULL, a retry loop for CC2
11804 is emitted.
11805 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
11806 of the tbegin instruction pattern. */
11807
11808 void
11809 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11810 {
11811 rtx retry_plus_two = gen_reg_rtx (SImode);
11812 rtx retry_reg = gen_reg_rtx (SImode);
11813 rtx_code_label *retry_label = NULL;
11814
11815 if (retry != NULL_RTX)
11816 {
11817 emit_move_insn (retry_reg, retry);
11818 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11819 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11820 retry_label = gen_label_rtx ();
11821 emit_label (retry_label);
11822 }
11823
11824 if (clobber_fprs_p)
11825 {
11826 if (TARGET_VX)
11827 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11828 tdb));
11829 else
11830 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11831 tdb));
11832 }
11833 else
11834 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11835 tdb));
11836
11837 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11838 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11839 CC_REGNUM)),
11840 UNSPEC_CC_TO_INT));
11841 if (retry != NULL_RTX)
11842 {
11843 const int CC0 = 1 << 3;
11844 const int CC1 = 1 << 2;
11845 const int CC3 = 1 << 0;
11846 rtx jump;
11847 rtx count = gen_reg_rtx (SImode);
11848 rtx_code_label *leave_label = gen_label_rtx ();
11849
11850 /* Exit for success and permanent failures. */
11851 jump = s390_emit_jump (leave_label,
11852 gen_rtx_EQ (VOIDmode,
11853 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11854 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11855 LABEL_NUSES (leave_label) = 1;
11856
11857 /* CC2 - transient failure. Perform retry with ppa. */
11858 emit_move_insn (count, retry_plus_two);
11859 emit_insn (gen_subsi3 (count, count, retry_reg));
11860 emit_insn (gen_tx_assist (count));
11861 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11862 retry_reg,
11863 retry_reg));
11864 JUMP_LABEL (jump) = retry_label;
11865 LABEL_NUSES (retry_label) = 1;
11866 emit_label (leave_label);
11867 }
11868 }
11869
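/* Rough shape of what the expander above emits for the retry variant
   (illustrative pseudo-code, not the exact RTL):

     retry_plus_two = retry + 2;
     retry_reg      = retry + 1;
   retry_label:
     CC = tbegin (tdb);
     dest = CC;
     if (CC is 0, 1 or 3) goto leave;       // success or permanent failure
     ppa (retry_plus_two - retry_reg);      // CC2: processor-assist hint
     if (--retry_reg != 0) goto retry_label;
   leave:                                                                  */
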
11870
11871 /* Return the decl for the target specific builtin with the function
11872 code FCODE. */
11873
11874 static tree
11875 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11876 {
11877 if (fcode >= S390_BUILTIN_MAX)
11878 return error_mark_node;
11879
11880 return s390_builtin_decls[fcode];
11881 }
11882
11883 /* We call mcount before the function prologue. So a profiled leaf
11884 function should stay a leaf function. */
11885
11886 static bool
11887 s390_keep_leaf_when_profiled ()
11888 {
11889 return true;
11890 }
11891
11892 /* Output assembly code for the trampoline template to
11893 stdio stream FILE.
11894
11895 On S/390, we use gpr 1 internally in the trampoline code;
11896 gpr 0 is used to hold the static chain. */
11897
11898 static void
11899 s390_asm_trampoline_template (FILE *file)
11900 {
11901 rtx op[2];
11902 op[0] = gen_rtx_REG (Pmode, 0);
11903 op[1] = gen_rtx_REG (Pmode, 1);
11904
11905 if (TARGET_64BIT)
11906 {
11907 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11908 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11909 output_asm_insn ("br\t%1", op); /* 2 byte */
11910 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11911 }
11912 else
11913 {
11914 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11915 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11916 output_asm_insn ("br\t%1", op); /* 2 byte */
11917 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11918 }
11919 }
11920
11921 /* Emit RTL insns to initialize the variable parts of a trampoline.
11922 FNADDR is an RTX for the address of the function's pure code.
11923 CXT is an RTX for the static chain value for the function. */
11924
11925 static void
11926 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11927 {
11928 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11929 rtx mem;
11930
11931 emit_block_move (m_tramp, assemble_trampoline_template (),
11932 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11933
11934 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11935 emit_move_insn (mem, cxt);
11936 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11937 emit_move_insn (mem, fnaddr);
11938 }
11939
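/* Resulting trampoline layout (illustrative), combining the template with
   the two stores above:

     offset 0                  : basr/lmg|lm/br code from the template
     offset 2 * UNITS_PER_LONG : static chain value (ends up in %r0)
     offset 3 * UNITS_PER_LONG : target function address (ends up in %r1)

   The lmg/lm in the template loads both words relative to %r1, which the
   preceding basr set to the address right after itself.  */
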
11940 /* Output assembler code to FILE to increment profiler label # LABELNO
11941 for profiling a function entry. */
11942
11943 void
11944 s390_function_profiler (FILE *file, int labelno)
11945 {
11946 rtx op[7];
11947
11948 char label[128];
11949 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11950
11951 fprintf (file, "# function profiler \n");
11952
11953 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11954 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11955 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11956
11957 op[2] = gen_rtx_REG (Pmode, 1);
11958 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11959 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
11960
11961 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
11962 if (flag_pic)
11963 {
11964 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
11965 op[4] = gen_rtx_CONST (Pmode, op[4]);
11966 }
11967
11968 if (TARGET_64BIT)
11969 {
11970 output_asm_insn ("stg\t%0,%1", op);
11971 output_asm_insn ("larl\t%2,%3", op);
11972 output_asm_insn ("brasl\t%0,%4", op);
11973 output_asm_insn ("lg\t%0,%1", op);
11974 }
11975 else if (!flag_pic)
11976 {
11977 op[6] = gen_label_rtx ();
11978
11979 output_asm_insn ("st\t%0,%1", op);
11980 output_asm_insn ("bras\t%2,%l6", op);
11981 output_asm_insn (".long\t%4", op);
11982 output_asm_insn (".long\t%3", op);
11983 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11984 output_asm_insn ("l\t%0,0(%2)", op);
11985 output_asm_insn ("l\t%2,4(%2)", op);
11986 output_asm_insn ("basr\t%0,%0", op);
11987 output_asm_insn ("l\t%0,%1", op);
11988 }
11989 else
11990 {
11991 op[5] = gen_label_rtx ();
11992 op[6] = gen_label_rtx ();
11993
11994 output_asm_insn ("st\t%0,%1", op);
11995 output_asm_insn ("bras\t%2,%l6", op);
11996 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
11997 output_asm_insn (".long\t%4-%l5", op);
11998 output_asm_insn (".long\t%3-%l5", op);
11999 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12000 output_asm_insn ("lr\t%0,%2", op);
12001 output_asm_insn ("a\t%0,0(%2)", op);
12002 output_asm_insn ("a\t%2,4(%2)", op);
12003 output_asm_insn ("basr\t%0,%0", op);
12004 output_asm_insn ("l\t%0,%1", op);
12005 }
12006 }
12007
12008 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12009 into its SYMBOL_REF_FLAGS. */
12010
12011 static void
12012 s390_encode_section_info (tree decl, rtx rtl, int first)
12013 {
12014 default_encode_section_info (decl, rtl, first);
12015
12016 if (TREE_CODE (decl) == VAR_DECL)
12017 {
12018 /* Store the alignment to be able to check if we can use
12019 a larl/load-relative instruction. We only handle the cases
12020 that can go wrong (i.e. no FUNC_DECLs). If a symref does
12021 not have any flag we assume it to be correctly aligned. */
12022
12023 if (DECL_ALIGN (decl) % 64)
12024 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12025
12026 if (DECL_ALIGN (decl) % 32)
12027 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12028
12029 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12030 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12031 }
12032
12033 /* Literal pool references don't have a decl so they are handled
12034 differently here. We rely on the information in the MEM_ALIGN
12035 entry to decide upon the alignment. */
12036 if (MEM_P (rtl)
12037 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12038 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
12039 && MEM_ALIGN (rtl) != 0
12040 && GET_MODE_BITSIZE (GET_MODE (rtl)) != 0)
12041 {
12042 if (MEM_ALIGN (rtl) % 64)
12043 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12044
12045 if (MEM_ALIGN (rtl) % 32)
12046 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12047
12048 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12049 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12050 }
12051 }
12052
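/* Illustrative example (not compiler code): for a definition like

     char buf[3] __attribute__ ((aligned (2)));

   DECL_ALIGN is 16 bits, so the code above sets NOTALIGN8 and NOTALIGN4
   on the symbol but leaves NOTALIGN2 clear, i.e. 2-byte aligned relative
   accesses remain allowed while 4- and 8-byte ones do not.  */
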
12053 /* Output thunk to FILE that implements a C++ virtual function call (with
12054 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12055 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12056 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12057 relative to the resulting this pointer. */
12058
12059 static void
12060 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12061 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12062 tree function)
12063 {
12064 rtx op[10];
12065 int nonlocal = 0;
12066
12067 /* Make sure unwind info is emitted for the thunk if needed. */
12068 final_start_function (emit_barrier (), file, 1);
12069
12070 /* Operand 0 is the target function. */
12071 op[0] = XEXP (DECL_RTL (function), 0);
12072 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12073 {
12074 nonlocal = 1;
12075 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12076 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12077 op[0] = gen_rtx_CONST (Pmode, op[0]);
12078 }
12079
12080 /* Operand 1 is the 'this' pointer. */
12081 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12082 op[1] = gen_rtx_REG (Pmode, 3);
12083 else
12084 op[1] = gen_rtx_REG (Pmode, 2);
12085
12086 /* Operand 2 is the delta. */
12087 op[2] = GEN_INT (delta);
12088
12089 /* Operand 3 is the vcall_offset. */
12090 op[3] = GEN_INT (vcall_offset);
12091
12092 /* Operand 4 is the temporary register. */
12093 op[4] = gen_rtx_REG (Pmode, 1);
12094
12095 /* Operands 5 to 8 can be used as labels. */
12096 op[5] = NULL_RTX;
12097 op[6] = NULL_RTX;
12098 op[7] = NULL_RTX;
12099 op[8] = NULL_RTX;
12100
12101 /* Operand 9 can be used as a temporary register. */
12102 op[9] = NULL_RTX;
12103
12104 /* Generate code. */
12105 if (TARGET_64BIT)
12106 {
12107 /* Setup literal pool pointer if required. */
12108 if ((!DISP_IN_RANGE (delta)
12109 && !CONST_OK_FOR_K (delta)
12110 && !CONST_OK_FOR_Os (delta))
12111 || (!DISP_IN_RANGE (vcall_offset)
12112 && !CONST_OK_FOR_K (vcall_offset)
12113 && !CONST_OK_FOR_Os (vcall_offset)))
12114 {
12115 op[5] = gen_label_rtx ();
12116 output_asm_insn ("larl\t%4,%5", op);
12117 }
12118
12119 /* Add DELTA to this pointer. */
12120 if (delta)
12121 {
12122 if (CONST_OK_FOR_J (delta))
12123 output_asm_insn ("la\t%1,%2(%1)", op);
12124 else if (DISP_IN_RANGE (delta))
12125 output_asm_insn ("lay\t%1,%2(%1)", op);
12126 else if (CONST_OK_FOR_K (delta))
12127 output_asm_insn ("aghi\t%1,%2", op);
12128 else if (CONST_OK_FOR_Os (delta))
12129 output_asm_insn ("agfi\t%1,%2", op);
12130 else
12131 {
12132 op[6] = gen_label_rtx ();
12133 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12134 }
12135 }
12136
12137 /* Perform vcall adjustment. */
12138 if (vcall_offset)
12139 {
12140 if (DISP_IN_RANGE (vcall_offset))
12141 {
12142 output_asm_insn ("lg\t%4,0(%1)", op);
12143 output_asm_insn ("ag\t%1,%3(%4)", op);
12144 }
12145 else if (CONST_OK_FOR_K (vcall_offset))
12146 {
12147 output_asm_insn ("lghi\t%4,%3", op);
12148 output_asm_insn ("ag\t%4,0(%1)", op);
12149 output_asm_insn ("ag\t%1,0(%4)", op);
12150 }
12151 else if (CONST_OK_FOR_Os (vcall_offset))
12152 {
12153 output_asm_insn ("lgfi\t%4,%3", op);
12154 output_asm_insn ("ag\t%4,0(%1)", op);
12155 output_asm_insn ("ag\t%1,0(%4)", op);
12156 }
12157 else
12158 {
12159 op[7] = gen_label_rtx ();
12160 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12161 output_asm_insn ("ag\t%4,0(%1)", op);
12162 output_asm_insn ("ag\t%1,0(%4)", op);
12163 }
12164 }
12165
12166 /* Jump to target. */
12167 output_asm_insn ("jg\t%0", op);
12168
12169 /* Output literal pool if required. */
12170 if (op[5])
12171 {
12172 output_asm_insn (".align\t4", op);
12173 targetm.asm_out.internal_label (file, "L",
12174 CODE_LABEL_NUMBER (op[5]));
12175 }
12176 if (op[6])
12177 {
12178 targetm.asm_out.internal_label (file, "L",
12179 CODE_LABEL_NUMBER (op[6]));
12180 output_asm_insn (".long\t%2", op);
12181 }
12182 if (op[7])
12183 {
12184 targetm.asm_out.internal_label (file, "L",
12185 CODE_LABEL_NUMBER (op[7]));
12186 output_asm_insn (".long\t%3", op);
12187 }
12188 }
12189 else
12190 {
12191 /* Setup base pointer if required. */
12192 if (!vcall_offset
12193 || (!DISP_IN_RANGE (delta)
12194 && !CONST_OK_FOR_K (delta)
12195 && !CONST_OK_FOR_Os (delta))
12196 || (!DISP_IN_RANGE (delta)
12197 && !CONST_OK_FOR_K (vcall_offset)
12198 && !CONST_OK_FOR_Os (vcall_offset)))
12199 {
12200 op[5] = gen_label_rtx ();
12201 output_asm_insn ("basr\t%4,0", op);
12202 targetm.asm_out.internal_label (file, "L",
12203 CODE_LABEL_NUMBER (op[5]));
12204 }
12205
12206 /* Add DELTA to this pointer. */
12207 if (delta)
12208 {
12209 if (CONST_OK_FOR_J (delta))
12210 output_asm_insn ("la\t%1,%2(%1)", op);
12211 else if (DISP_IN_RANGE (delta))
12212 output_asm_insn ("lay\t%1,%2(%1)", op);
12213 else if (CONST_OK_FOR_K (delta))
12214 output_asm_insn ("ahi\t%1,%2", op);
12215 else if (CONST_OK_FOR_Os (delta))
12216 output_asm_insn ("afi\t%1,%2", op);
12217 else
12218 {
12219 op[6] = gen_label_rtx ();
12220 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12221 }
12222 }
12223
12224 /* Perform vcall adjustment. */
12225 if (vcall_offset)
12226 {
12227 if (CONST_OK_FOR_J (vcall_offset))
12228 {
12229 output_asm_insn ("l\t%4,0(%1)", op);
12230 output_asm_insn ("a\t%1,%3(%4)", op);
12231 }
12232 else if (DISP_IN_RANGE (vcall_offset))
12233 {
12234 output_asm_insn ("l\t%4,0(%1)", op);
12235 output_asm_insn ("ay\t%1,%3(%4)", op);
12236 }
12237 else if (CONST_OK_FOR_K (vcall_offset))
12238 {
12239 output_asm_insn ("lhi\t%4,%3", op);
12240 output_asm_insn ("a\t%4,0(%1)", op);
12241 output_asm_insn ("a\t%1,0(%4)", op);
12242 }
12243 else if (CONST_OK_FOR_Os (vcall_offset))
12244 {
12245 output_asm_insn ("iilf\t%4,%3", op);
12246 output_asm_insn ("a\t%4,0(%1)", op);
12247 output_asm_insn ("a\t%1,0(%4)", op);
12248 }
12249 else
12250 {
12251 op[7] = gen_label_rtx ();
12252 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12253 output_asm_insn ("a\t%4,0(%1)", op);
12254 output_asm_insn ("a\t%1,0(%4)", op);
12255 }
12256
12257 /* We had to clobber the base pointer register.
12258 Re-setup the base pointer (with a different base). */
12259 op[5] = gen_label_rtx ();
12260 output_asm_insn ("basr\t%4,0", op);
12261 targetm.asm_out.internal_label (file, "L",
12262 CODE_LABEL_NUMBER (op[5]));
12263 }
12264
12265 /* Jump to target. */
12266 op[8] = gen_label_rtx ();
12267
12268 if (!flag_pic)
12269 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12270 else if (!nonlocal)
12271 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12272 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12273 else if (flag_pic == 1)
12274 {
12275 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12276 output_asm_insn ("l\t%4,%0(%4)", op);
12277 }
12278 else if (flag_pic == 2)
12279 {
12280 op[9] = gen_rtx_REG (Pmode, 0);
12281 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12282 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12283 output_asm_insn ("ar\t%4,%9", op);
12284 output_asm_insn ("l\t%4,0(%4)", op);
12285 }
12286
12287 output_asm_insn ("br\t%4", op);
12288
12289 /* Output literal pool. */
12290 output_asm_insn (".align\t4", op);
12291
12292 if (nonlocal && flag_pic == 2)
12293 output_asm_insn (".long\t%0", op);
12294 if (nonlocal)
12295 {
12296 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12297 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12298 }
12299
12300 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12301 if (!flag_pic)
12302 output_asm_insn (".long\t%0", op);
12303 else
12304 output_asm_insn (".long\t%0-%5", op);
12305
12306 if (op[6])
12307 {
12308 targetm.asm_out.internal_label (file, "L",
12309 CODE_LABEL_NUMBER (op[6]));
12310 output_asm_insn (".long\t%2", op);
12311 }
12312 if (op[7])
12313 {
12314 targetm.asm_out.internal_label (file, "L",
12315 CODE_LABEL_NUMBER (op[7]));
12316 output_asm_insn (".long\t%3", op);
12317 }
12318 }
12319 final_end_function ();
12320 }
12321
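/* Illustrative context (user-level C++, not compiler code): such a thunk
   is emitted for multiple inheritance, e.g.

     struct A { virtual void f (); };
     struct B { virtual void g (); };
     struct D : A, B { void g (); };

   Calling g() through a B* that points into a D object reaches a thunk
   which adds the (negative) DELTA to the this pointer so that it again
   addresses the enclosing D, and then jumps to D::g.  */
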
12322 static bool
12323 s390_valid_pointer_mode (machine_mode mode)
12324 {
12325 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12326 }
12327
12328 /* Checks whether the given CALL_EXPR would use a caller
12329 saved register. This is used to decide whether sibling call
12330 optimization could be performed on the respective function
12331 call. */
12332
12333 static bool
12334 s390_call_saved_register_used (tree call_expr)
12335 {
12336 CUMULATIVE_ARGS cum_v;
12337 cumulative_args_t cum;
12338 tree parameter;
12339 machine_mode mode;
12340 tree type;
12341 rtx parm_rtx;
12342 int reg, i;
12343
12344 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12345 cum = pack_cumulative_args (&cum_v);
12346
12347 for (i = 0; i < call_expr_nargs (call_expr); i++)
12348 {
12349 parameter = CALL_EXPR_ARG (call_expr, i);
12350 gcc_assert (parameter);
12351
12352 /* For an undeclared variable passed as parameter we will get
12353 an ERROR_MARK node here. */
12354 if (TREE_CODE (parameter) == ERROR_MARK)
12355 return true;
12356
12357 type = TREE_TYPE (parameter);
12358 gcc_assert (type);
12359
12360 mode = TYPE_MODE (type);
12361 gcc_assert (mode);
12362
12363 /* We assume that in the target function all parameters are
12364 named. This only has an impact on vector argument register
12365 usage, none of which is call-saved. */
12366 if (pass_by_reference (&cum_v, mode, type, true))
12367 {
12368 mode = Pmode;
12369 type = build_pointer_type (type);
12370 }
12371
12372 parm_rtx = s390_function_arg (cum, mode, type, true);
12373
12374 s390_function_arg_advance (cum, mode, type, true);
12375
12376 if (!parm_rtx)
12377 continue;
12378
12379 if (REG_P (parm_rtx))
12380 {
12381 for (reg = 0;
12382 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12383 reg++)
12384 if (!call_used_regs[reg + REGNO (parm_rtx)])
12385 return true;
12386 }
12387
12388 if (GET_CODE (parm_rtx) == PARALLEL)
12389 {
12390 int i;
12391
12392 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12393 {
12394 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12395
12396 gcc_assert (REG_P (r));
12397
12398 for (reg = 0;
12399 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12400 reg++)
12401 if (!call_used_regs[reg + REGNO (r)])
12402 return true;
12403 }
12404 }
12405
12406 }
12407 return false;
12408 }
12409
12410 /* Return true if the given call expression can be
12411 turned into a sibling call.
12412 DECL holds the declaration of the function to be called whereas
12413 EXP is the call expression itself. */
12414
12415 static bool
12416 s390_function_ok_for_sibcall (tree decl, tree exp)
12417 {
12418 /* The TPF epilogue uses register 1. */
12419 if (TARGET_TPF_PROFILING)
12420 return false;
12421
12422 /* The 31 bit PLT code uses register 12 (GOT pointer, call-saved)
12423 which would have to be restored before the sibcall. */
12424 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12425 return false;
12426
12427 /* Register 6 on s390 is available as an argument register but unfortunately
12428 "caller saved". This makes functions needing this register for arguments
12429 not suitable for sibcalls. */
12430 return !s390_call_saved_register_used (exp);
12431 }
12432
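/* Illustrative example (not compiler code): a tail call such as

     extern long callee (long, long, long, long, long);
     long caller (void) { return callee (1, 2, 3, 4, 5); }

   passes its fifth argument in %r6, which is call-saved, so
   s390_call_saved_register_used returns true and no sibcall is made.  */
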
12433 /* Return the fixed registers used for condition codes. */
12434
12435 static bool
12436 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12437 {
12438 *p1 = CC_REGNUM;
12439 *p2 = INVALID_REGNUM;
12440
12441 return true;
12442 }
12443
12444 /* This function is used by the call expanders of the machine description.
12445 It emits the call insn itself together with the necessary operations
12446 to adjust the target address and returns the emitted insn.
12447 ADDR_LOCATION is the target address rtx
12448 TLS_CALL the location of the thread-local symbol
12449 RESULT_REG the register where the result of the call should be stored
12450 RETADDR_REG the register where the return address should be stored
12451 If this parameter is NULL_RTX the call is considered
12452 to be a sibling call. */
12453
12454 rtx_insn *
12455 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12456 rtx retaddr_reg)
12457 {
12458 bool plt_call = false;
12459 rtx_insn *insn;
12460 rtx call;
12461 rtx clobber;
12462 rtvec vec;
12463
12464 /* Direct function calls need special treatment. */
12465 if (GET_CODE (addr_location) == SYMBOL_REF)
12466 {
12467 /* When calling a global routine in PIC mode, we must
12468 replace the symbol itself with the PLT stub. */
12469 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12470 {
12471 if (retaddr_reg != NULL_RTX)
12472 {
12473 addr_location = gen_rtx_UNSPEC (Pmode,
12474 gen_rtvec (1, addr_location),
12475 UNSPEC_PLT);
12476 addr_location = gen_rtx_CONST (Pmode, addr_location);
12477 plt_call = true;
12478 }
12479 else
12480 /* For -fpic code the PLT entries might use r12 which is
12481 call-saved. Therefore we cannot do a sibcall when
12482 calling directly using a symbol ref. When reaching
12483 this point we decided (in s390_function_ok_for_sibcall)
12484 to do a sibcall for a function pointer but one of the
12485 optimizers was able to get rid of the function pointer
12486 by propagating the symbol ref into the call. This
12487 optimization is illegal for S/390 so we turn the direct
12488 call into an indirect call again. */
12489 addr_location = force_reg (Pmode, addr_location);
12490 }
12491
12492 /* Unless we can use the bras(l) insn, force the
12493 routine address into a register. */
12494 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12495 {
12496 if (flag_pic)
12497 addr_location = legitimize_pic_address (addr_location, 0);
12498 else
12499 addr_location = force_reg (Pmode, addr_location);
12500 }
12501 }
12502
12503 /* If it is already an indirect call or the code above moved the
12504 SYMBOL_REF to somewhere else make sure the address can be found in
12505 register 1. */
12506 if (retaddr_reg == NULL_RTX
12507 && GET_CODE (addr_location) != SYMBOL_REF
12508 && !plt_call)
12509 {
12510 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12511 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12512 }
12513
12514 addr_location = gen_rtx_MEM (QImode, addr_location);
12515 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12516
12517 if (result_reg != NULL_RTX)
12518 call = gen_rtx_SET (result_reg, call);
12519
12520 if (retaddr_reg != NULL_RTX)
12521 {
12522 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12523
12524 if (tls_call != NULL_RTX)
12525 vec = gen_rtvec (3, call, clobber,
12526 gen_rtx_USE (VOIDmode, tls_call));
12527 else
12528 vec = gen_rtvec (2, call, clobber);
12529
12530 call = gen_rtx_PARALLEL (VOIDmode, vec);
12531 }
12532
12533 insn = emit_call_insn (call);
12534
12535 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12536 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12537 {
12538 /* s390_function_ok_for_sibcall should
12539 have denied sibcalls in this case. */
12540 gcc_assert (retaddr_reg != NULL_RTX);
12541 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12542 }
12543 return insn;
12544 }
12545
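/* Illustrative summary (not compiler code) of the cases handled above:

     direct call to a local symbol          -> plain symbolic call
     PIC call to a global, normal call      -> symbol wrapped in UNSPEC_PLT
     sibcall through a function pointer     -> address moved into %r1 first
     TLS calls and 31-bit PLT calls         -> %r12 (GOT register) marked
                                               as used on the call insn  */
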
12546 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12547
12548 static void
12549 s390_conditional_register_usage (void)
12550 {
12551 int i;
12552
12553 if (flag_pic)
12554 {
12555 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12556 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12557 }
12558 if (TARGET_CPU_ZARCH)
12559 {
12560 fixed_regs[BASE_REGNUM] = 0;
12561 call_used_regs[BASE_REGNUM] = 0;
12562 fixed_regs[RETURN_REGNUM] = 0;
12563 call_used_regs[RETURN_REGNUM] = 0;
12564 }
12565 if (TARGET_64BIT)
12566 {
12567 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12568 call_used_regs[i] = call_really_used_regs[i] = 0;
12569 }
12570 else
12571 {
12572 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12573 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12574 }
12575
12576 if (TARGET_SOFT_FLOAT)
12577 {
12578 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12579 call_used_regs[i] = fixed_regs[i] = 1;
12580 }
12581
12582 /* Disable v16 - v31 for non-vector target. */
12583 if (!TARGET_VX)
12584 {
12585 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12586 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12587 }
12588 }
12589
12590 /* Corresponding function to eh_return expander. */
12591
12592 static GTY(()) rtx s390_tpf_eh_return_symbol;
12593 void
12594 s390_emit_tpf_eh_return (rtx target)
12595 {
12596 rtx_insn *insn;
12597 rtx reg, orig_ra;
12598
12599 if (!s390_tpf_eh_return_symbol)
12600 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12601
12602 reg = gen_rtx_REG (Pmode, 2);
12603 orig_ra = gen_rtx_REG (Pmode, 3);
12604
12605 emit_move_insn (reg, target);
12606 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12607 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12608 gen_rtx_REG (Pmode, RETURN_REGNUM));
12609 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12610 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12611
12612 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12613 }
12614
12615 /* Rework the prologue/epilogue to avoid saving/restoring
12616 registers unnecessarily. */
12617
12618 static void
12619 s390_optimize_prologue (void)
12620 {
12621 rtx_insn *insn, *new_insn, *next_insn;
12622
12623 /* Do a final recompute of the frame-related data. */
12624 s390_optimize_register_info ();
12625
12626 /* If all special registers are in fact used, there's nothing we
12627 can do, so no point in walking the insn list. */
12628
12629 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12630 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12631 && (TARGET_CPU_ZARCH
12632 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12633 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12634 return;
12635
12636 /* Search for prologue/epilogue insns and replace them. */
12637
12638 for (insn = get_insns (); insn; insn = next_insn)
12639 {
12640 int first, last, off;
12641 rtx set, base, offset;
12642 rtx pat;
12643
12644 next_insn = NEXT_INSN (insn);
12645
12646 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12647 continue;
12648
12649 pat = PATTERN (insn);
12650
12651 /* Remove ldgr/lgdr instructions used for saving and restoring
12652 GPRs if possible. */
12653 if (TARGET_Z10
12654 && GET_CODE (pat) == SET
12655 && GET_MODE (SET_SRC (pat)) == DImode
12656 && REG_P (SET_SRC (pat))
12657 && REG_P (SET_DEST (pat)))
12658 {
12659 int src_regno = REGNO (SET_SRC (pat));
12660 int dest_regno = REGNO (SET_DEST (pat));
12661 int gpr_regno;
12662 int fpr_regno;
12663
12664 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12665 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12666 continue;
12667
12668 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12669 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12670
12671 /* GPR must be call-saved, FPR must be call-clobbered. */
12672 if (!call_really_used_regs[fpr_regno]
12673 || call_really_used_regs[gpr_regno])
12674 continue;
12675
12676 /* It must not happen that what we once saved in an FPR now
12677 needs a stack slot. */
12678 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
12679
12680 if (cfun_gpr_save_slot (gpr_regno) == 0)
12681 {
12682 remove_insn (insn);
12683 continue;
12684 }
12685 }
12686
12687 if (GET_CODE (pat) == PARALLEL
12688 && store_multiple_operation (pat, VOIDmode))
12689 {
12690 set = XVECEXP (pat, 0, 0);
12691 first = REGNO (SET_SRC (set));
12692 last = first + XVECLEN (pat, 0) - 1;
12693 offset = const0_rtx;
12694 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12695 off = INTVAL (offset);
12696
12697 if (GET_CODE (base) != REG || off < 0)
12698 continue;
12699 if (cfun_frame_layout.first_save_gpr != -1
12700 && (cfun_frame_layout.first_save_gpr < first
12701 || cfun_frame_layout.last_save_gpr > last))
12702 continue;
12703 if (REGNO (base) != STACK_POINTER_REGNUM
12704 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12705 continue;
12706 if (first > BASE_REGNUM || last < BASE_REGNUM)
12707 continue;
12708
12709 if (cfun_frame_layout.first_save_gpr != -1)
12710 {
12711 rtx s_pat = save_gprs (base,
12712 off + (cfun_frame_layout.first_save_gpr
12713 - first) * UNITS_PER_LONG,
12714 cfun_frame_layout.first_save_gpr,
12715 cfun_frame_layout.last_save_gpr);
12716 new_insn = emit_insn_before (s_pat, insn);
12717 INSN_ADDRESSES_NEW (new_insn, -1);
12718 }
12719
12720 remove_insn (insn);
12721 continue;
12722 }
12723
12724 if (cfun_frame_layout.first_save_gpr == -1
12725 && GET_CODE (pat) == SET
12726 && GENERAL_REG_P (SET_SRC (pat))
12727 && GET_CODE (SET_DEST (pat)) == MEM)
12728 {
12729 set = pat;
12730 first = REGNO (SET_SRC (set));
12731 offset = const0_rtx;
12732 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12733 off = INTVAL (offset);
12734
12735 if (GET_CODE (base) != REG || off < 0)
12736 continue;
12737 if (REGNO (base) != STACK_POINTER_REGNUM
12738 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12739 continue;
12740
12741 remove_insn (insn);
12742 continue;
12743 }
12744
12745 if (GET_CODE (pat) == PARALLEL
12746 && load_multiple_operation (pat, VOIDmode))
12747 {
12748 set = XVECEXP (pat, 0, 0);
12749 first = REGNO (SET_DEST (set));
12750 last = first + XVECLEN (pat, 0) - 1;
12751 offset = const0_rtx;
12752 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12753 off = INTVAL (offset);
12754
12755 if (GET_CODE (base) != REG || off < 0)
12756 continue;
12757
12758 if (cfun_frame_layout.first_restore_gpr != -1
12759 && (cfun_frame_layout.first_restore_gpr < first
12760 || cfun_frame_layout.last_restore_gpr > last))
12761 continue;
12762 if (REGNO (base) != STACK_POINTER_REGNUM
12763 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12764 continue;
12765 if (first > BASE_REGNUM || last < BASE_REGNUM)
12766 continue;
12767
12768 if (cfun_frame_layout.first_restore_gpr != -1)
12769 {
12770 rtx rpat = restore_gprs (base,
12771 off + (cfun_frame_layout.first_restore_gpr
12772 - first) * UNITS_PER_LONG,
12773 cfun_frame_layout.first_restore_gpr,
12774 cfun_frame_layout.last_restore_gpr);
12775
12776 /* Remove REG_CFA_RESTOREs for registers that we no
12777 longer need to save. */
12778 REG_NOTES (rpat) = REG_NOTES (insn);
12779 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12780 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12781 && ((int) REGNO (XEXP (*ptr, 0))
12782 < cfun_frame_layout.first_restore_gpr))
12783 *ptr = XEXP (*ptr, 1);
12784 else
12785 ptr = &XEXP (*ptr, 1);
12786 new_insn = emit_insn_before (rpat, insn);
12787 RTX_FRAME_RELATED_P (new_insn) = 1;
12788 INSN_ADDRESSES_NEW (new_insn, -1);
12789 }
12790
12791 remove_insn (insn);
12792 continue;
12793 }
12794
12795 if (cfun_frame_layout.first_restore_gpr == -1
12796 && GET_CODE (pat) == SET
12797 && GENERAL_REG_P (SET_DEST (pat))
12798 && GET_CODE (SET_SRC (pat)) == MEM)
12799 {
12800 set = pat;
12801 first = REGNO (SET_DEST (set));
12802 offset = const0_rtx;
12803 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12804 off = INTVAL (offset);
12805
12806 if (GET_CODE (base) != REG || off < 0)
12807 continue;
12808
12809 if (REGNO (base) != STACK_POINTER_REGNUM
12810 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12811 continue;
12812
12813 remove_insn (insn);
12814 continue;
12815 }
12816 }
12817 }
12818
12819 /* On z10 and later the dynamic branch prediction must see the
12820 backward jump within a certain window. If not, it falls back to
12821 the static prediction. This function rearranges the loop backward
12822 branch in a way which makes the static prediction always correct.
12823 The function returns true if it added an instruction. */
12824 static bool
12825 s390_fix_long_loop_prediction (rtx_insn *insn)
12826 {
12827 rtx set = single_set (insn);
12828 rtx code_label, label_ref, new_label;
12829 rtx_insn *uncond_jump;
12830 rtx_insn *cur_insn;
12831 rtx tmp;
12832 int distance;
12833
12834 /* This will exclude branch on count and branch on index patterns
12835 since these are correctly statically predicted. */
12836 if (!set
12837 || SET_DEST (set) != pc_rtx
12838 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12839 return false;
12840
12841 /* Skip conditional returns. */
12842 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12843 && XEXP (SET_SRC (set), 2) == pc_rtx)
12844 return false;
12845
12846 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12847 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12848
12849 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12850
12851 code_label = XEXP (label_ref, 0);
12852
12853 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12854 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12855 || (INSN_ADDRESSES (INSN_UID (insn))
12856 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12857 return false;
12858
12859 for (distance = 0, cur_insn = PREV_INSN (insn);
12860 distance < PREDICT_DISTANCE - 6;
12861 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12862 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12863 return false;
12864
12865 new_label = gen_label_rtx ();
12866 uncond_jump = emit_jump_insn_after (
12867 gen_rtx_SET (pc_rtx,
12868 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12869 insn);
12870 emit_label_after (new_label, uncond_jump);
12871
12872 tmp = XEXP (SET_SRC (set), 1);
12873 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12874 XEXP (SET_SRC (set), 2) = tmp;
12875 INSN_CODE (insn) = -1;
12876
12877 XEXP (label_ref, 0) = new_label;
12878 JUMP_LABEL (insn) = new_label;
12879 JUMP_LABEL (uncond_jump) = code_label;
12880
12881 return true;
12882 }
12883
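/* Illustrative before/after of the transformation above (pseudo-code):

     before:   ...loop body...
               if (cond) goto loop_head;     // long backward branch

     after:    ...loop body...
               if (!cond) goto new_label;    // short forward branch
               goto loop_head;               // unconditional backward jump
             new_label:

   The unconditional backward jump is trivially predicted, and the new
   conditional branch is a short forward one, so the static prediction
   matches the common looping case.  */
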
12884 /* Returns 1 if INSN reads the value of REG for purposes not related
12885 to addressing of memory, and 0 otherwise. */
12886 static int
12887 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12888 {
12889 return reg_referenced_p (reg, PATTERN (insn))
12890 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12891 }
12892
12893 /* Starting from INSN find_cond_jump looks downwards in the insn
12894 stream for a single jump insn which is the last user of the
12895 condition code set in INSN. */
12896 static rtx_insn *
12897 find_cond_jump (rtx_insn *insn)
12898 {
12899 for (; insn; insn = NEXT_INSN (insn))
12900 {
12901 rtx ite, cc;
12902
12903 if (LABEL_P (insn))
12904 break;
12905
12906 if (!JUMP_P (insn))
12907 {
12908 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12909 break;
12910 continue;
12911 }
12912
12913 /* This will be triggered by a return. */
12914 if (GET_CODE (PATTERN (insn)) != SET)
12915 break;
12916
12917 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12918 ite = SET_SRC (PATTERN (insn));
12919
12920 if (GET_CODE (ite) != IF_THEN_ELSE)
12921 break;
12922
12923 cc = XEXP (XEXP (ite, 0), 0);
12924 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12925 break;
12926
12927 if (find_reg_note (insn, REG_DEAD, cc))
12928 return insn;
12929 break;
12930 }
12931
12932 return NULL;
12933 }
12934
12935 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12936 the semantics does not change. If NULL_RTX is passed as COND the
12937 function tries to find the conditional jump starting with INSN. */
12938 static void
12939 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12940 {
12941 rtx tmp = *op0;
12942
12943 if (cond == NULL_RTX)
12944 {
12945 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12946 rtx set = jump ? single_set (jump) : NULL_RTX;
12947
12948 if (set == NULL_RTX)
12949 return;
12950
12951 cond = XEXP (SET_SRC (set), 0);
12952 }
12953
12954 *op0 = *op1;
12955 *op1 = tmp;
12956 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12957 }
12958
12959 /* On z10, instructions of the compare-and-branch family have the
12960 property of accessing the register occurring as second operand with
12961 its bits complemented. If such a compare is grouped with a second
12962 instruction that accesses the same register non-complemented, and
12963 if that register's value is delivered via a bypass, then the
12964 pipeline recycles, thereby causing significant performance decline.
12965 This function locates such situations and exchanges the two
12966 operands of the compare. The function returns true whenever it
12967 added an insn. */
12968 static bool
12969 s390_z10_optimize_cmp (rtx_insn *insn)
12970 {
12971 rtx_insn *prev_insn, *next_insn;
12972 bool insn_added_p = false;
12973 rtx cond, *op0, *op1;
12974
12975 if (GET_CODE (PATTERN (insn)) == PARALLEL)
12976 {
12977 /* Handle compare and branch and branch on count
12978 instructions. */
12979 rtx pattern = single_set (insn);
12980
12981 if (!pattern
12982 || SET_DEST (pattern) != pc_rtx
12983 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
12984 return false;
12985
12986 cond = XEXP (SET_SRC (pattern), 0);
12987 op0 = &XEXP (cond, 0);
12988 op1 = &XEXP (cond, 1);
12989 }
12990 else if (GET_CODE (PATTERN (insn)) == SET)
12991 {
12992 rtx src, dest;
12993
12994 /* Handle normal compare instructions. */
12995 src = SET_SRC (PATTERN (insn));
12996 dest = SET_DEST (PATTERN (insn));
12997
12998 if (!REG_P (dest)
12999 || !CC_REGNO_P (REGNO (dest))
13000 || GET_CODE (src) != COMPARE)
13001 return false;
13002
13003 /* s390_swap_cmp will try to find the conditional
13004 jump when passing NULL_RTX as condition. */
13005 cond = NULL_RTX;
13006 op0 = &XEXP (src, 0);
13007 op1 = &XEXP (src, 1);
13008 }
13009 else
13010 return false;
13011
13012 if (!REG_P (*op0) || !REG_P (*op1))
13013 return false;
13014
13015 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13016 return false;
13017
13018 /* Swap the COMPARE arguments and its mask if there is a
13019 conflicting access in the previous insn. */
13020 prev_insn = prev_active_insn (insn);
13021 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13022 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13023 s390_swap_cmp (cond, op0, op1, insn);
13024
13025 /* Check if there is a conflict with the next insn. If there
13026 was no conflict with the previous insn, then swap the
13027 COMPARE arguments and its mask. If we already swapped
13028 the operands, or if swapping them would cause a conflict
13029 with the previous insn, issue a NOP after the COMPARE in
13030 order to separate the two instructions. */
13031 next_insn = next_active_insn (insn);
13032 if (next_insn != NULL_RTX && INSN_P (next_insn)
13033 && s390_non_addr_reg_read_p (*op1, next_insn))
13034 {
13035 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13036 && s390_non_addr_reg_read_p (*op0, prev_insn))
13037 {
13038 if (REGNO (*op1) == 0)
13039 emit_insn_after (gen_nop1 (), insn);
13040 else
13041 emit_insn_after (gen_nop (), insn);
13042 insn_added_p = true;
13043 }
13044 else
13045 s390_swap_cmp (cond, op0, op1, insn);
13046 }
13047 return insn_added_p;
13048 }
13049
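/* Illustrative effect of the swap above (pseudo-code): a sequence like

     cc = compare (a, b);    // b is the second, bit-complemented operand
     ...                     // neighbouring insn also reads b normally
     if (cc < 0) goto l;

   becomes

     cc = compare (b, a);
     ...
     if (cc > 0) goto l;

   i.e. the operands are exchanged and the condition is reversed via
   swap_condition, leaving the semantics unchanged.  */
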
13050 /* Perform machine-dependent processing. */
13051
13052 static void
13053 s390_reorg (void)
13054 {
13055 bool pool_overflow = false;
13056 int hw_before, hw_after;
13057
13058 /* Make sure all splits have been performed; splits after
13059 machine_dependent_reorg might confuse insn length counts. */
13060 split_all_insns_noflow ();
13061
13062 /* Install the main literal pool and the associated base
13063 register load insns.
13064
13065 In addition, there are two problematic situations we need
13066 to correct:
13067
13068 - the literal pool might be > 4096 bytes in size, so that
13069 some of its elements cannot be directly accessed
13070
13071 - a branch target might be > 64K away from the branch, so that
13072 it is not possible to use a PC-relative instruction.
13073
13074 To fix those, we split the single literal pool into multiple
13075 pool chunks, reloading the pool base register at various
13076 points throughout the function to ensure it always points to
13077 the pool chunk the following code expects, and / or replace
13078 PC-relative branches by absolute branches.
13079
13080 However, the two problems are interdependent: splitting the
13081 literal pool can move a branch further away from its target,
13082 causing the 64K limit to overflow, and on the other hand,
13083 replacing a PC-relative branch by an absolute branch means
13084 we need to put the branch target address into the literal
13085 pool, possibly causing it to overflow.
13086
13087 So, we loop trying to fix up both problems until we manage
13088 to satisfy both conditions at the same time. Note that the
13089 loop is guaranteed to terminate as every pass of the loop
13090 strictly decreases the total number of PC-relative branches
13091 in the function. (This is not completely true as there
13092 might be branch-over-pool insns introduced by chunkify_start.
13093 Those never need to be split however.) */
13094
13095 for (;;)
13096 {
13097 struct constant_pool *pool = NULL;
13098
13099 /* Collect the literal pool. */
13100 if (!pool_overflow)
13101 {
13102 pool = s390_mainpool_start ();
13103 if (!pool)
13104 pool_overflow = true;
13105 }
13106
13107 /* If literal pool overflowed, start to chunkify it. */
13108 if (pool_overflow)
13109 pool = s390_chunkify_start ();
13110
13111 /* Split out-of-range branches. If this has created new
13112 literal pool entries, cancel current chunk list and
13113 recompute it. zSeries machines have large branch
13114 instructions, so we never need to split a branch. */
13115 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13116 {
13117 if (pool_overflow)
13118 s390_chunkify_cancel (pool);
13119 else
13120 s390_mainpool_cancel (pool);
13121
13122 continue;
13123 }
13124
13125 /* If we made it up to here, both conditions are satisfied.
13126 Finish up literal pool related changes. */
13127 if (pool_overflow)
13128 s390_chunkify_finish (pool);
13129 else
13130 s390_mainpool_finish (pool);
13131
13132 /* We're done splitting branches. */
13133 cfun->machine->split_branches_pending_p = false;
13134 break;
13135 }
13136
13137 /* Generate out-of-pool execute target insns. */
13138 if (TARGET_CPU_ZARCH)
13139 {
13140 rtx_insn *insn, *target;
13141 rtx label;
13142
13143 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13144 {
13145 label = s390_execute_label (insn);
13146 if (!label)
13147 continue;
13148
13149 gcc_assert (label != const0_rtx);
13150
13151 target = emit_label (XEXP (label, 0));
13152 INSN_ADDRESSES_NEW (target, -1);
13153
13154 target = emit_insn (s390_execute_target (insn));
13155 INSN_ADDRESSES_NEW (target, -1);
13156 }
13157 }
13158
13159 /* Try to optimize prologue and epilogue further. */
13160 s390_optimize_prologue ();
13161
13162 /* Walk over the insns and do some >=z10 specific changes. */
13163 if (s390_tune >= PROCESSOR_2097_Z10)
13164 {
13165 rtx_insn *insn;
13166 bool insn_added_p = false;
13167
13168 /* The insn lengths and addresses have to be up to date for the
13169 following manipulations. */
13170 shorten_branches (get_insns ());
13171
13172 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13173 {
13174 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13175 continue;
13176
13177 if (JUMP_P (insn))
13178 insn_added_p |= s390_fix_long_loop_prediction (insn);
13179
13180 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13181 || GET_CODE (PATTERN (insn)) == SET)
13182 && s390_tune == PROCESSOR_2097_Z10)
13183 insn_added_p |= s390_z10_optimize_cmp (insn);
13184 }
13185
13186 /* Adjust branches if we added new instructions. */
13187 if (insn_added_p)
13188 shorten_branches (get_insns ());
13189 }
13190
13191 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13192 if (hw_after > 0)
13193 {
13194 rtx_insn *insn;
13195
13196 /* Insert NOPs for hotpatching. */
13197 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13198 /* Emit NOPs
13199 1. inside the area covered by debug information to allow setting
13200 breakpoints at the NOPs,
13201 2. before any insn which results in an asm instruction,
13202 3. before in-function labels to avoid jumping to the NOPs, for
13203 example as part of a loop,
13204 4. before any barrier in case the function is completely empty
13205 (__builtin_unreachable ()) and has neither internal labels nor
13206 active insns.
13207 */
13208 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13209 break;
13210 /* Output a series of NOPs before the first active insn. */
13211 while (insn && hw_after > 0)
13212 {
13213 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13214 {
13215 emit_insn_before (gen_nop_6_byte (), insn);
13216 hw_after -= 3;
13217 }
13218 else if (hw_after >= 2)
13219 {
13220 emit_insn_before (gen_nop_4_byte (), insn);
13221 hw_after -= 2;
13222 }
13223 else
13224 {
13225 emit_insn_before (gen_nop_2_byte (), insn);
13226 hw_after -= 1;
13227 }
13228 }
13229 }
13230 }
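/* Illustrative example, not part of the original code: with
   -mhotpatch=0,5 on a zarch target the loop above greedily covers the
   five requested halfwords with one 6-byte NOP (3 halfwords) followed
   by one 4-byte NOP (2 halfwords); without zarch the same request is
   covered by two 4-byte NOPs and one 2-byte NOP.  */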
13231
13232 /* Return true if INSN is a fp load insn writing register REGNO. */
13233 static inline bool
13234 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13235 {
13236 rtx set;
13237 enum attr_type flag = s390_safe_attr_type (insn);
13238
13239 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13240 return false;
13241
13242 set = single_set (insn);
13243
13244 if (set == NULL_RTX)
13245 return false;
13246
13247 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13248 return false;
13249
13250 if (REGNO (SET_DEST (set)) != regno)
13251 return false;
13252
13253 return true;
13254 }
13255
13256 /* This value describes the distance to be avoided between an
13257 arithmetic fp instruction and an fp load writing the same register.
13258 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13259 fine, but the exact value has to be avoided. Otherwise the FP
13260 pipeline will throw an exception causing a major penalty. */
13261 #define Z10_EARLYLOAD_DISTANCE 7
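/* Illustrative scenario (editor's note): if an FP multiply writing
   %f4 was issued such that the next insn to issue would land exactly
   Z10_EARLYLOAD_DISTANCE insns behind it, an FP load of %f4 must not
   be that next insn; the reorder code below therefore moves such a
   load out of the way so that it is not picked next.  */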
13262
13263 /* Rearrange the ready list in order to avoid the situation described
13264 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13265 moved to the very end of the ready list. */
13266 static void
13267 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13268 {
13269 unsigned int regno;
13270 int nready = *nready_p;
13271 rtx_insn *tmp;
13272 int i;
13273 rtx_insn *insn;
13274 rtx set;
13275 enum attr_type flag;
13276 int distance;
13277
13278 /* Skip DISTANCE - 1 active insns. */
13279 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13280 distance > 0 && insn != NULL_RTX;
13281 distance--, insn = prev_active_insn (insn))
13282 if (CALL_P (insn) || JUMP_P (insn))
13283 return;
13284
13285 if (insn == NULL_RTX)
13286 return;
13287
13288 set = single_set (insn);
13289
13290 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13291 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13292 return;
13293
13294 flag = s390_safe_attr_type (insn);
13295
13296 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13297 return;
13298
13299 regno = REGNO (SET_DEST (set));
13300 i = nready - 1;
13301
13302 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13303 i--;
13304
13305 if (!i)
13306 return;
13307
13308 tmp = ready[i];
13309 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13310 ready[0] = tmp;
13311 }
13312
13313
13314 /* The s390_sched_state variable tracks the state of the current or
13315 the last instruction group.
13316
13317 0,1,2 number of instructions scheduled in the current group
13318 3 the last group is complete - normal insns
13319 4 the last group was a cracked/expanded insn */
13320
13321 static int s390_sched_state;
13322
13323 #define S390_OOO_SCHED_STATE_NORMAL 3
13324 #define S390_OOO_SCHED_STATE_CRACKED 4
13325
13326 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13327 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13328 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13329 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
13330
13331 static unsigned int
13332 s390_get_sched_attrmask (rtx_insn *insn)
13333 {
13334 unsigned int mask = 0;
13335
13336 if (get_attr_ooo_cracked (insn))
13337 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13338 if (get_attr_ooo_expanded (insn))
13339 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13340 if (get_attr_ooo_endgroup (insn))
13341 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13342 if (get_attr_ooo_groupalone (insn))
13343 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13344 return mask;
13345 }
13346
13347 /* Return the scheduling score for INSN. The higher the score the
13348 better. The score is calculated from the OOO scheduling attributes
13349 of INSN and the scheduling state s390_sched_state. */
13350 static int
13351 s390_sched_score (rtx_insn *insn)
13352 {
13353 unsigned int mask = s390_get_sched_attrmask (insn);
13354 int score = 0;
13355
13356 switch (s390_sched_state)
13357 {
13358 case 0:
13359 /* Try to put insns into the first slot which would otherwise
13360 break a group. */
13361 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13362 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13363 score += 5;
13364 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13365 score += 10;
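/* Fall through.  */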
13366 case 1:
13367 /* Prefer not cracked insns while trying to put together a
13368 group. */
13369 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13370 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13371 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13372 score += 10;
13373 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13374 score += 5;
13375 break;
13376 case 2:
13377 /* Prefer not cracked insns while trying to put together a
13378 group. */
13379 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13380 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13381 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13382 score += 10;
13383 /* Prefer endgroup insns in the last slot. */
13384 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13385 score += 10;
13386 break;
13387 case S390_OOO_SCHED_STATE_NORMAL:
13388 /* Prefer not cracked insns if the last was not cracked. */
13389 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13390 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13391 score += 5;
13392 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13393 score += 10;
13394 break;
13395 case S390_OOO_SCHED_STATE_CRACKED:
13396 /* Try to keep cracked insns together to prevent them from
13397 interrupting groups. */
13398 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13399 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13400 score += 5;
13401 break;
13402 }
13403 return score;
13404 }
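/* Worked example (editor's illustration): with two insns already in
   the current group (s390_sched_state == 2) an insn carrying only the
   ooo_endgroup attribute scores 10 + 10 = 20, a plain insn scores 10,
   and a cracked or group-alone insn scores 0, so the group is
   preferably closed by an end-group insn.  */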
13405
13406 /* This function is called via hook TARGET_SCHED_REORDER before
13407 issuing one insn from list READY which contains *NREADYP entries.
13408 For target z10 it reorders load instructions to avoid early load
13409 conflicts in the floating point pipeline. */
13410 static int
13411 s390_sched_reorder (FILE *file, int verbose,
13412 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13413 {
13414 if (s390_tune == PROCESSOR_2097_Z10
13415 && reload_completed
13416 && *nreadyp > 1)
13417 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13418
13419 if (s390_tune >= PROCESSOR_2827_ZEC12
13420 && reload_completed
13421 && *nreadyp > 1)
13422 {
13423 int i;
13424 int last_index = *nreadyp - 1;
13425 int max_index = -1;
13426 int max_score = -1;
13427 rtx_insn *tmp;
13428
13429 /* Just move the insn with the highest score to the top (the
13430 end) of the list. A full sort is not needed since a conflict
13431 in the hazard recognition cannot happen. So the top insn in
13432 the ready list will always be taken. */
13433 for (i = last_index; i >= 0; i--)
13434 {
13435 int score;
13436
13437 if (recog_memoized (ready[i]) < 0)
13438 continue;
13439
13440 score = s390_sched_score (ready[i]);
13441 if (score > max_score)
13442 {
13443 max_score = score;
13444 max_index = i;
13445 }
13446 }
13447
13448 if (max_index != -1)
13449 {
13450 if (max_index != last_index)
13451 {
13452 tmp = ready[max_index];
13453 ready[max_index] = ready[last_index];
13454 ready[last_index] = tmp;
13455
13456 if (verbose > 5)
13457 fprintf (file,
13458 "move insn %d to the top of list\n",
13459 INSN_UID (ready[last_index]));
13460 }
13461 else if (verbose > 5)
13462 fprintf (file,
13463 "best insn %d already on top\n",
13464 INSN_UID (ready[last_index]));
13465 }
13466
13467 if (verbose > 5)
13468 {
13469 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13470 s390_sched_state);
13471
13472 for (i = last_index; i >= 0; i--)
13473 {
13474 if (recog_memoized (ready[i]) < 0)
13475 continue;
13476 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13477 s390_sched_score (ready[i]));
13478 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13479 PRINT_OOO_ATTR (ooo_cracked);
13480 PRINT_OOO_ATTR (ooo_expanded);
13481 PRINT_OOO_ATTR (ooo_endgroup);
13482 PRINT_OOO_ATTR (ooo_groupalone);
13483 #undef PRINT_OOO_ATTR
13484 fprintf (file, "\n");
13485 }
13486 }
13487 }
13488
13489 return s390_issue_rate ();
13490 }
13491
13492
13493 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13494 the scheduler has issued INSN. It stores the last issued insn into
13495 last_scheduled_insn in order to make it available for
13496 s390_sched_reorder. */
13497 static int
13498 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13499 {
13500 last_scheduled_insn = insn;
13501
13502 if (s390_tune >= PROCESSOR_2827_ZEC12
13503 && reload_completed
13504 && recog_memoized (insn) >= 0)
13505 {
13506 unsigned int mask = s390_get_sched_attrmask (insn);
13507
13508 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13509 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13510 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13511 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13512 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13513 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13514 else
13515 {
13516 /* Only normal insns are left (mask == 0). */
13517 switch (s390_sched_state)
13518 {
13519 case 0:
13520 case 1:
13521 case 2:
13522 case S390_OOO_SCHED_STATE_NORMAL:
13523 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13524 s390_sched_state = 1;
13525 else
13526 s390_sched_state++;
13527
13528 break;
13529 case S390_OOO_SCHED_STATE_CRACKED:
13530 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13531 break;
13532 }
13533 }
13534 if (verbose > 5)
13535 {
13536 fprintf (file, "insn %d: ", INSN_UID (insn));
13537 #define PRINT_OOO_ATTR(ATTR) \
13538 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13539 PRINT_OOO_ATTR (ooo_cracked);
13540 PRINT_OOO_ATTR (ooo_expanded);
13541 PRINT_OOO_ATTR (ooo_endgroup);
13542 PRINT_OOO_ATTR (ooo_groupalone);
13543 #undef PRINT_OOO_ATTR
13544 fprintf (file, "\n");
13545 fprintf (file, "sched state: %d\n", s390_sched_state);
13546 }
13547 }
13548
13549 if (GET_CODE (PATTERN (insn)) != USE
13550 && GET_CODE (PATTERN (insn)) != CLOBBER)
13551 return more - 1;
13552 else
13553 return more;
13554 }
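/* Example of the state machine above (editor's note): issuing three
   plain insns from state 0 advances the state 0 -> 1 -> 2 -> 3
   (S390_OOO_SCHED_STATE_NORMAL, group complete); the next plain insn
   starts a new group and the state becomes 1 again.  A cracked or
   expanded insn always sets S390_OOO_SCHED_STATE_CRACKED, and the
   plain insn following it returns the state to
   S390_OOO_SCHED_STATE_NORMAL.  */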
13555
13556 static void
13557 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13558 int verbose ATTRIBUTE_UNUSED,
13559 int max_ready ATTRIBUTE_UNUSED)
13560 {
13561 last_scheduled_insn = NULL;
13562 s390_sched_state = 0;
13563 }
13564
13565 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13566 a new unroll count for struct loop *loop when tuning for CPUs with
13567 a built-in stride prefetcher.
13568 The loop is analyzed for memory accesses by iterating over all rtxs
13569 of the loop body. Depending on the loop_depth and the number of
13570 memory accesses a new count <= nunroll is returned to improve the
13571 behaviour of the hardware prefetch unit. */
13572 static unsigned
13573 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13574 {
13575 basic_block *bbs;
13576 rtx_insn *insn;
13577 unsigned i;
13578 unsigned mem_count = 0;
13579
13580 if (s390_tune < PROCESSOR_2097_Z10)
13581 return nunroll;
13582
13583 /* Count the number of memory references within the loop body. */
13584 bbs = get_loop_body (loop);
13585 subrtx_iterator::array_type array;
13586 for (i = 0; i < loop->num_nodes; i++)
13587 FOR_BB_INSNS (bbs[i], insn)
13588 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13589 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13590 if (MEM_P (*iter))
13591 mem_count += 1;
13592 free (bbs);
13593
13594 /* Prevent division by zero; nunroll does not need adjusting in this case. */
13595 if (mem_count == 0)
13596 return nunroll;
13597
13598 switch (loop_depth (loop))
13599 {
13600 case 1:
13601 return MIN (nunroll, 28 / mem_count);
13602 case 2:
13603 return MIN (nunroll, 22 / mem_count);
13604 default:
13605 return MIN (nunroll, 16 / mem_count);
13606 }
13607 }
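/* Worked example (editor's illustration): a depth-1 loop containing
   7 memory references with a requested unroll factor of 8 yields
   MIN (8, 28 / 7) = 4, i.e. the factor is reduced so that the
   unrolled body stays at roughly 28 memory accesses.  */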
13608
13609 /* Restore the current options. This is a hook function and also called
13610 internally. */
13611
13612 static void
13613 s390_function_specific_restore (struct gcc_options *opts,
13614 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
13615 {
13616 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
13617 }
13618
13619 static void
13620 s390_option_override_internal (bool main_args_p,
13621 struct gcc_options *opts,
13622 const struct gcc_options *opts_set)
13623 {
13624 const char *prefix;
13625 const char *suffix;
13626
13627 /* Set up prefix/suffix so the error messages refer to either the command
13628 line argument, or the attribute(target). */
13629 if (main_args_p)
13630 {
13631 prefix = "-m";
13632 suffix = "";
13633 }
13634 else
13635 {
13636 prefix = "option(\"";
13637 suffix = "\")";
13638 }
13639
13640
13641 /* Architecture mode defaults according to ABI. */
13642 if (!(opts_set->x_target_flags & MASK_ZARCH))
13643 {
13644 if (TARGET_64BIT)
13645 opts->x_target_flags |= MASK_ZARCH;
13646 else
13647 opts->x_target_flags &= ~MASK_ZARCH;
13648 }
13649
13650 /* Set the march default in case it hasn't been specified on cmdline. */
13651 if (!opts_set->x_s390_arch)
13652 opts->x_s390_arch = PROCESSOR_2064_Z900;
13653 else if (opts->x_s390_arch == PROCESSOR_9672_G5
13654 || opts->x_s390_arch == PROCESSOR_9672_G6)
13655 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
13656 "in future releases; use at least %sarch=z900%s",
13657 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
13658 suffix, prefix, suffix);
13659
13660 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
13661
13662 /* Determine processor to tune for. */
13663 if (!opts_set->x_s390_tune)
13664 opts->x_s390_tune = opts->x_s390_arch;
13665 else if (opts->x_s390_tune == PROCESSOR_9672_G5
13666 || opts->x_s390_tune == PROCESSOR_9672_G6)
13667 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
13668 "in future releases; use at least %stune=z900%s",
13669 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
13670 suffix, prefix, suffix);
13671
13672 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
13673
13674 /* Sanity checks. */
13675 if (opts->x_s390_arch == PROCESSOR_NATIVE
13676 || opts->x_s390_tune == PROCESSOR_NATIVE)
13677 gcc_unreachable ();
13678 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
13679 error ("z/Architecture mode not supported on %s",
13680 processor_table[(int)opts->x_s390_arch].name);
13681 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
13682 error ("64-bit ABI not supported in ESA/390 mode");
13683
13684 /* Enable hardware transactions if available and not explicitly
13685 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
13686 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
13687 {
13688 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
13689 opts->x_target_flags |= MASK_OPT_HTM;
13690 else
13691 opts->x_target_flags &= ~MASK_OPT_HTM;
13692 }
13693
13694 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
13695 {
13696 if (TARGET_OPT_VX_P (opts->x_target_flags))
13697 {
13698 if (!TARGET_CPU_VX_P (opts))
13699 error ("hardware vector support not available on %s",
13700 processor_table[(int)opts->x_s390_arch].name);
13701 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13702 error ("hardware vector support not available with -msoft-float");
13703 }
13704 }
13705 else
13706 {
13707 if (TARGET_CPU_VX_P (opts))
13708 /* Enable vector support if available and not explicitly disabled
13709 by user. E.g. with -m31 -march=z13 -mzarch */
13710 opts->x_target_flags |= MASK_OPT_VX;
13711 else
13712 opts->x_target_flags &= ~MASK_OPT_VX;
13713 }
13714
13715 /* Use hardware DFP if available and not explicitly disabled by
13716 user. E.g. with -m31 -march=z10 -mzarch */
13717 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
13718 {
13719 if (TARGET_DFP_P (opts))
13720 opts->x_target_flags |= MASK_HARD_DFP;
13721 else
13722 opts->x_target_flags &= ~MASK_HARD_DFP;
13723 }
13724
13725 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
13726 {
13727 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
13728 {
13729 if (!TARGET_CPU_DFP_P (opts))
13730 error ("hardware decimal floating point instructions"
13731 " not available on %s",
13732 processor_table[(int)opts->x_s390_arch].name);
13733 if (!TARGET_ZARCH_P (opts->x_target_flags))
13734 error ("hardware decimal floating point instructions"
13735 " not available in ESA/390 mode");
13736 }
13737 else
13738 opts->x_target_flags &= ~MASK_HARD_DFP;
13739 }
13740
13741 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
13742 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13743 {
13744 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
13745 && TARGET_HARD_DFP_P (opts->x_target_flags))
13746 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13747
13748 opts->x_target_flags &= ~MASK_HARD_DFP;
13749 }
13750
13751 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
13752 && TARGET_PACKED_STACK_P (opts->x_target_flags)
13753 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
13754 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13755 "in combination");
13756
13757 if (opts->x_s390_stack_size)
13758 {
13759 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
13760 error ("stack size must be greater than the stack guard value");
13761 else if (opts->x_s390_stack_size > 1 << 16)
13762 error ("stack size must not be greater than 64k");
13763 }
13764 else if (opts->x_s390_stack_guard)
13765 error ("-mstack-guard implies use of -mstack-size");
13766
13767 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13768 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
13769 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
13770 #endif
13771
13772 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
13773 {
13774 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13775 opts->x_param_values,
13776 opts_set->x_param_values);
13777 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13778 opts->x_param_values,
13779 opts_set->x_param_values);
13780 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13781 opts->x_param_values,
13782 opts_set->x_param_values);
13783 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13784 opts->x_param_values,
13785 opts_set->x_param_values);
13786 }
13787
13788 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13789 opts->x_param_values,
13790 opts_set->x_param_values);
13791 /* Values for loop prefetching. */
13792 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13793 opts->x_param_values,
13794 opts_set->x_param_values);
13795 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13796 opts->x_param_values,
13797 opts_set->x_param_values);
13798 /* s390 has more than 2 cache levels and the sizes are much larger. Since
13799 we are always running virtualized, assume that we only get a small
13800 part of the caches above L1. */
13801 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13802 opts->x_param_values,
13803 opts_set->x_param_values);
13804 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13805 opts->x_param_values,
13806 opts_set->x_param_values);
13807 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13808 opts->x_param_values,
13809 opts_set->x_param_values);
13810
13811 /* Use the alternative scheduling-pressure algorithm by default. */
13812 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13813 opts->x_param_values,
13814 opts_set->x_param_values);
13815
13816 /* Call target specific restore function to do post-init work. At the moment,
13817 this just sets opts->x_s390_cost_pointer. */
13818 s390_function_specific_restore (opts, NULL);
13819 }
13820
13821 static void
13822 s390_option_override (void)
13823 {
13824 unsigned int i;
13825 cl_deferred_option *opt;
13826 vec<cl_deferred_option> *v =
13827 (vec<cl_deferred_option> *) s390_deferred_options;
13828
13829 if (v)
13830 FOR_EACH_VEC_ELT (*v, i, opt)
13831 {
13832 switch (opt->opt_index)
13833 {
13834 case OPT_mhotpatch_:
13835 {
13836 int val1;
13837 int val2;
13838 char s[256];
13839 char *t;
13840
13841 strncpy (s, opt->arg, 256);
13842 s[255] = 0;
13843 t = strchr (s, ',');
13844 if (t != NULL)
13845 {
13846 *t = 0;
13847 t++;
13848 val1 = integral_argument (s);
13849 val2 = integral_argument (t);
13850 }
13851 else
13852 {
13853 val1 = -1;
13854 val2 = -1;
13855 }
13856 if (val1 == -1 || val2 == -1)
13857 {
13858 /* Argument is not a plain number. */
13859 error ("arguments to %qs should be non-negative integers",
13860 "-mhotpatch=n,m");
13861 break;
13862 }
13863 else if (val1 > s390_hotpatch_hw_max
13864 || val2 > s390_hotpatch_hw_max)
13865 {
13866 error ("argument to %qs is too large (max. %d)",
13867 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13868 break;
13869 }
13870 s390_hotpatch_hw_before_label = val1;
13871 s390_hotpatch_hw_after_label = val2;
13872 break;
13873 }
13874 default:
13875 gcc_unreachable ();
13876 }
13877 }
13878
13879 /* Set up function hooks. */
13880 init_machine_status = s390_init_machine_status;
13881
13882 s390_option_override_internal (true, &global_options, &global_options_set);
13883
13884 /* Save the initial options in case the user does function specific
13885 options. */
13886 target_option_default_node = build_target_option_node (&global_options);
13887 target_option_current_node = target_option_default_node;
13888
13889 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13890 requires the arch flags to be evaluated already. Since prefetching
13891 is beneficial on s390, we enable it if available. */
13892 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13893 flag_prefetch_loop_arrays = 1;
13894
13895 if (TARGET_TPF)
13896 {
13897 /* Don't emit DWARF3/4 unless specifically selected. The TPF
13898 debuggers do not yet support DWARF 3/4. */
13899 if (!global_options_set.x_dwarf_strict)
13900 dwarf_strict = 1;
13901 if (!global_options_set.x_dwarf_version)
13902 dwarf_version = 2;
13903 }
13904
13905 /* Register a target-specific optimization-and-lowering pass
13906 to run immediately before prologue and epilogue generation.
13907
13908 Registering the pass must be done at start up. It's
13909 convenient to do it here. */
13910 opt_pass *new_pass = new pass_s390_early_mach (g);
13911 struct register_pass_info insert_pass_s390_early_mach =
13912 {
13913 new_pass, /* pass */
13914 "pro_and_epilogue", /* reference_pass_name */
13915 1, /* ref_pass_instance_number */
13916 PASS_POS_INSERT_BEFORE /* po_op */
13917 };
13918 register_pass (&insert_pass_s390_early_mach);
13919 }
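/* Example (editor's illustration): -mhotpatch=1,2 is parsed above into
   s390_hotpatch_hw_before_label == 1 and s390_hotpatch_hw_after_label
   == 2, i.e. one halfword of NOPs before the function label and two
   halfwords after it; values larger than s390_hotpatch_hw_max are
   rejected with an error.  */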
13920
13921 #if S390_USE_TARGET_ATTRIBUTE
13922 /* Inner function to process the attribute((target(...))), take an argument and
13923 set the current options from the argument. If we have a list, recursively go
13924 over the list. */
13925
13926 static bool
13927 s390_valid_target_attribute_inner_p (tree args,
13928 struct gcc_options *opts,
13929 struct gcc_options *new_opts_set,
13930 bool force_pragma)
13931 {
13932 char *next_optstr;
13933 bool ret = true;
13934
13935 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
13936 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
13937 static const struct
13938 {
13939 const char *string;
13940 size_t len;
13941 int opt;
13942 int has_arg;
13943 int only_as_pragma;
13944 } attrs[] = {
13945 /* enum options */
13946 S390_ATTRIB ("arch=", OPT_march_, 1),
13947 S390_ATTRIB ("tune=", OPT_mtune_, 1),
13948 /* uinteger options */
13949 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
13950 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
13951 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
13952 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
13953 /* flag options */
13954 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
13955 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
13956 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
13957 S390_ATTRIB ("htm", OPT_mhtm, 0),
13958 S390_ATTRIB ("vx", OPT_mvx, 0),
13959 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
13960 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
13961 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
13962 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
13963 S390_PRAGMA ("zvector", OPT_mzvector, 0),
13964 /* boolean options */
13965 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
13966 };
13967 #undef S390_ATTRIB
13968 #undef S390_PRAGMA
13969
13970 /* If this is a list, recurse to get the options. */
13971 if (TREE_CODE (args) == TREE_LIST)
13972 {
13973 bool ret = true;
13974 int num_pragma_values;
13975 int i;
13976
13977 /* Note: attribs.c:decl_attributes prepends the values from
13978 current_target_pragma to the list of target attributes. To determine
13979 whether we're looking at a value of the attribute or the pragma we
13980 assume that the first [list_length (current_target_pragma)] values in
13981 the list are the values from the pragma. */
13982 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
13983 ? list_length (current_target_pragma) : 0;
13984 for (i = 0; args; args = TREE_CHAIN (args), i++)
13985 {
13986 bool is_pragma;
13987
13988 is_pragma = (force_pragma || i < num_pragma_values);
13989 if (TREE_VALUE (args)
13990 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
13991 opts, new_opts_set,
13992 is_pragma))
13993 {
13994 ret = false;
13995 }
13996 }
13997 return ret;
13998 }
13999
14000 else if (TREE_CODE (args) != STRING_CST)
14001 {
14002 error ("attribute %<target%> argument not a string");
14003 return false;
14004 }
14005
14006 /* Handle multiple arguments separated by commas. */
14007 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14008
14009 while (next_optstr && *next_optstr != '\0')
14010 {
14011 char *p = next_optstr;
14012 char *orig_p = p;
14013 char *comma = strchr (next_optstr, ',');
14014 size_t len, opt_len;
14015 int opt;
14016 bool opt_set_p;
14017 char ch;
14018 unsigned i;
14019 int mask = 0;
14020 enum cl_var_type var_type;
14021 bool found;
14022
14023 if (comma)
14024 {
14025 *comma = '\0';
14026 len = comma - next_optstr;
14027 next_optstr = comma + 1;
14028 }
14029 else
14030 {
14031 len = strlen (p);
14032 next_optstr = NULL;
14033 }
14034
14035 /* Recognize no-xxx. */
14036 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14037 {
14038 opt_set_p = false;
14039 p += 3;
14040 len -= 3;
14041 }
14042 else
14043 opt_set_p = true;
14044
14045 /* Find the option. */
14046 ch = *p;
14047 found = false;
14048 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14049 {
14050 opt_len = attrs[i].len;
14051 if (ch == attrs[i].string[0]
14052 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14053 && memcmp (p, attrs[i].string, opt_len) == 0)
14054 {
14055 opt = attrs[i].opt;
14056 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14057 continue;
14058 mask = cl_options[opt].var_value;
14059 var_type = cl_options[opt].var_type;
14060 found = true;
14061 break;
14062 }
14063 }
14064
14065 /* Process the option. */
14066 if (!found)
14067 {
14068 error ("attribute(target(\"%s\")) is unknown", orig_p);
14069 return false;
14070 }
14071 else if (attrs[i].only_as_pragma && !force_pragma)
14072 {
14073 /* Value is not allowed for the target attribute. */
14074 error ("Value %qs is not supported by attribute %<target%>",
14075 attrs[i].string);
14076 return false;
14077 }
14078
14079 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14080 {
14081 if (var_type == CLVC_BIT_CLEAR)
14082 opt_set_p = !opt_set_p;
14083
14084 if (opt_set_p)
14085 opts->x_target_flags |= mask;
14086 else
14087 opts->x_target_flags &= ~mask;
14088 new_opts_set->x_target_flags |= mask;
14089 }
14090
14091 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14092 {
14093 int value;
14094
14095 if (cl_options[opt].cl_uinteger)
14096 {
14097 /* Unsigned integer argument. Code based on the function
14098 decode_cmdline_option () in opts-common.c. */
14099 value = integral_argument (p + opt_len);
14100 }
14101 else
14102 value = (opt_set_p) ? 1 : 0;
14103
14104 if (value != -1)
14105 {
14106 struct cl_decoded_option decoded;
14107
14108 /* Value range check; only implemented for numeric and boolean
14109 options at the moment. */
14110 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14111 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14112 set_option (opts, new_opts_set, opt, value,
14113 p + opt_len, DK_UNSPECIFIED, input_location,
14114 global_dc);
14115 }
14116 else
14117 {
14118 error ("attribute(target(\"%s\")) is unknown", orig_p);
14119 ret = false;
14120 }
14121 }
14122
14123 else if (cl_options[opt].var_type == CLVC_ENUM)
14124 {
14125 bool arg_ok;
14126 int value;
14127
14128 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14129 if (arg_ok)
14130 set_option (opts, new_opts_set, opt, value,
14131 p + opt_len, DK_UNSPECIFIED, input_location,
14132 global_dc);
14133 else
14134 {
14135 error ("attribute(target(\"%s\")) is unknown", orig_p);
14136 ret = false;
14137 }
14138 }
14139
14140 else
14141 gcc_unreachable ();
14142 }
14143 return ret;
14144 }
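/* Usage example (editor's sketch): the parser above accepts strings
   such as
     __attribute__ ((target ("arch=z13,no-vx,stack-guard=128")))
   where each comma-separated token is looked up in the attrs[] table;
   a "no-" prefix clears flag options and options ending in "=" carry
   an enum or unsigned integer argument.  */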
14145
14146 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14147
14148 tree
14149 s390_valid_target_attribute_tree (tree args,
14150 struct gcc_options *opts,
14151 const struct gcc_options *opts_set,
14152 bool force_pragma)
14153 {
14154 tree t = NULL_TREE;
14155 struct gcc_options new_opts_set;
14156
14157 memset (&new_opts_set, 0, sizeof (new_opts_set));
14158
14159 /* Process each of the options on the chain. */
14160 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14161 force_pragma))
14162 return error_mark_node;
14163
14164 /* If some option was set (even if it has not changed), rerun
14165 s390_option_override_internal, and then save the options away. */
14166 if (new_opts_set.x_target_flags
14167 || new_opts_set.x_s390_arch
14168 || new_opts_set.x_s390_tune
14169 || new_opts_set.x_s390_stack_guard
14170 || new_opts_set.x_s390_stack_size
14171 || new_opts_set.x_s390_branch_cost
14172 || new_opts_set.x_s390_warn_framesize
14173 || new_opts_set.x_s390_warn_dynamicstack_p)
14174 {
14175 const unsigned char *src = (const unsigned char *)opts_set;
14176 unsigned char *dest = (unsigned char *)&new_opts_set;
14177 unsigned int i;
14178
14179 /* Merge the original option flags into the new ones. */
14180 for (i = 0; i < sizeof(*opts_set); i++)
14181 dest[i] |= src[i];
14182
14183 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14184 s390_option_override_internal (false, opts, &new_opts_set);
14185 /* Save the current options unless we are validating options for
14186 #pragma. */
14187 t = build_target_option_node (opts);
14188 }
14189 return t;
14190 }
14191
14192 /* Hook to validate attribute((target("string"))). */
14193
14194 static bool
14195 s390_valid_target_attribute_p (tree fndecl,
14196 tree ARG_UNUSED (name),
14197 tree args,
14198 int ARG_UNUSED (flags))
14199 {
14200 struct gcc_options func_options;
14201 tree new_target, new_optimize;
14202 bool ret = true;
14203
14204 /* attribute((target("default"))) does nothing, beyond
14205 affecting multi-versioning. */
14206 if (TREE_VALUE (args)
14207 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14208 && TREE_CHAIN (args) == NULL_TREE
14209 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14210 return true;
14211
14212 tree old_optimize = build_optimization_node (&global_options);
14213
14214 /* Get the optimization options of the current function. */
14215 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14216
14217 if (!func_optimize)
14218 func_optimize = old_optimize;
14219
14220 /* Init func_options. */
14221 memset (&func_options, 0, sizeof (func_options));
14222 init_options_struct (&func_options, NULL);
14223 lang_hooks.init_options_struct (&func_options);
14224
14225 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14226
14227 /* Initialize func_options to the default before its target options can
14228 be set. */
14229 cl_target_option_restore (&func_options,
14230 TREE_TARGET_OPTION (target_option_default_node));
14231
14232 new_target = s390_valid_target_attribute_tree (args, &func_options,
14233 &global_options_set,
14234 (args ==
14235 current_target_pragma));
14236 new_optimize = build_optimization_node (&func_options);
14237 if (new_target == error_mark_node)
14238 ret = false;
14239 else if (fndecl && new_target)
14240 {
14241 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14242 if (old_optimize != new_optimize)
14243 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14244 }
14245 return ret;
14246 }
14247
14248 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
14249 cache. */
14250
14251 void
14252 s390_activate_target_options (tree new_tree)
14253 {
14254 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14255 if (TREE_TARGET_GLOBALS (new_tree))
14256 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14257 else if (new_tree == target_option_default_node)
14258 restore_target_globals (&default_target_globals);
14259 else
14260 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14261 s390_previous_fndecl = NULL_TREE;
14262 }
14263
14264 /* Establish appropriate back-end context for processing the function
14265 FNDECL. The argument might be NULL to indicate processing at top
14266 level, outside of any function scope. */
14267 static void
14268 s390_set_current_function (tree fndecl)
14269 {
14270 /* Only change the context if the function changes. This hook is called
14271 several times in the course of compiling a function, and we don't want to
14272 slow things down too much or call target_reinit when it isn't safe. */
14273 if (fndecl == s390_previous_fndecl)
14274 return;
14275
14276 tree old_tree;
14277 if (s390_previous_fndecl == NULL_TREE)
14278 old_tree = target_option_current_node;
14279 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14280 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14281 else
14282 old_tree = target_option_default_node;
14283
14284 if (fndecl == NULL_TREE)
14285 {
14286 if (old_tree != target_option_current_node)
14287 s390_activate_target_options (target_option_current_node);
14288 return;
14289 }
14290
14291 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14292 if (new_tree == NULL_TREE)
14293 new_tree = target_option_default_node;
14294
14295 if (old_tree != new_tree)
14296 s390_activate_target_options (new_tree);
14297 s390_previous_fndecl = fndecl;
14298 }
14299 #endif
14300
14301 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14302
14303 static bool
14304 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14305 unsigned int align ATTRIBUTE_UNUSED,
14306 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14307 bool speed_p ATTRIBUTE_UNUSED)
14308 {
14309 return (size == 1 || size == 2
14310 || size == 4 || (TARGET_ZARCH && size == 8));
14311 }
14312
14313 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14314
14315 static void
14316 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14317 {
14318 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14319 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14320 tree call_efpc = build_call_expr (efpc, 0);
14321 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14322
14323 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14324 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14325 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14326 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14327 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14328 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14329
14330 /* Generates the equivalent of feholdexcept (&fenv_var)
14331
14332 fenv_var = __builtin_s390_efpc ();
14333 __builtin_s390_sfpc (fenv_var & mask) */
14334 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14335 tree new_fpc =
14336 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14337 build_int_cst (unsigned_type_node,
14338 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14339 FPC_EXCEPTION_MASK)));
14340 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14341 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14342
14343 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14344
14345 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14346 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14347 build_int_cst (unsigned_type_node,
14348 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14349 *clear = build_call_expr (sfpc, 1, new_fpc);
14350
14351 /* Generates the equivalent of feupdateenv (fenv_var)
14352
14353 old_fpc = __builtin_s390_efpc ();
14354 __builtin_s390_sfpc (fenv_var);
14355 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14356
14357 old_fpc = create_tmp_var_raw (unsigned_type_node);
14358 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14359 old_fpc, call_efpc);
14360
14361 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14362
14363 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14364 build_int_cst (unsigned_type_node,
14365 FPC_FLAGS_MASK));
14366 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14367 build_int_cst (unsigned_type_node,
14368 FPC_FLAGS_SHIFT));
14369 tree atomic_feraiseexcept
14370 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14371 raise_old_except = build_call_expr (atomic_feraiseexcept,
14372 1, raise_old_except);
14373
14374 *update = build2 (COMPOUND_EXPR, void_type_node,
14375 build2 (COMPOUND_EXPR, void_type_node,
14376 store_old_fpc, set_new_fpc),
14377 raise_old_except);
14378
14379 #undef FPC_EXCEPTION_MASK
14380 #undef FPC_FLAGS_MASK
14381 #undef FPC_DXC_MASK
14382 #undef FPC_EXCEPTION_MASK_SHIFT
14383 #undef FPC_FLAGS_SHIFT
14384 #undef FPC_DXC_SHIFT
14385 }
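/* Context (editor's note, beyond what the original comments state):
   this hook supplies the FPC handling used when expanding C11 atomic
   compound assignments on floating point types, e.g.
     _Atomic double d;  d += 1.0;
   *hold saves and masks the FPC before the operation, *clear drops
   flags raised by a retried compare-and-swap iteration, and *update
   restores the saved environment and re-raises the recorded exception
   flags afterwards.  */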
14386
14387 /* Return the vector mode to be used for inner mode MODE when doing
14388 vectorization. */
14389 static machine_mode
14390 s390_preferred_simd_mode (machine_mode mode)
14391 {
14392 if (TARGET_VX)
14393 switch (mode)
14394 {
14395 case DFmode:
14396 return V2DFmode;
14397 case DImode:
14398 return V2DImode;
14399 case SImode:
14400 return V4SImode;
14401 case HImode:
14402 return V8HImode;
14403 case QImode:
14404 return V16QImode;
14405 default:;
14406 }
14407 return word_mode;
14408 }
14409
14410 /* Our hardware does not require vectors to be strictly aligned. */
14411 static bool
14412 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14413 const_tree type ATTRIBUTE_UNUSED,
14414 int misalignment ATTRIBUTE_UNUSED,
14415 bool is_packed ATTRIBUTE_UNUSED)
14416 {
14417 if (TARGET_VX)
14418 return true;
14419
14420 return default_builtin_support_vector_misalignment (mode, type, misalignment,
14421 is_packed);
14422 }
14423
14424 /* The vector ABI requires vector types to be aligned on an 8 byte
14425 boundary (our stack alignment). However, we allow this to be
14426 overridden by the user, although this definitely breaks the ABI. */
14427 static HOST_WIDE_INT
14428 s390_vector_alignment (const_tree type)
14429 {
14430 if (!TARGET_VX_ABI)
14431 return default_vector_alignment (type);
14432
14433 if (TYPE_USER_ALIGN (type))
14434 return TYPE_ALIGN (type);
14435
14436 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
14437 }
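/* Example (editor's illustration): under the vector ABI a 16-byte
   vector type gets MIN (64, 128) == 64 bits, i.e. 8-byte alignment,
   while a 4-byte vector keeps its natural 32-bit alignment; an
   explicit user alignment attribute is honoured even though it may
   break the ABI as noted above.  */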
14438
14439 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14440 /* Implement TARGET_ASM_FILE_START. */
14441 static void
14442 s390_asm_file_start (void)
14443 {
14444 s390_asm_output_machine_for_arch (asm_out_file);
14445 }
14446 #endif
14447
14448 /* Implement TARGET_ASM_FILE_END. */
14449 static void
14450 s390_asm_file_end (void)
14451 {
14452 #ifdef HAVE_AS_GNU_ATTRIBUTE
14453 varpool_node *vnode;
14454 cgraph_node *cnode;
14455
14456 FOR_EACH_VARIABLE (vnode)
14457 if (TREE_PUBLIC (vnode->decl))
14458 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
14459
14460 FOR_EACH_FUNCTION (cnode)
14461 if (TREE_PUBLIC (cnode->decl))
14462 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
14463
14464
14465 if (s390_vector_abi != 0)
14466 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
14467 s390_vector_abi);
14468 #endif
14469 file_end_indicate_exec_stack ();
14470 }
14471
14472 /* Return true if TYPE is a vector bool type. */
14473 static inline bool
14474 s390_vector_bool_type_p (const_tree type)
14475 {
14476 return TYPE_VECTOR_OPAQUE (type);
14477 }
14478
14479 /* Return the diagnostic message string if the binary operation OP is
14480 not permitted on TYPE1 and TYPE2, NULL otherwise. */
14481 static const char*
14482 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
14483 {
14484 bool bool1_p, bool2_p;
14485 bool plusminus_p;
14486 bool muldiv_p;
14487 bool compare_p;
14488 machine_mode mode1, mode2;
14489
14490 if (!TARGET_ZVECTOR)
14491 return NULL;
14492
14493 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
14494 return NULL;
14495
14496 bool1_p = s390_vector_bool_type_p (type1);
14497 bool2_p = s390_vector_bool_type_p (type2);
14498
14499 /* Mixing signed and unsigned types is forbidden for all
14500 operators. */
14501 if (!bool1_p && !bool2_p
14502 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
14503 return N_("types differ in signess");
14504
14505 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
14506 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
14507 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
14508 || op == ROUND_DIV_EXPR);
14509 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
14510 || op == EQ_EXPR || op == NE_EXPR);
14511
14512 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
14513 return N_("binary operator does not support two vector bool operands");
14514
14515 if (bool1_p != bool2_p && (muldiv_p || compare_p))
14516 return N_("binary operator does not support vector bool operand");
14517
14518 mode1 = TYPE_MODE (type1);
14519 mode2 = TYPE_MODE (type2);
14520
14521 if (bool1_p != bool2_p && plusminus_p
14522 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
14523 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
14524 return N_("binary operator does not support mixing vector "
14525 "bool with floating point vector operands");
14526
14527 return NULL;
14528 }
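/* Examples of rejected combinations (editor's illustration): adding
   two "vector bool" operands, comparing a "vector bool" with a
   non-bool vector, or adding a "vector signed int" to a
   "vector unsigned int" (which hits the signedness check above) all
   cause the corresponding diagnostic to be reported.  */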
14529
14530 /* Initialize GCC target structure. */
14531
14532 #undef TARGET_ASM_ALIGNED_HI_OP
14533 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
14534 #undef TARGET_ASM_ALIGNED_DI_OP
14535 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
14536 #undef TARGET_ASM_INTEGER
14537 #define TARGET_ASM_INTEGER s390_assemble_integer
14538
14539 #undef TARGET_ASM_OPEN_PAREN
14540 #define TARGET_ASM_OPEN_PAREN ""
14541
14542 #undef TARGET_ASM_CLOSE_PAREN
14543 #define TARGET_ASM_CLOSE_PAREN ""
14544
14545 #undef TARGET_OPTION_OVERRIDE
14546 #define TARGET_OPTION_OVERRIDE s390_option_override
14547
14548 #undef TARGET_ENCODE_SECTION_INFO
14549 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
14550
14551 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14552 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14553
14554 #ifdef HAVE_AS_TLS
14555 #undef TARGET_HAVE_TLS
14556 #define TARGET_HAVE_TLS true
14557 #endif
14558 #undef TARGET_CANNOT_FORCE_CONST_MEM
14559 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
14560
14561 #undef TARGET_DELEGITIMIZE_ADDRESS
14562 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
14563
14564 #undef TARGET_LEGITIMIZE_ADDRESS
14565 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
14566
14567 #undef TARGET_RETURN_IN_MEMORY
14568 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
14569
14570 #undef TARGET_INIT_BUILTINS
14571 #define TARGET_INIT_BUILTINS s390_init_builtins
14572 #undef TARGET_EXPAND_BUILTIN
14573 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
14574 #undef TARGET_BUILTIN_DECL
14575 #define TARGET_BUILTIN_DECL s390_builtin_decl
14576
14577 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
14578 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
14579
14580 #undef TARGET_ASM_OUTPUT_MI_THUNK
14581 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
14582 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14583 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
14584
14585 #undef TARGET_SCHED_ADJUST_PRIORITY
14586 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
14587 #undef TARGET_SCHED_ISSUE_RATE
14588 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
14589 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14590 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
14591
14592 #undef TARGET_SCHED_VARIABLE_ISSUE
14593 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
14594 #undef TARGET_SCHED_REORDER
14595 #define TARGET_SCHED_REORDER s390_sched_reorder
14596 #undef TARGET_SCHED_INIT
14597 #define TARGET_SCHED_INIT s390_sched_init
14598
14599 #undef TARGET_CANNOT_COPY_INSN_P
14600 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
14601 #undef TARGET_RTX_COSTS
14602 #define TARGET_RTX_COSTS s390_rtx_costs
14603 #undef TARGET_ADDRESS_COST
14604 #define TARGET_ADDRESS_COST s390_address_cost
14605 #undef TARGET_REGISTER_MOVE_COST
14606 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
14607 #undef TARGET_MEMORY_MOVE_COST
14608 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
14609
14610 #undef TARGET_MACHINE_DEPENDENT_REORG
14611 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
14612
14613 #undef TARGET_VALID_POINTER_MODE
14614 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
14615
14616 #undef TARGET_BUILD_BUILTIN_VA_LIST
14617 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
14618 #undef TARGET_EXPAND_BUILTIN_VA_START
14619 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
14620 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
14621 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
14622
14623 #undef TARGET_PROMOTE_FUNCTION_MODE
14624 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
14625 #undef TARGET_PASS_BY_REFERENCE
14626 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
14627
14628 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
14629 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
14630 #undef TARGET_FUNCTION_ARG
14631 #define TARGET_FUNCTION_ARG s390_function_arg
14632 #undef TARGET_FUNCTION_ARG_ADVANCE
14633 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
14634 #undef TARGET_FUNCTION_VALUE
14635 #define TARGET_FUNCTION_VALUE s390_function_value
14636 #undef TARGET_LIBCALL_VALUE
14637 #define TARGET_LIBCALL_VALUE s390_libcall_value
14638 #undef TARGET_STRICT_ARGUMENT_NAMING
14639 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
14640
14641 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
14642 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
14643
14644 #undef TARGET_FIXED_CONDITION_CODE_REGS
14645 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
14646
14647 #undef TARGET_CC_MODES_COMPATIBLE
14648 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
14649
14650 #undef TARGET_INVALID_WITHIN_DOLOOP
14651 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
14652
14653 #ifdef HAVE_AS_TLS
14654 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14655 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
14656 #endif
14657
14658 #undef TARGET_DWARF_FRAME_REG_MODE
14659 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
14660
14661 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
14662 #undef TARGET_MANGLE_TYPE
14663 #define TARGET_MANGLE_TYPE s390_mangle_type
14664 #endif
14665
14666 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14667 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14668
14669 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14670 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
14671
14672 #undef TARGET_PREFERRED_RELOAD_CLASS
14673 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
14674
14675 #undef TARGET_SECONDARY_RELOAD
14676 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
14677
14678 #undef TARGET_LIBGCC_CMP_RETURN_MODE
14679 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
14680
14681 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
14682 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
14683
14684 #undef TARGET_LEGITIMATE_ADDRESS_P
14685 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
14686
14687 #undef TARGET_LEGITIMATE_CONSTANT_P
14688 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
14689
14690 #undef TARGET_LRA_P
14691 #define TARGET_LRA_P s390_lra_p
14692
14693 #undef TARGET_CAN_ELIMINATE
14694 #define TARGET_CAN_ELIMINATE s390_can_eliminate
14695
14696 #undef TARGET_CONDITIONAL_REGISTER_USAGE
14697 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
14698
14699 #undef TARGET_LOOP_UNROLL_ADJUST
14700 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
14701
14702 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
14703 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
14704 #undef TARGET_TRAMPOLINE_INIT
14705 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
14706
14707 #undef TARGET_UNWIND_WORD_MODE
14708 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
14709
14710 #undef TARGET_CANONICALIZE_COMPARISON
14711 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
14712
14713 #undef TARGET_HARD_REGNO_SCRATCH_OK
14714 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
14715
14716 #undef TARGET_ATTRIBUTE_TABLE
14717 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
14718
14719 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14720 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14721
14722 #undef TARGET_SET_UP_BY_PROLOGUE
14723 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
14724
14725 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14726 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14727 s390_use_by_pieces_infrastructure_p
14728
14729 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14730 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
14731
14732 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
14733 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
14734
14735 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14736 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
14737
14738 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
14739 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
14740
14741 #undef TARGET_VECTOR_ALIGNMENT
14742 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
14743
14744 #undef TARGET_INVALID_BINARY_OP
14745 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
14746
14747 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14748 #undef TARGET_ASM_FILE_START
14749 #define TARGET_ASM_FILE_START s390_asm_file_start
14750 #endif
14751
14752 #undef TARGET_ASM_FILE_END
14753 #define TARGET_ASM_FILE_END s390_asm_file_end
14754
14755 #if S390_USE_TARGET_ATTRIBUTE
14756 #undef TARGET_SET_CURRENT_FUNCTION
14757 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
14758
14759 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
14760 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
14761 #endif
14762
14763 #undef TARGET_OPTION_RESTORE
14764 #define TARGET_OPTION_RESTORE s390_function_specific_restore
14765
14766 struct gcc_target targetm = TARGET_INITIALIZER;
14767
14768 #include "gt-s390.h"