1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "target-globals.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "cfghooks.h"
33 #include "cfgloop.h"
34 #include "df.h"
35 #include "memmodel.h"
36 #include "tm_p.h"
37 #include "stringpool.h"
38 #include "expmed.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "cgraph.h"
44 #include "diagnostic-core.h"
45 #include "diagnostic.h"
46 #include "alias.h"
47 #include "fold-const.h"
48 #include "print-tree.h"
49 #include "stor-layout.h"
50 #include "varasm.h"
51 #include "calls.h"
52 #include "conditions.h"
53 #include "output.h"
54 #include "insn-attr.h"
55 #include "flags.h"
56 #include "except.h"
57 #include "dojump.h"
58 #include "explow.h"
59 #include "stmt.h"
60 #include "expr.h"
61 #include "reload.h"
62 #include "cfgrtl.h"
63 #include "cfganal.h"
64 #include "lcm.h"
65 #include "cfgbuild.h"
66 #include "cfgcleanup.h"
67 #include "debug.h"
68 #include "langhooks.h"
69 #include "internal-fn.h"
70 #include "gimple-fold.h"
71 #include "tree-eh.h"
72 #include "gimplify.h"
73 #include "params.h"
74 #include "opts.h"
75 #include "tree-pass.h"
76 #include "context.h"
77 #include "builtins.h"
78 #include "rtl-iter.h"
79 #include "intl.h"
80 #include "tm-constrs.h"
81
82 /* This file should be included last. */
83 #include "target-def.h"
84
85 /* Remember the last target of s390_set_current_function. */
86 static GTY(()) tree s390_previous_fndecl;
87
88 /* Define the specific costs for a given cpu. */
89
90 struct processor_costs
91 {
92 /* multiplication */
93 const int m; /* cost of an M instruction. */
94 const int mghi; /* cost of an MGHI instruction. */
95 const int mh; /* cost of an MH instruction. */
96 const int mhi; /* cost of an MHI instruction. */
97 const int ml; /* cost of an ML instruction. */
98 const int mr; /* cost of an MR instruction. */
99 const int ms; /* cost of an MS instruction. */
100 const int msg; /* cost of an MSG instruction. */
101 const int msgf; /* cost of an MSGF instruction. */
102 const int msgfr; /* cost of an MSGFR instruction. */
103 const int msgr; /* cost of an MSGR instruction. */
104 const int msr; /* cost of an MSR instruction. */
105 const int mult_df; /* cost of multiplication in DFmode. */
106 const int mxbr;
107 /* square root */
108 const int sqxbr; /* cost of square root in TFmode. */
109 const int sqdbr; /* cost of square root in DFmode. */
110 const int sqebr; /* cost of square root in SFmode. */
111 /* multiply and add */
112 const int madbr; /* cost of multiply and add in DFmode. */
113 const int maebr; /* cost of multiply and add in SFmode. */
114 /* division */
115 const int dxbr;
116 const int ddbr;
117 const int debr;
118 const int dlgr;
119 const int dlr;
120 const int dr;
121 const int dsgfr;
122 const int dsgr;
123 };
124
125 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
126
127 static const
128 struct processor_costs z900_cost =
129 {
130 COSTS_N_INSNS (5), /* M */
131 COSTS_N_INSNS (10), /* MGHI */
132 COSTS_N_INSNS (5), /* MH */
133 COSTS_N_INSNS (4), /* MHI */
134 COSTS_N_INSNS (5), /* ML */
135 COSTS_N_INSNS (5), /* MR */
136 COSTS_N_INSNS (4), /* MS */
137 COSTS_N_INSNS (15), /* MSG */
138 COSTS_N_INSNS (7), /* MSGF */
139 COSTS_N_INSNS (7), /* MSGFR */
140 COSTS_N_INSNS (10), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (7), /* multiplication in DFmode */
143 COSTS_N_INSNS (13), /* MXBR */
144 COSTS_N_INSNS (136), /* SQXBR */
145 COSTS_N_INSNS (44), /* SQDBR */
146 COSTS_N_INSNS (35), /* SQEBR */
147 COSTS_N_INSNS (18), /* MADBR */
148 COSTS_N_INSNS (13), /* MAEBR */
149 COSTS_N_INSNS (134), /* DXBR */
150 COSTS_N_INSNS (30), /* DDBR */
151 COSTS_N_INSNS (27), /* DEBR */
152 COSTS_N_INSNS (220), /* DLGR */
153 COSTS_N_INSNS (34), /* DLR */
154 COSTS_N_INSNS (34), /* DR */
155 COSTS_N_INSNS (32), /* DSGFR */
156 COSTS_N_INSNS (32), /* DSGR */
157 };
158
159 static const
160 struct processor_costs z990_cost =
161 {
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (176), /* DLGR */
185 COSTS_N_INSNS (31), /* DLR */
186 COSTS_N_INSNS (31), /* DR */
187 COSTS_N_INSNS (31), /* DSGFR */
188 COSTS_N_INSNS (31), /* DSGR */
189 };
190
191 static const
192 struct processor_costs z9_109_cost =
193 {
194 COSTS_N_INSNS (4), /* M */
195 COSTS_N_INSNS (2), /* MGHI */
196 COSTS_N_INSNS (2), /* MH */
197 COSTS_N_INSNS (2), /* MHI */
198 COSTS_N_INSNS (4), /* ML */
199 COSTS_N_INSNS (4), /* MR */
200 COSTS_N_INSNS (5), /* MS */
201 COSTS_N_INSNS (6), /* MSG */
202 COSTS_N_INSNS (4), /* MSGF */
203 COSTS_N_INSNS (4), /* MSGFR */
204 COSTS_N_INSNS (4), /* MSGR */
205 COSTS_N_INSNS (4), /* MSR */
206 COSTS_N_INSNS (1), /* multiplication in DFmode */
207 COSTS_N_INSNS (28), /* MXBR */
208 COSTS_N_INSNS (130), /* SQXBR */
209 COSTS_N_INSNS (66), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (60), /* DXBR */
214 COSTS_N_INSNS (40), /* DDBR */
215 COSTS_N_INSNS (26), /* DEBR */
216 COSTS_N_INSNS (30), /* DLGR */
217 COSTS_N_INSNS (23), /* DLR */
218 COSTS_N_INSNS (23), /* DR */
219 COSTS_N_INSNS (24), /* DSGFR */
220 COSTS_N_INSNS (24), /* DSGR */
221 };
222
223 static const
224 struct processor_costs z10_cost =
225 {
226 COSTS_N_INSNS (10), /* M */
227 COSTS_N_INSNS (10), /* MGHI */
228 COSTS_N_INSNS (10), /* MH */
229 COSTS_N_INSNS (10), /* MHI */
230 COSTS_N_INSNS (10), /* ML */
231 COSTS_N_INSNS (10), /* MR */
232 COSTS_N_INSNS (10), /* MS */
233 COSTS_N_INSNS (10), /* MSG */
234 COSTS_N_INSNS (10), /* MSGF */
235 COSTS_N_INSNS (10), /* MSGFR */
236 COSTS_N_INSNS (10), /* MSGR */
237 COSTS_N_INSNS (10), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (50), /* MXBR */
240 COSTS_N_INSNS (120), /* SQXBR */
241 COSTS_N_INSNS (52), /* SQDBR */
242 COSTS_N_INSNS (38), /* SQEBR */
243 COSTS_N_INSNS (1), /* MADBR */
244 COSTS_N_INSNS (1), /* MAEBR */
245 COSTS_N_INSNS (111), /* DXBR */
246 COSTS_N_INSNS (39), /* DDBR */
247 COSTS_N_INSNS (32), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR */
249 COSTS_N_INSNS (71), /* DLR */
250 COSTS_N_INSNS (71), /* DR */
251 COSTS_N_INSNS (71), /* DSGFR */
252 COSTS_N_INSNS (71), /* DSGR */
253 };
254
255 static const
256 struct processor_costs z196_cost =
257 {
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (101), /* DXBR B+101 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
285 };
286
287 static const
288 struct processor_costs zEC12_cost =
289 {
290 COSTS_N_INSNS (7), /* M */
291 COSTS_N_INSNS (5), /* MGHI */
292 COSTS_N_INSNS (5), /* MH */
293 COSTS_N_INSNS (5), /* MHI */
294 COSTS_N_INSNS (7), /* ML */
295 COSTS_N_INSNS (7), /* MR */
296 COSTS_N_INSNS (6), /* MS */
297 COSTS_N_INSNS (8), /* MSG */
298 COSTS_N_INSNS (6), /* MSGF */
299 COSTS_N_INSNS (6), /* MSGFR */
300 COSTS_N_INSNS (8), /* MSGR */
301 COSTS_N_INSNS (6), /* MSR */
302 COSTS_N_INSNS (1) , /* multiplication in DFmode */
303 COSTS_N_INSNS (40), /* MXBR B+40 */
304 COSTS_N_INSNS (100), /* SQXBR B+100 */
305 COSTS_N_INSNS (42), /* SQDBR B+42 */
306 COSTS_N_INSNS (28), /* SQEBR B+28 */
307 COSTS_N_INSNS (1), /* MADBR B */
308 COSTS_N_INSNS (1), /* MAEBR B */
309 COSTS_N_INSNS (131), /* DXBR B+131 */
310 COSTS_N_INSNS (29), /* DDBR */
311 COSTS_N_INSNS (22), /* DEBR */
312 COSTS_N_INSNS (160), /* DLGR cracked */
313 COSTS_N_INSNS (160), /* DLR cracked */
314 COSTS_N_INSNS (160), /* DR expanded */
315 COSTS_N_INSNS (160), /* DSGFR cracked */
316 COSTS_N_INSNS (160), /* DSGR cracked */
317 };
318
319 static struct
320 {
321 const char *const name;
322 const enum processor_type processor;
323 const struct processor_costs *cost;
324 }
325 const processor_table[] =
326 {
327 { "g5", PROCESSOR_9672_G5, &z900_cost },
328 { "g6", PROCESSOR_9672_G6, &z900_cost },
329 { "z900", PROCESSOR_2064_Z900, &z900_cost },
330 { "z990", PROCESSOR_2084_Z990, &z990_cost },
331 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
332 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
333 { "z10", PROCESSOR_2097_Z10, &z10_cost },
334 { "z196", PROCESSOR_2817_Z196, &z196_cost },
335 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
336 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
337 { "native", PROCESSOR_NATIVE, NULL }
338 };
339
340 extern int reload_completed;
341
342 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
343 static rtx_insn *last_scheduled_insn;
344 #define MAX_SCHED_UNITS 3
345 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
346
347 /* The maximum score added for an instruction whose unit hasn't been
348 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
349 give instruction mix scheduling more priority over instruction
350 grouping. */
351 #define MAX_SCHED_MIX_SCORE 8
352
353 /* The maximum distance up to which individual scores will be
354 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
355 Increase this with the OOO window size of the machine. */
356 #define MAX_SCHED_MIX_DISTANCE 100
357
358 /* Structure used to hold the components of a S/390 memory
359 address. A legitimate address on S/390 is of the general
360 form
361 base + index + displacement
362 where any of the components is optional.
363
364 base and index are registers of the class ADDR_REGS,
365 displacement is an unsigned 12-bit immediate constant. */
366
367 struct s390_address
368 {
369 rtx base;
370 rtx indx;
371 rtx disp;
372 bool pointer;
373 bool literal_pool;
374 };
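/* Illustrative only, not part of the original source: a sketch of how an
   address RTX maps onto the fields above, assuming the usual
   base + index + displacement form described in the preceding comment:

     (plus (plus (reg %r2) (reg %r3)) (const_int 100))

   decomposes into base = %r2, indx = %r3 and disp = (const_int 100);
   the actual splitting is done later in this file by
   s390_decompose_address.  */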
375
376 /* The following structure is embedded in the machine
377 specific part of struct function. */
378
379 struct GTY (()) s390_frame_layout
380 {
381 /* Offset within stack frame. */
382 HOST_WIDE_INT gprs_offset;
383 HOST_WIDE_INT f0_offset;
384 HOST_WIDE_INT f4_offset;
385 HOST_WIDE_INT f8_offset;
386 HOST_WIDE_INT backchain_offset;
387
388 /* Numbers of the first and last gprs for which slots in the register
389 save area are reserved. */
390 int first_save_gpr_slot;
391 int last_save_gpr_slot;
392
393 /* Location (FP register number) where GPRs (r0-r15) should
394 be saved to.
395 0 - does not need to be saved at all
396 -1 - stack slot */
397 #define SAVE_SLOT_NONE 0
398 #define SAVE_SLOT_STACK -1
399 signed char gpr_save_slots[16];
400
401 /* Numbers of the first and last gprs to be saved and restored. */
402 int first_save_gpr;
403 int first_restore_gpr;
404 int last_save_gpr;
405 int last_restore_gpr;
406
407 /* Bits standing for floating point registers. Set if the
408 respective register has to be saved. Starting with reg 16 (f0)
409 at the rightmost bit.
410 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
411 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
412 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
413 unsigned int fpr_bitmap;
414
415 /* Number of floating point registers f8-f15 which must be saved. */
416 int high_fprs;
417
418 /* Set if return address needs to be saved.
419 This flag is set by s390_return_addr_rtx if it could not use
420 the initial value of r14 and therefore depends on r14 saved
421 to the stack. */
422 bool save_return_addr_p;
423
424 /* Size of stack frame. */
425 HOST_WIDE_INT frame_size;
426 };
427
428 /* Define the structure for the machine field in struct function. */
429
430 struct GTY(()) machine_function
431 {
432 struct s390_frame_layout frame_layout;
433
434 /* Literal pool base register. */
435 rtx base_reg;
436
437 /* True if we may need to perform branch splitting. */
438 bool split_branches_pending_p;
439
440 bool has_landing_pad_p;
441
442 /* True if the current function may contain a tbegin clobbering
443 FPRs. */
444 bool tbegin_p;
445
446 /* For -fsplit-stack support: A stack local which holds a pointer to
447 the stack arguments for a function with a variable number of
448 arguments. This is set at the start of the function and is used
449 to initialize the overflow_arg_area field of the va_list
450 structure. */
451 rtx split_stack_varargs_pointer;
452 };
453
454 /* A few accessor macros for struct cfun->machine->s390_frame_layout. */
455
456 #define cfun_frame_layout (cfun->machine->frame_layout)
457 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
458 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
459 ? cfun_frame_layout.fpr_bitmap & 0x0f \
460 : cfun_frame_layout.fpr_bitmap & 0x03))
461 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
462 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
463 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
464 (1 << (REGNO - FPR0_REGNUM)))
465 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
466 (1 << (REGNO - FPR0_REGNUM))))
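/* Illustrative only, not part of the original source: with the fpr_bitmap
   layout documented in s390_frame_layout above, marking f8 (hard reg 24)
   as saved amounts to

     cfun_set_fpr_save (FPR0_REGNUM + 8);

   which sets bit 24 - 16 = 8 in fpr_bitmap; cfun_fpr_save_p
   (FPR0_REGNUM + 8) then reads that same bit back.  */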
467 #define cfun_gpr_save_slot(REGNO) \
468 cfun->machine->frame_layout.gpr_save_slots[REGNO]
469
470 /* Number of GPRs and FPRs used for argument passing. */
471 #define GP_ARG_NUM_REG 5
472 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
473 #define VEC_ARG_NUM_REG 8
474
475 /* A couple of shortcuts. */
476 #define CONST_OK_FOR_J(x) \
477 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
478 #define CONST_OK_FOR_K(x) \
479 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
480 #define CONST_OK_FOR_Os(x) \
481 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
482 #define CONST_OK_FOR_Op(x) \
483 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
484 #define CONST_OK_FOR_On(x) \
485 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
486
487 #define REGNO_PAIR_OK(REGNO, MODE) \
488 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
489
490 /* That's the read ahead of the dynamic branch prediction unit in
491 bytes on a z10 (or higher) CPU. */
492 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
493
494
495 /* Indicate which ABI has been used for passing vector args.
496 0 - no vector type arguments have been passed where the ABI is relevant
497 1 - the old ABI has been used
498 2 - a vector type argument has been passed either in a vector register
499 or on the stack by value */
500 static int s390_vector_abi = 0;
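/* Illustrative only, not part of the original source: a user-level type
   that makes the marker above relevant, assuming the generic vector_size
   attribute:

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si f (v4si x);

   Passing or returning v4si lets s390_check_type_for_vector_abi below
   record whether the old or the new (TARGET_VX_ABI) convention is in
   effect for this translation unit.  */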
501
502 /* Set the vector ABI marker if TYPE is subject to the vector ABI
503 switch. The vector ABI affects only vector data types. There are
504 two aspects of the vector ABI relevant here:
505
506 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
507 ABI and natural alignment with the old.
508
509 2. vectors <= 16 bytes are passed in VRs or by value on the stack
510 with the new ABI but by reference on the stack with the old.
511
512 If ARG_P is true TYPE is used for a function argument or return
513 value. The ABI marker then is set for all vector data types. If
514 ARG_P is false only type 1 vectors are being checked. */
515
516 static void
517 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
518 {
519 static hash_set<const_tree> visited_types_hash;
520
521 if (s390_vector_abi)
522 return;
523
524 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
525 return;
526
527 if (visited_types_hash.contains (type))
528 return;
529
530 visited_types_hash.add (type);
531
532 if (VECTOR_TYPE_P (type))
533 {
534 int type_size = int_size_in_bytes (type);
535
536 /* Outside arguments only the alignment is changing and this
537 only happens for vector types >= 16 bytes. */
538 if (!arg_p && type_size < 16)
539 return;
540
541 /* In arguments vector types > 16 bytes are passed as before (GCC
542 never enforced the bigger alignment for arguments which was
543 required by the old vector ABI). However, it might still be
544 ABI relevant due to the changed alignment if it is a struct
545 member. */
546 if (arg_p && type_size > 16 && !in_struct_p)
547 return;
548
549 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
550 }
551 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
552 {
553 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
554 natural alignment there will never be ABI dependent padding
555 in an array type. That's why we do not set in_struct_p to
556 true here. */
557 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
558 }
559 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
560 {
561 tree arg_chain;
562
563 /* Check the return type. */
564 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
565
566 for (arg_chain = TYPE_ARG_TYPES (type);
567 arg_chain;
568 arg_chain = TREE_CHAIN (arg_chain))
569 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
570 }
571 else if (RECORD_OR_UNION_TYPE_P (type))
572 {
573 tree field;
574
575 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
576 {
577 if (TREE_CODE (field) != FIELD_DECL)
578 continue;
579
580 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
581 }
582 }
583 }
584
585
586 /* System z builtins. */
587
588 #include "s390-builtins.h"
589
590 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
591 {
592 #undef B_DEF
593 #undef OB_DEF
594 #undef OB_DEF_VAR
595 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
596 #define OB_DEF(...)
597 #define OB_DEF_VAR(...)
598 #include "s390-builtins.def"
599 0
600 };
601
602 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
603 {
604 #undef B_DEF
605 #undef OB_DEF
606 #undef OB_DEF_VAR
607 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
608 #define OB_DEF(...)
609 #define OB_DEF_VAR(...)
610 #include "s390-builtins.def"
611 0
612 };
613
614 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
615 {
616 #undef B_DEF
617 #undef OB_DEF
618 #undef OB_DEF_VAR
619 #define B_DEF(...)
620 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
621 #define OB_DEF_VAR(...)
622 #include "s390-builtins.def"
623 0
624 };
625
626 const unsigned int
627 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
628 {
629 #undef B_DEF
630 #undef OB_DEF
631 #undef OB_DEF_VAR
632 #define B_DEF(...)
633 #define OB_DEF(...)
634 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
635 #include "s390-builtins.def"
636 0
637 };
638
639 tree s390_builtin_types[BT_MAX];
640 tree s390_builtin_fn_types[BT_FN_MAX];
641 tree s390_builtin_decls[S390_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_MAX +
643 S390_OVERLOADED_BUILTIN_VAR_MAX];
644
645 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
646 #undef B_DEF
647 #undef OB_DEF
648 #undef OB_DEF_VAR
649 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
650 #define OB_DEF(...)
651 #define OB_DEF_VAR(...)
652
653 #include "s390-builtins.def"
654 CODE_FOR_nothing
655 };
656
657 static void
658 s390_init_builtins (void)
659 {
660 /* These definitions are being used in s390-builtins.def. */
661 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
662 NULL, NULL);
663 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
664 tree c_uint64_type_node;
665
666 /* The uint64_type_node from tree.c is not compatible with the C99
667 uint64_t data type. What we want is c_uint64_type_node from
668 c-common.c. But since backend code is not supposed to interface
669 with the frontend we recreate it here. */
670 if (TARGET_64BIT)
671 c_uint64_type_node = long_unsigned_type_node;
672 else
673 c_uint64_type_node = long_long_unsigned_type_node;
674
675 #undef DEF_TYPE
676 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
677 if (s390_builtin_types[INDEX] == NULL) \
678 s390_builtin_types[INDEX] = (!CONST_P) ? \
679 (NODE) : build_type_variant ((NODE), 1, 0);
680
681 #undef DEF_POINTER_TYPE
682 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
683 if (s390_builtin_types[INDEX] == NULL) \
684 s390_builtin_types[INDEX] = \
685 build_pointer_type (s390_builtin_types[INDEX_BASE]);
686
687 #undef DEF_DISTINCT_TYPE
688 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
689 if (s390_builtin_types[INDEX] == NULL) \
690 s390_builtin_types[INDEX] = \
691 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
692
693 #undef DEF_VECTOR_TYPE
694 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
695 if (s390_builtin_types[INDEX] == NULL) \
696 s390_builtin_types[INDEX] = \
697 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
698
699 #undef DEF_OPAQUE_VECTOR_TYPE
700 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
701 if (s390_builtin_types[INDEX] == NULL) \
702 s390_builtin_types[INDEX] = \
703 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
704
705 #undef DEF_FN_TYPE
706 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
707 if (s390_builtin_fn_types[INDEX] == NULL) \
708 s390_builtin_fn_types[INDEX] = \
709 build_function_type_list (args, NULL_TREE);
710 #undef DEF_OV_TYPE
711 #define DEF_OV_TYPE(...)
712 #include "s390-builtin-types.def"
713
714 #undef B_DEF
715 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
716 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
717 s390_builtin_decls[S390_BUILTIN_##NAME] = \
718 add_builtin_function ("__builtin_" #NAME, \
719 s390_builtin_fn_types[FNTYPE], \
720 S390_BUILTIN_##NAME, \
721 BUILT_IN_MD, \
722 NULL, \
723 ATTRS);
724 #undef OB_DEF
725 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
726 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
727 == NULL) \
728 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
729 add_builtin_function ("__builtin_" #NAME, \
730 s390_builtin_fn_types[FNTYPE], \
731 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
732 BUILT_IN_MD, \
733 NULL, \
734 0);
735 #undef OB_DEF_VAR
736 #define OB_DEF_VAR(...)
737 #include "s390-builtins.def"
738
739 }
740
741 /* Return true if ARG is appropriate as argument number ARGNUM of
742 builtin DECL. The operand flags from s390-builtins.def have to be
743 passed as OP_FLAGS. */
744 bool
745 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
746 {
747 if (O_UIMM_P (op_flags))
748 {
749 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
750 int bitwidth = bitwidths[op_flags - O_U1];
751
752 if (!tree_fits_uhwi_p (arg)
753 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
754 {
755 error("constant argument %d for builtin %qF is out of range (0.."
756 HOST_WIDE_INT_PRINT_UNSIGNED ")",
757 argnum, decl,
758 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
759 return false;
760 }
761 }
762
763 if (O_SIMM_P (op_flags))
764 {
765 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
766 int bitwidth = bitwidths[op_flags - O_S2];
767
768 if (!tree_fits_shwi_p (arg)
769 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
770 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
771 {
772 error("constant argument %d for builtin %qF is out of range ("
773 HOST_WIDE_INT_PRINT_DEC ".."
774 HOST_WIDE_INT_PRINT_DEC ")",
775 argnum, decl,
776 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
777 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
778 return false;
779 }
780 }
781 return true;
782 }
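/* Illustrative only, not part of the original source: assuming the
   O_Un/O_Sn operand flags line up with the bitwidth tables above in the
   obvious way, an operand flagged O_U4 accepts the unsigned range 0..15
   and one flagged O_S8 accepts the signed range -128..127.  A literal
   outside those bounds triggers the "out of range" error, and
   s390_expand_builtin below then returns const0_rtx instead of
   expanding the builtin.  */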
783
784 /* Expand an expression EXP that calls a built-in function,
785 with result going to TARGET if that's convenient
786 (and in mode MODE if that's convenient).
787 SUBTARGET may be used as the target for computing one of EXP's operands.
788 IGNORE is nonzero if the value is to be ignored. */
789
790 static rtx
791 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
792 machine_mode mode ATTRIBUTE_UNUSED,
793 int ignore ATTRIBUTE_UNUSED)
794 {
795 #define MAX_ARGS 6
796
797 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
798 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
799 enum insn_code icode;
800 rtx op[MAX_ARGS], pat;
801 int arity;
802 bool nonvoid;
803 tree arg;
804 call_expr_arg_iterator iter;
805 unsigned int all_op_flags = opflags_for_builtin (fcode);
806 machine_mode last_vec_mode = VOIDmode;
807
808 if (TARGET_DEBUG_ARG)
809 {
810 fprintf (stderr,
811 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
812 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
813 bflags_for_builtin (fcode));
814 }
815
816 if (S390_USE_TARGET_ATTRIBUTE)
817 {
818 unsigned int bflags;
819
820 bflags = bflags_for_builtin (fcode);
821 if ((bflags & B_HTM) && !TARGET_HTM)
822 {
823 error ("builtin %qF is not supported without -mhtm "
824 "(default with -march=zEC12 and higher).", fndecl);
825 return const0_rtx;
826 }
827 if ((bflags & B_VX) && !TARGET_VX)
828 {
829 error ("builtin %qF is not supported without -mvx "
830 "(default with -march=z13 and higher).", fndecl);
831 return const0_rtx;
832 }
833 }
834 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
835 && fcode < S390_ALL_BUILTIN_MAX)
836 {
837 gcc_unreachable ();
838 }
839 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
840 {
841 icode = code_for_builtin[fcode];
842 /* Set a flag in the machine specific cfun part in order to support
843 saving/restoring of FPRs. */
844 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
845 cfun->machine->tbegin_p = true;
846 }
847 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
848 {
849 error ("unresolved overloaded builtin");
850 return const0_rtx;
851 }
852 else
853 internal_error ("bad builtin fcode");
854
855 if (icode == 0)
856 internal_error ("bad builtin icode");
857
858 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
859
860 if (nonvoid)
861 {
862 machine_mode tmode = insn_data[icode].operand[0].mode;
863 if (!target
864 || GET_MODE (target) != tmode
865 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
866 target = gen_reg_rtx (tmode);
867
868 /* There are builtins (e.g. vec_promote) with no vector
869 arguments but an element selector. So we have to also look
870 at the vector return type when emitting the modulo
871 operation. */
872 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
873 last_vec_mode = insn_data[icode].operand[0].mode;
874 }
875
876 arity = 0;
877 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
878 {
879 rtx tmp_rtx;
880 const struct insn_operand_data *insn_op;
881 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
882
883 all_op_flags = all_op_flags >> O_SHIFT;
884
885 if (arg == error_mark_node)
886 return NULL_RTX;
887 if (arity >= MAX_ARGS)
888 return NULL_RTX;
889
890 if (O_IMM_P (op_flags)
891 && TREE_CODE (arg) != INTEGER_CST)
892 {
893 error ("constant value required for builtin %qF argument %d",
894 fndecl, arity + 1);
895 return const0_rtx;
896 }
897
898 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
899 return const0_rtx;
900
901 insn_op = &insn_data[icode].operand[arity + nonvoid];
902 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
903
904 /* expand_expr truncates constants to the target mode only if it
905 is "convenient". However, our checks below rely on this
906 being done. */
907 if (CONST_INT_P (op[arity])
908 && SCALAR_INT_MODE_P (insn_op->mode)
909 && GET_MODE (op[arity]) != insn_op->mode)
910 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
911 insn_op->mode));
912
913 /* Wrap the expanded RTX for pointer types into a MEM expr with
914 the proper mode. This allows us to use e.g. (match_operand
915 "memory_operand"..) in the insn patterns instead of (mem
916 (match_operand "address_operand")). This is helpful for
917 patterns not just accepting MEMs. */
918 if (POINTER_TYPE_P (TREE_TYPE (arg))
919 && insn_op->predicate != address_operand)
920 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
921
922 /* Expand the modulo operation required on element selectors. */
923 if (op_flags == O_ELEM)
924 {
925 gcc_assert (last_vec_mode != VOIDmode);
926 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
927 op[arity],
928 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
929 NULL_RTX, 1, OPTAB_DIRECT);
930 }
931
932 /* Record the vector mode used for an element selector. This assumes:
933 1. There is no builtin with two different vector modes and an element selector
934 2. The element selector comes after the vector type it is referring to.
935 This is currently true for all the builtins but FIXME we
936 should better check for that. */
937 if (VECTOR_MODE_P (insn_op->mode))
938 last_vec_mode = insn_op->mode;
939
940 if (insn_op->predicate (op[arity], insn_op->mode))
941 {
942 arity++;
943 continue;
944 }
945
946 if (MEM_P (op[arity])
947 && insn_op->predicate == memory_operand
948 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
949 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
950 {
951 op[arity] = replace_equiv_address (op[arity],
952 copy_to_mode_reg (Pmode,
953 XEXP (op[arity], 0)));
954 }
955 /* Some of the builtins require different modes/types than the
956 pattern in order to implement a specific API. Instead of
957 adding many expanders which do the mode change we do it here.
958 E.g. s390_vec_add_u128, which is required to have vector unsigned char
959 arguments, is mapped to addti3. */
960 else if (insn_op->mode != VOIDmode
961 && GET_MODE (op[arity]) != VOIDmode
962 && GET_MODE (op[arity]) != insn_op->mode
963 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
964 GET_MODE (op[arity]), 0))
965 != NULL_RTX))
966 {
967 op[arity] = tmp_rtx;
968 }
969 else if (GET_MODE (op[arity]) == insn_op->mode
970 || GET_MODE (op[arity]) == VOIDmode
971 || (insn_op->predicate == address_operand
972 && GET_MODE (op[arity]) == Pmode))
973 {
974 /* An address_operand usually has VOIDmode in the expander
975 so we cannot use this. */
976 machine_mode target_mode =
977 (insn_op->predicate == address_operand
978 ? Pmode : insn_op->mode);
979 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
980 }
981
982 if (!insn_op->predicate (op[arity], insn_op->mode))
983 {
984 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
985 return const0_rtx;
986 }
987 arity++;
988 }
989
990 switch (arity)
991 {
992 case 0:
993 pat = GEN_FCN (icode) (target);
994 break;
995 case 1:
996 if (nonvoid)
997 pat = GEN_FCN (icode) (target, op[0]);
998 else
999 pat = GEN_FCN (icode) (op[0]);
1000 break;
1001 case 2:
1002 if (nonvoid)
1003 pat = GEN_FCN (icode) (target, op[0], op[1]);
1004 else
1005 pat = GEN_FCN (icode) (op[0], op[1]);
1006 break;
1007 case 3:
1008 if (nonvoid)
1009 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1010 else
1011 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1012 break;
1013 case 4:
1014 if (nonvoid)
1015 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1016 else
1017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1018 break;
1019 case 5:
1020 if (nonvoid)
1021 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1022 else
1023 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1024 break;
1025 case 6:
1026 if (nonvoid)
1027 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1028 else
1029 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1030 break;
1031 default:
1032 gcc_unreachable ();
1033 }
1034 if (!pat)
1035 return NULL_RTX;
1036 emit_insn (pat);
1037
1038 if (nonvoid)
1039 return target;
1040 else
1041 return const0_rtx;
1042 }
1043
1044
1045 static const int s390_hotpatch_hw_max = 1000000;
1046 static int s390_hotpatch_hw_before_label = 0;
1047 static int s390_hotpatch_hw_after_label = 0;
1048
1049 /* Check whether the hotpatch attribute is applied to a function and, if it has
1050 arguments, whether the arguments are valid. */
1051
1052 static tree
1053 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1054 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1055 {
1056 tree expr;
1057 tree expr2;
1058 int err;
1059
1060 if (TREE_CODE (*node) != FUNCTION_DECL)
1061 {
1062 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1063 name);
1064 *no_add_attrs = true;
1065 }
1066 if (args != NULL && TREE_CHAIN (args) != NULL)
1067 {
1068 expr = TREE_VALUE (args);
1069 expr2 = TREE_VALUE (TREE_CHAIN (args));
1070 }
1071 if (args == NULL || TREE_CHAIN (args) == NULL)
1072 err = 1;
1073 else if (TREE_CODE (expr) != INTEGER_CST
1074 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1075 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1076 err = 1;
1077 else if (TREE_CODE (expr2) != INTEGER_CST
1078 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1079 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1080 err = 1;
1081 else
1082 err = 0;
1083 if (err)
1084 {
1085 error ("requested %qE attribute is not a comma separated pair of"
1086 " non-negative integer constants or too large (max. %d)", name,
1087 s390_hotpatch_hw_max);
1088 *no_add_attrs = true;
1089 }
1090
1091 return NULL_TREE;
1092 }
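/* Illustrative only, not part of the original source: the attribute
   checked above is used on function declarations, e.g.

     void foo (void) __attribute__ ((hotpatch (1, 2)));

   which requests one halfword of padding before and two halfwords after
   the function label, both bounded by s390_hotpatch_hw_max.  */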
1093
1094 /* Expand the s390_vector_bool type attribute. */
1095
1096 static tree
1097 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1098 tree args ATTRIBUTE_UNUSED,
1099 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1100 {
1101 tree type = *node, result = NULL_TREE;
1102 machine_mode mode;
1103
1104 while (POINTER_TYPE_P (type)
1105 || TREE_CODE (type) == FUNCTION_TYPE
1106 || TREE_CODE (type) == METHOD_TYPE
1107 || TREE_CODE (type) == ARRAY_TYPE)
1108 type = TREE_TYPE (type);
1109
1110 mode = TYPE_MODE (type);
1111 switch (mode)
1112 {
1113 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1114 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1115 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1116 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1117 default: break;
1118 }
1119
1120 *no_add_attrs = true; /* No need to hang on to the attribute. */
1121
1122 if (result)
1123 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1124
1125 return NULL_TREE;
1126 }
1127
1128 static const struct attribute_spec s390_attribute_table[] = {
1129 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1130 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1131 /* End element. */
1132 { NULL, 0, 0, false, false, false, NULL, false }
1133 };
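/* Illustrative only, not part of the original source: the
   s390_vector_bool entry in the table above is the attribute that
   boolean vector typedefs (such as those provided by vecintrin.h) are
   built from, e.g.

     typedef unsigned int bv4si
       __attribute__ ((vector_size (16), s390_vector_bool));

   which s390_handle_vectorbool_attribute rewrites to the internal
   BT_BV4SI type.  */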
1134
1135 /* Return the alignment for LABEL. We default to the -falign-labels
1136 value except for the literal pool base label. */
1137 int
1138 s390_label_align (rtx_insn *label)
1139 {
1140 rtx_insn *prev_insn = prev_active_insn (label);
1141 rtx set, src;
1142
1143 if (prev_insn == NULL_RTX)
1144 goto old;
1145
1146 set = single_set (prev_insn);
1147
1148 if (set == NULL_RTX)
1149 goto old;
1150
1151 src = SET_SRC (set);
1152
1153 /* Don't align literal pool base labels. */
1154 if (GET_CODE (src) == UNSPEC
1155 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1156 return 0;
1157
1158 old:
1159 return align_labels_log;
1160 }
1161
1162 static machine_mode
1163 s390_libgcc_cmp_return_mode (void)
1164 {
1165 return TARGET_64BIT ? DImode : SImode;
1166 }
1167
1168 static machine_mode
1169 s390_libgcc_shift_count_mode (void)
1170 {
1171 return TARGET_64BIT ? DImode : SImode;
1172 }
1173
1174 static machine_mode
1175 s390_unwind_word_mode (void)
1176 {
1177 return TARGET_64BIT ? DImode : SImode;
1178 }
1179
1180 /* Return true if the back end supports mode MODE. */
1181 static bool
1182 s390_scalar_mode_supported_p (machine_mode mode)
1183 {
1184 /* In contrast to the default implementation, reject TImode constants on 31-bit
1185 TARGET_ZARCH for ABI compliance. */
1186 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1187 return false;
1188
1189 if (DECIMAL_FLOAT_MODE_P (mode))
1190 return default_decimal_float_supported_p ();
1191
1192 return default_scalar_mode_supported_p (mode);
1193 }
1194
1195 /* Return true if the back end supports vector mode MODE. */
1196 static bool
1197 s390_vector_mode_supported_p (machine_mode mode)
1198 {
1199 machine_mode inner;
1200
1201 if (!VECTOR_MODE_P (mode)
1202 || !TARGET_VX
1203 || GET_MODE_SIZE (mode) > 16)
1204 return false;
1205
1206 inner = GET_MODE_INNER (mode);
1207
1208 switch (inner)
1209 {
1210 case QImode:
1211 case HImode:
1212 case SImode:
1213 case DImode:
1214 case TImode:
1215 case SFmode:
1216 case DFmode:
1217 case TFmode:
1218 return true;
1219 default:
1220 return false;
1221 }
1222 }
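/* Illustrative only, not part of the original source: with the checks
   above, 16-byte modes such as V16QImode, V4SImode and V2DFmode are
   supported when TARGET_VX is in effect, while a 32-byte mode like
   V4DImode is rejected by the GET_MODE_SIZE test.  */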
1223
1224 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1225
1226 void
1227 s390_set_has_landing_pad_p (bool value)
1228 {
1229 cfun->machine->has_landing_pad_p = value;
1230 }
1231
1232 /* If two condition code modes are compatible, return a condition code
1233 mode which is compatible with both. Otherwise, return
1234 VOIDmode. */
1235
1236 static machine_mode
1237 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1238 {
1239 if (m1 == m2)
1240 return m1;
1241
1242 switch (m1)
1243 {
1244 case CCZmode:
1245 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1246 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1247 return m2;
1248 return VOIDmode;
1249
1250 case CCSmode:
1251 case CCUmode:
1252 case CCTmode:
1253 case CCSRmode:
1254 case CCURmode:
1255 case CCZ1mode:
1256 if (m2 == CCZmode)
1257 return m1;
1258
1259 return VOIDmode;
1260
1261 default:
1262 return VOIDmode;
1263 }
1264 return VOIDmode;
1265 }
1266
1267 /* Return true if SET either doesn't set the CC register, or else
1268 the source and destination have matching CC modes and that
1269 CC mode is at least as constrained as REQ_MODE. */
1270
1271 static bool
1272 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1273 {
1274 machine_mode set_mode;
1275
1276 gcc_assert (GET_CODE (set) == SET);
1277
1278 /* These modes are supposed to be used only in CC consumer
1279 patterns. */
1280 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1281 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1282
1283 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1284 return 1;
1285
1286 set_mode = GET_MODE (SET_DEST (set));
1287 switch (set_mode)
1288 {
1289 case CCSmode:
1290 case CCSRmode:
1291 case CCUmode:
1292 case CCURmode:
1293 case CCLmode:
1294 case CCL1mode:
1295 case CCL2mode:
1296 case CCL3mode:
1297 case CCT1mode:
1298 case CCT2mode:
1299 case CCT3mode:
1300 case CCVEQmode:
1301 case CCVIHmode:
1302 case CCVIHUmode:
1303 case CCVFHmode:
1304 case CCVFHEmode:
1305 if (req_mode != set_mode)
1306 return 0;
1307 break;
1308
1309 case CCZmode:
1310 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1311 && req_mode != CCSRmode && req_mode != CCURmode)
1312 return 0;
1313 break;
1314
1315 case CCAPmode:
1316 case CCANmode:
1317 if (req_mode != CCAmode)
1318 return 0;
1319 break;
1320
1321 default:
1322 gcc_unreachable ();
1323 }
1324
1325 return (GET_MODE (SET_SRC (set)) == set_mode);
1326 }
1327
1328 /* Return true if every SET in INSN that sets the CC register
1329 has source and destination with matching CC modes and that
1330 CC mode is at least as constrained as REQ_MODE.
1331 If REQ_MODE is VOIDmode, always return false. */
1332
1333 bool
1334 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1335 {
1336 int i;
1337
1338 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1339 if (req_mode == VOIDmode)
1340 return false;
1341
1342 if (GET_CODE (PATTERN (insn)) == SET)
1343 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1344
1345 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1346 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1347 {
1348 rtx set = XVECEXP (PATTERN (insn), 0, i);
1349 if (GET_CODE (set) == SET)
1350 if (!s390_match_ccmode_set (set, req_mode))
1351 return false;
1352 }
1353
1354 return true;
1355 }
1356
1357 /* If a test-under-mask instruction can be used to implement
1358 (compare (and ... OP1) OP2), return the CC mode required
1359 to do that. Otherwise, return VOIDmode.
1360 MIXED is true if the instruction can distinguish between
1361 CC1 and CC2 for mixed selected bits (TMxx); it is false
1362 if the instruction cannot (TM). */
1363
1364 machine_mode
1365 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1366 {
1367 int bit0, bit1;
1368
1369 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1370 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1371 return VOIDmode;
1372
1373 /* Selected bits all zero: CC0.
1374 e.g.: int a; if ((a & (16 + 128)) == 0) */
1375 if (INTVAL (op2) == 0)
1376 return CCTmode;
1377
1378 /* Selected bits all one: CC3.
1379 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1380 if (INTVAL (op2) == INTVAL (op1))
1381 return CCT3mode;
1382
1383 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1384 int a;
1385 if ((a & (16 + 128)) == 16) -> CCT1
1386 if ((a & (16 + 128)) == 128) -> CCT2 */
1387 if (mixed)
1388 {
1389 bit1 = exact_log2 (INTVAL (op2));
1390 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1391 if (bit0 != -1 && bit1 != -1)
1392 return bit0 > bit1 ? CCT1mode : CCT2mode;
1393 }
1394
1395 return VOIDmode;
1396 }
1397
1398 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1399 OP0 and OP1 of a COMPARE, return the mode to be used for the
1400 comparison. */
1401
1402 machine_mode
1403 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1404 {
1405 if (TARGET_VX
1406 && register_operand (op0, DFmode)
1407 && register_operand (op1, DFmode))
1408 {
1409 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1410 s390_emit_compare or s390_canonicalize_comparison will take
1411 care of it. */
1412 switch (code)
1413 {
1414 case EQ:
1415 case NE:
1416 return CCVEQmode;
1417 case GT:
1418 case UNLE:
1419 return CCVFHmode;
1420 case GE:
1421 case UNLT:
1422 return CCVFHEmode;
1423 default:
1424 ;
1425 }
1426 }
1427
1428 switch (code)
1429 {
1430 case EQ:
1431 case NE:
1432 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1433 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1434 return CCAPmode;
1435 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1436 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1437 return CCAPmode;
1438 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1439 || GET_CODE (op1) == NEG)
1440 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1441 return CCLmode;
1442
1443 if (GET_CODE (op0) == AND)
1444 {
1445 /* Check whether we can potentially do it via TM. */
1446 machine_mode ccmode;
1447 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1448 if (ccmode != VOIDmode)
1449 {
1450 /* Relax CCTmode to CCZmode to allow fall-back to AND
1451 if that turns out to be beneficial. */
1452 return ccmode == CCTmode ? CCZmode : ccmode;
1453 }
1454 }
1455
1456 if (register_operand (op0, HImode)
1457 && GET_CODE (op1) == CONST_INT
1458 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1459 return CCT3mode;
1460 if (register_operand (op0, QImode)
1461 && GET_CODE (op1) == CONST_INT
1462 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1463 return CCT3mode;
1464
1465 return CCZmode;
1466
1467 case LE:
1468 case LT:
1469 case GE:
1470 case GT:
1471 /* The only overflow condition of NEG and ABS happens when
1472 INT_MIN is used as parameter, which stays negative. So
1473 we have an overflow from a positive value to a negative.
1474 Using CCAP mode the resulting cc can be used for comparisons. */
1475 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1476 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1477 return CCAPmode;
1478
1479 /* If constants are involved in an add instruction it is possible to use
1480 the resulting cc for comparisons with zero. Knowing the sign of the
1481 constant the overflow behavior gets predictable. e.g.:
1482 int a, b; if ((b = a + c) > 0)
1483 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1484 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1485 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1486 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1487 /* Avoid INT32_MIN on 32 bit. */
1488 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1489 {
1490 if (INTVAL (XEXP((op0), 1)) < 0)
1491 return CCANmode;
1492 else
1493 return CCAPmode;
1494 }
1495 /* Fall through. */
1496 case UNORDERED:
1497 case ORDERED:
1498 case UNEQ:
1499 case UNLE:
1500 case UNLT:
1501 case UNGE:
1502 case UNGT:
1503 case LTGT:
1504 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1505 && GET_CODE (op1) != CONST_INT)
1506 return CCSRmode;
1507 return CCSmode;
1508
1509 case LTU:
1510 case GEU:
1511 if (GET_CODE (op0) == PLUS
1512 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1513 return CCL1mode;
1514
1515 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1516 && GET_CODE (op1) != CONST_INT)
1517 return CCURmode;
1518 return CCUmode;
1519
1520 case LEU:
1521 case GTU:
1522 if (GET_CODE (op0) == MINUS
1523 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1524 return CCL2mode;
1525
1526 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1527 && GET_CODE (op1) != CONST_INT)
1528 return CCURmode;
1529 return CCUmode;
1530
1531 default:
1532 gcc_unreachable ();
1533 }
1534 }
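/* Illustrative only, not part of the original source: two examples of
   the mode selection above, taken from the comments in the function:

     if ((a & (16 + 128)) == 0)   TM candidate, CCTmode relaxed to CCZmode
     if ((b = a + 42) > 0)        constant >= 0, hence CCAPmode

   The chosen CC mode later drives s390_branch_condition_mask when the
   conditional branch is emitted.  */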
1535
1536 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1537 that we can implement more efficiently. */
1538
1539 static void
1540 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1541 bool op0_preserve_value)
1542 {
1543 if (op0_preserve_value)
1544 return;
1545
1546 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1547 if ((*code == EQ || *code == NE)
1548 && *op1 == const0_rtx
1549 && GET_CODE (*op0) == ZERO_EXTRACT
1550 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1551 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1552 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1553 {
1554 rtx inner = XEXP (*op0, 0);
1555 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1556 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1557 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1558
1559 if (len > 0 && len < modesize
1560 && pos >= 0 && pos + len <= modesize
1561 && modesize <= HOST_BITS_PER_WIDE_INT)
1562 {
1563 unsigned HOST_WIDE_INT block;
1564 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1565 block <<= modesize - pos - len;
1566
1567 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1568 gen_int_mode (block, GET_MODE (inner)));
1569 }
1570 }
1571
1572 /* Narrow AND of memory against immediate to enable TM. */
1573 if ((*code == EQ || *code == NE)
1574 && *op1 == const0_rtx
1575 && GET_CODE (*op0) == AND
1576 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1577 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1578 {
1579 rtx inner = XEXP (*op0, 0);
1580 rtx mask = XEXP (*op0, 1);
1581
1582 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1583 if (GET_CODE (inner) == SUBREG
1584 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1585 && (GET_MODE_SIZE (GET_MODE (inner))
1586 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1587 && ((INTVAL (mask)
1588 & GET_MODE_MASK (GET_MODE (inner))
1589 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1590 == 0))
1591 inner = SUBREG_REG (inner);
1592
1593 /* Do not change volatile MEMs. */
1594 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1595 {
1596 int part = s390_single_part (XEXP (*op0, 1),
1597 GET_MODE (inner), QImode, 0);
1598 if (part >= 0)
1599 {
1600 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1601 inner = adjust_address_nv (inner, QImode, part);
1602 *op0 = gen_rtx_AND (QImode, inner, mask);
1603 }
1604 }
1605 }
1606
1607 /* Narrow comparisons against 0xffff to HImode if possible. */
1608 if ((*code == EQ || *code == NE)
1609 && GET_CODE (*op1) == CONST_INT
1610 && INTVAL (*op1) == 0xffff
1611 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1612 && (nonzero_bits (*op0, GET_MODE (*op0))
1613 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1614 {
1615 *op0 = gen_lowpart (HImode, *op0);
1616 *op1 = constm1_rtx;
1617 }
1618
1619 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1620 if (GET_CODE (*op0) == UNSPEC
1621 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1622 && XVECLEN (*op0, 0) == 1
1623 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1624 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1625 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1626 && *op1 == const0_rtx)
1627 {
1628 enum rtx_code new_code = UNKNOWN;
1629 switch (*code)
1630 {
1631 case EQ: new_code = EQ; break;
1632 case NE: new_code = NE; break;
1633 case LT: new_code = GTU; break;
1634 case GT: new_code = LTU; break;
1635 case LE: new_code = GEU; break;
1636 case GE: new_code = LEU; break;
1637 default: break;
1638 }
1639
1640 if (new_code != UNKNOWN)
1641 {
1642 *op0 = XVECEXP (*op0, 0, 0);
1643 *code = new_code;
1644 }
1645 }
1646
1647 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1648 if (GET_CODE (*op0) == UNSPEC
1649 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1650 && XVECLEN (*op0, 0) == 1
1651 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1652 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1653 && CONST_INT_P (*op1))
1654 {
1655 enum rtx_code new_code = UNKNOWN;
1656 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1657 {
1658 case CCZmode:
1659 case CCRAWmode:
1660 switch (*code)
1661 {
1662 case EQ: new_code = EQ; break;
1663 case NE: new_code = NE; break;
1664 default: break;
1665 }
1666 break;
1667 default: break;
1668 }
1669
1670 if (new_code != UNKNOWN)
1671 {
1672 /* For CCRAWmode put the required cc mask into the second
1673 operand. */
1674 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1675 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1676 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1677 *op0 = XVECEXP (*op0, 0, 0);
1678 *code = new_code;
1679 }
1680 }
1681
1682 /* Simplify cascaded EQ, NE with const0_rtx. */
1683 if ((*code == NE || *code == EQ)
1684 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1685 && GET_MODE (*op0) == SImode
1686 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1687 && REG_P (XEXP (*op0, 0))
1688 && XEXP (*op0, 1) == const0_rtx
1689 && *op1 == const0_rtx)
1690 {
1691 if ((*code == EQ && GET_CODE (*op0) == NE)
1692 || (*code == NE && GET_CODE (*op0) == EQ))
1693 *code = EQ;
1694 else
1695 *code = NE;
1696 *op0 = XEXP (*op0, 0);
1697 }
1698
1699 /* Prefer register over memory as first operand. */
1700 if (MEM_P (*op0) && REG_P (*op1))
1701 {
1702 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1703 *code = (int)swap_condition ((enum rtx_code)*code);
1704 }
1705
1706 /* Using the scalar variants of vector instructions for 64 bit FP
1707 comparisons might require swapping the operands. */
1708 if (TARGET_VX
1709 && register_operand (*op0, DFmode)
1710 && register_operand (*op1, DFmode)
1711 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1712 {
1713 rtx tmp;
1714
1715 switch (*code)
1716 {
1717 case LT: *code = GT; break;
1718 case LE: *code = GE; break;
1719 case UNGT: *code = UNLE; break;
1720 case UNGE: *code = UNLT; break;
1721 default: ;
1722 }
1723 tmp = *op0; *op0 = *op1; *op1 = tmp;
1724 }
1725
1726 /* A comparison result is compared against zero. Replace it with
1727 the (perhaps inverted) original comparison.
1728 This probably should be done by simplify_relational_operation. */
1729 if ((*code == EQ || *code == NE)
1730 && *op1 == const0_rtx
1731 && COMPARISON_P (*op0)
1732 && CC_REG_P (XEXP (*op0, 0)))
1733 {
1734 enum rtx_code new_code;
1735
1736 if (*code == EQ)
1737 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1738 XEXP (*op0, 0),
1739 XEXP (*op1, 0), NULL);
1740 else
1741 new_code = GET_CODE (*op0);
1742
1743 if (new_code != UNKNOWN)
1744 {
1745 *code = new_code;
1746 *op1 = XEXP (*op0, 1);
1747 *op0 = XEXP (*op0, 0);
1748 }
1749 }
1750 }
1751
1752 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1753 FP compare using the single element variant of vector instructions.
1754 Replace CODE with the comparison code to be used in the CC reg
1755 compare and return the condition code register RTX in CC. */
1756
1757 static bool
1758 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1759 rtx *cc)
1760 {
1761 machine_mode cmp_mode;
1762 bool swap_p = false;
1763
1764 switch (*code)
1765 {
1766 case EQ: cmp_mode = CCVEQmode; break;
1767 case NE: cmp_mode = CCVEQmode; break;
1768 case GT: cmp_mode = CCVFHmode; break;
1769 case GE: cmp_mode = CCVFHEmode; break;
1770 case UNLE: cmp_mode = CCVFHmode; break;
1771 case UNLT: cmp_mode = CCVFHEmode; break;
1772 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1773 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1774 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1775 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1776 default: return false;
1777 }
1778
1779 if (swap_p)
1780 {
1781 rtx tmp = cmp2;
1782 cmp2 = cmp1;
1783 cmp1 = tmp;
1784 }
1785
1786 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1787 gen_rtvec (2,
1788 gen_rtx_SET (gen_rtx_REG (cmp_mode, CC_REGNUM),
1789 gen_rtx_COMPARE (cmp_mode, cmp1,
1790 cmp2)),
1791 gen_rtx_CLOBBER (VOIDmode,
1792 gen_rtx_SCRATCH (V2DImode)))));
1793
1794 /* This is the cc reg as it will be used in the cc mode consumer.
1795 It either needs to be CCVFALL or CCVFANY. However, CC1 will
1796 never be set by the scalar variants. So it actually doesn't
1797 matter which one we choose here. */
1798 *cc = gen_rtx_REG (CCVFALLmode, CC_REGNUM);
1799 return true;
1800 }
1801
1802
1803 /* Emit a compare instruction suitable to implement the comparison
1804 OP0 CODE OP1. Return the correct condition RTL to be placed in
1805 the IF_THEN_ELSE of the conditional branch testing the result. */
1806
1807 rtx
1808 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1809 {
1810 machine_mode mode = s390_select_ccmode (code, op0, op1);
1811 rtx cc;
1812
1813 if (TARGET_VX
1814 && register_operand (op0, DFmode)
1815 && register_operand (op1, DFmode)
1816 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1817 {
1818 /* Work has been done by s390_expand_vec_compare_scalar already. */
1819 }
1820 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1821 {
1822 /* Do not output a redundant compare instruction if a
1823 compare_and_swap pattern already computed the result and the
1824 machine modes are compatible. */
1825 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1826 == GET_MODE (op0));
1827 cc = op0;
1828 }
1829 else
1830 {
1831 cc = gen_rtx_REG (mode, CC_REGNUM);
1832 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1833 }
1834
1835 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1836 }
1837
1838 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1839 matches CMP.
1840 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1841 conditional branch testing the result. */
1842
1843 static rtx
1844 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1845 rtx cmp, rtx new_rtx)
1846 {
1847 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1848 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1849 const0_rtx);
1850 }
1851
1852 /* Emit a jump instruction to TARGET and return it. If COND is
1853 NULL_RTX, emit an unconditional jump, else a conditional jump under
1854 condition COND. */
1855
1856 rtx_insn *
1857 s390_emit_jump (rtx target, rtx cond)
1858 {
1859 rtx insn;
1860
1861 target = gen_rtx_LABEL_REF (VOIDmode, target);
1862 if (cond)
1863 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1864
1865 insn = gen_rtx_SET (pc_rtx, target);
1866 return emit_jump_insn (insn);
1867 }
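
/* Illustrative sketch of how the two helpers above are used together:
   to branch to a code label LABEL when OP0 is signed less than OP1
   one would write

     rtx cond = s390_emit_compare (LT, op0, op1);
     s390_emit_jump (label, cond);

   s390_emit_jump wraps COND in an IF_THEN_ELSE around the branch
   target.  */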
1868
1869 /* Return branch condition mask to implement a branch
1870 specified by CODE. Return -1 for invalid comparisons. */
1871
1872 int
1873 s390_branch_condition_mask (rtx code)
1874 {
1875 const int CC0 = 1 << 3;
1876 const int CC1 = 1 << 2;
1877 const int CC2 = 1 << 1;
1878 const int CC3 = 1 << 0;
1879
1880 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1881 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1882 gcc_assert (XEXP (code, 1) == const0_rtx
1883 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1884 && CONST_INT_P (XEXP (code, 1))));
1885
1886
1887 switch (GET_MODE (XEXP (code, 0)))
1888 {
1889 case CCZmode:
1890 case CCZ1mode:
1891 switch (GET_CODE (code))
1892 {
1893 case EQ: return CC0;
1894 case NE: return CC1 | CC2 | CC3;
1895 default: return -1;
1896 }
1897 break;
1898
1899 case CCT1mode:
1900 switch (GET_CODE (code))
1901 {
1902 case EQ: return CC1;
1903 case NE: return CC0 | CC2 | CC3;
1904 default: return -1;
1905 }
1906 break;
1907
1908 case CCT2mode:
1909 switch (GET_CODE (code))
1910 {
1911 case EQ: return CC2;
1912 case NE: return CC0 | CC1 | CC3;
1913 default: return -1;
1914 }
1915 break;
1916
1917 case CCT3mode:
1918 switch (GET_CODE (code))
1919 {
1920 case EQ: return CC3;
1921 case NE: return CC0 | CC1 | CC2;
1922 default: return -1;
1923 }
1924 break;
1925
1926 case CCLmode:
1927 switch (GET_CODE (code))
1928 {
1929 case EQ: return CC0 | CC2;
1930 case NE: return CC1 | CC3;
1931 default: return -1;
1932 }
1933 break;
1934
1935 case CCL1mode:
1936 switch (GET_CODE (code))
1937 {
1938 case LTU: return CC2 | CC3; /* carry */
1939 case GEU: return CC0 | CC1; /* no carry */
1940 default: return -1;
1941 }
1942 break;
1943
1944 case CCL2mode:
1945 switch (GET_CODE (code))
1946 {
1947 case GTU: return CC0 | CC1; /* borrow */
1948 case LEU: return CC2 | CC3; /* no borrow */
1949 default: return -1;
1950 }
1951 break;
1952
1953 case CCL3mode:
1954 switch (GET_CODE (code))
1955 {
1956 case EQ: return CC0 | CC2;
1957 case NE: return CC1 | CC3;
1958 case LTU: return CC1;
1959 case GTU: return CC3;
1960 case LEU: return CC1 | CC2;
1961 case GEU: return CC2 | CC3;
1962 default: return -1;
1963 }
1964
1965 case CCUmode:
1966 switch (GET_CODE (code))
1967 {
1968 case EQ: return CC0;
1969 case NE: return CC1 | CC2 | CC3;
1970 case LTU: return CC1;
1971 case GTU: return CC2;
1972 case LEU: return CC0 | CC1;
1973 case GEU: return CC0 | CC2;
1974 default: return -1;
1975 }
1976 break;
1977
1978 case CCURmode:
1979 switch (GET_CODE (code))
1980 {
1981 case EQ: return CC0;
1982 case NE: return CC2 | CC1 | CC3;
1983 case LTU: return CC2;
1984 case GTU: return CC1;
1985 case LEU: return CC0 | CC2;
1986 case GEU: return CC0 | CC1;
1987 default: return -1;
1988 }
1989 break;
1990
1991 case CCAPmode:
1992 switch (GET_CODE (code))
1993 {
1994 case EQ: return CC0;
1995 case NE: return CC1 | CC2 | CC3;
1996 case LT: return CC1 | CC3;
1997 case GT: return CC2;
1998 case LE: return CC0 | CC1 | CC3;
1999 case GE: return CC0 | CC2;
2000 default: return -1;
2001 }
2002 break;
2003
2004 case CCANmode:
2005 switch (GET_CODE (code))
2006 {
2007 case EQ: return CC0;
2008 case NE: return CC1 | CC2 | CC3;
2009 case LT: return CC1;
2010 case GT: return CC2 | CC3;
2011 case LE: return CC0 | CC1;
2012 case GE: return CC0 | CC2 | CC3;
2013 default: return -1;
2014 }
2015 break;
2016
2017 case CCSmode:
2018 switch (GET_CODE (code))
2019 {
2020 case EQ: return CC0;
2021 case NE: return CC1 | CC2 | CC3;
2022 case LT: return CC1;
2023 case GT: return CC2;
2024 case LE: return CC0 | CC1;
2025 case GE: return CC0 | CC2;
2026 case UNORDERED: return CC3;
2027 case ORDERED: return CC0 | CC1 | CC2;
2028 case UNEQ: return CC0 | CC3;
2029 case UNLT: return CC1 | CC3;
2030 case UNGT: return CC2 | CC3;
2031 case UNLE: return CC0 | CC1 | CC3;
2032 case UNGE: return CC0 | CC2 | CC3;
2033 case LTGT: return CC1 | CC2;
2034 default: return -1;
2035 }
2036 break;
2037
2038 case CCSRmode:
2039 switch (GET_CODE (code))
2040 {
2041 case EQ: return CC0;
2042 case NE: return CC2 | CC1 | CC3;
2043 case LT: return CC2;
2044 case GT: return CC1;
2045 case LE: return CC0 | CC2;
2046 case GE: return CC0 | CC1;
2047 case UNORDERED: return CC3;
2048 case ORDERED: return CC0 | CC2 | CC1;
2049 case UNEQ: return CC0 | CC3;
2050 case UNLT: return CC2 | CC3;
2051 case UNGT: return CC1 | CC3;
2052 case UNLE: return CC0 | CC2 | CC3;
2053 case UNGE: return CC0 | CC1 | CC3;
2054 case LTGT: return CC2 | CC1;
2055 default: return -1;
2056 }
2057 break;
2058
2059 /* Vector comparison modes. */
2060 /* CC2 will never be set. It is, however, part of the negated
2061 masks. */
2062 case CCVIALLmode:
2063 switch (GET_CODE (code))
2064 {
2065 case EQ:
2066 case GTU:
2067 case GT:
2068 case GE: return CC0;
2069 /* The inverted modes are in fact *any* modes. */
2070 case NE:
2071 case LEU:
2072 case LE:
2073 case LT: return CC3 | CC1 | CC2;
2074 default: return -1;
2075 }
2076
2077 case CCVIANYmode:
2078 switch (GET_CODE (code))
2079 {
2080 case EQ:
2081 case GTU:
2082 case GT:
2083 case GE: return CC0 | CC1;
2084 /* The inverted modes are in fact *all* modes. */
2085 case NE:
2086 case LEU:
2087 case LE:
2088 case LT: return CC3 | CC2;
2089 default: return -1;
2090 }
2091 case CCVFALLmode:
2092 switch (GET_CODE (code))
2093 {
2094 case EQ:
2095 case GT:
2096 case GE: return CC0;
2097 /* The inverted modes are in fact *any* modes. */
2098 case NE:
2099 case UNLE:
2100 case UNLT: return CC3 | CC1 | CC2;
2101 default: return -1;
2102 }
2103
2104 case CCVFANYmode:
2105 switch (GET_CODE (code))
2106 {
2107 case EQ:
2108 case GT:
2109 case GE: return CC0 | CC1;
2110 /* The inverted modes are in fact *all* modes. */
2111 case NE:
2112 case UNLE:
2113 case UNLT: return CC3 | CC2;
2114 default: return -1;
2115 }
2116
2117 case CCRAWmode:
2118 switch (GET_CODE (code))
2119 {
2120 case EQ:
2121 return INTVAL (XEXP (code, 1));
2122 case NE:
2123 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2124 default:
2125 gcc_unreachable ();
2126 }
2127
2128 default:
2129 return -1;
2130 }
2131 }
2132
2133
2134 /* Return branch condition mask to implement a compare and branch
2135 specified by CODE. Return -1 for invalid comparisons. */
2136
2137 int
2138 s390_compare_and_branch_condition_mask (rtx code)
2139 {
2140 const int CC0 = 1 << 3;
2141 const int CC1 = 1 << 2;
2142 const int CC2 = 1 << 1;
2143
2144 switch (GET_CODE (code))
2145 {
2146 case EQ:
2147 return CC0;
2148 case NE:
2149 return CC1 | CC2;
2150 case LT:
2151 case LTU:
2152 return CC1;
2153 case GT:
2154 case GTU:
2155 return CC2;
2156 case LE:
2157 case LEU:
2158 return CC0 | CC1;
2159 case GE:
2160 case GEU:
2161 return CC0 | CC2;
2162 default:
2163 gcc_unreachable ();
2164 }
2165 return -1;
2166 }
2167
2168 /* If INV is false, return assembler mnemonic string to implement
2169 a branch specified by CODE. If INV is true, return mnemonic
2170 for the corresponding inverted branch. */
2171
2172 static const char *
2173 s390_branch_condition_mnemonic (rtx code, int inv)
2174 {
2175 int mask;
2176
2177 static const char *const mnemonic[16] =
2178 {
2179 NULL, "o", "h", "nle",
2180 "l", "nhe", "lh", "ne",
2181 "e", "nlh", "he", "nl",
2182 "le", "nh", "no", NULL
2183 };
2184
2185 if (GET_CODE (XEXP (code, 0)) == REG
2186 && REGNO (XEXP (code, 0)) == CC_REGNUM
2187 && (XEXP (code, 1) == const0_rtx
2188 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2189 && CONST_INT_P (XEXP (code, 1)))))
2190 mask = s390_branch_condition_mask (code);
2191 else
2192 mask = s390_compare_and_branch_condition_mask (code);
2193
2194 gcc_assert (mask >= 0);
2195
2196 if (inv)
2197 mask ^= 15;
2198
2199 gcc_assert (mask >= 1 && mask <= 14);
2200
2201 return mnemonic[mask];
2202 }
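
/* Example: a signed "less than" in CCSmode yields the mask
   CC1 == 0b0100 == 4, so the branch mnemonic is mnemonic[4] == "l";
   the inverted branch uses 4 ^ 15 == 11, i.e. "nl".  */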
2203
2204 /* Return the part of OP which has a value different from DEF.
2205 The size of the part is determined by MODE.
2206 Use this function only if you already know that OP really
2207 contains such a part. */
2208
2209 unsigned HOST_WIDE_INT
2210 s390_extract_part (rtx op, machine_mode mode, int def)
2211 {
2212 unsigned HOST_WIDE_INT value = 0;
2213 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2214 int part_bits = GET_MODE_BITSIZE (mode);
2215 unsigned HOST_WIDE_INT part_mask
2216 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2217 int i;
2218
2219 for (i = 0; i < max_parts; i++)
2220 {
2221 if (i == 0)
2222 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2223 else
2224 value >>= part_bits;
2225
2226 if ((value & part_mask) != (def & part_mask))
2227 return value & part_mask;
2228 }
2229
2230 gcc_unreachable ();
2231 }
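
/* Example: for OP == 0x0000ffff00000000, MODE == HImode and DEF == 0
   the only halfword differing from DEF has the value 0xffff, which is
   what gets returned.  */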
2232
2233 /* If OP is an integer constant of mode MODE with exactly one
2234 part of mode PART_MODE unequal to DEF, return the number of that
2235 part. Otherwise, return -1. */
2236
2237 int
2238 s390_single_part (rtx op,
2239 machine_mode mode,
2240 machine_mode part_mode,
2241 int def)
2242 {
2243 unsigned HOST_WIDE_INT value = 0;
2244 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2245 unsigned HOST_WIDE_INT part_mask
2246 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2247 int i, part = -1;
2248
2249 if (GET_CODE (op) != CONST_INT)
2250 return -1;
2251
2252 for (i = 0; i < n_parts; i++)
2253 {
2254 if (i == 0)
2255 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2256 else
2257 value >>= GET_MODE_BITSIZE (part_mode);
2258
2259 if ((value & part_mask) != (def & part_mask))
2260 {
2261 if (part != -1)
2262 return -1;
2263 else
2264 part = i;
2265 }
2266 }
2267 return part == -1 ? -1 : n_parts - 1 - part;
2268 }
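
/* Example: for OP == 0x0000ffff00000000, MODE == DImode,
   PART_MODE == HImode and DEF == 0 exactly one halfword differs from
   DEF.  Counting halfwords from the most significant end it is part 1,
   so the function returns 1.  */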
2269
2270 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2271 bits and no other bits are set in (the lower SIZE bits of) IN.
2272
2273 PSTART and PEND can be used to obtain the start and end
2274 position (inclusive) of the bitfield relative to 64
2275 bits. *PSTART / *PEND gives the position of the first/last bit
2276 of the bitfield counting from the highest order bit starting
2277 with zero. */
2278
2279 bool
2280 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2281 int *pstart, int *pend)
2282 {
2283 int start;
2284 int end = -1;
2285 int lowbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - 1;
2286 int highbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - size;
2287 unsigned HOST_WIDE_INT bitmask = 1ULL;
2288
2289 gcc_assert (!!pstart == !!pend);
2290 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2291 if (end == -1)
2292 {
2293 /* Look for the rightmost bit of a contiguous range of ones. */
2294 if (bitmask & in)
2295 /* Found it. */
2296 end = start;
2297 }
2298 else
2299 {
2300 /* Look for the first zero bit after the range of ones. */
2301 if (! (bitmask & in))
2302 /* Found it. */
2303 break;
2304 }
2305 /* We're one past the last one-bit. */
2306 start++;
2307
2308 if (end == -1)
2309 /* No one bits found. */
2310 return false;
2311
2312 if (start > highbit)
2313 {
2314 unsigned HOST_WIDE_INT mask;
2315
2316 /* Calculate a mask for all bits beyond the contiguous bits. */
2317 mask = ((~(0ULL) >> highbit) & (~(0ULL) << (lowbit - start + 1)));
2318 if (mask & in)
2319 /* There are more bits set beyond the first range of one bits. */
2320 return false;
2321 }
2322
2323 if (pstart)
2324 {
2325 *pstart = start;
2326 *pend = end;
2327 }
2328
2329 return true;
2330 }
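
/* Example: IN == 0x0000000000ff0000 with SIZE == 64 is a contiguous
   bitmask; counting from the most significant bit as bit 0, the range
   covers bits 40 to 47, so *PSTART == 40 and *PEND == 47.  */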
2331
2332 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2333 if ~IN contains a contiguous bitfield. In that case, *END is <
2334 *START.
2335
2336 If WRAP_P is true, a bitmask that wraps around is also tested.
2337 When a wraparound occurs *START is greater than *END (in
2338 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2339 part of the range. If WRAP_P is false, no wraparound is
2340 tested. */
2341
2342 bool
2343 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2344 int size, int *start, int *end)
2345 {
2346 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2347 bool b;
2348
2349 gcc_assert (!!start == !!end);
2350 if ((in & ((~(0ULL)) >> (bs - size))) == 0)
2351 /* This cannot be expressed as a contiguous bitmask. Exit early because
2352 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2353 a valid bitmask. */
2354 return false;
2355 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2356 if (b)
2357 return true;
2358 if (! wrap_p)
2359 return false;
2360 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2361 if (b && start)
2362 {
2363 int s = *start;
2364 int e = *end;
2365
2366 gcc_assert (s >= 1);
2367 *start = ((e + 1) & (bs - 1));
2368 *end = ((s - 1 + bs) & (bs - 1));
2369 }
2370
2371 return b;
2372 }
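
/* Example of the wrapping case: IN == 0xff000000000000ff with
   SIZE == 64 and WRAP_P == true is accepted; the range wraps around
   the word boundary, yielding *START == 56 and *END == 7.  */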
2373
2374 /* Return true if OP contains the same contiguous bitfield in *all*
2375 its elements. START and END can be used to obtain the start and
2376 end position of the bitfield.
2377
2378 START/END give the position of the first/last bit of the bitfield
2379 counting from the lowest order bit starting with zero. In order to
2380 use these values for S/390 instructions this has to be converted to
2381 "bits big endian" style. */
2382
2383 bool
2384 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2385 {
2386 unsigned HOST_WIDE_INT mask;
2387 int size;
2388 rtx elt;
2389 bool b;
2390
2391 gcc_assert (!!start == !!end);
2392 if (!const_vec_duplicate_p (op, &elt)
2393 || !CONST_INT_P (elt))
2394 return false;
2395
2396 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2397
2398 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2399 if (size > 64)
2400 return false;
2401
2402 mask = UINTVAL (elt);
2403
2404 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2405 if (b)
2406 {
2407 if (start)
2408 {
2409 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2410
2411 *start -= (bs - size);
2412 *end -= (bs - size);
2413 }
2414 return true;
2415 }
2416 else
2417 return false;
2418 }
2419
2420 /* Return true if OP consists only of byte chunks that are either 0 or
2421 0xff. If MASK is non-NULL, a byte mask appropriate for the vector
2422 generate byte mask instruction is stored there. */
2423
2424 bool
2425 s390_bytemask_vector_p (rtx op, unsigned *mask)
2426 {
2427 int i;
2428 unsigned tmp_mask = 0;
2429 int nunit, unit_size;
2430
2431 if (!VECTOR_MODE_P (GET_MODE (op))
2432 || GET_CODE (op) != CONST_VECTOR
2433 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2434 return false;
2435
2436 nunit = GET_MODE_NUNITS (GET_MODE (op));
2437 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2438
2439 for (i = 0; i < nunit; i++)
2440 {
2441 unsigned HOST_WIDE_INT c;
2442 int j;
2443
2444 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2445 return false;
2446
2447 c = UINTVAL (XVECEXP (op, 0, i));
2448 for (j = 0; j < unit_size; j++)
2449 {
2450 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2451 return false;
2452 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2453 c = c >> BITS_PER_UNIT;
2454 }
2455 }
2456
2457 if (mask != NULL)
2458 *mask = tmp_mask;
2459
2460 return true;
2461 }
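
/* Example: for a V4SImode constant vector { 0xffffffff, 0, 0, 0xff }
   the function returns true and sets *MASK to 0xf001 -- one mask bit
   per byte of the 16-byte vector, with element 0 occupying the four
   most significant mask bits.  */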
2462
2463 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2464 equivalent to a shift followed by the AND. In particular, CONTIG
2465 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2466 for ROTL indicate a rotate to the right. */
2467
2468 bool
2469 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2470 {
2471 int start, end;
2472 bool ok;
2473
2474 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2475 gcc_assert (ok);
2476
2477 if (rotl >= 0)
2478 return (64 - end >= rotl);
2479 else
2480 {
2481 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2482 DImode. */
2483 rotl = -rotl + (64 - bitsize);
2484 return (start >= rotl);
2485 }
2486 }
2487
2488 /* Check whether we can (and want to) split a double-word
2489 move in mode MODE from SRC to DST into two single-word
2490 moves, moving the subword FIRST_SUBWORD first. */
2491
2492 bool
2493 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2494 {
2495 /* Floating point and vector registers cannot be split. */
2496 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2497 return false;
2498
2499 /* We don't need to split if operands are directly accessible. */
2500 if (s_operand (src, mode) || s_operand (dst, mode))
2501 return false;
2502
2503 /* Non-offsettable memory references cannot be split. */
2504 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2505 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2506 return false;
2507
2508 /* Moving the first subword must not clobber a register
2509 needed to move the second subword. */
2510 if (register_operand (dst, mode))
2511 {
2512 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2513 if (reg_overlap_mentioned_p (subreg, src))
2514 return false;
2515 }
2516
2517 return true;
2518 }
2519
2520 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2521 and [MEM2, MEM2 + SIZE] do overlap and false
2522 otherwise. */
2523
2524 bool
2525 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2526 {
2527 rtx addr1, addr2, addr_delta;
2528 HOST_WIDE_INT delta;
2529
2530 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2531 return true;
2532
2533 if (size == 0)
2534 return false;
2535
2536 addr1 = XEXP (mem1, 0);
2537 addr2 = XEXP (mem2, 0);
2538
2539 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2540
2541 /* This overlapping check is used by peepholes merging memory block operations.
2542 Overlapping operations would otherwise be recognized by the S/390 hardware
2543 and would fall back to a slower implementation. Allowing overlapping
2544 operations would lead to slow code but not to wrong code. Therefore we are
2545 somewhat optimistic if we cannot prove that the memory blocks are
2546 overlapping.
2547 That's why we return false here although this may accept operations on
2548 overlapping memory areas. */
2549 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2550 return false;
2551
2552 delta = INTVAL (addr_delta);
2553
2554 if (delta == 0
2555 || (delta > 0 && delta < size)
2556 || (delta < 0 && -delta < size))
2557 return true;
2558
2559 return false;
2560 }
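
/* Example: two MEMs addressed by (reg) and (plus (reg) (const_int 8))
   have a provable delta of 8; with SIZE == 16 they overlap and the
   function returns true, with SIZE == 8 it returns false.  */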
2561
2562 /* Check whether the address of memory reference MEM2 equals exactly
2563 the address of memory reference MEM1 plus DELTA. Return true if
2564 we can prove this to be the case, false otherwise. */
2565
2566 bool
2567 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2568 {
2569 rtx addr1, addr2, addr_delta;
2570
2571 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2572 return false;
2573
2574 addr1 = XEXP (mem1, 0);
2575 addr2 = XEXP (mem2, 0);
2576
2577 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2578 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2579 return false;
2580
2581 return true;
2582 }
2583
2584 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2585
2586 void
2587 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2588 rtx *operands)
2589 {
2590 machine_mode wmode = mode;
2591 rtx dst = operands[0];
2592 rtx src1 = operands[1];
2593 rtx src2 = operands[2];
2594 rtx op, clob, tem;
2595
2596 /* If we cannot handle the operation directly, use a temp register. */
2597 if (!s390_logical_operator_ok_p (operands))
2598 dst = gen_reg_rtx (mode);
2599
2600 /* QImode and HImode patterns make sense only if we have a destination
2601 in memory. Otherwise perform the operation in SImode. */
2602 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2603 wmode = SImode;
2604
2605 /* Widen operands if required. */
2606 if (mode != wmode)
2607 {
2608 if (GET_CODE (dst) == SUBREG
2609 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2610 dst = tem;
2611 else if (REG_P (dst))
2612 dst = gen_rtx_SUBREG (wmode, dst, 0);
2613 else
2614 dst = gen_reg_rtx (wmode);
2615
2616 if (GET_CODE (src1) == SUBREG
2617 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2618 src1 = tem;
2619 else if (GET_MODE (src1) != VOIDmode)
2620 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2621
2622 if (GET_CODE (src2) == SUBREG
2623 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2624 src2 = tem;
2625 else if (GET_MODE (src2) != VOIDmode)
2626 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2627 }
2628
2629 /* Emit the instruction. */
2630 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2631 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2632 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2633
2634 /* Fix up the destination if needed. */
2635 if (dst != operands[0])
2636 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2637 }
2638
2639 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2640
2641 bool
2642 s390_logical_operator_ok_p (rtx *operands)
2643 {
2644 /* If the destination operand is in memory, it needs to coincide
2645 with one of the source operands. After reload, it has to be
2646 the first source operand. */
2647 if (GET_CODE (operands[0]) == MEM)
2648 return rtx_equal_p (operands[0], operands[1])
2649 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2650
2651 return true;
2652 }
2653
2654 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2655 operand IMMOP to switch from SS to SI type instructions. */
2656
2657 void
2658 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2659 {
2660 int def = code == AND ? -1 : 0;
2661 HOST_WIDE_INT mask;
2662 int part;
2663
2664 gcc_assert (GET_CODE (*memop) == MEM);
2665 gcc_assert (!MEM_VOLATILE_P (*memop));
2666
2667 mask = s390_extract_part (*immop, QImode, def);
2668 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2669 gcc_assert (part >= 0);
2670
2671 *memop = adjust_address (*memop, QImode, part);
2672 *immop = gen_int_mode (mask, QImode);
2673 }
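
/* Example: an SImode AND of a memory operand with the constant
   0xffffff00 only changes the least significant byte.  The code above
   narrows it to a QImode AND of the byte at offset 3 with the
   immediate 0x00, which fits a single AND-immediate-to-storage
   instruction (e.g. NI).  */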
2674
2675
2676 /* How to allocate a 'struct machine_function'. */
2677
2678 static struct machine_function *
2679 s390_init_machine_status (void)
2680 {
2681 return ggc_cleared_alloc<machine_function> ();
2682 }
2683
2684 /* Map for smallest class containing reg regno. */
2685
2686 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2687 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2688 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2689 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2690 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2691 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2692 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2693 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2694 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2695 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2696 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2697 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2698 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2699 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2700 VEC_REGS, VEC_REGS /* 52 */
2701 };
2702
2703 /* Return attribute type of insn. */
2704
2705 static enum attr_type
2706 s390_safe_attr_type (rtx_insn *insn)
2707 {
2708 if (recog_memoized (insn) >= 0)
2709 return get_attr_type (insn);
2710 else
2711 return TYPE_NONE;
2712 }
2713
2714 /* Return true if DISP is a valid short displacement. */
2715
2716 static bool
2717 s390_short_displacement (rtx disp)
2718 {
2719 /* No displacement is OK. */
2720 if (!disp)
2721 return true;
2722
2723 /* Without the long displacement facility we don't need to
2724 distinguish between long and short displacements. */
2725 if (!TARGET_LONG_DISPLACEMENT)
2726 return true;
2727
2728 /* Integer displacement in range. */
2729 if (GET_CODE (disp) == CONST_INT)
2730 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2731
2732 /* GOT offset is not OK, the GOT can be large. */
2733 if (GET_CODE (disp) == CONST
2734 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2735 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2736 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2737 return false;
2738
2739 /* All other symbolic constants are literal pool references,
2740 which are OK as the literal pool must be small. */
2741 if (GET_CODE (disp) == CONST)
2742 return true;
2743
2744 return false;
2745 }
2746
2747 /* Decompose an RTL expression ADDR for a memory address into
2748 its components, returned in OUT.
2749
2750 Returns false if ADDR is not a valid memory address, true
2751 otherwise. If OUT is NULL, don't return the components,
2752 but check for validity only.
2753
2754 Note: Only addresses in canonical form are recognized.
2755 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2756 canonical form so that they will be recognized. */
2757
2758 static int
2759 s390_decompose_address (rtx addr, struct s390_address *out)
2760 {
2761 HOST_WIDE_INT offset = 0;
2762 rtx base = NULL_RTX;
2763 rtx indx = NULL_RTX;
2764 rtx disp = NULL_RTX;
2765 rtx orig_disp;
2766 bool pointer = false;
2767 bool base_ptr = false;
2768 bool indx_ptr = false;
2769 bool literal_pool = false;
2770
2771 /* We may need to substitute the literal pool base register into the address
2772 below. However, at this point we do not know which register is going to
2773 be used as base, so we substitute the arg pointer register. This is going
2774 to be treated as holding a pointer below -- it shouldn't be used for any
2775 other purpose. */
2776 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2777
2778 /* Decompose address into base + index + displacement. */
2779
2780 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2781 base = addr;
2782
2783 else if (GET_CODE (addr) == PLUS)
2784 {
2785 rtx op0 = XEXP (addr, 0);
2786 rtx op1 = XEXP (addr, 1);
2787 enum rtx_code code0 = GET_CODE (op0);
2788 enum rtx_code code1 = GET_CODE (op1);
2789
2790 if (code0 == REG || code0 == UNSPEC)
2791 {
2792 if (code1 == REG || code1 == UNSPEC)
2793 {
2794 indx = op0; /* index + base */
2795 base = op1;
2796 }
2797
2798 else
2799 {
2800 base = op0; /* base + displacement */
2801 disp = op1;
2802 }
2803 }
2804
2805 else if (code0 == PLUS)
2806 {
2807 indx = XEXP (op0, 0); /* index + base + disp */
2808 base = XEXP (op0, 1);
2809 disp = op1;
2810 }
2811
2812 else
2813 {
2814 return false;
2815 }
2816 }
2817
2818 else
2819 disp = addr; /* displacement */
2820
2821 /* Extract integer part of displacement. */
2822 orig_disp = disp;
2823 if (disp)
2824 {
2825 if (GET_CODE (disp) == CONST_INT)
2826 {
2827 offset = INTVAL (disp);
2828 disp = NULL_RTX;
2829 }
2830 else if (GET_CODE (disp) == CONST
2831 && GET_CODE (XEXP (disp, 0)) == PLUS
2832 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2833 {
2834 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2835 disp = XEXP (XEXP (disp, 0), 0);
2836 }
2837 }
2838
2839 /* Strip off CONST here to avoid special case tests later. */
2840 if (disp && GET_CODE (disp) == CONST)
2841 disp = XEXP (disp, 0);
2842
2843 /* We can convert literal pool addresses to
2844 displacements by basing them off the base register. */
2845 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2846 {
2847 /* Either base or index must be free to hold the base register. */
2848 if (!base)
2849 base = fake_pool_base, literal_pool = true;
2850 else if (!indx)
2851 indx = fake_pool_base, literal_pool = true;
2852 else
2853 return false;
2854
2855 /* Mark up the displacement. */
2856 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2857 UNSPEC_LTREL_OFFSET);
2858 }
2859
2860 /* Validate base register. */
2861 if (base)
2862 {
2863 if (GET_CODE (base) == UNSPEC)
2864 switch (XINT (base, 1))
2865 {
2866 case UNSPEC_LTREF:
2867 if (!disp)
2868 disp = gen_rtx_UNSPEC (Pmode,
2869 gen_rtvec (1, XVECEXP (base, 0, 0)),
2870 UNSPEC_LTREL_OFFSET);
2871 else
2872 return false;
2873
2874 base = XVECEXP (base, 0, 1);
2875 break;
2876
2877 case UNSPEC_LTREL_BASE:
2878 if (XVECLEN (base, 0) == 1)
2879 base = fake_pool_base, literal_pool = true;
2880 else
2881 base = XVECEXP (base, 0, 1);
2882 break;
2883
2884 default:
2885 return false;
2886 }
2887
2888 if (!REG_P (base) || GET_MODE (base) != Pmode)
2889 return false;
2890
2891 if (REGNO (base) == STACK_POINTER_REGNUM
2892 || REGNO (base) == FRAME_POINTER_REGNUM
2893 || ((reload_completed || reload_in_progress)
2894 && frame_pointer_needed
2895 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2896 || REGNO (base) == ARG_POINTER_REGNUM
2897 || (flag_pic
2898 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2899 pointer = base_ptr = true;
2900
2901 if ((reload_completed || reload_in_progress)
2902 && base == cfun->machine->base_reg)
2903 pointer = base_ptr = literal_pool = true;
2904 }
2905
2906 /* Validate index register. */
2907 if (indx)
2908 {
2909 if (GET_CODE (indx) == UNSPEC)
2910 switch (XINT (indx, 1))
2911 {
2912 case UNSPEC_LTREF:
2913 if (!disp)
2914 disp = gen_rtx_UNSPEC (Pmode,
2915 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2916 UNSPEC_LTREL_OFFSET);
2917 else
2918 return false;
2919
2920 indx = XVECEXP (indx, 0, 1);
2921 break;
2922
2923 case UNSPEC_LTREL_BASE:
2924 if (XVECLEN (indx, 0) == 1)
2925 indx = fake_pool_base, literal_pool = true;
2926 else
2927 indx = XVECEXP (indx, 0, 1);
2928 break;
2929
2930 default:
2931 return false;
2932 }
2933
2934 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2935 return false;
2936
2937 if (REGNO (indx) == STACK_POINTER_REGNUM
2938 || REGNO (indx) == FRAME_POINTER_REGNUM
2939 || ((reload_completed || reload_in_progress)
2940 && frame_pointer_needed
2941 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2942 || REGNO (indx) == ARG_POINTER_REGNUM
2943 || (flag_pic
2944 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2945 pointer = indx_ptr = true;
2946
2947 if ((reload_completed || reload_in_progress)
2948 && indx == cfun->machine->base_reg)
2949 pointer = indx_ptr = literal_pool = true;
2950 }
2951
2952 /* Prefer to use pointer as base, not index. */
2953 if (base && indx && !base_ptr
2954 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2955 {
2956 rtx tmp = base;
2957 base = indx;
2958 indx = tmp;
2959 }
2960
2961 /* Validate displacement. */
2962 if (!disp)
2963 {
2964 /* If virtual registers are involved, the displacement will change later
2965 anyway as the virtual registers get eliminated. This could make a
2966 valid displacement invalid, but it is more likely to make an invalid
2967 displacement valid, because we sometimes access the register save area
2968 via negative offsets to one of those registers.
2969 Thus we don't check the displacement for validity here. If after
2970 elimination the displacement turns out to be invalid after all,
2971 this is fixed up by reload in any case. */
2972 /* LRA always keeps displacements up to date and we need to know
2973 that the displacement is valid throughout LRA, not only at the
2974 final elimination. */
2975 if (lra_in_progress
2976 || (base != arg_pointer_rtx
2977 && indx != arg_pointer_rtx
2978 && base != return_address_pointer_rtx
2979 && indx != return_address_pointer_rtx
2980 && base != frame_pointer_rtx
2981 && indx != frame_pointer_rtx
2982 && base != virtual_stack_vars_rtx
2983 && indx != virtual_stack_vars_rtx))
2984 if (!DISP_IN_RANGE (offset))
2985 return false;
2986 }
2987 else
2988 {
2989 /* All the special cases are pointers. */
2990 pointer = true;
2991
2992 /* In the small-PIC case, the linker converts @GOT
2993 and @GOTNTPOFF offsets to possible displacements. */
2994 if (GET_CODE (disp) == UNSPEC
2995 && (XINT (disp, 1) == UNSPEC_GOT
2996 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2997 && flag_pic == 1)
2998 {
2999 ;
3000 }
3001
3002 /* Accept pool label offsets. */
3003 else if (GET_CODE (disp) == UNSPEC
3004 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
3005 ;
3006
3007 /* Accept literal pool references. */
3008 else if (GET_CODE (disp) == UNSPEC
3009 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3010 {
3011 /* In case CSE pulled a non literal pool reference out of
3012 the pool we have to reject the address. This is
3013 especially important when loading the GOT pointer on non
3014 zarch CPUs. In this case the literal pool contains an lt
3015 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3016 will most likely exceed the displacement. */
3017 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3018 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3019 return false;
3020
3021 orig_disp = gen_rtx_CONST (Pmode, disp);
3022 if (offset)
3023 {
3024 /* If we have an offset, make sure it does not
3025 exceed the size of the constant pool entry. */
3026 rtx sym = XVECEXP (disp, 0, 0);
3027 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3028 return false;
3029
3030 orig_disp = plus_constant (Pmode, orig_disp, offset);
3031 }
3032 }
3033
3034 else
3035 return false;
3036 }
3037
3038 if (!base && !indx)
3039 pointer = true;
3040
3041 if (out)
3042 {
3043 out->base = base;
3044 out->indx = indx;
3045 out->disp = orig_disp;
3046 out->pointer = pointer;
3047 out->literal_pool = literal_pool;
3048 }
3049
3050 return true;
3051 }
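
/* Example: the canonical address (plus (plus (reg %r1) (reg %r2))
   (const_int 100)) decomposes into indx == %r1, base == %r2 and
   disp == (const_int 100); the two registers are swapped if only %r1
   is known to carry a pointer.  */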
3052
3053 /* Decompose an RTL expression OP for an address style operand into its
3054 components, and return the base register in BASE and the offset in
3055 OFFSET. While OP looks like an address it is never supposed to be
3056 used as such.
3057
3058 Return true if OP is a valid address operand, false if not. */
3059
3060 bool
3061 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3062 HOST_WIDE_INT *offset)
3063 {
3064 rtx off = NULL_RTX;
3065
3066 /* We can have an integer constant, an address register,
3067 or a sum of the two. */
3068 if (CONST_SCALAR_INT_P (op))
3069 {
3070 off = op;
3071 op = NULL_RTX;
3072 }
3073 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3074 {
3075 off = XEXP (op, 1);
3076 op = XEXP (op, 0);
3077 }
3078 while (op && GET_CODE (op) == SUBREG)
3079 op = SUBREG_REG (op);
3080
3081 if (op && GET_CODE (op) != REG)
3082 return false;
3083
3084 if (offset)
3085 {
3086 if (off == NULL_RTX)
3087 *offset = 0;
3088 else if (CONST_INT_P (off))
3089 *offset = INTVAL (off);
3090 else if (CONST_WIDE_INT_P (off))
3091 /* The offset will be cut down to 12 bits anyway, so just take
3092 the lowest order chunk of the wide int. */
3093 *offset = CONST_WIDE_INT_ELT (off, 0);
3094 else
3095 gcc_unreachable ();
3096 }
3097 if (base)
3098 *base = op;
3099
3100 return true;
3101 }
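
/* Example: for OP == (plus (reg %r3) (const_int 7)) the function
   returns true with *BASE == (reg %r3) and *OFFSET == 7; a plain
   (const_int 7) yields *BASE == NULL_RTX and *OFFSET == 7.  */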
3102
3103
3104 /* Return true if OP is a valid address without an index register. */
3105
3106 bool
3107 s390_legitimate_address_without_index_p (rtx op)
3108 {
3109 struct s390_address addr;
3110
3111 if (!s390_decompose_address (XEXP (op, 0), &addr))
3112 return false;
3113 if (addr.indx)
3114 return false;
3115
3116 return true;
3117 }
3118
3119
3120 /* Return TRUE if ADDR is an operand valid for a load/store relative
3121 instruction. Be aware that the alignment of the operand needs to
3122 be checked separately.
3123 Valid addresses are single references or a sum of a reference and a
3124 constant integer. Return these parts in SYMREF and ADDEND. You can
3125 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3126 values. Literal pool references are *not* considered symbol
3127 references. */
3128
3129 static bool
3130 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3131 {
3132 HOST_WIDE_INT tmpaddend = 0;
3133
3134 if (GET_CODE (addr) == CONST)
3135 addr = XEXP (addr, 0);
3136
3137 if (GET_CODE (addr) == PLUS)
3138 {
3139 if (!CONST_INT_P (XEXP (addr, 1)))
3140 return false;
3141
3142 tmpaddend = INTVAL (XEXP (addr, 1));
3143 addr = XEXP (addr, 0);
3144 }
3145
3146 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3147 || (GET_CODE (addr) == UNSPEC
3148 && (XINT (addr, 1) == UNSPEC_GOTENT
3149 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3150 {
3151 if (symref)
3152 *symref = addr;
3153 if (addend)
3154 *addend = tmpaddend;
3155
3156 return true;
3157 }
3158 return false;
3159 }
3160
3161 /* Return true if the address in OP is valid for constraint letter C
3162 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3163 pool MEMs should be accepted. Only the Q, R, S, T constraint
3164 letters are allowed for C. */
3165
3166 static int
3167 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3168 {
3169 struct s390_address addr;
3170 bool decomposed = false;
3171
3172 /* This check makes sure that no symbolic address (except literal
3173 pool references) are accepted by the R or T constraints. */
3174 if (s390_loadrelative_operand_p (op, NULL, NULL))
3175 return 0;
3176
3177 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3178 if (!lit_pool_ok)
3179 {
3180 if (!s390_decompose_address (op, &addr))
3181 return 0;
3182 if (addr.literal_pool)
3183 return 0;
3184 decomposed = true;
3185 }
3186
3187 /* With reload, we sometimes get intermediate address forms that are
3188 actually invalid as-is, but we need to accept them in the most
3189 generic cases below ('R' or 'T'), since reload will in fact fix
3190 them up. LRA behaves differently here; we never see such forms,
3191 but on the other hand, we need to strictly reject every invalid
3192 address form. Perform this check right up front. */
3193 if (lra_in_progress)
3194 {
3195 if (!decomposed && !s390_decompose_address (op, &addr))
3196 return 0;
3197 decomposed = true;
3198 }
3199
3200 switch (c)
3201 {
3202 case 'Q': /* no index short displacement */
3203 if (!decomposed && !s390_decompose_address (op, &addr))
3204 return 0;
3205 if (addr.indx)
3206 return 0;
3207 if (!s390_short_displacement (addr.disp))
3208 return 0;
3209 break;
3210
3211 case 'R': /* with index short displacement */
3212 if (TARGET_LONG_DISPLACEMENT)
3213 {
3214 if (!decomposed && !s390_decompose_address (op, &addr))
3215 return 0;
3216 if (!s390_short_displacement (addr.disp))
3217 return 0;
3218 }
3219 /* Any invalid address here will be fixed up by reload,
3220 so accept it for the most generic constraint. */
3221 break;
3222
3223 case 'S': /* no index long displacement */
3224 if (!decomposed && !s390_decompose_address (op, &addr))
3225 return 0;
3226 if (addr.indx)
3227 return 0;
3228 break;
3229
3230 case 'T': /* with index long displacement */
3231 /* Any invalid address here will be fixed up by reload,
3232 so accept it for the most generic constraint. */
3233 break;
3234
3235 default:
3236 return 0;
3237 }
3238 return 1;
3239 }
3240
3241
3242 /* Evaluates constraint strings described by the regular expression
3243 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3244 the constraint given in STR, and 0 otherwise. */
3245
3246 int
3247 s390_mem_constraint (const char *str, rtx op)
3248 {
3249 char c = str[0];
3250
3251 switch (c)
3252 {
3253 case 'A':
3254 /* Check for offsettable variants of memory constraints. */
3255 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3256 return 0;
3257 if ((reload_completed || reload_in_progress)
3258 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3259 return 0;
3260 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3261 case 'B':
3262 /* Check for non-literal-pool variants of memory constraints. */
3263 if (!MEM_P (op))
3264 return 0;
3265 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3266 case 'Q':
3267 case 'R':
3268 case 'S':
3269 case 'T':
3270 if (GET_CODE (op) != MEM)
3271 return 0;
3272 return s390_check_qrst_address (c, XEXP (op, 0), true);
3273 case 'Y':
3274 /* Simply check for the basic form of a shift count. Reload will
3275 take care of making sure we have a proper base register. */
3276 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3277 return 0;
3278 break;
3279 case 'Z':
3280 return s390_check_qrst_address (str[1], op, true);
3281 default:
3282 return 0;
3283 }
3284 return 1;
3285 }
3286
3287
3288 /* Evaluates constraint strings starting with letter O. Input
3289 parameter C is the second letter following the "O" in the constraint
3290 string. Returns 1 if VALUE meets the respective constraint and 0
3291 otherwise. */
3292
3293 int
3294 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3295 {
3296 if (!TARGET_EXTIMM)
3297 return 0;
3298
3299 switch (c)
3300 {
3301 case 's':
3302 return trunc_int_for_mode (value, SImode) == value;
3303
3304 case 'p':
3305 return value == 0
3306 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3307
3308 case 'n':
3309 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3310
3311 default:
3312 gcc_unreachable ();
3313 }
3314 }
3315
3316
3317 /* Evaluates constraint strings starting with letter N. Parameter STR
3318 contains the letters following letter "N" in the constraint string.
3319 Returns true if VALUE matches the constraint. */
3320
3321 int
3322 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3323 {
3324 machine_mode mode, part_mode;
3325 int def;
3326 int part, part_goal;
3327
3328
3329 if (str[0] == 'x')
3330 part_goal = -1;
3331 else
3332 part_goal = str[0] - '0';
3333
3334 switch (str[1])
3335 {
3336 case 'Q':
3337 part_mode = QImode;
3338 break;
3339 case 'H':
3340 part_mode = HImode;
3341 break;
3342 case 'S':
3343 part_mode = SImode;
3344 break;
3345 default:
3346 return 0;
3347 }
3348
3349 switch (str[2])
3350 {
3351 case 'H':
3352 mode = HImode;
3353 break;
3354 case 'S':
3355 mode = SImode;
3356 break;
3357 case 'D':
3358 mode = DImode;
3359 break;
3360 default:
3361 return 0;
3362 }
3363
3364 switch (str[3])
3365 {
3366 case '0':
3367 def = 0;
3368 break;
3369 case 'F':
3370 def = -1;
3371 break;
3372 default:
3373 return 0;
3374 }
3375
3376 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3377 return 0;
3378
3379 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3380 if (part < 0)
3381 return 0;
3382 if (part_goal != -1 && part_goal != part)
3383 return 0;
3384
3385 return 1;
3386 }
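
/* Example: a constraint string such as "0HD0" (the letters following
   the leading "N") requires a DImode value whose only halfword
   differing from 0 is part 0, the most significant one;
   0x1234000000000000 matches while 0x0000123400000000 does not.  */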
3387
3388
3389 /* Returns true if the input parameter VALUE is a float zero. */
3390
3391 int
3392 s390_float_const_zero_p (rtx value)
3393 {
3394 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3395 && value == CONST0_RTX (GET_MODE (value)));
3396 }
3397
3398 /* Implement TARGET_REGISTER_MOVE_COST. */
3399
3400 static int
3401 s390_register_move_cost (machine_mode mode,
3402 reg_class_t from, reg_class_t to)
3403 {
3404 /* On s390, copy between fprs and gprs is expensive. */
3405
3406 /* It becomes somewhat faster having ldgr/lgdr. */
3407 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3408 {
3409 /* ldgr is single cycle. */
3410 if (reg_classes_intersect_p (from, GENERAL_REGS)
3411 && reg_classes_intersect_p (to, FP_REGS))
3412 return 1;
3413 /* lgdr needs 3 cycles. */
3414 if (reg_classes_intersect_p (to, GENERAL_REGS)
3415 && reg_classes_intersect_p (from, FP_REGS))
3416 return 3;
3417 }
3418
3419 /* Otherwise copying is done via memory. */
3420 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3421 && reg_classes_intersect_p (to, FP_REGS))
3422 || (reg_classes_intersect_p (from, FP_REGS)
3423 && reg_classes_intersect_p (to, GENERAL_REGS)))
3424 return 10;
3425
3426 return 1;
3427 }
3428
3429 /* Implement TARGET_MEMORY_MOVE_COST. */
3430
3431 static int
3432 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3433 reg_class_t rclass ATTRIBUTE_UNUSED,
3434 bool in ATTRIBUTE_UNUSED)
3435 {
3436 return 2;
3437 }
3438
3439 /* Compute a (partial) cost for rtx X. Return true if the complete
3440 cost has been computed, and false if subexpressions should be
3441 scanned. In either case, *TOTAL contains the cost result. The
3442 initial value of *TOTAL is the default value computed by
3443 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3444 code of the superexpression of x. */
3445
3446 static bool
3447 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3448 int opno ATTRIBUTE_UNUSED,
3449 int *total, bool speed ATTRIBUTE_UNUSED)
3450 {
3451 int code = GET_CODE (x);
3452 switch (code)
3453 {
3454 case CONST:
3455 case CONST_INT:
3456 case LABEL_REF:
3457 case SYMBOL_REF:
3458 case CONST_DOUBLE:
3459 case CONST_WIDE_INT:
3460 case MEM:
3461 *total = 0;
3462 return true;
3463
3464 case IOR:
3465 /* risbg */
3466 if (GET_CODE (XEXP (x, 0)) == AND
3467 && GET_CODE (XEXP (x, 1)) == ASHIFT
3468 && REG_P (XEXP (XEXP (x, 0), 0))
3469 && REG_P (XEXP (XEXP (x, 1), 0))
3470 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3471 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3472 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3473 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3474 {
3475 *total = COSTS_N_INSNS (2);
3476 return true;
3477 }
3478 /* fallthrough */
3479 case ASHIFT:
3480 case ASHIFTRT:
3481 case LSHIFTRT:
3482 case ROTATE:
3483 case ROTATERT:
3484 case AND:
3485 case XOR:
3486 case NEG:
3487 case NOT:
3488 *total = COSTS_N_INSNS (1);
3489 return false;
3490
3491 case PLUS:
3492 case MINUS:
3493 *total = COSTS_N_INSNS (1);
3494 return false;
3495
3496 case MULT:
3497 switch (mode)
3498 {
3499 case SImode:
3500 {
3501 rtx left = XEXP (x, 0);
3502 rtx right = XEXP (x, 1);
3503 if (GET_CODE (right) == CONST_INT
3504 && CONST_OK_FOR_K (INTVAL (right)))
3505 *total = s390_cost->mhi;
3506 else if (GET_CODE (left) == SIGN_EXTEND)
3507 *total = s390_cost->mh;
3508 else
3509 *total = s390_cost->ms; /* msr, ms, msy */
3510 break;
3511 }
3512 case DImode:
3513 {
3514 rtx left = XEXP (x, 0);
3515 rtx right = XEXP (x, 1);
3516 if (TARGET_ZARCH)
3517 {
3518 if (GET_CODE (right) == CONST_INT
3519 && CONST_OK_FOR_K (INTVAL (right)))
3520 *total = s390_cost->mghi;
3521 else if (GET_CODE (left) == SIGN_EXTEND)
3522 *total = s390_cost->msgf;
3523 else
3524 *total = s390_cost->msg; /* msgr, msg */
3525 }
3526 else /* TARGET_31BIT */
3527 {
3528 if (GET_CODE (left) == SIGN_EXTEND
3529 && GET_CODE (right) == SIGN_EXTEND)
3530 /* mulsidi case: mr, m */
3531 *total = s390_cost->m;
3532 else if (GET_CODE (left) == ZERO_EXTEND
3533 && GET_CODE (right) == ZERO_EXTEND
3534 && TARGET_CPU_ZARCH)
3535 /* umulsidi case: ml, mlr */
3536 *total = s390_cost->ml;
3537 else
3538 /* Complex calculation is required. */
3539 *total = COSTS_N_INSNS (40);
3540 }
3541 break;
3542 }
3543 case SFmode:
3544 case DFmode:
3545 *total = s390_cost->mult_df;
3546 break;
3547 case TFmode:
3548 *total = s390_cost->mxbr;
3549 break;
3550 default:
3551 return false;
3552 }
3553 return false;
3554
3555 case FMA:
3556 switch (mode)
3557 {
3558 case DFmode:
3559 *total = s390_cost->madbr;
3560 break;
3561 case SFmode:
3562 *total = s390_cost->maebr;
3563 break;
3564 default:
3565 return false;
3566 }
3567 /* A negation in the third argument is free: FMSUB. */
3568 if (GET_CODE (XEXP (x, 2)) == NEG)
3569 {
3570 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3571 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3572 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3573 return true;
3574 }
3575 return false;
3576
3577 case UDIV:
3578 case UMOD:
3579 if (mode == TImode) /* 128 bit division */
3580 *total = s390_cost->dlgr;
3581 else if (mode == DImode)
3582 {
3583 rtx right = XEXP (x, 1);
3584 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3585 *total = s390_cost->dlr;
3586 else /* 64 by 64 bit division */
3587 *total = s390_cost->dlgr;
3588 }
3589 else if (mode == SImode) /* 32 bit division */
3590 *total = s390_cost->dlr;
3591 return false;
3592
3593 case DIV:
3594 case MOD:
3595 if (mode == DImode)
3596 {
3597 rtx right = XEXP (x, 1);
3598 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3599 if (TARGET_ZARCH)
3600 *total = s390_cost->dsgfr;
3601 else
3602 *total = s390_cost->dr;
3603 else /* 64 by 64 bit division */
3604 *total = s390_cost->dsgr;
3605 }
3606 else if (mode == SImode) /* 32 bit division */
3607 *total = s390_cost->dlr;
3608 else if (mode == SFmode)
3609 {
3610 *total = s390_cost->debr;
3611 }
3612 else if (mode == DFmode)
3613 {
3614 *total = s390_cost->ddbr;
3615 }
3616 else if (mode == TFmode)
3617 {
3618 *total = s390_cost->dxbr;
3619 }
3620 return false;
3621
3622 case SQRT:
3623 if (mode == SFmode)
3624 *total = s390_cost->sqebr;
3625 else if (mode == DFmode)
3626 *total = s390_cost->sqdbr;
3627 else /* TFmode */
3628 *total = s390_cost->sqxbr;
3629 return false;
3630
3631 case SIGN_EXTEND:
3632 case ZERO_EXTEND:
3633 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3634 || outer_code == PLUS || outer_code == MINUS
3635 || outer_code == COMPARE)
3636 *total = 0;
3637 return false;
3638
3639 case COMPARE:
3640 *total = COSTS_N_INSNS (1);
3641 if (GET_CODE (XEXP (x, 0)) == AND
3642 && GET_CODE (XEXP (x, 1)) == CONST_INT
3643 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3644 {
3645 rtx op0 = XEXP (XEXP (x, 0), 0);
3646 rtx op1 = XEXP (XEXP (x, 0), 1);
3647 rtx op2 = XEXP (x, 1);
3648
3649 if (memory_operand (op0, GET_MODE (op0))
3650 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3651 return true;
3652 if (register_operand (op0, GET_MODE (op0))
3653 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3654 return true;
3655 }
3656 return false;
3657
3658 default:
3659 return false;
3660 }
3661 }
3662
3663 /* Return the cost of an address rtx ADDR. */
3664
3665 static int
3666 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3667 addr_space_t as ATTRIBUTE_UNUSED,
3668 bool speed ATTRIBUTE_UNUSED)
3669 {
3670 struct s390_address ad;
3671 if (!s390_decompose_address (addr, &ad))
3672 return 1000;
3673
3674 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3675 }
3676
3677 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3678 static int
3679 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3680 tree vectype,
3681 int misalign ATTRIBUTE_UNUSED)
3682 {
3683 switch (type_of_cost)
3684 {
3685 case scalar_stmt:
3686 case scalar_load:
3687 case scalar_store:
3688 case vector_stmt:
3689 case vector_load:
3690 case vector_store:
3691 case vec_to_scalar:
3692 case scalar_to_vec:
3693 case cond_branch_not_taken:
3694 case vec_perm:
3695 case vec_promote_demote:
3696 case unaligned_load:
3697 case unaligned_store:
3698 return 1;
3699
3700 case cond_branch_taken:
3701 return 3;
3702
3703 case vec_construct:
3704 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3705
3706 default:
3707 gcc_unreachable ();
3708 }
3709 }
3710
3711 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3712 otherwise return 0. */
3713
3714 int
3715 tls_symbolic_operand (rtx op)
3716 {
3717 if (GET_CODE (op) != SYMBOL_REF)
3718 return 0;
3719 return SYMBOL_REF_TLS_MODEL (op);
3720 }
3721 \f
3722 /* Split DImode access register reference REG (on 64-bit) into its constituent
3723 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3724 gen_highpart cannot be used as they assume all registers are word-sized,
3725 while our access registers have only half that size. */
3726
3727 void
3728 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3729 {
3730 gcc_assert (TARGET_64BIT);
3731 gcc_assert (ACCESS_REG_P (reg));
3732 gcc_assert (GET_MODE (reg) == DImode);
3733 gcc_assert (!(REGNO (reg) & 1));
3734
3735 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3736 *hi = gen_rtx_REG (SImode, REGNO (reg));
3737 }
3738
3739 /* Return true if OP contains a symbol reference. */
3740
3741 bool
3742 symbolic_reference_mentioned_p (rtx op)
3743 {
3744 const char *fmt;
3745 int i;
3746
3747 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3748 return 1;
3749
3750 fmt = GET_RTX_FORMAT (GET_CODE (op));
3751 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3752 {
3753 if (fmt[i] == 'E')
3754 {
3755 int j;
3756
3757 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3758 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3759 return 1;
3760 }
3761
3762 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3763 return 1;
3764 }
3765
3766 return 0;
3767 }
3768
3769 /* Return true if OP contains a reference to a thread-local symbol. */
3770
3771 bool
3772 tls_symbolic_reference_mentioned_p (rtx op)
3773 {
3774 const char *fmt;
3775 int i;
3776
3777 if (GET_CODE (op) == SYMBOL_REF)
3778 return tls_symbolic_operand (op);
3779
3780 fmt = GET_RTX_FORMAT (GET_CODE (op));
3781 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3782 {
3783 if (fmt[i] == 'E')
3784 {
3785 int j;
3786
3787 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3788 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3789 return true;
3790 }
3791
3792 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3793 return true;
3794 }
3795
3796 return false;
3797 }
3798
3799
3800 /* Return true if OP is a legitimate general operand when
3801 generating PIC code. It is given that flag_pic is on
3802 and that OP satisfies CONSTANT_P. */
3803
3804 int
3805 legitimate_pic_operand_p (rtx op)
3806 {
3807 /* Accept all non-symbolic constants. */
3808 if (!SYMBOLIC_CONST (op))
3809 return 1;
3810
3811 /* Reject everything else; must be handled
3812 via emit_symbolic_move. */
3813 return 0;
3814 }
3815
3816 /* Returns true if the constant value OP is a legitimate general operand.
3817 It is given that OP satisfies CONSTANT_P. */
3818
3819 static bool
3820 s390_legitimate_constant_p (machine_mode mode, rtx op)
3821 {
3822 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3823 {
3824 if (GET_MODE_SIZE (mode) != 16)
3825 return 0;
3826
3827 if (!satisfies_constraint_j00 (op)
3828 && !satisfies_constraint_jm1 (op)
3829 && !satisfies_constraint_jKK (op)
3830 && !satisfies_constraint_jxx (op)
3831 && !satisfies_constraint_jyy (op))
3832 return 0;
3833 }
3834
3835 /* Accept all non-symbolic constants. */
3836 if (!SYMBOLIC_CONST (op))
3837 return 1;
3838
3839 /* Accept immediate LARL operands. */
3840 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3841 return 1;
3842
3843 /* Thread-local symbols are never legal constants. This is
3844 so that emit_call knows that computing such addresses
3845 might require a function call. */
3846 if (TLS_SYMBOLIC_CONST (op))
3847 return 0;
3848
3849 /* In the PIC case, symbolic constants must *not* be
3850 forced into the literal pool. We accept them here,
3851 so that they will be handled by emit_symbolic_move. */
3852 if (flag_pic)
3853 return 1;
3854
3855 /* All remaining non-PIC symbolic constants are
3856 forced into the literal pool. */
3857 return 0;
3858 }
3859
3860 /* Determine if it's legal to put X into the constant pool. This
3861 is not possible if X contains the address of a symbol that is
3862 not constant (TLS) or not known at final link time (PIC). */
3863
3864 static bool
3865 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3866 {
3867 switch (GET_CODE (x))
3868 {
3869 case CONST_INT:
3870 case CONST_DOUBLE:
3871 case CONST_WIDE_INT:
3872 case CONST_VECTOR:
3873 /* Accept all non-symbolic constants. */
3874 return false;
3875
3876 case LABEL_REF:
3877 /* Labels are OK iff we are non-PIC. */
3878 return flag_pic != 0;
3879
3880 case SYMBOL_REF:
3881 /* 'Naked' TLS symbol references are never OK,
3882 non-TLS symbols are OK iff we are non-PIC. */
3883 if (tls_symbolic_operand (x))
3884 return true;
3885 else
3886 return flag_pic != 0;
3887
3888 case CONST:
3889 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3890 case PLUS:
3891 case MINUS:
3892 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3893 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3894
3895 case UNSPEC:
3896 switch (XINT (x, 1))
3897 {
3898 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3899 case UNSPEC_LTREL_OFFSET:
3900 case UNSPEC_GOT:
3901 case UNSPEC_GOTOFF:
3902 case UNSPEC_PLTOFF:
3903 case UNSPEC_TLSGD:
3904 case UNSPEC_TLSLDM:
3905 case UNSPEC_NTPOFF:
3906 case UNSPEC_DTPOFF:
3907 case UNSPEC_GOTNTPOFF:
3908 case UNSPEC_INDNTPOFF:
3909 return false;
3910
3911 /* If the literal pool shares the code section, we put
3912 execute template placeholders into the pool as well. */
3913 case UNSPEC_INSN:
3914 return TARGET_CPU_ZARCH;
3915
3916 default:
3917 return true;
3918 }
3919 break;
3920
3921 default:
3922 gcc_unreachable ();
3923 }
3924 }
3925
3926 /* Returns true if the constant value OP is a legitimate general
3927    operand during and after reload.  The difference from
3928    legitimate_constant_p is that this function will not accept
3929    a constant that would need to be forced into the literal pool
3930    before it can be used as an operand.
3931 This function accepts all constants which can be loaded directly
3932 into a GPR. */
3933
3934 bool
3935 legitimate_reload_constant_p (rtx op)
3936 {
3937 /* Accept la(y) operands. */
3938 if (GET_CODE (op) == CONST_INT
3939 && DISP_IN_RANGE (INTVAL (op)))
3940 return true;
3941
3942 /* Accept l(g)hi/l(g)fi operands. */
3943 if (GET_CODE (op) == CONST_INT
3944 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3945 return true;
3946
3947 /* Accept lliXX operands. */
3948 if (TARGET_ZARCH
3949 && GET_CODE (op) == CONST_INT
3950 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3951 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3952 return true;
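  /* Illustrative example: on 64-bit targets the constant 0xffff0000 has
     only one non-zero 16-bit halfword and can be loaded with a single
     llilh, so it is accepted here, while 0x00010001 touches two
     halfwords and falls through to the checks below.  */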
3953
3954 if (TARGET_EXTIMM
3955 && GET_CODE (op) == CONST_INT
3956 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3957 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3958 return true;
3959
3960 /* Accept larl operands. */
3961 if (TARGET_CPU_ZARCH
3962 && larl_operand (op, VOIDmode))
3963 return true;
3964
3965 /* Accept floating-point zero operands that fit into a single GPR. */
3966 if (GET_CODE (op) == CONST_DOUBLE
3967 && s390_float_const_zero_p (op)
3968 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3969 return true;
3970
3971 /* Accept double-word operands that can be split. */
3972 if (GET_CODE (op) == CONST_WIDE_INT
3973 || (GET_CODE (op) == CONST_INT
3974 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3975 {
3976 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3977 rtx hi = operand_subword (op, 0, 0, dword_mode);
3978 rtx lo = operand_subword (op, 1, 0, dword_mode);
3979 return legitimate_reload_constant_p (hi)
3980 && legitimate_reload_constant_p (lo);
3981 }
3982
3983 /* Everything else cannot be handled without reload. */
3984 return false;
3985 }
3986
3987 /* Returns true if the constant value OP is a legitimate fp operand
3988 during and after reload.
3989 This function accepts all constants which can be loaded directly
3990 into an FPR. */
3991
3992 static bool
3993 legitimate_reload_fp_constant_p (rtx op)
3994 {
3995 /* Accept floating-point zero operands if the load zero instruction
3996 can be used. Prior to z196 the load fp zero instruction caused a
3997 performance penalty if the result is used as BFP number. */
3998 if (TARGET_Z196
3999 && GET_CODE (op) == CONST_DOUBLE
4000 && s390_float_const_zero_p (op))
4001 return true;
4002
4003 return false;
4004 }
4005
4006 /* Returns true if the constant value OP is a legitimate vector operand
4007 during and after reload.
4008 This function accepts all constants which can be loaded directly
4009    into a VR.  */
4010
4011 static bool
4012 legitimate_reload_vector_constant_p (rtx op)
4013 {
4014 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4015 && (satisfies_constraint_j00 (op)
4016 || satisfies_constraint_jm1 (op)
4017 || satisfies_constraint_jKK (op)
4018 || satisfies_constraint_jxx (op)
4019 || satisfies_constraint_jyy (op)))
4020 return true;
4021
4022 return false;
4023 }
4024
4025 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4026 return the class of reg to actually use. */
4027
4028 static reg_class_t
4029 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4030 {
4031 switch (GET_CODE (op))
4032 {
4033 /* Constants we cannot reload into general registers
4034 must be forced into the literal pool. */
4035 case CONST_VECTOR:
4036 case CONST_DOUBLE:
4037 case CONST_INT:
4038 case CONST_WIDE_INT:
4039 if (reg_class_subset_p (GENERAL_REGS, rclass)
4040 && legitimate_reload_constant_p (op))
4041 return GENERAL_REGS;
4042 else if (reg_class_subset_p (ADDR_REGS, rclass)
4043 && legitimate_reload_constant_p (op))
4044 return ADDR_REGS;
4045 else if (reg_class_subset_p (FP_REGS, rclass)
4046 && legitimate_reload_fp_constant_p (op))
4047 return FP_REGS;
4048 else if (reg_class_subset_p (VEC_REGS, rclass)
4049 && legitimate_reload_vector_constant_p (op))
4050 return VEC_REGS;
4051
4052 return NO_REGS;
4053
4054 /* If a symbolic constant or a PLUS is reloaded,
4055 it is most likely being used as an address, so
4056        prefer ADDR_REGS.  If RCLASS is not a superset
4057 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4058 case CONST:
4059 /* Symrefs cannot be pushed into the literal pool with -fPIC
4060 so we *MUST NOT* return NO_REGS for these cases
4061 (s390_cannot_force_const_mem will return true).
4062
4063 On the other hand we MUST return NO_REGS for symrefs with
4064 invalid addend which might have been pushed to the literal
4065 pool (no -fPIC). Usually we would expect them to be
4066 handled via secondary reload but this does not happen if
4067 they are used as literal pool slot replacement in reload
4068 inheritance (see emit_input_reload_insns). */
4069 if (TARGET_CPU_ZARCH
4070 && GET_CODE (XEXP (op, 0)) == PLUS
4071 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4072 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4073 {
4074 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4075 return ADDR_REGS;
4076 else
4077 return NO_REGS;
4078 }
4079 /* fallthrough */
4080 case LABEL_REF:
4081 case SYMBOL_REF:
4082 if (!legitimate_reload_constant_p (op))
4083 return NO_REGS;
4084 /* fallthrough */
4085 case PLUS:
4086 /* load address will be used. */
4087 if (reg_class_subset_p (ADDR_REGS, rclass))
4088 return ADDR_REGS;
4089 else
4090 return NO_REGS;
4091
4092 default:
4093 break;
4094 }
4095
4096 return rclass;
4097 }
4098
4099 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4100 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4101 aligned. */
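   For example, sym + 8 passes for ALIGNMENT 4 (assuming sym itself is
   known to be 4-byte aligned), while sym + 6 fails because the addend
   is not a multiple of 4.  */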
4102
4103 bool
4104 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4105 {
4106 HOST_WIDE_INT addend;
4107 rtx symref;
4108
4109 /* The "required alignment" might be 0 (e.g. for certain structs
4110 accessed via BLKmode). Early abort in this case, as well as when
4111 an alignment > 8 is required. */
4112 if (alignment < 2 || alignment > 8)
4113 return false;
4114
4115 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4116 return false;
4117
4118 if (addend & (alignment - 1))
4119 return false;
4120
4121 if (GET_CODE (symref) == SYMBOL_REF)
4122 {
4123 /* We have load-relative instructions for 2-byte, 4-byte, and
4124 8-byte alignment so allow only these. */
4125 switch (alignment)
4126 {
4127 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4128 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4129 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4130 default: return false;
4131 }
4132 }
4133
4134 if (GET_CODE (symref) == UNSPEC
4135 && alignment <= UNITS_PER_LONG)
4136 return true;
4137
4138 return false;
4139 }
4140
4141 /* Move ADDR into REG using larl.  If ADDR isn't a valid larl
4142    operand, SCRATCH is used to load the even part of the address,
4143    and one is added afterwards.  */
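/* A sketch of the emitted sequences (illustrative only):
     sym + 4  ->  larl  REG, sym+4
     sym + 5  ->  larl  SCRATCH, sym+4
                  la    REG, 1(SCRATCH)  */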
4144
4145 void
4146 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4147 {
4148 HOST_WIDE_INT addend;
4149 rtx symref;
4150
4151 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4152 gcc_unreachable ();
4153
4154 if (!(addend & 1))
4155 /* Easy case. The addend is even so larl will do fine. */
4156 emit_move_insn (reg, addr);
4157 else
4158 {
4159 /* We can leave the scratch register untouched if the target
4160 register is a valid base register. */
4161 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4162 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4163 scratch = reg;
4164
4165 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4166 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4167
4168 if (addend != 1)
4169 emit_move_insn (scratch,
4170 gen_rtx_CONST (Pmode,
4171 gen_rtx_PLUS (Pmode, symref,
4172 GEN_INT (addend - 1))));
4173 else
4174 emit_move_insn (scratch, symref);
4175
4176 /* Increment the address using la in order to avoid clobbering cc. */
4177 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4178 }
4179 }
4180
4181 /* Generate what is necessary to move between REG and MEM using
4182 SCRATCH. The direction is given by TOMEM. */
4183
4184 void
4185 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4186 {
4187 /* Reload might have pulled a constant out of the literal pool.
4188 Force it back in. */
4189 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4190 || GET_CODE (mem) == CONST_WIDE_INT
4191 || GET_CODE (mem) == CONST_VECTOR
4192 || GET_CODE (mem) == CONST)
4193 mem = force_const_mem (GET_MODE (reg), mem);
4194
4195 gcc_assert (MEM_P (mem));
4196
4197 /* For a load from memory we can leave the scratch register
4198 untouched if the target register is a valid base register. */
4199 if (!tomem
4200 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4201 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4202 && GET_MODE (reg) == GET_MODE (scratch))
4203 scratch = reg;
4204
4205 /* Load address into scratch register. Since we can't have a
4206 secondary reload for a secondary reload we have to cover the case
4207 where larl would need a secondary reload here as well. */
4208 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4209
4210 /* Now we can use a standard load/store to do the move. */
4211 if (tomem)
4212 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4213 else
4214 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4215 }
4216
4217 /* Inform reload about cases where moving X with a mode MODE to a register in
4218 RCLASS requires an extra scratch or immediate register. Return the class
4219 needed for the immediate register. */
4220
4221 static reg_class_t
4222 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4223 machine_mode mode, secondary_reload_info *sri)
4224 {
4225 enum reg_class rclass = (enum reg_class) rclass_i;
4226
4227 /* Intermediate register needed. */
4228 if (reg_classes_intersect_p (CC_REGS, rclass))
4229 return GENERAL_REGS;
4230
4231 if (TARGET_VX)
4232 {
4233       /* The vst/vl vector move instructions allow only short
4234 	 displacements.  */
4235 if (MEM_P (x)
4236 && GET_CODE (XEXP (x, 0)) == PLUS
4237 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4238 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4239 && reg_class_subset_p (rclass, VEC_REGS)
4240 && (!reg_class_subset_p (rclass, FP_REGS)
4241 || (GET_MODE_SIZE (mode) > 8
4242 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4243 {
4244 if (in_p)
4245 sri->icode = (TARGET_64BIT ?
4246 CODE_FOR_reloaddi_la_in :
4247 CODE_FOR_reloadsi_la_in);
4248 else
4249 sri->icode = (TARGET_64BIT ?
4250 CODE_FOR_reloaddi_la_out :
4251 CODE_FOR_reloadsi_la_out);
4252 }
4253 }
4254
4255 if (TARGET_Z10)
4256 {
4257 HOST_WIDE_INT offset;
4258 rtx symref;
4259
4260 /* On z10 several optimizer steps may generate larl operands with
4261 an odd addend. */
4262 if (in_p
4263 && s390_loadrelative_operand_p (x, &symref, &offset)
4264 && mode == Pmode
4265 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4266 && (offset & 1) == 1)
4267 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4268 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4269
4270 /* Handle all the (mem (symref)) accesses we cannot use the z10
4271 instructions for. */
4272 if (MEM_P (x)
4273 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4274 && (mode == QImode
4275 || !reg_class_subset_p (rclass, GENERAL_REGS)
4276 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4277 || !s390_check_symref_alignment (XEXP (x, 0),
4278 GET_MODE_SIZE (mode))))
4279 {
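	  /* For each machine mode, pick the matching mode-specific reload
	     expander: the "toreg" pattern for loads and the "tomem"
	     pattern for stores, in the DImode or SImode address variant
	     depending on TARGET_64BIT.  */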
4280 #define __SECONDARY_RELOAD_CASE(M,m) \
4281 case M##mode: \
4282 if (TARGET_64BIT) \
4283 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4284 CODE_FOR_reload##m##di_tomem_z10; \
4285 else \
4286 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4287 CODE_FOR_reload##m##si_tomem_z10; \
4288 break;
4289
4290 switch (GET_MODE (x))
4291 {
4292 __SECONDARY_RELOAD_CASE (QI, qi);
4293 __SECONDARY_RELOAD_CASE (HI, hi);
4294 __SECONDARY_RELOAD_CASE (SI, si);
4295 __SECONDARY_RELOAD_CASE (DI, di);
4296 __SECONDARY_RELOAD_CASE (TI, ti);
4297 __SECONDARY_RELOAD_CASE (SF, sf);
4298 __SECONDARY_RELOAD_CASE (DF, df);
4299 __SECONDARY_RELOAD_CASE (TF, tf);
4300 __SECONDARY_RELOAD_CASE (SD, sd);
4301 __SECONDARY_RELOAD_CASE (DD, dd);
4302 __SECONDARY_RELOAD_CASE (TD, td);
4303 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4304 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4305 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4306 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4307 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4308 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4309 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4310 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4311 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4312 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4313 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4314 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4315 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4316 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4317 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4318 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4319 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4320 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4321 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4322 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4323 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4324 default:
4325 gcc_unreachable ();
4326 }
4327 #undef __SECONDARY_RELOAD_CASE
4328 }
4329 }
4330
4331 /* We need a scratch register when loading a PLUS expression which
4332 is not a legitimate operand of the LOAD ADDRESS instruction. */
4333   /* LRA can deal with the transformation of a plus op very well, so we
4334      don't need to prompt LRA in this case.  */
4335 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4336 sri->icode = (TARGET_64BIT ?
4337 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4338
4339 /* Performing a multiword move from or to memory we have to make sure the
4340 second chunk in memory is addressable without causing a displacement
4341 overflow. If that would be the case we calculate the address in
4342 a scratch register. */
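  /* Example (illustrative): an 8-byte access at base+4092 on a target
     without long displacements places its second 4-byte chunk at
     base+4096, just outside the 0..4095 displacement range, so the
     address is computed in a scratch register first.  */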
4343 if (MEM_P (x)
4344 && GET_CODE (XEXP (x, 0)) == PLUS
4345 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4346 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4347 + GET_MODE_SIZE (mode) - 1))
4348 {
4349       /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4350 	 in an s_operand address, since we may fall back to lm/stm.  So we only
4351 	 have to care about overflows in the b+i+d case.  */
4352 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4353 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4354 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4355 /* For FP_REGS no lm/stm is available so this check is triggered
4356 for displacement overflows in b+i+d and b+d like addresses. */
4357 || (reg_classes_intersect_p (FP_REGS, rclass)
4358 && s390_class_max_nregs (FP_REGS, mode) > 1))
4359 {
4360 if (in_p)
4361 sri->icode = (TARGET_64BIT ?
4362 CODE_FOR_reloaddi_la_in :
4363 CODE_FOR_reloadsi_la_in);
4364 else
4365 sri->icode = (TARGET_64BIT ?
4366 CODE_FOR_reloaddi_la_out :
4367 CODE_FOR_reloadsi_la_out);
4368 }
4369 }
4370
4371 /* A scratch address register is needed when a symbolic constant is
4372      copied to r0 when compiling with -fPIC.  In other cases the target
4373 register might be used as temporary (see legitimize_pic_address). */
4374 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4375 sri->icode = (TARGET_64BIT ?
4376 CODE_FOR_reloaddi_PIC_addr :
4377 CODE_FOR_reloadsi_PIC_addr);
4378
4379 /* Either scratch or no register needed. */
4380 return NO_REGS;
4381 }
4382
4383 /* Generate code to load SRC, which is PLUS that is not a
4384 legitimate operand for the LA instruction, into TARGET.
4385 SCRATCH may be used as scratch register. */
4386
4387 void
4388 s390_expand_plus_operand (rtx target, rtx src,
4389 rtx scratch)
4390 {
4391 rtx sum1, sum2;
4392 struct s390_address ad;
4393
4394 /* src must be a PLUS; get its two operands. */
4395 gcc_assert (GET_CODE (src) == PLUS);
4396 gcc_assert (GET_MODE (src) == Pmode);
4397
4398 /* Check if any of the two operands is already scheduled
4399 for replacement by reload. This can happen e.g. when
4400 float registers occur in an address. */
4401 sum1 = find_replacement (&XEXP (src, 0));
4402 sum2 = find_replacement (&XEXP (src, 1));
4403 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4404
4405 /* If the address is already strictly valid, there's nothing to do. */
4406 if (!s390_decompose_address (src, &ad)
4407 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4408 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4409 {
4410 /* Otherwise, one of the operands cannot be an address register;
4411 we reload its value into the scratch register. */
4412 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4413 {
4414 emit_move_insn (scratch, sum1);
4415 sum1 = scratch;
4416 }
4417 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4418 {
4419 emit_move_insn (scratch, sum2);
4420 sum2 = scratch;
4421 }
4422
4423 /* According to the way these invalid addresses are generated
4424 in reload.c, it should never happen (at least on s390) that
4425 *neither* of the PLUS components, after find_replacements
4426 was applied, is an address register. */
4427 if (sum1 == scratch && sum2 == scratch)
4428 {
4429 debug_rtx (src);
4430 gcc_unreachable ();
4431 }
4432
4433 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4434 }
4435
4436 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4437 is only ever performed on addresses, so we can mark the
4438 sum as legitimate for LA in any case. */
4439 s390_load_address (target, src);
4440 }
4441
4442
4443 /* Return true if ADDR is a valid memory address.
4444 STRICT specifies whether strict register checking applies. */
4445
4446 static bool
4447 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4448 {
4449 struct s390_address ad;
4450
4451 if (TARGET_Z10
4452 && larl_operand (addr, VOIDmode)
4453 && (mode == VOIDmode
4454 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4455 return true;
4456
4457 if (!s390_decompose_address (addr, &ad))
4458 return false;
4459
4460 if (strict)
4461 {
4462 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4463 return false;
4464
4465 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4466 return false;
4467 }
4468 else
4469 {
4470 if (ad.base
4471 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4472 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4473 return false;
4474
4475 if (ad.indx
4476 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4477 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4478 return false;
4479 }
4480 return true;
4481 }
4482
4483 /* Return true if OP is a valid operand for the LA instruction.
4484 In 31-bit, we need to prove that the result is used as an
4485 address, as LA performs only a 31-bit addition. */
4486
4487 bool
4488 legitimate_la_operand_p (rtx op)
4489 {
4490 struct s390_address addr;
4491 if (!s390_decompose_address (op, &addr))
4492 return false;
4493
4494 return (TARGET_64BIT || addr.pointer);
4495 }
4496
4497 /* Return true if it is valid *and* preferable to use LA to
4498 compute the sum of OP1 and OP2. */
4499
4500 bool
4501 preferred_la_operand_p (rtx op1, rtx op2)
4502 {
4503 struct s390_address addr;
4504
4505 if (op2 != const0_rtx)
4506 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4507
4508 if (!s390_decompose_address (op1, &addr))
4509 return false;
4510 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4511 return false;
4512 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4513 return false;
4514
4515 /* Avoid LA instructions with index register on z196; it is
4516 preferable to use regular add instructions when possible.
4517 Starting with zEC12 the la with index register is "uncracked"
4518 again. */
4519 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4520 return false;
4521
4522 if (!TARGET_64BIT && !addr.pointer)
4523 return false;
4524
4525 if (addr.pointer)
4526 return true;
4527
4528 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4529 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4530 return true;
4531
4532 return false;
4533 }
4534
4535 /* Emit a forced load-address operation to load SRC into DST.
4536 This will use the LOAD ADDRESS instruction even in situations
4537 where legitimate_la_operand_p (SRC) returns false. */
4538
4539 void
4540 s390_load_address (rtx dst, rtx src)
4541 {
4542 if (TARGET_64BIT)
4543 emit_move_insn (dst, src);
4544 else
4545 emit_insn (gen_force_la_31 (dst, src));
4546 }
4547
4548 /* Return a legitimate reference for ORIG (an address) using the
4549 register REG. If REG is 0, a new pseudo is generated.
4550
4551 There are two types of references that must be handled:
4552
4553 1. Global data references must load the address from the GOT, via
4554 the PIC reg. An insn is emitted to do this load, and the reg is
4555 returned.
4556
4557 2. Static data references, constant pool addresses, and code labels
4558 compute the address as an offset from the GOT, whose base is in
4559 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4560 differentiate them from global data objects. The returned
4561 address is the PIC reg + an unspec constant.
4562
4563 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4564 reg also appears in the address. */
4565
4566 rtx
4567 legitimize_pic_address (rtx orig, rtx reg)
4568 {
4569 rtx addr = orig;
4570 rtx addend = const0_rtx;
4571 rtx new_rtx = orig;
4572
4573 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4574
4575 if (GET_CODE (addr) == CONST)
4576 addr = XEXP (addr, 0);
4577
4578 if (GET_CODE (addr) == PLUS)
4579 {
4580 addend = XEXP (addr, 1);
4581 addr = XEXP (addr, 0);
4582 }
4583
4584 if ((GET_CODE (addr) == LABEL_REF
4585 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4586 || (GET_CODE (addr) == UNSPEC &&
4587 (XINT (addr, 1) == UNSPEC_GOTENT
4588 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4589 && GET_CODE (addend) == CONST_INT)
4590 {
4591 /* This can be locally addressed. */
4592
4593 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4594 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4595 gen_rtx_CONST (Pmode, addr) : addr);
4596
4597 if (TARGET_CPU_ZARCH
4598 && larl_operand (const_addr, VOIDmode)
4599 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4600 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4601 {
4602 if (INTVAL (addend) & 1)
4603 {
4604 /* LARL can't handle odd offsets, so emit a pair of LARL
4605 and LA. */
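	      /* A sketch of the resulting sequences (illustrative only):
	           sym + 5        ->  larl TEMP, sym
	                              la   REG, 5(TEMP)
	           sym + 0x100001 ->  larl TEMP, sym+0x100000
	                              la   REG, 1(TEMP)  */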
4606 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4607
4608 if (!DISP_IN_RANGE (INTVAL (addend)))
4609 {
4610 HOST_WIDE_INT even = INTVAL (addend) - 1;
4611 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4612 addr = gen_rtx_CONST (Pmode, addr);
4613 addend = const1_rtx;
4614 }
4615
4616 emit_move_insn (temp, addr);
4617 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4618
4619 if (reg != 0)
4620 {
4621 s390_load_address (reg, new_rtx);
4622 new_rtx = reg;
4623 }
4624 }
4625 else
4626 {
4627 /* If the offset is even, we can just use LARL. This
4628 will happen automatically. */
4629 }
4630 }
4631 else
4632 {
4633 /* No larl - Access local symbols relative to the GOT. */
4634
4635 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4636
4637 if (reload_in_progress || reload_completed)
4638 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4639
4640 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4641 if (addend != const0_rtx)
4642 addr = gen_rtx_PLUS (Pmode, addr, addend);
4643 addr = gen_rtx_CONST (Pmode, addr);
4644 addr = force_const_mem (Pmode, addr);
4645 emit_move_insn (temp, addr);
4646
4647 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4648 if (reg != 0)
4649 {
4650 s390_load_address (reg, new_rtx);
4651 new_rtx = reg;
4652 }
4653 }
4654 }
4655 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4656 {
4657 /* A non-local symbol reference without addend.
4658
4659 The symbol ref is wrapped into an UNSPEC to make sure the
4660 proper operand modifier (@GOT or @GOTENT) will be emitted.
4661 This will tell the linker to put the symbol into the GOT.
4662
4663 Additionally the code dereferencing the GOT slot is emitted here.
4664
4665 An addend to the symref needs to be added afterwards.
4666 legitimize_pic_address calls itself recursively to handle
4667 that case. So no need to do it here. */
4668
4669 if (reg == 0)
4670 reg = gen_reg_rtx (Pmode);
4671
4672 if (TARGET_Z10)
4673 {
4674 /* Use load relative if possible.
4675 lgrl <target>, sym@GOTENT */
4676 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4677 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4678 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4679
4680 emit_move_insn (reg, new_rtx);
4681 new_rtx = reg;
4682 }
4683 else if (flag_pic == 1)
4684 {
4685 /* Assume GOT offset is a valid displacement operand (< 4k
4686 or < 512k with z990). This is handled the same way in
4687 both 31- and 64-bit code (@GOT).
4688 lg <target>, sym@GOT(r12) */
4689
4690 if (reload_in_progress || reload_completed)
4691 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4692
4693 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4694 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4695 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4696 new_rtx = gen_const_mem (Pmode, new_rtx);
4697 emit_move_insn (reg, new_rtx);
4698 new_rtx = reg;
4699 }
4700 else if (TARGET_CPU_ZARCH)
4701 {
4702 /* If the GOT offset might be >= 4k, we determine the position
4703 of the GOT entry via a PC-relative LARL (@GOTENT).
4704 larl temp, sym@GOTENT
4705 lg <target>, 0(temp) */
4706
4707 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4708
4709 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4710 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4711
4712 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4713 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4714 emit_move_insn (temp, new_rtx);
4715
4716 new_rtx = gen_const_mem (Pmode, temp);
4717 emit_move_insn (reg, new_rtx);
4718
4719 new_rtx = reg;
4720 }
4721 else
4722 {
4723 /* If the GOT offset might be >= 4k, we have to load it
4724 from the literal pool (@GOT).
4725
4726 lg temp, lit-litbase(r13)
4727 lg <target>, 0(temp)
4728 lit: .long sym@GOT */
4729
4730 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4731
4732 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4733 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4734
4735 if (reload_in_progress || reload_completed)
4736 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4737
4738 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4739 addr = gen_rtx_CONST (Pmode, addr);
4740 addr = force_const_mem (Pmode, addr);
4741 emit_move_insn (temp, addr);
4742
4743 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4744 new_rtx = gen_const_mem (Pmode, new_rtx);
4745 emit_move_insn (reg, new_rtx);
4746 new_rtx = reg;
4747 }
4748 }
4749 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4750 {
4751 gcc_assert (XVECLEN (addr, 0) == 1);
4752 switch (XINT (addr, 1))
4753 {
4754 	  /* These UNSPECs address symbols (or PLT slots) relative to the GOT
4755 	     (not GOT slots!).  In general this will exceed the
4756 	     displacement range, so these values belong in the literal
4757 	     pool.  */
4758 case UNSPEC_GOTOFF:
4759 case UNSPEC_PLTOFF:
4760 new_rtx = force_const_mem (Pmode, orig);
4761 break;
4762
4763 /* For -fPIC the GOT size might exceed the displacement
4764 range so make sure the value is in the literal pool. */
4765 case UNSPEC_GOT:
4766 if (flag_pic == 2)
4767 new_rtx = force_const_mem (Pmode, orig);
4768 break;
4769
4770 /* For @GOTENT larl is used. This is handled like local
4771 symbol refs. */
4772 case UNSPEC_GOTENT:
4773 gcc_unreachable ();
4774 break;
4775
4776 /* @PLT is OK as is on 64-bit, must be converted to
4777 GOT-relative @PLTOFF on 31-bit. */
4778 case UNSPEC_PLT:
4779 if (!TARGET_CPU_ZARCH)
4780 {
4781 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4782
4783 if (reload_in_progress || reload_completed)
4784 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4785
4786 addr = XVECEXP (addr, 0, 0);
4787 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4788 UNSPEC_PLTOFF);
4789 if (addend != const0_rtx)
4790 addr = gen_rtx_PLUS (Pmode, addr, addend);
4791 addr = gen_rtx_CONST (Pmode, addr);
4792 addr = force_const_mem (Pmode, addr);
4793 emit_move_insn (temp, addr);
4794
4795 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4796 if (reg != 0)
4797 {
4798 s390_load_address (reg, new_rtx);
4799 new_rtx = reg;
4800 }
4801 }
4802 else
4803 	    /* On 64-bit, larl can be used.  This case is handled like
4804 	       local symbol refs.  */
4805 gcc_unreachable ();
4806 break;
4807
4808 /* Everything else cannot happen. */
4809 default:
4810 gcc_unreachable ();
4811 }
4812 }
4813 else if (addend != const0_rtx)
4814 {
4815 /* Otherwise, compute the sum. */
4816
4817 rtx base = legitimize_pic_address (addr, reg);
4818 new_rtx = legitimize_pic_address (addend,
4819 base == reg ? NULL_RTX : reg);
4820 if (GET_CODE (new_rtx) == CONST_INT)
4821 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4822 else
4823 {
4824 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4825 {
4826 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4827 new_rtx = XEXP (new_rtx, 1);
4828 }
4829 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4830 }
4831
4832 if (GET_CODE (new_rtx) == CONST)
4833 new_rtx = XEXP (new_rtx, 0);
4834 new_rtx = force_operand (new_rtx, 0);
4835 }
4836
4837 return new_rtx;
4838 }
4839
4840 /* Load the thread pointer into a register. */
4841
4842 rtx
4843 s390_get_thread_pointer (void)
4844 {
4845 rtx tp = gen_reg_rtx (Pmode);
4846
4847 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4848 mark_reg_pointer (tp, BITS_PER_WORD);
4849
4850 return tp;
4851 }
4852
4853 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4854 in s390_tls_symbol which always refers to __tls_get_offset.
4855    The returned offset is written to RESULT_REG and a USE rtx is
4856 generated for TLS_CALL. */
4857
4858 static GTY(()) rtx s390_tls_symbol;
4859
4860 static void
4861 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4862 {
4863 rtx insn;
4864
4865 if (!flag_pic)
4866 emit_insn (s390_load_got ());
4867
4868 if (!s390_tls_symbol)
4869 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4870
4871 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4872 gen_rtx_REG (Pmode, RETURN_REGNUM));
4873
4874 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4875 RTL_CONST_CALL_P (insn) = 1;
4876 }
4877
4878 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4879 this (thread-local) address. REG may be used as temporary. */
4880
4881 static rtx
4882 legitimize_tls_address (rtx addr, rtx reg)
4883 {
4884 rtx new_rtx, tls_call, temp, base, r2;
4885 rtx_insn *insn;
4886
4887 if (GET_CODE (addr) == SYMBOL_REF)
4888 switch (tls_symbolic_operand (addr))
4889 {
4890 case TLS_MODEL_GLOBAL_DYNAMIC:
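      /* General-dynamic model: load the @TLSGD constant for ADDR from the
	 literal pool into %r2, call __tls_get_offset and add the returned
	 offset to the thread pointer.  The call is wrapped in a libcall
	 block with ADDR@NTPOFF as equivalent value so the whole sequence
	 can be CSEd.  */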
4891 start_sequence ();
4892 r2 = gen_rtx_REG (Pmode, 2);
4893 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4894 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4895 new_rtx = force_const_mem (Pmode, new_rtx);
4896 emit_move_insn (r2, new_rtx);
4897 s390_emit_tls_call_insn (r2, tls_call);
4898 insn = get_insns ();
4899 end_sequence ();
4900
4901 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4902 temp = gen_reg_rtx (Pmode);
4903 emit_libcall_block (insn, temp, r2, new_rtx);
4904
4905 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4906 if (reg != 0)
4907 {
4908 s390_load_address (reg, new_rtx);
4909 new_rtx = reg;
4910 }
4911 break;
4912
4913 case TLS_MODEL_LOCAL_DYNAMIC:
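      /* Local-dynamic model: call __tls_get_offset with the module's
	 @TLSLDM constant to obtain the base of this module's TLS block
	 relative to the thread pointer, then add ADDR's @DTPOFF offset
	 loaded from the literal pool.  */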
4914 start_sequence ();
4915 r2 = gen_rtx_REG (Pmode, 2);
4916 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4917 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4918 new_rtx = force_const_mem (Pmode, new_rtx);
4919 emit_move_insn (r2, new_rtx);
4920 s390_emit_tls_call_insn (r2, tls_call);
4921 insn = get_insns ();
4922 end_sequence ();
4923
4924 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4925 temp = gen_reg_rtx (Pmode);
4926 emit_libcall_block (insn, temp, r2, new_rtx);
4927
4928 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4929 base = gen_reg_rtx (Pmode);
4930 s390_load_address (base, new_rtx);
4931
4932 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4933 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4934 new_rtx = force_const_mem (Pmode, new_rtx);
4935 temp = gen_reg_rtx (Pmode);
4936 emit_move_insn (temp, new_rtx);
4937
4938 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4939 if (reg != 0)
4940 {
4941 s390_load_address (reg, new_rtx);
4942 new_rtx = reg;
4943 }
4944 break;
4945
4946 case TLS_MODEL_INITIAL_EXEC:
4947 if (flag_pic == 1)
4948 {
4949 /* Assume GOT offset < 4k. This is handled the same way
4950 in both 31- and 64-bit code. */
4951
4952 if (reload_in_progress || reload_completed)
4953 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4954
4955 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4956 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4957 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4958 new_rtx = gen_const_mem (Pmode, new_rtx);
4959 temp = gen_reg_rtx (Pmode);
4960 emit_move_insn (temp, new_rtx);
4961 }
4962 else if (TARGET_CPU_ZARCH)
4963 {
4964 /* If the GOT offset might be >= 4k, we determine the position
4965 of the GOT entry via a PC-relative LARL. */
4966
4967 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4968 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4969 temp = gen_reg_rtx (Pmode);
4970 emit_move_insn (temp, new_rtx);
4971
4972 new_rtx = gen_const_mem (Pmode, temp);
4973 temp = gen_reg_rtx (Pmode);
4974 emit_move_insn (temp, new_rtx);
4975 }
4976 else if (flag_pic)
4977 {
4978 /* If the GOT offset might be >= 4k, we have to load it
4979 from the literal pool. */
4980
4981 if (reload_in_progress || reload_completed)
4982 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4983
4984 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4985 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4986 new_rtx = force_const_mem (Pmode, new_rtx);
4987 temp = gen_reg_rtx (Pmode);
4988 emit_move_insn (temp, new_rtx);
4989
4990 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4991 new_rtx = gen_const_mem (Pmode, new_rtx);
4992
4993 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4994 temp = gen_reg_rtx (Pmode);
4995 emit_insn (gen_rtx_SET (temp, new_rtx));
4996 }
4997 else
4998 {
4999 /* In position-dependent code, load the absolute address of
5000 the GOT entry from the literal pool. */
5001
5002 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5003 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5004 new_rtx = force_const_mem (Pmode, new_rtx);
5005 temp = gen_reg_rtx (Pmode);
5006 emit_move_insn (temp, new_rtx);
5007
5008 new_rtx = temp;
5009 new_rtx = gen_const_mem (Pmode, new_rtx);
5010 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5011 temp = gen_reg_rtx (Pmode);
5012 emit_insn (gen_rtx_SET (temp, new_rtx));
5013 }
5014
5015 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5016 if (reg != 0)
5017 {
5018 s390_load_address (reg, new_rtx);
5019 new_rtx = reg;
5020 }
5021 break;
5022
5023 case TLS_MODEL_LOCAL_EXEC:
5024 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5025 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5026 new_rtx = force_const_mem (Pmode, new_rtx);
5027 temp = gen_reg_rtx (Pmode);
5028 emit_move_insn (temp, new_rtx);
5029
5030 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5031 if (reg != 0)
5032 {
5033 s390_load_address (reg, new_rtx);
5034 new_rtx = reg;
5035 }
5036 break;
5037
5038 default:
5039 gcc_unreachable ();
5040 }
5041
5042 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5043 {
5044 switch (XINT (XEXP (addr, 0), 1))
5045 {
5046 case UNSPEC_INDNTPOFF:
5047 gcc_assert (TARGET_CPU_ZARCH);
5048 new_rtx = addr;
5049 break;
5050
5051 default:
5052 gcc_unreachable ();
5053 }
5054 }
5055
5056 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5057 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5058 {
5059 new_rtx = XEXP (XEXP (addr, 0), 0);
5060 if (GET_CODE (new_rtx) != SYMBOL_REF)
5061 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5062
5063 new_rtx = legitimize_tls_address (new_rtx, reg);
5064 new_rtx = plus_constant (Pmode, new_rtx,
5065 INTVAL (XEXP (XEXP (addr, 0), 1)));
5066 new_rtx = force_operand (new_rtx, 0);
5067 }
5068
5069 else
5070 gcc_unreachable (); /* for now ... */
5071
5072 return new_rtx;
5073 }
5074
5075 /* Emit insns making the address in operands[1] valid for a standard
5076 move to operands[0]. operands[1] is replaced by an address which
5077 should be used instead of the former RTX to emit the move
5078 pattern. */
5079
5080 void
5081 emit_symbolic_move (rtx *operands)
5082 {
5083 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5084
5085 if (GET_CODE (operands[0]) == MEM)
5086 operands[1] = force_reg (Pmode, operands[1]);
5087 else if (TLS_SYMBOLIC_CONST (operands[1]))
5088 operands[1] = legitimize_tls_address (operands[1], temp);
5089 else if (flag_pic)
5090 operands[1] = legitimize_pic_address (operands[1], temp);
5091 }
5092
5093 /* Try machine-dependent ways of modifying an illegitimate address X
5094 to be legitimate. If we find one, return the new, valid address.
5095
5096 OLDX is the address as it was before break_out_memory_refs was called.
5097 In some cases it is useful to look at this to decide what needs to be done.
5098
5099 MODE is the mode of the operand pointed to by X.
5100
5101 When -fpic is used, special handling is needed for symbolic references.
5102 See comments by legitimize_pic_address for details. */
5103
5104 static rtx
5105 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5106 machine_mode mode ATTRIBUTE_UNUSED)
5107 {
5108 rtx constant_term = const0_rtx;
5109
5110 if (TLS_SYMBOLIC_CONST (x))
5111 {
5112 x = legitimize_tls_address (x, 0);
5113
5114 if (s390_legitimate_address_p (mode, x, FALSE))
5115 return x;
5116 }
5117 else if (GET_CODE (x) == PLUS
5118 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5119 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5120 {
5121 return x;
5122 }
5123 else if (flag_pic)
5124 {
5125 if (SYMBOLIC_CONST (x)
5126 || (GET_CODE (x) == PLUS
5127 && (SYMBOLIC_CONST (XEXP (x, 0))
5128 || SYMBOLIC_CONST (XEXP (x, 1)))))
5129 x = legitimize_pic_address (x, 0);
5130
5131 if (s390_legitimate_address_p (mode, x, FALSE))
5132 return x;
5133 }
5134
5135 x = eliminate_constant_term (x, &constant_term);
5136
5137 /* Optimize loading of large displacements by splitting them
5138 into the multiple of 4K and the rest; this allows the
5139 former to be CSE'd if possible.
5140
5141 Don't do this if the displacement is added to a register
5142 pointing into the stack frame, as the offsets will
5143 change later anyway. */
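     For example (illustrative), a displacement of 0x12345 is split into
     0x12000, which is loaded into a register and may be CSEd, and the
     in-range remainder 0x345.  */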
5144
5145 if (GET_CODE (constant_term) == CONST_INT
5146 && !TARGET_LONG_DISPLACEMENT
5147 && !DISP_IN_RANGE (INTVAL (constant_term))
5148 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5149 {
5150 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5151 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5152
5153 rtx temp = gen_reg_rtx (Pmode);
5154 rtx val = force_operand (GEN_INT (upper), temp);
5155 if (val != temp)
5156 emit_move_insn (temp, val);
5157
5158 x = gen_rtx_PLUS (Pmode, x, temp);
5159 constant_term = GEN_INT (lower);
5160 }
5161
5162 if (GET_CODE (x) == PLUS)
5163 {
5164 if (GET_CODE (XEXP (x, 0)) == REG)
5165 {
5166 rtx temp = gen_reg_rtx (Pmode);
5167 rtx val = force_operand (XEXP (x, 1), temp);
5168 if (val != temp)
5169 emit_move_insn (temp, val);
5170
5171 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5172 }
5173
5174 else if (GET_CODE (XEXP (x, 1)) == REG)
5175 {
5176 rtx temp = gen_reg_rtx (Pmode);
5177 rtx val = force_operand (XEXP (x, 0), temp);
5178 if (val != temp)
5179 emit_move_insn (temp, val);
5180
5181 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5182 }
5183 }
5184
5185 if (constant_term != const0_rtx)
5186 x = gen_rtx_PLUS (Pmode, x, constant_term);
5187
5188 return x;
5189 }
5190
5191 /* Try a machine-dependent way of reloading an illegitimate address AD
5192 operand. If we find one, push the reload and return the new address.
5193
5194 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5195 and TYPE is the reload type of the current reload. */
5196
5197 rtx
5198 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5199 int opnum, int type)
5200 {
5201 if (!optimize || TARGET_LONG_DISPLACEMENT)
5202 return NULL_RTX;
5203
5204 if (GET_CODE (ad) == PLUS)
5205 {
5206 rtx tem = simplify_binary_operation (PLUS, Pmode,
5207 XEXP (ad, 0), XEXP (ad, 1));
5208 if (tem)
5209 ad = tem;
5210 }
5211
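  /* Split an out-of-range displacement into a 4k-aligned upper part,
     which is pushed as a reload into an address register, and a low
     12-bit part that stays as displacement; as a sketch, reg + 0x12345
     becomes (reg + <reloaded 0x12000>) + 0x345.  */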
5212 if (GET_CODE (ad) == PLUS
5213 && GET_CODE (XEXP (ad, 0)) == REG
5214 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5215 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5216 {
5217 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5218 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5219 rtx cst, tem, new_rtx;
5220
5221 cst = GEN_INT (upper);
5222 if (!legitimate_reload_constant_p (cst))
5223 cst = force_const_mem (Pmode, cst);
5224
5225 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5226 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5227
5228 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5229 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5230 opnum, (enum reload_type) type);
5231 return new_rtx;
5232 }
5233
5234 return NULL_RTX;
5235 }
5236
5237 /* Emit code to move LEN bytes from SRC to DST.  */
5238
5239 bool
5240 s390_expand_movmem (rtx dst, rtx src, rtx len)
5241 {
5242 /* When tuning for z10 or higher we rely on the Glibc functions to
5243      do the right thing.  Only for constant lengths below 64k do we
5244      generate inline code.  */
5245 if (s390_tune >= PROCESSOR_2097_Z10
5246 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5247 return false;
5248
5249 /* Expand memcpy for constant length operands without a loop if it
5250 is shorter that way.
5251
5252      With a constant length argument a memcpy loop (without pfd) is
5253      36 bytes, i.e. as large as 6 mvc instructions.  */
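  /* For example (illustrative), a constant length of 600 bytes expands
     into three mvc instructions copying 256, 256 and 88 bytes at
     offsets 0, 256 and 512.  */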
5254 if (GET_CODE (len) == CONST_INT
5255 && INTVAL (len) >= 0
5256 && INTVAL (len) <= 256 * 6
5257 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5258 {
5259 HOST_WIDE_INT o, l;
5260
5261 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5262 {
5263 rtx newdst = adjust_address (dst, BLKmode, o);
5264 rtx newsrc = adjust_address (src, BLKmode, o);
5265 emit_insn (gen_movmem_short (newdst, newsrc,
5266 GEN_INT (l > 256 ? 255 : l - 1)));
5267 }
5268 }
5269
5270 else if (TARGET_MVCLE)
5271 {
5272 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5273 }
5274
5275 else
5276 {
5277 rtx dst_addr, src_addr, count, blocks, temp;
5278 rtx_code_label *loop_start_label = gen_label_rtx ();
5279 rtx_code_label *loop_end_label = gen_label_rtx ();
5280 rtx_code_label *end_label = gen_label_rtx ();
5281 machine_mode mode;
5282
5283 mode = GET_MODE (len);
5284 if (mode == VOIDmode)
5285 mode = Pmode;
5286
5287 dst_addr = gen_reg_rtx (Pmode);
5288 src_addr = gen_reg_rtx (Pmode);
5289 count = gen_reg_rtx (mode);
5290 blocks = gen_reg_rtx (mode);
5291
5292 convert_move (count, len, 1);
5293 emit_cmp_and_jump_insns (count, const0_rtx,
5294 EQ, NULL_RTX, mode, 1, end_label);
5295
5296 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5297 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5298 dst = change_address (dst, VOIDmode, dst_addr);
5299 src = change_address (src, VOIDmode, src_addr);
5300
5301 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5302 OPTAB_DIRECT);
5303 if (temp != count)
5304 emit_move_insn (count, temp);
5305
5306 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5307 OPTAB_DIRECT);
5308 if (temp != blocks)
5309 emit_move_insn (blocks, temp);
5310
5311 emit_cmp_and_jump_insns (blocks, const0_rtx,
5312 EQ, NULL_RTX, mode, 1, loop_end_label);
5313
5314 emit_label (loop_start_label);
5315
5316 if (TARGET_Z10
5317 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5318 {
5319 rtx prefetch;
5320
5321 /* Issue a read prefetch for the +3 cache line. */
5322 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5323 const0_rtx, const0_rtx);
5324 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5325 emit_insn (prefetch);
5326
5327 /* Issue a write prefetch for the +3 cache line. */
5328 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5329 const1_rtx, const0_rtx);
5330 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5331 emit_insn (prefetch);
5332 }
5333
5334 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5335 s390_load_address (dst_addr,
5336 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5337 s390_load_address (src_addr,
5338 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5339
5340 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5341 OPTAB_DIRECT);
5342 if (temp != blocks)
5343 emit_move_insn (blocks, temp);
5344
5345 emit_cmp_and_jump_insns (blocks, const0_rtx,
5346 EQ, NULL_RTX, mode, 1, loop_end_label);
5347
5348 emit_jump (loop_start_label);
5349 emit_label (loop_end_label);
5350
5351 emit_insn (gen_movmem_short (dst, src,
5352 convert_to_mode (Pmode, count, 1)));
5353 emit_label (end_label);
5354 }
5355 return true;
5356 }
5357
5358 /* Emit code to set LEN bytes at DST to VAL.
5359 Make use of clrmem if VAL is zero. */
5360
5361 void
5362 s390_expand_setmem (rtx dst, rtx len, rtx val)
5363 {
5364 const int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5365
5366 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5367 return;
5368
5369 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5370
5371 /* Expand setmem/clrmem for a constant length operand without a
5372 loop if it will be shorter that way.
5373      With a constant length and without a pfd argument a
5374      clrmem loop is 32 bytes, i.e. about 5.3 xc instructions, and a
5375      setmem loop is 36 bytes, i.e. about 3.6 (mvi/stc + mvc) pairs.  */
5376 if (GET_CODE (len) == CONST_INT
5377 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5378 || INTVAL (len) <= 257 * 3)
5379 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5380 {
5381 HOST_WIDE_INT o, l;
5382
5383 if (val == const0_rtx)
5384 /* clrmem: emit 256 byte blockwise XCs. */
5385 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5386 {
5387 rtx newdst = adjust_address (dst, BLKmode, o);
5388 emit_insn (gen_clrmem_short (newdst,
5389 GEN_INT (l > 256 ? 255 : l - 1)));
5390 }
5391 else
5392 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5393 setting first byte to val and using a 256 byte mvc with one
5394 byte overlap to propagate the byte. */
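	/* For example (illustrative), a 300 byte memset with a non-zero
	   value becomes:  mvi 0(dst),val; mvc 1(256,dst),0(dst);
	   mvi 257(dst),val; mvc 258(42,dst),257(dst).  */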
5395 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5396 {
5397 rtx newdst = adjust_address (dst, BLKmode, o);
5398 emit_move_insn (adjust_address (dst, QImode, o), val);
5399 if (l > 1)
5400 {
5401 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5402 emit_insn (gen_movmem_short (newdstp1, newdst,
5403 GEN_INT (l > 257 ? 255 : l - 2)));
5404 }
5405 }
5406 }
5407
5408 else if (TARGET_MVCLE)
5409 {
5410 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5411 if (TARGET_64BIT)
5412 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5413 val));
5414 else
5415 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5416 val));
5417 }
5418
5419 else
5420 {
5421 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5422 rtx_code_label *loop_start_label = gen_label_rtx ();
5423 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5424 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5425 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5426 machine_mode mode;
5427
5428 mode = GET_MODE (len);
5429 if (mode == VOIDmode)
5430 mode = Pmode;
5431
5432 dst_addr = gen_reg_rtx (Pmode);
5433 count = gen_reg_rtx (mode);
5434 blocks = gen_reg_rtx (mode);
5435
5436 convert_move (count, len, 1);
5437 emit_cmp_and_jump_insns (count, const0_rtx,
5438 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5439 very_unlikely);
5440
5441 /* We need to make a copy of the target address since memset is
5442 supposed to return it unmodified. We have to make it here
5443 already since the new reg is used at onebyte_end_label. */
5444 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5445 dst = change_address (dst, VOIDmode, dst_addr);
5446
5447 if (val != const0_rtx)
5448 {
5449 /* When using the overlapping mvc the original target
5450 address is only accessed as single byte entity (even by
5451 the mvc reading this value). */
5452 set_mem_size (dst, 1);
5453 dstp1 = adjust_address (dst, VOIDmode, 1);
5454 emit_cmp_and_jump_insns (count,
5455 const1_rtx, EQ, NULL_RTX, mode, 1,
5456 onebyte_end_label, very_unlikely);
5457 }
5458
5459       /* There is one unconditional (mvi+mvc)/xc after the loop
5460 	 dealing with the rest of the bytes.  Subtracting two (mvi+mvc)
5461 	 or one (xc) here leaves that number of bytes to be handled by
5462 	 it.  */
5463 temp = expand_binop (mode, add_optab, count,
5464 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5465 count, 1, OPTAB_DIRECT);
5466 if (temp != count)
5467 emit_move_insn (count, temp);
5468
5469 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5470 OPTAB_DIRECT);
5471 if (temp != blocks)
5472 emit_move_insn (blocks, temp);
5473
5474 emit_cmp_and_jump_insns (blocks, const0_rtx,
5475 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5476
5477 emit_jump (loop_start_label);
5478
5479 if (val != const0_rtx)
5480 {
5481 /* The 1 byte != 0 special case. Not handled efficiently
5482 since we require two jumps for that. However, this
5483 should be very rare. */
5484 emit_label (onebyte_end_label);
5485 emit_move_insn (adjust_address (dst, QImode, 0), val);
5486 emit_jump (zerobyte_end_label);
5487 }
5488
5489 emit_label (loop_start_label);
5490
5491 if (TARGET_Z10
5492 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5493 {
5494 /* Issue a write prefetch for the +4 cache line. */
5495 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5496 GEN_INT (1024)),
5497 const1_rtx, const0_rtx);
5498 emit_insn (prefetch);
5499 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5500 }
5501
5502 if (val == const0_rtx)
5503 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5504 else
5505 {
5506 /* Set the first byte in the block to the value and use an
5507 overlapping mvc for the block. */
5508 emit_move_insn (adjust_address (dst, QImode, 0), val);
5509 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5510 }
5511 s390_load_address (dst_addr,
5512 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5513
5514 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5515 OPTAB_DIRECT);
5516 if (temp != blocks)
5517 emit_move_insn (blocks, temp);
5518
5519 emit_cmp_and_jump_insns (blocks, const0_rtx,
5520 NE, NULL_RTX, mode, 1, loop_start_label);
5521
5522 emit_label (restbyte_end_label);
5523
5524 if (val == const0_rtx)
5525 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5526 else
5527 {
5528 /* Set the first byte in the block to the value and use an
5529 overlapping mvc for the block. */
5530 emit_move_insn (adjust_address (dst, QImode, 0), val);
5531 	  /* The execute instruction only uses the lowest 8 bits of count,
5532 	     which is exactly what we need here.  */
5533 emit_insn (gen_movmem_short (dstp1, dst,
5534 convert_to_mode (Pmode, count, 1)));
5535 }
5536
5537 emit_label (zerobyte_end_label);
5538 }
5539 }
5540
5541 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5542 and return the result in TARGET. */
5543
5544 bool
5545 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5546 {
5547 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5548 rtx tmp;
5549
5550 /* When tuning for z10 or higher we rely on the Glibc functions to
5551      do the right thing.  Only for constant lengths below 64k do we
5552      generate inline code.  */
5553 if (s390_tune >= PROCESSOR_2097_Z10
5554 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5555 return false;
5556
5557 /* As the result of CMPINT is inverted compared to what we need,
5558 we have to swap the operands. */
5559 tmp = op0; op0 = op1; op1 = tmp;
5560
5561 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5562 {
5563 if (INTVAL (len) > 0)
5564 {
5565 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5566 emit_insn (gen_cmpint (target, ccreg));
5567 }
5568 else
5569 emit_move_insn (target, const0_rtx);
5570 }
5571 else if (TARGET_MVCLE)
5572 {
5573 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5574 emit_insn (gen_cmpint (target, ccreg));
5575 }
5576 else
5577 {
5578 rtx addr0, addr1, count, blocks, temp;
5579 rtx_code_label *loop_start_label = gen_label_rtx ();
5580 rtx_code_label *loop_end_label = gen_label_rtx ();
5581 rtx_code_label *end_label = gen_label_rtx ();
5582 machine_mode mode;
5583
5584 mode = GET_MODE (len);
5585 if (mode == VOIDmode)
5586 mode = Pmode;
5587
5588 addr0 = gen_reg_rtx (Pmode);
5589 addr1 = gen_reg_rtx (Pmode);
5590 count = gen_reg_rtx (mode);
5591 blocks = gen_reg_rtx (mode);
5592
5593 convert_move (count, len, 1);
5594 emit_cmp_and_jump_insns (count, const0_rtx,
5595 EQ, NULL_RTX, mode, 1, end_label);
5596
5597 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5598 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5599 op0 = change_address (op0, VOIDmode, addr0);
5600 op1 = change_address (op1, VOIDmode, addr1);
5601
5602 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5603 OPTAB_DIRECT);
5604 if (temp != count)
5605 emit_move_insn (count, temp);
5606
5607 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5608 OPTAB_DIRECT);
5609 if (temp != blocks)
5610 emit_move_insn (blocks, temp);
5611
5612 emit_cmp_and_jump_insns (blocks, const0_rtx,
5613 EQ, NULL_RTX, mode, 1, loop_end_label);
5614
5615 emit_label (loop_start_label);
5616
5617 if (TARGET_Z10
5618 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5619 {
5620 rtx prefetch;
5621
5622 /* Issue a read prefetch for the +2 cache line of operand 1. */
5623 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5624 const0_rtx, const0_rtx);
5625 emit_insn (prefetch);
5626 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5627
5628 /* Issue a read prefetch for the +2 cache line of operand 2. */
5629 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5630 const0_rtx, const0_rtx);
5631 emit_insn (prefetch);
5632 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5633 }
5634
5635 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5636 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5637 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5638 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5639 temp = gen_rtx_SET (pc_rtx, temp);
5640 emit_jump_insn (temp);
5641
5642 s390_load_address (addr0,
5643 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5644 s390_load_address (addr1,
5645 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5646
5647 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5648 OPTAB_DIRECT);
5649 if (temp != blocks)
5650 emit_move_insn (blocks, temp);
5651
5652 emit_cmp_and_jump_insns (blocks, const0_rtx,
5653 EQ, NULL_RTX, mode, 1, loop_end_label);
5654
5655 emit_jump (loop_start_label);
5656 emit_label (loop_end_label);
5657
5658 emit_insn (gen_cmpmem_short (op0, op1,
5659 convert_to_mode (Pmode, count, 1)));
5660 emit_label (end_label);
5661
5662 emit_insn (gen_cmpint (target, ccreg));
5663 }
5664 return true;
5665 }
5666
5667 /* Emit a conditional jump to LABEL for condition code mask MASK using
5668    comparison operator COMPARISON.  Return the emitted jump insn.  */
5669
5670 static rtx_insn *
5671 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5672 {
5673 rtx temp;
5674
5675 gcc_assert (comparison == EQ || comparison == NE);
5676 gcc_assert (mask > 0 && mask < 15);
5677
5678 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5679 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5680 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5681 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5682 temp = gen_rtx_SET (pc_rtx, temp);
5683 return emit_jump_insn (temp);
5684 }
5685
5686 /* Emit the instructions to implement strlen of STRING and store the
5687 result in TARGET. The string has the known ALIGNMENT. This
5688 version uses vector instructions and is therefore not appropriate
5689 for targets prior to z13. */
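/* A rough C-level sketch of the code emitted here (illustrative only;
   "vll" and "vfene" refer to VECTOR LOAD WITH LENGTH and VECTOR FIND
   ELEMENT NOT EQUAL as used below):

     idx = 0;
     if (addr & 15)
       load the bytes up to the next 16-byte boundary with vll
       (the remaining vector bytes are zero filled) and enter the
       loop after the aligned load;
     loop:
       v = 16 aligned bytes at addr + idx;
       idx += 16;
       pos = vfene (v);          index of the first zero byte, 16 if none
       if (pos >= 16) goto loop;
     target = idx - 16 + pos;    (fixed up when the zero was only in the
                                  zero-filled tail of a short first load)  */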
5690
5691 void
5692 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5693 {
5694 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5695 int very_likely = REG_BR_PROB_BASE - 1;
5696 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5697 rtx str_reg = gen_reg_rtx (V16QImode);
5698 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5699 rtx str_idx_reg = gen_reg_rtx (Pmode);
5700 rtx result_reg = gen_reg_rtx (V16QImode);
5701 rtx is_aligned_label = gen_label_rtx ();
5702 rtx into_loop_label = NULL_RTX;
5703 rtx loop_start_label = gen_label_rtx ();
5704 rtx temp;
5705 rtx len = gen_reg_rtx (QImode);
5706 rtx cond;
5707
5708 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5709 emit_move_insn (str_idx_reg, const0_rtx);
5710
5711 if (INTVAL (alignment) < 16)
5712 {
5713 /* Check whether the address happens to be aligned properly so
5714 we can jump directly to the aligned loop. */
5715 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5716 str_addr_base_reg, GEN_INT (15)),
5717 const0_rtx, EQ, NULL_RTX,
5718 Pmode, 1, is_aligned_label);
5719
5720 temp = gen_reg_rtx (Pmode);
5721 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5722 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5723 gcc_assert (REG_P (temp));
5724 highest_index_to_load_reg =
5725 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5726 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5727 gcc_assert (REG_P (highest_index_to_load_reg));
5728 emit_insn (gen_vllv16qi (str_reg,
5729 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5730 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5731
5732 into_loop_label = gen_label_rtx ();
5733 s390_emit_jump (into_loop_label, NULL_RTX);
5734 emit_barrier ();
5735 }
5736
5737 emit_label (is_aligned_label);
5738 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5739
5740 /* When we reach this point we are only performing 16-byte aligned
5741 loads. */
5742 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5743
5744 emit_label (loop_start_label);
5745 LABEL_NUSES (loop_start_label) = 1;
5746
5747 /* Load 16 bytes of the string into VR. */
5748 emit_move_insn (str_reg,
5749 gen_rtx_MEM (V16QImode,
5750 gen_rtx_PLUS (Pmode, str_idx_reg,
5751 str_addr_base_reg)));
5752 if (into_loop_label != NULL_RTX)
5753 {
5754 emit_label (into_loop_label);
5755 LABEL_NUSES (into_loop_label) = 1;
5756 }
5757
5758 /* Increment string index by 16 bytes. */
5759 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5760 str_idx_reg, 1, OPTAB_DIRECT);
5761
5762 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5763 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5764
5765 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5766 REG_BR_PROB, very_likely);
5767 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5768
5769 /* If the string pointer wasn't aligned we have loaded less than 16
5770 bytes and the remaining bytes got filled with zeros (by vll).
5771 Now we have to check whether the resulting index lies within the
5772 bytes that are actually part of the string. */
5773
5774 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5775 highest_index_to_load_reg);
5776 s390_load_address (highest_index_to_load_reg,
5777 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5778 const1_rtx));
5779 if (TARGET_64BIT)
5780 emit_insn (gen_movdicc (str_idx_reg, cond,
5781 highest_index_to_load_reg, str_idx_reg));
5782 else
5783 emit_insn (gen_movsicc (str_idx_reg, cond,
5784 highest_index_to_load_reg, str_idx_reg));
5785
5786 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5787 very_unlikely);
5788
5789 expand_binop (Pmode, add_optab, str_idx_reg,
5790 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5791 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5792 here. */
5793 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5794 convert_to_mode (Pmode, len, 1),
5795 target, 1, OPTAB_DIRECT);
5796 if (temp != target)
5797 emit_move_insn (target, temp);
5798 }
5799
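/* Expand a vector implementation of movstr: copy the null-terminated
   string from SRC to DST and set RESULT to the address of the copied
   terminating zero byte within DST.  */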
5800 void
5801 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5802 {
5803 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5804 rtx temp = gen_reg_rtx (Pmode);
5805 rtx src_addr = XEXP (src, 0);
5806 rtx dst_addr = XEXP (dst, 0);
5807 rtx src_addr_reg = gen_reg_rtx (Pmode);
5808 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5809 rtx offset = gen_reg_rtx (Pmode);
5810 rtx vsrc = gen_reg_rtx (V16QImode);
5811 rtx vpos = gen_reg_rtx (V16QImode);
5812 rtx loadlen = gen_reg_rtx (SImode);
5813 rtx gpos_qi = gen_reg_rtx(QImode);
5814 rtx gpos = gen_reg_rtx (SImode);
5815 rtx done_label = gen_label_rtx ();
5816 rtx loop_label = gen_label_rtx ();
5817 rtx exit_label = gen_label_rtx ();
5818 rtx full_label = gen_label_rtx ();
5819
5820 /* Perform a quick check for a terminating zero within the first (up to)
5821 16 bytes and exit early if one is found. */
5822
5823 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5824 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5825 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5826 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5827 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5828 /* gpos is the byte index if a zero was found and 16 otherwise.
5829 So if it is lower than the loaded bytes we have a hit. */
5830 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5831 full_label);
5832 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5833
5834 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5835 1, OPTAB_DIRECT);
5836 emit_jump (exit_label);
5837 emit_barrier ();
5838
5839 emit_label (full_label);
5840 LABEL_NUSES (full_label) = 1;
5841
5842 /* Calculate `offset' so that src + offset points to the last byte
5843 before the next 16-byte boundary. */
5844
5845 /* temp = src_addr & 0xf */
5846 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5847 1, OPTAB_DIRECT);
5848
5849 /* offset = 0xf - temp */
5850 emit_move_insn (offset, GEN_INT (15));
5851 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5852 1, OPTAB_DIRECT);
5853
5854 /* Store `offset' bytes in the destination string. The quick check
5855 has loaded at least `offset' bytes into vsrc. */
5856
5857 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5858
5859 /* Advance to the next byte to be loaded. */
5860 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5861 1, OPTAB_DIRECT);
5862
5863 /* Make sure the addresses are single regs which can be used as a
5864 base. */
5865 emit_move_insn (src_addr_reg, src_addr);
5866 emit_move_insn (dst_addr_reg, dst_addr);
5867
5868 /* MAIN LOOP */
5869
5870 emit_label (loop_label);
5871 LABEL_NUSES (loop_label) = 1;
5872
5873 emit_move_insn (vsrc,
5874 gen_rtx_MEM (V16QImode,
5875 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5876
5877 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5878 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5879 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5880 REG_BR_PROB, very_unlikely);
5881
5882 emit_move_insn (gen_rtx_MEM (V16QImode,
5883 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5884 vsrc);
5885 /* offset += 16 */
5886 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5887 offset, 1, OPTAB_DIRECT);
5888
5889 emit_jump (loop_label);
5890 emit_barrier ();
5891
5892 /* REGULAR EXIT */
5893
5894 /* We are done. Add the offset of the zero character to the dst_addr
5895 pointer to get the result. */
5896
5897 emit_label (done_label);
5898 LABEL_NUSES (done_label) = 1;
5899
5900 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5901 1, OPTAB_DIRECT);
5902
5903 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5904 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5905
5906 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5907
5908 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5909 1, OPTAB_DIRECT);
5910
5911 /* EARLY EXIT */
5912
5913 emit_label (exit_label);
5914 LABEL_NUSES (exit_label) = 1;
5915 }
5916
5917
5918 /* Expand conditional increment or decrement using alc/slb instructions.
5919 Should generate code setting DST to either SRC or SRC + INCREMENT,
5920 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5921 Returns true if successful, false otherwise.
5922
5923 That makes it possible to implement some if-constructs without jumps e.g.:
5924 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5925 unsigned int a, b, c;
5926 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5927 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5928 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5929 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5930
5931 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5932 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5933 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5934 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5935 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5936
5937 bool
5938 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5939 rtx dst, rtx src, rtx increment)
5940 {
5941 machine_mode cmp_mode;
5942 machine_mode cc_mode;
5943 rtx op_res;
5944 rtx insn;
5945 rtvec p;
5946 int ret;
5947
5948 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5949 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5950 cmp_mode = SImode;
5951 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5952 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5953 cmp_mode = DImode;
5954 else
5955 return false;
5956
5957 /* Try ADD LOGICAL WITH CARRY. */
5958 if (increment == const1_rtx)
5959 {
5960 /* Determine CC mode to use. */
5961 if (cmp_code == EQ || cmp_code == NE)
5962 {
5963 if (cmp_op1 != const0_rtx)
5964 {
5965 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5966 NULL_RTX, 0, OPTAB_WIDEN);
5967 cmp_op1 = const0_rtx;
5968 }
5969
5970 cmp_code = cmp_code == EQ ? LEU : GTU;
5971 }
5972
5973 if (cmp_code == LTU || cmp_code == LEU)
5974 {
5975 rtx tem = cmp_op0;
5976 cmp_op0 = cmp_op1;
5977 cmp_op1 = tem;
5978 cmp_code = swap_condition (cmp_code);
5979 }
5980
5981 switch (cmp_code)
5982 {
5983 case GTU:
5984 cc_mode = CCUmode;
5985 break;
5986
5987 case GEU:
5988 cc_mode = CCL3mode;
5989 break;
5990
5991 default:
5992 return false;
5993 }
5994
5995 /* Emit comparison instruction pattern. */
5996 if (!register_operand (cmp_op0, cmp_mode))
5997 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5998
5999 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6000 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6001 /* We use insn_invalid_p here to add clobbers if required. */
6002 ret = insn_invalid_p (emit_insn (insn), false);
6003 gcc_assert (!ret);
6004
6005 /* Emit ALC instruction pattern. */
6006 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6007 gen_rtx_REG (cc_mode, CC_REGNUM),
6008 const0_rtx);
6009
6010 if (src != const0_rtx)
6011 {
6012 if (!register_operand (src, GET_MODE (dst)))
6013 src = force_reg (GET_MODE (dst), src);
6014
6015 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6016 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6017 }
6018
6019 p = rtvec_alloc (2);
6020 RTVEC_ELT (p, 0) =
6021 gen_rtx_SET (dst, op_res);
6022 RTVEC_ELT (p, 1) =
6023 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6024 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6025
6026 return true;
6027 }
6028
6029 /* Try SUBTRACT LOGICAL WITH BORROW. */
6030 if (increment == constm1_rtx)
6031 {
6032 /* Determine CC mode to use. */
6033 if (cmp_code == EQ || cmp_code == NE)
6034 {
6035 if (cmp_op1 != const0_rtx)
6036 {
6037 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6038 NULL_RTX, 0, OPTAB_WIDEN);
6039 cmp_op1 = const0_rtx;
6040 }
6041
6042 cmp_code = cmp_code == EQ ? LEU : GTU;
6043 }
6044
6045 if (cmp_code == GTU || cmp_code == GEU)
6046 {
6047 rtx tem = cmp_op0;
6048 cmp_op0 = cmp_op1;
6049 cmp_op1 = tem;
6050 cmp_code = swap_condition (cmp_code);
6051 }
6052
6053 switch (cmp_code)
6054 {
6055 case LEU:
6056 cc_mode = CCUmode;
6057 break;
6058
6059 case LTU:
6060 cc_mode = CCL3mode;
6061 break;
6062
6063 default:
6064 return false;
6065 }
6066
6067 /* Emit comparison instruction pattern. */
6068 if (!register_operand (cmp_op0, cmp_mode))
6069 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6070
6071 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6072 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6073 /* We use insn_invalid_p here to add clobbers if required. */
6074 ret = insn_invalid_p (emit_insn (insn), false);
6075 gcc_assert (!ret);
6076
6077 /* Emit SLB instruction pattern. */
6078 if (!register_operand (src, GET_MODE (dst)))
6079 src = force_reg (GET_MODE (dst), src);
6080
6081 op_res = gen_rtx_MINUS (GET_MODE (dst),
6082 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6083 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6084 gen_rtx_REG (cc_mode, CC_REGNUM),
6085 const0_rtx));
6086 p = rtvec_alloc (2);
6087 RTVEC_ELT (p, 0) =
6088 gen_rtx_SET (dst, op_res);
6089 RTVEC_ELT (p, 1) =
6090 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6091 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6092
6093 return true;
6094 }
6095
6096 return false;
6097 }
6098
6099 /* Expand code for the insv template. Return true if successful. */
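/* Informally, this implements the bit-field store
     (set (zero_extract DEST OP1 OP2) SRC)
   i.e. OP1 bits of SRC are inserted into DEST starting at bit
   position OP2.  */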
6100
6101 bool
6102 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6103 {
6104 int bitsize = INTVAL (op1);
6105 int bitpos = INTVAL (op2);
6106 machine_mode mode = GET_MODE (dest);
6107 machine_mode smode;
6108 int smode_bsize, mode_bsize;
6109 rtx op, clobber;
6110
6111 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6112 return false;
6113
6114 /* Generate INSERT IMMEDIATE (IILL et al). */
6115 /* (set (ze (reg)) (const_int)). */
6116 if (TARGET_ZARCH
6117 && register_operand (dest, word_mode)
6118 && (bitpos % 16) == 0
6119 && (bitsize % 16) == 0
6120 && const_int_operand (src, VOIDmode))
6121 {
6122 HOST_WIDE_INT val = INTVAL (src);
6123 int regpos = bitpos + bitsize;
6124
6125 while (regpos > bitpos)
6126 {
6127 machine_mode putmode;
6128 int putsize;
6129
6130 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6131 putmode = SImode;
6132 else
6133 putmode = HImode;
6134
6135 putsize = GET_MODE_BITSIZE (putmode);
6136 regpos -= putsize;
6137 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6138 GEN_INT (putsize),
6139 GEN_INT (regpos)),
6140 gen_int_mode (val, putmode));
6141 val >>= putsize;
6142 }
6143 gcc_assert (regpos == bitpos);
6144 return true;
6145 }
6146
6147 smode = smallest_mode_for_size (bitsize, MODE_INT);
6148 smode_bsize = GET_MODE_BITSIZE (smode);
6149 mode_bsize = GET_MODE_BITSIZE (mode);
6150
6151 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6152 if (bitpos == 0
6153 && (bitsize % BITS_PER_UNIT) == 0
6154 && MEM_P (dest)
6155 && (register_operand (src, word_mode)
6156 || const_int_operand (src, VOIDmode)))
6157 {
6158 /* Emit standard pattern if possible. */
6159 if (smode_bsize == bitsize)
6160 {
6161 emit_move_insn (adjust_address (dest, smode, 0),
6162 gen_lowpart (smode, src));
6163 return true;
6164 }
6165
6166 /* (set (ze (mem)) (const_int)). */
6167 else if (const_int_operand (src, VOIDmode))
6168 {
6169 int size = bitsize / BITS_PER_UNIT;
6170 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6171 BLKmode,
6172 UNITS_PER_WORD - size);
6173
6174 dest = adjust_address (dest, BLKmode, 0);
6175 set_mem_size (dest, size);
6176 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6177 return true;
6178 }
6179
6180 /* (set (ze (mem)) (reg)). */
6181 else if (register_operand (src, word_mode))
6182 {
6183 if (bitsize <= 32)
6184 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6185 const0_rtx), src);
6186 else
6187 {
6188 /* Emit st,stcmh sequence. */
6189 int stcmh_width = bitsize - 32;
6190 int size = stcmh_width / BITS_PER_UNIT;
6191
6192 emit_move_insn (adjust_address (dest, SImode, size),
6193 gen_lowpart (SImode, src));
6194 set_mem_size (dest, size);
6195 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6196 GEN_INT (stcmh_width),
6197 const0_rtx),
6198 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6199 }
6200 return true;
6201 }
6202 }
6203
6204 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6205 if ((bitpos % BITS_PER_UNIT) == 0
6206 && (bitsize % BITS_PER_UNIT) == 0
6207 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6208 && MEM_P (src)
6209 && (mode == DImode || mode == SImode)
6210 && register_operand (dest, mode))
6211 {
6212 /* Emit a strict_low_part pattern if possible. */
6213 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6214 {
6215 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6216 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6217 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6218 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6219 return true;
6220 }
6221
6222 /* ??? There are more powerful versions of ICM that are not
6223 completely represented in the md file. */
6224 }
6225
6226 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6227 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6228 {
6229 machine_mode mode_s = GET_MODE (src);
6230
6231 if (CONSTANT_P (src))
6232 {
6233 /* For constant zero values the representation with AND
6234 appears to be folded in more situations than the (set
6235 (zero_extract) ...).
6236 We only do this when the start and end of the bitfield
6237 remain in the same SImode chunk. That way nihf or nilf
6238 can be used.
6239 The AND patterns might still generate a risbg for this. */
6240 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6241 return false;
6242 else
6243 src = force_reg (mode, src);
6244 }
6245 else if (mode_s != mode)
6246 {
6247 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6248 src = force_reg (mode_s, src);
6249 src = gen_lowpart (mode, src);
6250 }
6251
6252 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6253 op = gen_rtx_SET (op, src);
6254
6255 if (!TARGET_ZEC12)
6256 {
6257 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6258 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6259 }
6260 emit_insn (op);
6261
6262 return true;
6263 }
6264
6265 return false;
6266 }
6267
6268 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6269 register that holds VAL of mode MODE shifted by COUNT bits. */
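/* Informally, the returned register holds
     (VAL & GET_MODE_MASK (MODE)) << COUNT
   computed in SImode.  */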
6270
6271 static inline rtx
6272 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6273 {
6274 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6275 NULL_RTX, 1, OPTAB_DIRECT);
6276 return expand_simple_binop (SImode, ASHIFT, val, count,
6277 NULL_RTX, 1, OPTAB_DIRECT);
6278 }
6279
6280 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6281 the result in TARGET. */
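/* Informally: TARGET[i] = (CMP_OP1[i] COND CMP_OP2[i]) ? -1 : 0 for each
   vector element i; the vector compare instructions set all bits of an
   element when the comparison holds.  */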
6282
6283 void
6284 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6285 rtx cmp_op1, rtx cmp_op2)
6286 {
6287 machine_mode mode = GET_MODE (target);
6288 bool neg_p = false, swap_p = false;
6289 rtx tmp;
6290
6291 if (GET_MODE (cmp_op1) == V2DFmode)
6292 {
6293 switch (cond)
6294 {
6295 /* NE a != b -> !(a == b) */
6296 case NE: cond = EQ; neg_p = true; break;
6297 /* UNGT a u> b -> !(b >= a) */
6298 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6299 /* UNGE a u>= b -> !(b > a) */
6300 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6301 /* LE: a <= b -> b >= a */
6302 case LE: cond = GE; swap_p = true; break;
6303 /* UNLE: a u<= b -> !(a > b) */
6304 case UNLE: cond = GT; neg_p = true; break;
6305 /* LT: a < b -> b > a */
6306 case LT: cond = GT; swap_p = true; break;
6307 /* UNLT: a u< b -> !(a >= b) */
6308 case UNLT: cond = GE; neg_p = true; break;
6309 case UNEQ:
6310 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6311 return;
6312 case LTGT:
6313 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6314 return;
6315 case ORDERED:
6316 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6317 return;
6318 case UNORDERED:
6319 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6320 return;
6321 default: break;
6322 }
6323 }
6324 else
6325 {
6326 switch (cond)
6327 {
6328 /* NE: a != b -> !(a == b) */
6329 case NE: cond = EQ; neg_p = true; break;
6330 /* GE: a >= b -> !(b > a) */
6331 case GE: cond = GT; neg_p = true; swap_p = true; break;
6332 /* GEU: a >= b -> !(b > a) */
6333 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6334 /* LE: a <= b -> !(a > b) */
6335 case LE: cond = GT; neg_p = true; break;
6336 /* LEU: a <= b -> !(a > b) */
6337 case LEU: cond = GTU; neg_p = true; break;
6338 /* LT: a < b -> b > a */
6339 case LT: cond = GT; swap_p = true; break;
6340 /* LTU: a < b -> b > a */
6341 case LTU: cond = GTU; swap_p = true; break;
6342 default: break;
6343 }
6344 }
6345
6346 if (swap_p)
6347 {
6348 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6349 }
6350
6351 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6352 mode,
6353 cmp_op1, cmp_op2)));
6354 if (neg_p)
6355 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6356 }
6357
6358 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6359 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6360 elements in CMP1 and CMP2 fulfill the comparison.
6361 This function is only used to emit patterns for the vx builtins and
6362 therefore only handles comparison codes required by the
6363 builtins. */
6364 void
6365 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6366 rtx cmp1, rtx cmp2, bool all_p)
6367 {
6368 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6369 rtx tmp_reg = gen_reg_rtx (SImode);
6370 bool swap_p = false;
6371
6372 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6373 {
6374 switch (code)
6375 {
6376 case EQ:
6377 case NE:
6378 cc_producer_mode = CCVEQmode;
6379 break;
6380 case GE:
6381 case LT:
6382 code = swap_condition (code);
6383 swap_p = true;
6384 /* fallthrough */
6385 case GT:
6386 case LE:
6387 cc_producer_mode = CCVIHmode;
6388 break;
6389 case GEU:
6390 case LTU:
6391 code = swap_condition (code);
6392 swap_p = true;
6393 /* fallthrough */
6394 case GTU:
6395 case LEU:
6396 cc_producer_mode = CCVIHUmode;
6397 break;
6398 default:
6399 gcc_unreachable ();
6400 }
6401
6402 scratch_mode = GET_MODE (cmp1);
6403 /* These codes represent inverted CC interpretations. Inverting
6404 an ALL CC mode results in an ANY CC mode and the other way
6405 around. Invert the all_p flag here to compensate for
6406 that. */
6407 if (code == NE || code == LE || code == LEU)
6408 all_p = !all_p;
6409
6410 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6411 }
6412 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6413 {
6414 bool inv_p = false;
6415
6416 switch (code)
6417 {
6418 case EQ: cc_producer_mode = CCVEQmode; break;
6419 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6420 case GT: cc_producer_mode = CCVFHmode; break;
6421 case GE: cc_producer_mode = CCVFHEmode; break;
6422 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6423 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6424 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6425 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6426 default: gcc_unreachable ();
6427 }
6428 scratch_mode = mode_for_vector (
6429 int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
6430 GET_MODE_NUNITS (GET_MODE (cmp1)));
6431 gcc_assert (scratch_mode != BLKmode);
6432
6433 if (inv_p)
6434 all_p = !all_p;
6435
6436 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6437 }
6438 else
6439 gcc_unreachable ();
6440
6441 if (swap_p)
6442 {
6443 rtx tmp = cmp2;
6444 cmp2 = cmp1;
6445 cmp1 = tmp;
6446 }
6447
6448 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6449 gen_rtvec (2, gen_rtx_SET (
6450 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6451 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6452 gen_rtx_CLOBBER (VOIDmode,
6453 gen_rtx_SCRATCH (scratch_mode)))));
6454 emit_move_insn (target, const0_rtx);
6455 emit_move_insn (tmp_reg, const1_rtx);
6456
6457 emit_move_insn (target,
6458 gen_rtx_IF_THEN_ELSE (SImode,
6459 gen_rtx_fmt_ee (code, VOIDmode,
6460 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6461 const0_rtx),
6462 tmp_reg, target));
6463 }
6464
6465 /* Invert the comparison CODE applied to a CC mode. This is only safe
6466 if we know whether the result was created by a floating point
6467 compare or not. For the CCV modes this is encoded as part of the
6468 mode. */
6469 enum rtx_code
6470 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6471 {
6472 /* Reversal of FP compares needs special care -- an ordered compare
6473 becomes an unordered compare and vice versa. */
6474 if (mode == CCVFALLmode || mode == CCVFANYmode)
6475 return reverse_condition_maybe_unordered (code);
6476 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6477 return reverse_condition (code);
6478 else
6479 gcc_unreachable ();
6480 }
6481
6482 /* Generate a vector comparison expression loading either elements of
6483 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6484 and CMP_OP2. */
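/* Informally: TARGET[i] = (CMP_OP1[i] COND CMP_OP2[i]) ? THEN[i] : ELS[i]
   for each vector element i.  */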
6485
6486 void
6487 s390_expand_vcond (rtx target, rtx then, rtx els,
6488 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6489 {
6490 rtx tmp;
6491 machine_mode result_mode;
6492 rtx result_target;
6493
6494 machine_mode target_mode = GET_MODE (target);
6495 machine_mode cmp_mode = GET_MODE (cmp_op1);
6496 rtx op = (cond == LT) ? els : then;
6497
6498 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6499 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6500 for short and byte (x >> 15 and x >> 7 respectively). */
6501 if ((cond == LT || cond == GE)
6502 && target_mode == cmp_mode
6503 && cmp_op2 == CONST0_RTX (cmp_mode)
6504 && op == CONST0_RTX (target_mode)
6505 && s390_vector_mode_supported_p (target_mode)
6506 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6507 {
6508 rtx negop = (cond == LT) ? then : els;
6509
6510 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6511
6512 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6513 if (negop == CONST1_RTX (target_mode))
6514 {
6515 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6516 GEN_INT (shift), target,
6517 1, OPTAB_DIRECT);
6518 if (res != target)
6519 emit_move_insn (target, res);
6520 return;
6521 }
6522
6523 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6524 else if (all_ones_operand (negop, target_mode))
6525 {
6526 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6527 GEN_INT (shift), target,
6528 0, OPTAB_DIRECT);
6529 if (res != target)
6530 emit_move_insn (target, res);
6531 return;
6532 }
6533 }
6534
6535 /* We always use an integral type vector to hold the comparison
6536 result. */
6537 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6538 result_target = gen_reg_rtx (result_mode);
6539
6540 /* We allow vector immediates as comparison operands that
6541 can be handled by the optimization above but not by the
6542 following code. Hence, force them into registers here. */
6543 if (!REG_P (cmp_op1))
6544 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6545
6546 if (!REG_P (cmp_op2))
6547 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6548
6549 s390_expand_vec_compare (result_target, cond,
6550 cmp_op1, cmp_op2);
6551
6552 /* If the results are supposed to be either -1 or 0 we are done
6553 since this is what our compare instructions generate anyway. */
6554 if (all_ones_operand (then, GET_MODE (then))
6555 && const0_operand (els, GET_MODE (els)))
6556 {
6557 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6558 result_target, 0));
6559 return;
6560 }
6561
6562 /* Otherwise we will do a vsel afterwards. */
6563 /* This gets triggered e.g.
6564 with gcc.c-torture/compile/pr53410-1.c */
6565 if (!REG_P (then))
6566 then = force_reg (target_mode, then);
6567
6568 if (!REG_P (els))
6569 els = force_reg (target_mode, els);
6570
6571 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6572 result_target,
6573 CONST0_RTX (result_mode));
6574
6575 /* We compared the result against zero above so we have to swap then
6576 and els here. */
6577 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6578
6579 gcc_assert (target_mode == GET_MODE (then));
6580 emit_insn (gen_rtx_SET (target, tmp));
6581 }
6582
6583 /* Emit the RTX necessary to initialize the vector TARGET with values
6584 in VALS. */
6585 void
6586 s390_expand_vec_init (rtx target, rtx vals)
6587 {
6588 machine_mode mode = GET_MODE (target);
6589 machine_mode inner_mode = GET_MODE_INNER (mode);
6590 int n_elts = GET_MODE_NUNITS (mode);
6591 bool all_same = true, all_regs = true, all_const_int = true;
6592 rtx x;
6593 int i;
6594
6595 for (i = 0; i < n_elts; ++i)
6596 {
6597 x = XVECEXP (vals, 0, i);
6598
6599 if (!CONST_INT_P (x))
6600 all_const_int = false;
6601
6602 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6603 all_same = false;
6604
6605 if (!REG_P (x))
6606 all_regs = false;
6607 }
6608
6609 /* Use vector gen mask or vector gen byte mask if possible. */
6610 if (all_same && all_const_int
6611 && (XVECEXP (vals, 0, 0) == const0_rtx
6612 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6613 NULL, NULL)
6614 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6615 {
6616 emit_insn (gen_rtx_SET (target,
6617 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6618 return;
6619 }
6620
6621 if (all_same)
6622 {
6623 emit_insn (gen_rtx_SET (target,
6624 gen_rtx_VEC_DUPLICATE (mode,
6625 XVECEXP (vals, 0, 0))));
6626 return;
6627 }
6628
6629 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6630 {
6631 /* Use vector load pair. */
6632 emit_insn (gen_rtx_SET (target,
6633 gen_rtx_VEC_CONCAT (mode,
6634 XVECEXP (vals, 0, 0),
6635 XVECEXP (vals, 0, 1))));
6636 return;
6637 }
6638
6639 /* We are about to set the vector elements one by one. Zero out the
6640 full register first in order to help the data flow framework
6641 detect it as a full VR set. */
6642 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6643
6644 /* Unfortunately the vec_init expander is not allowed to fail. So
6645 we have to implement the fallback ourselves. */
6646 for (i = 0; i < n_elts; i++)
6647 {
6648 rtx elem = XVECEXP (vals, 0, i);
6649 if (!general_operand (elem, GET_MODE (elem)))
6650 elem = force_reg (inner_mode, elem);
6651
6652 emit_insn (gen_rtx_SET (target,
6653 gen_rtx_UNSPEC (mode,
6654 gen_rtvec (3, elem,
6655 GEN_INT (i), target),
6656 UNSPEC_VEC_SET)));
6657 }
6658 }
6659
6660 /* Structure to hold the initial parameters for a compare_and_swap operation
6661 in HImode and QImode. */
6662
6663 struct alignment_context
6664 {
6665 rtx memsi; /* SI aligned memory location. */
6666 rtx shift; /* Bit offset with regard to lsb. */
6667 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6668 rtx modemaski; /* ~modemask */
6669 bool aligned; /* True if memory is aligned, false otherwise. */
6670 };
6671
6672 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6673 structure AC for transparent simplification, if the memory alignment is known
6674 to be at least 32 bits. MEM is the memory location for the actual operation
6675 and MODE its mode. */
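/* A worked example (illustrative only): for a QImode access at byte
   address A with unknown alignment the code below computes

     align         = A & -4                   the containing SImode word
     byteoffset    = A & 3
     ac->shift     = (4 - 1 - byteoffset) * 8
     ac->modemask  = 0xff << ac->shift
     ac->modemaski = ~ac->modemask

   so on this big-endian target the accessed byte occupies exactly the
   MODEMASK bits of the aligned word AC->MEMSI.  */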
6676
6677 static void
6678 init_alignment_context (struct alignment_context *ac, rtx mem,
6679 machine_mode mode)
6680 {
6681 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6682 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6683
6684 if (ac->aligned)
6685 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6686 else
6687 {
6688 /* Alignment is unknown. */
6689 rtx byteoffset, addr, align;
6690
6691 /* Force the address into a register. */
6692 addr = force_reg (Pmode, XEXP (mem, 0));
6693
6694 /* Align it to SImode. */
6695 align = expand_simple_binop (Pmode, AND, addr,
6696 GEN_INT (-GET_MODE_SIZE (SImode)),
6697 NULL_RTX, 1, OPTAB_DIRECT);
6698 /* Generate MEM. */
6699 ac->memsi = gen_rtx_MEM (SImode, align);
6700 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6701 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6702 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6703
6704 /* Calculate shiftcount. */
6705 byteoffset = expand_simple_binop (Pmode, AND, addr,
6706 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6707 NULL_RTX, 1, OPTAB_DIRECT);
6708 /* As we already have some offset, evaluate the remaining distance. */
6709 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6710 NULL_RTX, 1, OPTAB_DIRECT);
6711 }
6712
6713 /* Shift is the byte count, but we need the bitcount. */
6714 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6715 NULL_RTX, 1, OPTAB_DIRECT);
6716
6717 /* Calculate masks. */
6718 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6719 GEN_INT (GET_MODE_MASK (mode)),
6720 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6721 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6722 NULL_RTX, 1);
6723 }
6724
6725 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6726 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6727 perform the merge in SEQ2. */
6728
6729 static rtx
6730 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6731 machine_mode mode, rtx val, rtx ins)
6732 {
6733 rtx tmp;
6734
6735 if (ac->aligned)
6736 {
6737 start_sequence ();
6738 tmp = copy_to_mode_reg (SImode, val);
6739 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6740 const0_rtx, ins))
6741 {
6742 *seq1 = NULL;
6743 *seq2 = get_insns ();
6744 end_sequence ();
6745 return tmp;
6746 }
6747 end_sequence ();
6748 }
6749
6750 /* Failed to use insv. Generate a two part shift and mask. */
6751 start_sequence ();
6752 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6753 *seq1 = get_insns ();
6754 end_sequence ();
6755
6756 start_sequence ();
6757 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6758 *seq2 = get_insns ();
6759 end_sequence ();
6760
6761 return tmp;
6762 }
6763
6764 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6765 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6766 value to set if CMP == MEM. */
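/* A rough C-level sketch of the expansion below (illustrative only;
   "CS" stands for the full-word COMPARE AND SWAP emitted via
   s390_emit_compare_and_swap, and SHIFT/MODEMASK/MODEMASKI come from
   init_alignment_context):

     val = *memsi & modemaski;           surrounding bits, field zeroed
     for (;;)
       {
         cmpv = val | (cmp << shift);
         newv = val | (new << shift);
         res  = CS (memsi, cmpv, newv);
         if (res == cmpv)
           { btarget = 1; break; }       the field was swapped
         if ((res & modemaski) == val)
           { btarget = 0; break; }       the field itself differed
         val = res & modemaski;          only outside bits changed, retry
       }
     vtarget = res >> shift;             truncated to MODE

   With IS_WEAK only a single CS is issued and BTARGET is set from its
   condition code.  */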
6767
6768 void
6769 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6770 rtx cmp, rtx new_rtx, bool is_weak)
6771 {
6772 struct alignment_context ac;
6773 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6774 rtx res = gen_reg_rtx (SImode);
6775 rtx_code_label *csloop = NULL, *csend = NULL;
6776
6777 gcc_assert (MEM_P (mem));
6778
6779 init_alignment_context (&ac, mem, mode);
6780
6781 /* Load full word. Subsequent loads are performed by CS. */
6782 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6783 NULL_RTX, 1, OPTAB_DIRECT);
6784
6785 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6786 possible, we try to use insv to make this happen efficiently. If
6787 that fails we'll generate code both inside and outside the loop. */
6788 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6789 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6790
6791 if (seq0)
6792 emit_insn (seq0);
6793 if (seq1)
6794 emit_insn (seq1);
6795
6796 /* Start CS loop. */
6797 if (!is_weak)
6798 {
6799 /* Begin assuming success. */
6800 emit_move_insn (btarget, const1_rtx);
6801
6802 csloop = gen_label_rtx ();
6803 csend = gen_label_rtx ();
6804 emit_label (csloop);
6805 }
6806
6807 /* val = "<mem>00..0<mem>"
6808 * cmp = "00..0<cmp>00..0"
6809 * new = "00..0<new>00..0"
6810 */
6811
6812 emit_insn (seq2);
6813 emit_insn (seq3);
6814
6815 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6816 if (is_weak)
6817 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6818 else
6819 {
6820 rtx tmp;
6821
6822 /* Jump to end if we're done (likely?). */
6823 s390_emit_jump (csend, cc);
6824
6825 /* Check for changes outside mode, and loop internal if so.
6826 Arrange the moves so that the compare is adjacent to the
6827 branch so that we can generate CRJ. */
6828 tmp = copy_to_reg (val);
6829 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6830 1, OPTAB_DIRECT);
6831 cc = s390_emit_compare (NE, val, tmp);
6832 s390_emit_jump (csloop, cc);
6833
6834 /* Failed. */
6835 emit_move_insn (btarget, const0_rtx);
6836 emit_label (csend);
6837 }
6838
6839 /* Return the correct part of the bitfield. */
6840 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6841 NULL_RTX, 1, OPTAB_DIRECT), 1);
6842 }
6843
6844 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6845 and VAL the value to play with. If AFTER is true then store the value
6846 MEM holds after the operation, if AFTER is false then store the value MEM
6847 holds before the operation. If TARGET is zero then discard that value, else
6848 store it to TARGET. */
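/* A rough C-level sketch of the expansion below (illustrative only;
   "CS" again stands for the full-word COMPARE AND SWAP loop and
   SHIFT/MODEMASK come from init_alignment_context):

     val = (val & GET_MODE_MASK (mode)) << shift;   except for an aligned
                                                    SET from memory
     do
       {
         old = *memsi;
         new = old with CODE applied to the field using val;
       }
     while (CS (memsi, old, new) != old);    retry until MEMSI was unchanged
     if (target)
       target = (after ? new : old) >> shift;   truncated to MODE  */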
6849
6850 void
6851 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6852 rtx target, rtx mem, rtx val, bool after)
6853 {
6854 struct alignment_context ac;
6855 rtx cmp;
6856 rtx new_rtx = gen_reg_rtx (SImode);
6857 rtx orig = gen_reg_rtx (SImode);
6858 rtx_code_label *csloop = gen_label_rtx ();
6859
6860 gcc_assert (!target || register_operand (target, VOIDmode));
6861 gcc_assert (MEM_P (mem));
6862
6863 init_alignment_context (&ac, mem, mode);
6864
6865 /* Shift val to the correct bit positions.
6866 Preserve "icm", but prevent "ex icm". */
6867 if (!(ac.aligned && code == SET && MEM_P (val)))
6868 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6869
6870 /* Further preparation insns. */
6871 if (code == PLUS || code == MINUS)
6872 emit_move_insn (orig, val);
6873 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6874 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6875 NULL_RTX, 1, OPTAB_DIRECT);
6876
6877 /* Load full word. Subsequent loads are performed by CS. */
6878 cmp = force_reg (SImode, ac.memsi);
6879
6880 /* Start CS loop. */
6881 emit_label (csloop);
6882 emit_move_insn (new_rtx, cmp);
6883
6884 /* Patch new with val at correct position. */
6885 switch (code)
6886 {
6887 case PLUS:
6888 case MINUS:
6889 val = expand_simple_binop (SImode, code, new_rtx, orig,
6890 NULL_RTX, 1, OPTAB_DIRECT);
6891 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6892 NULL_RTX, 1, OPTAB_DIRECT);
6893 /* FALLTHRU */
6894 case SET:
6895 if (ac.aligned && MEM_P (val))
6896 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6897 0, 0, SImode, val, false);
6898 else
6899 {
6900 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6901 NULL_RTX, 1, OPTAB_DIRECT);
6902 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6903 NULL_RTX, 1, OPTAB_DIRECT);
6904 }
6905 break;
6906 case AND:
6907 case IOR:
6908 case XOR:
6909 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6910 NULL_RTX, 1, OPTAB_DIRECT);
6911 break;
6912 case MULT: /* NAND */
6913 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6914 NULL_RTX, 1, OPTAB_DIRECT);
6915 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6916 NULL_RTX, 1, OPTAB_DIRECT);
6917 break;
6918 default:
6919 gcc_unreachable ();
6920 }
6921
6922 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6923 ac.memsi, cmp, new_rtx));
6924
6925 /* Return the correct part of the bitfield. */
6926 if (target)
6927 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6928 after ? new_rtx : cmp, ac.shift,
6929 NULL_RTX, 1, OPTAB_DIRECT), 1);
6930 }
6931
6932 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6933 We need to emit DTP-relative relocations. */
6934
6935 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6936
6937 static void
6938 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6939 {
6940 switch (size)
6941 {
6942 case 4:
6943 fputs ("\t.long\t", file);
6944 break;
6945 case 8:
6946 fputs ("\t.quad\t", file);
6947 break;
6948 default:
6949 gcc_unreachable ();
6950 }
6951 output_addr_const (file, x);
6952 fputs ("@DTPOFF", file);
6953 }
6954
6955 /* Return the proper mode for REGNO being represented in the dwarf
6956 unwind table. */
6957 machine_mode
6958 s390_dwarf_frame_reg_mode (int regno)
6959 {
6960 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6961
6962 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6963 if (GENERAL_REGNO_P (regno))
6964 save_mode = Pmode;
6965
6966 /* The rightmost 64 bits of vector registers are call-clobbered. */
6967 if (GET_MODE_SIZE (save_mode) > 8)
6968 save_mode = DImode;
6969
6970 return save_mode;
6971 }
6972
6973 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6974 /* Implement TARGET_MANGLE_TYPE. */
6975
6976 static const char *
6977 s390_mangle_type (const_tree type)
6978 {
6979 type = TYPE_MAIN_VARIANT (type);
6980
6981 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6982 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6983 return NULL;
6984
6985 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6986 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6987 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6988 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6989
6990 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6991 && TARGET_LONG_DOUBLE_128)
6992 return "g";
6993
6994 /* For all other types, use normal C++ mangling. */
6995 return NULL;
6996 }
6997 #endif
6998
6999 /* In the name of slightly smaller debug output, and to cater to
7000 general assembler lossage, recognize various UNSPEC sequences
7001 and turn them back into a direct symbol reference. */
7002
7003 static rtx
7004 s390_delegitimize_address (rtx orig_x)
7005 {
7006 rtx x, y;
7007
7008 orig_x = delegitimize_mem_from_attrs (orig_x);
7009 x = orig_x;
7010
7011 /* Extract the symbol ref from:
7012 (plus:SI (reg:SI 12 %r12)
7013 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7014 UNSPEC_GOTOFF/PLTOFF)))
7015 and
7016 (plus:SI (reg:SI 12 %r12)
7017 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7018 UNSPEC_GOTOFF/PLTOFF)
7019 (const_int 4 [0x4])))) */
7020 if (GET_CODE (x) == PLUS
7021 && REG_P (XEXP (x, 0))
7022 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7023 && GET_CODE (XEXP (x, 1)) == CONST)
7024 {
7025 HOST_WIDE_INT offset = 0;
7026
7027 /* The const operand. */
7028 y = XEXP (XEXP (x, 1), 0);
7029
7030 if (GET_CODE (y) == PLUS
7031 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7032 {
7033 offset = INTVAL (XEXP (y, 1));
7034 y = XEXP (y, 0);
7035 }
7036
7037 if (GET_CODE (y) == UNSPEC
7038 && (XINT (y, 1) == UNSPEC_GOTOFF
7039 || XINT (y, 1) == UNSPEC_PLTOFF))
7040 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7041 }
7042
7043 if (GET_CODE (x) != MEM)
7044 return orig_x;
7045
7046 x = XEXP (x, 0);
7047 if (GET_CODE (x) == PLUS
7048 && GET_CODE (XEXP (x, 1)) == CONST
7049 && GET_CODE (XEXP (x, 0)) == REG
7050 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7051 {
7052 y = XEXP (XEXP (x, 1), 0);
7053 if (GET_CODE (y) == UNSPEC
7054 && XINT (y, 1) == UNSPEC_GOT)
7055 y = XVECEXP (y, 0, 0);
7056 else
7057 return orig_x;
7058 }
7059 else if (GET_CODE (x) == CONST)
7060 {
7061 /* Extract the symbol ref from:
7062 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7063 UNSPEC_PLT/GOTENT))) */
7064
7065 y = XEXP (x, 0);
7066 if (GET_CODE (y) == UNSPEC
7067 && (XINT (y, 1) == UNSPEC_GOTENT
7068 || XINT (y, 1) == UNSPEC_PLT))
7069 y = XVECEXP (y, 0, 0);
7070 else
7071 return orig_x;
7072 }
7073 else
7074 return orig_x;
7075
7076 if (GET_MODE (orig_x) != Pmode)
7077 {
7078 if (GET_MODE (orig_x) == BLKmode)
7079 return orig_x;
7080 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7081 if (y == NULL_RTX)
7082 return orig_x;
7083 }
7084 return y;
7085 }
7086
7087 /* Output operand OP to stdio stream FILE.
7088 OP is an address (register + offset) which is not used to address data;
7089 instead the rightmost bits are interpreted as the value. */
7090
7091 static void
7092 print_addrstyle_operand (FILE *file, rtx op)
7093 {
7094 HOST_WIDE_INT offset;
7095 rtx base;
7096
7097 /* Extract base register and offset. */
7098 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7099 gcc_unreachable ();
7100
7101 /* Sanity check. */
7102 if (base)
7103 {
7104 gcc_assert (GET_CODE (base) == REG);
7105 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7106 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7107 }
7108
7109 /* Offsets are restricted to twelve bits. */
7110 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7111 if (base)
7112 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7113 }
7114
7115 /* Assigns the number of NOP halfwords to be emitted before and after the
7116 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
7117 If hotpatching is disabled for the function, the values are set to zero.
7118 */
7119
7120 static void
7121 s390_function_num_hotpatch_hw (tree decl,
7122 int *hw_before,
7123 int *hw_after)
7124 {
7125 tree attr;
7126
7127 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7128
7129 /* Handle the arguments of the hotpatch attribute. The values
7130 specified via attribute might override the cmdline argument
7131 values. */
7132 if (attr)
7133 {
7134 tree args = TREE_VALUE (attr);
7135
7136 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7137 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7138 }
7139 else
7140 {
7141 /* Use the values specified by the cmdline arguments. */
7142 *hw_before = s390_hotpatch_hw_before_label;
7143 *hw_after = s390_hotpatch_hw_after_label;
7144 }
7145 }
7146
7147 /* Write the current .machine and .machinemode specification to the assembler
7148 file. */
7149
7150 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7151 static void
7152 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7153 {
7154 fprintf (asm_out_file, "\t.machinemode %s\n",
7155 (TARGET_ZARCH) ? "zarch" : "esa");
7156 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7157 if (S390_USE_ARCHITECTURE_MODIFIERS)
7158 {
7159 int cpu_flags;
7160
7161 cpu_flags = processor_flags_table[(int) s390_arch];
7162 if (TARGET_HTM && !(cpu_flags & PF_TX))
7163 fprintf (asm_out_file, "+htm");
7164 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7165 fprintf (asm_out_file, "+nohtm");
7166 if (TARGET_VX && !(cpu_flags & PF_VX))
7167 fprintf (asm_out_file, "+vx");
7168 else if (!TARGET_VX && (cpu_flags & PF_VX))
7169 fprintf (asm_out_file, "+novx");
7170 }
7171 fprintf (asm_out_file, "\"\n");
7172 }
7173
7174 /* Write an extra function header before the very start of the function. */
7175
7176 void
7177 s390_asm_output_function_prefix (FILE *asm_out_file,
7178 const char *fnname ATTRIBUTE_UNUSED)
7179 {
7180 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7181 return;
7182 /* Since only the function-specific options are saved, but not an indication
7183 of which options were explicitly set, it's too much work here to figure out
7184 which options have actually changed. Thus, generate .machine and
7185 .machinemode whenever a function has the target attribute or pragma. */
7186 fprintf (asm_out_file, "\t.machinemode push\n");
7187 fprintf (asm_out_file, "\t.machine push\n");
7188 s390_asm_output_machine_for_arch (asm_out_file);
7189 }
7190
7191 /* Write an extra function footer after the very end of the function. */
7192
7193 void
7194 s390_asm_declare_function_size (FILE *asm_out_file,
7195 const char *fnname, tree decl)
7196 {
7197 if (!flag_inhibit_size_directive)
7198 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7199 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7200 return;
7201 fprintf (asm_out_file, "\t.machine pop\n");
7202 fprintf (asm_out_file, "\t.machinemode pop\n");
7203 }
7204 #endif
7205
7206 /* Write the extra assembler code needed to declare a function properly. */
7207
7208 void
7209 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7210 tree decl)
7211 {
7212 int hw_before, hw_after;
7213
7214 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7215 if (hw_before > 0)
7216 {
7217 unsigned int function_alignment;
7218 int i;
7219
7220 /* Add a trampoline code area before the function label and initialize it
7221 with two-byte nop instructions. This area can be overwritten with code
7222 that jumps to a patched version of the function. */
7223 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
7224 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7225 hw_before);
7226 for (i = 1; i < hw_before; i++)
7227 fputs ("\tnopr\t%r7\n", asm_out_file);
7228
7229 /* Note: The function label must be aligned so that (a) the bytes of the
7230 following nop do not cross a cacheline boundary, and (b) a jump address
7231 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7232 stored directly before the label without crossing a cacheline
7233 boundary. All this is necessary to make sure the trampoline code can
7234 be changed atomically.
7235 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7236 if there are NOPs before the function label, the alignment is placed
7237 before them. So it is necessary to duplicate the alignment after the
7238 NOPs. */
7239 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7240 if (! DECL_USER_ALIGN (decl))
7241 function_alignment = MAX (function_alignment,
7242 (unsigned int) align_functions);
7243 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7244 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7245 }
7246
7247 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7248 {
7249 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7250 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7251 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7252 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7253 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7254 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7255 s390_warn_framesize);
7256 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7257 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7258 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7259 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7260 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7261 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7262 TARGET_PACKED_STACK);
7263 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7264 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7265 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7266 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7267 s390_warn_dynamicstack_p);
7268 }
7269 ASM_OUTPUT_LABEL (asm_out_file, fname);
7270 if (hw_after > 0)
7271 asm_fprintf (asm_out_file,
7272 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7273 hw_after);
7274 }
7275
7276 /* Output machine-dependent UNSPECs occurring in address constant X
7277 in assembler syntax to stdio stream FILE. Returns true if the
7278 constant X could be recognized, false otherwise. */
7279
7280 static bool
7281 s390_output_addr_const_extra (FILE *file, rtx x)
7282 {
7283 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7284 switch (XINT (x, 1))
7285 {
7286 case UNSPEC_GOTENT:
7287 output_addr_const (file, XVECEXP (x, 0, 0));
7288 fprintf (file, "@GOTENT");
7289 return true;
7290 case UNSPEC_GOT:
7291 output_addr_const (file, XVECEXP (x, 0, 0));
7292 fprintf (file, "@GOT");
7293 return true;
7294 case UNSPEC_GOTOFF:
7295 output_addr_const (file, XVECEXP (x, 0, 0));
7296 fprintf (file, "@GOTOFF");
7297 return true;
7298 case UNSPEC_PLT:
7299 output_addr_const (file, XVECEXP (x, 0, 0));
7300 fprintf (file, "@PLT");
7301 return true;
7302 case UNSPEC_PLTOFF:
7303 output_addr_const (file, XVECEXP (x, 0, 0));
7304 fprintf (file, "@PLTOFF");
7305 return true;
7306 case UNSPEC_TLSGD:
7307 output_addr_const (file, XVECEXP (x, 0, 0));
7308 fprintf (file, "@TLSGD");
7309 return true;
7310 case UNSPEC_TLSLDM:
7311 assemble_name (file, get_some_local_dynamic_name ());
7312 fprintf (file, "@TLSLDM");
7313 return true;
7314 case UNSPEC_DTPOFF:
7315 output_addr_const (file, XVECEXP (x, 0, 0));
7316 fprintf (file, "@DTPOFF");
7317 return true;
7318 case UNSPEC_NTPOFF:
7319 output_addr_const (file, XVECEXP (x, 0, 0));
7320 fprintf (file, "@NTPOFF");
7321 return true;
7322 case UNSPEC_GOTNTPOFF:
7323 output_addr_const (file, XVECEXP (x, 0, 0));
7324 fprintf (file, "@GOTNTPOFF");
7325 return true;
7326 case UNSPEC_INDNTPOFF:
7327 output_addr_const (file, XVECEXP (x, 0, 0));
7328 fprintf (file, "@INDNTPOFF");
7329 return true;
7330 }
7331
7332 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7333 switch (XINT (x, 1))
7334 {
7335 case UNSPEC_POOL_OFFSET:
7336 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7337 output_addr_const (file, x);
7338 return true;
7339 }
7340 return false;
7341 }
7342
7343 /* Output address operand ADDR in assembler syntax to
7344 stdio stream FILE. */
7345
7346 void
7347 print_operand_address (FILE *file, rtx addr)
7348 {
7349 struct s390_address ad;
7350
7351 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7352 {
7353 if (!TARGET_Z10)
7354 {
7355 output_operand_lossage ("symbolic memory references are "
7356 "only supported on z10 or later");
7357 return;
7358 }
7359 output_addr_const (file, addr);
7360 return;
7361 }
7362
7363 if (!s390_decompose_address (addr, &ad)
7364 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7365 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7366 output_operand_lossage ("cannot decompose address");
7367
7368 if (ad.disp)
7369 output_addr_const (file, ad.disp);
7370 else
7371 fprintf (file, "0");
7372
7373 if (ad.base && ad.indx)
7374 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7375 reg_names[REGNO (ad.base)]);
7376 else if (ad.base)
7377 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7378 }
7379
7380 /* Output operand X in assembler syntax to stdio stream FILE.
7381 CODE specified the format flag. The following format flags
7382 are recognized:
7383
7384 'C': print opcode suffix for branch condition.
7385 'D': print opcode suffix for inverse branch condition.
7386 'E': print opcode suffix for branch on index instruction.
7387 'G': print the size of the operand in bytes.
7388 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7389 'M': print the second word of a TImode operand.
7390 'N': print the second word of a DImode operand.
7391 'O': print only the displacement of a memory reference or address.
7392 'R': print only the base register of a memory reference or address.
7393 'S': print S-type memory reference (base+displacement).
7394 'Y': print address style operand without index (e.g. shift count or setmem
7395 operand).
7396
7397 'b': print integer X as if it's an unsigned byte.
7398 'c': print integer X as if it's a signed byte.
7399 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7400 'f': "end" contiguous bitmask X in SImode.
7401 'h': print integer X as if it's a signed halfword.
7402 'i': print the first nonzero HImode part of X.
7403 'j': print the first HImode part unequal to -1 of X.
7404 'k': print the first nonzero SImode part of X.
7405 'm': print the first SImode part unequal to -1 of X.
7406 'o': print integer X as if it's an unsigned 32bit word.
7407 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7408 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7409 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7410 'x': print integer X as if it's an unsigned halfword.
7411 'v': print register number as vector register (v1 instead of f1).
7412 */
7413
7414 void
7415 print_operand (FILE *file, rtx x, int code)
7416 {
7417 HOST_WIDE_INT ival;
7418
7419 switch (code)
7420 {
7421 case 'C':
7422 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7423 return;
7424
7425 case 'D':
7426 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7427 return;
7428
7429 case 'E':
7430 if (GET_CODE (x) == LE)
7431 fprintf (file, "l");
7432 else if (GET_CODE (x) == GT)
7433 fprintf (file, "h");
7434 else
7435 output_operand_lossage ("invalid comparison operator "
7436 "for 'E' output modifier");
7437 return;
7438
7439 case 'J':
7440 if (GET_CODE (x) == SYMBOL_REF)
7441 {
7442 fprintf (file, "%s", ":tls_load:");
7443 output_addr_const (file, x);
7444 }
7445 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7446 {
7447 fprintf (file, "%s", ":tls_gdcall:");
7448 output_addr_const (file, XVECEXP (x, 0, 0));
7449 }
7450 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7451 {
7452 fprintf (file, "%s", ":tls_ldcall:");
7453 const char *name = get_some_local_dynamic_name ();
7454 gcc_assert (name);
7455 assemble_name (file, name);
7456 }
7457 else
7458 output_operand_lossage ("invalid reference for 'J' output modifier");
7459 return;
7460
7461 case 'G':
7462 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7463 return;
7464
7465 case 'O':
7466 {
7467 struct s390_address ad;
7468 int ret;
7469
7470 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7471
7472 if (!ret
7473 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7474 || ad.indx)
7475 {
7476 output_operand_lossage ("invalid address for 'O' output modifier");
7477 return;
7478 }
7479
7480 if (ad.disp)
7481 output_addr_const (file, ad.disp);
7482 else
7483 fprintf (file, "0");
7484 }
7485 return;
7486
7487 case 'R':
7488 {
7489 struct s390_address ad;
7490 int ret;
7491
7492 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7493
7494 if (!ret
7495 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7496 || ad.indx)
7497 {
7498 output_operand_lossage ("invalid address for 'R' output modifier");
7499 return;
7500 }
7501
7502 if (ad.base)
7503 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7504 else
7505 fprintf (file, "0");
7506 }
7507 return;
7508
7509 case 'S':
7510 {
7511 struct s390_address ad;
7512 int ret;
7513
7514 if (!MEM_P (x))
7515 {
7516 output_operand_lossage ("memory reference expected for "
7517 "'S' output modifier");
7518 return;
7519 }
7520 ret = s390_decompose_address (XEXP (x, 0), &ad);
7521
7522 if (!ret
7523 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7524 || ad.indx)
7525 {
7526 output_operand_lossage ("invalid address for 'S' output modifier");
7527 return;
7528 }
7529
7530 if (ad.disp)
7531 output_addr_const (file, ad.disp);
7532 else
7533 fprintf (file, "0");
7534
7535 if (ad.base)
7536 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7537 }
7538 return;
7539
7540 case 'N':
7541 if (GET_CODE (x) == REG)
7542 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7543 else if (GET_CODE (x) == MEM)
7544 x = change_address (x, VOIDmode,
7545 plus_constant (Pmode, XEXP (x, 0), 4));
7546 else
7547 output_operand_lossage ("register or memory expression expected "
7548 "for 'N' output modifier");
7549 break;
7550
7551 case 'M':
7552 if (GET_CODE (x) == REG)
7553 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7554 else if (GET_CODE (x) == MEM)
7555 x = change_address (x, VOIDmode,
7556 plus_constant (Pmode, XEXP (x, 0), 8));
7557 else
7558 output_operand_lossage ("register or memory expression expected "
7559 "for 'M' output modifier");
7560 break;
7561
7562 case 'Y':
7563 print_addrstyle_operand (file, x);
7564 return;
7565 }
7566
7567 switch (GET_CODE (x))
7568 {
7569 case REG:
7570 /* Print FP regs as fx instead of vx when they are accessed
7571 through non-vector mode. */
7572 if (code == 'v'
7573 || VECTOR_NOFP_REG_P (x)
7574 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7575 || (VECTOR_REG_P (x)
7576 && (GET_MODE_SIZE (GET_MODE (x)) /
7577 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7578 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7579 else
7580 fprintf (file, "%s", reg_names[REGNO (x)]);
7581 break;
7582
7583 case MEM:
7584 output_address (GET_MODE (x), XEXP (x, 0));
7585 break;
7586
7587 case CONST:
7588 case CODE_LABEL:
7589 case LABEL_REF:
7590 case SYMBOL_REF:
7591 output_addr_const (file, x);
7592 break;
7593
7594 case CONST_INT:
7595 ival = INTVAL (x);
7596 switch (code)
7597 {
7598 case 0:
7599 break;
7600 case 'b':
7601 ival &= 0xff;
7602 break;
7603 case 'c':
7604 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7605 break;
7606 case 'x':
7607 ival &= 0xffff;
7608 break;
7609 case 'h':
7610 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7611 break;
7612 case 'i':
7613 ival = s390_extract_part (x, HImode, 0);
7614 break;
7615 case 'j':
7616 ival = s390_extract_part (x, HImode, -1);
7617 break;
7618 case 'k':
7619 ival = s390_extract_part (x, SImode, 0);
7620 break;
7621 case 'm':
7622 ival = s390_extract_part (x, SImode, -1);
7623 break;
7624 case 'o':
7625 ival &= 0xffffffff;
7626 break;
7627 case 'e': case 'f':
7628 case 's': case 't':
7629 {
7630 int start, end;
7631 int len;
7632 bool ok;
7633
7634 len = (code == 's' || code == 'e' ? 64 : 32);
7635 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7636 gcc_assert (ok);
7637 if (code == 's' || code == 't')
7638 ival = start;
7639 else
7640 ival = end;
7641 }
7642 break;
7643 default:
7644 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7645 }
7646 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7647 break;
7648
7649 case CONST_WIDE_INT:
7650 if (code == 'b')
7651 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7652 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7653 else if (code == 'x')
7654 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7655 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7656 else if (code == 'h')
7657 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7658 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7659 else
7660 {
7661 if (code == 0)
7662 output_operand_lossage ("invalid constant - try using "
7663 "an output modifier");
7664 else
7665 output_operand_lossage ("invalid constant for output modifier '%c'",
7666 code);
7667 }
7668 break;
7669 case CONST_VECTOR:
7670 switch (code)
7671 {
7672 case 'h':
7673 gcc_assert (const_vec_duplicate_p (x));
7674 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7675 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7676 break;
7677 case 'e':
7678 case 's':
7679 {
7680 int start, end;
7681 bool ok;
7682
7683 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7684 gcc_assert (ok);
7685 ival = (code == 's') ? start : end;
7686 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7687 }
7688 break;
7689 case 't':
7690 {
7691 unsigned mask;
7692 bool ok = s390_bytemask_vector_p (x, &mask);
7693 gcc_assert (ok);
7694 fprintf (file, "%u", mask);
7695 }
7696 break;
7697
7698 default:
7699 output_operand_lossage ("invalid constant vector for output "
7700 "modifier '%c'", code);
7701 }
7702 break;
7703
7704 default:
7705 if (code == 0)
7706 output_operand_lossage ("invalid expression - try using "
7707 "an output modifier");
7708 else
7709 output_operand_lossage ("invalid expression for output "
7710 "modifier '%c'", code);
7711 break;
7712 }
7713 }
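
/* A few illustrative examples of the CONST_INT handling above (a sketch,
   not an exhaustive list): with code 'b' the value is masked to its low
   byte, so -1 is printed as 255; with code 'h' the low halfword is
   sign-extended, so 0xfffe is printed as -2; with no code the value is
   printed unmodified.  */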
7714
7715 /* Target hook for assembling integer objects. We need to define it
7716 here to work around a bug in some versions of GAS, which couldn't
7717 handle values smaller than INT_MIN when printed in decimal. */
7718
7719 static bool
7720 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7721 {
7722 if (size == 8 && aligned_p
7723 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7724 {
7725 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7726 INTVAL (x));
7727 return true;
7728 }
7729 return default_assemble_integer (x, size, aligned_p);
7730 }
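
/* For example (illustrative only): an aligned 8-byte constant whose
   value is below INT_MIN, such as -0x80000000 - 1, is emitted as
   "\t.quad\t0xffffffff7fffffff" instead of being printed in decimal,
   which the buggy GAS versions mentioned above could not parse.  */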
7731
7732 /* Returns true if register REGNO is used for forming
7733 a memory address in expression X. */
7734
7735 static bool
7736 reg_used_in_mem_p (int regno, rtx x)
7737 {
7738 enum rtx_code code = GET_CODE (x);
7739 int i, j;
7740 const char *fmt;
7741
7742 if (code == MEM)
7743 {
7744 if (refers_to_regno_p (regno, XEXP (x, 0)))
7745 return true;
7746 }
7747 else if (code == SET
7748 && GET_CODE (SET_DEST (x)) == PC)
7749 {
7750 if (refers_to_regno_p (regno, SET_SRC (x)))
7751 return true;
7752 }
7753
7754 fmt = GET_RTX_FORMAT (code);
7755 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7756 {
7757 if (fmt[i] == 'e'
7758 && reg_used_in_mem_p (regno, XEXP (x, i)))
7759 return true;
7760
7761 else if (fmt[i] == 'E')
7762 for (j = 0; j < XVECLEN (x, i); j++)
7763 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7764 return true;
7765 }
7766 return false;
7767 }
7768
7769 /* Returns true if expression DEP_RTX sets an address register
7770 used by instruction INSN to address memory. */
7771
7772 static bool
7773 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7774 {
7775 rtx target, pat;
7776
7777 if (NONJUMP_INSN_P (dep_rtx))
7778 dep_rtx = PATTERN (dep_rtx);
7779
7780 if (GET_CODE (dep_rtx) == SET)
7781 {
7782 target = SET_DEST (dep_rtx);
7783 if (GET_CODE (target) == STRICT_LOW_PART)
7784 target = XEXP (target, 0);
7785 while (GET_CODE (target) == SUBREG)
7786 target = SUBREG_REG (target);
7787
7788 if (GET_CODE (target) == REG)
7789 {
7790 int regno = REGNO (target);
7791
7792 if (s390_safe_attr_type (insn) == TYPE_LA)
7793 {
7794 pat = PATTERN (insn);
7795 if (GET_CODE (pat) == PARALLEL)
7796 {
7797 gcc_assert (XVECLEN (pat, 0) == 2);
7798 pat = XVECEXP (pat, 0, 0);
7799 }
7800 gcc_assert (GET_CODE (pat) == SET);
7801 return refers_to_regno_p (regno, SET_SRC (pat));
7802 }
7803 else if (get_attr_atype (insn) == ATYPE_AGEN)
7804 return reg_used_in_mem_p (regno, PATTERN (insn));
7805 }
7806 }
7807 return false;
7808 }
7809
7810 /* Return 1 if DEP_INSN sets a register that INSN uses in the agen unit. */
7811
7812 int
7813 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7814 {
7815 rtx dep_rtx = PATTERN (dep_insn);
7816 int i;
7817
7818 if (GET_CODE (dep_rtx) == SET
7819 && addr_generation_dependency_p (dep_rtx, insn))
7820 return 1;
7821 else if (GET_CODE (dep_rtx) == PARALLEL)
7822 {
7823 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7824 {
7825 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7826 return 1;
7827 }
7828 }
7829 return 0;
7830 }
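
/* Illustrative example (a sketch, not taken from the sources): for a
   dependency like

     (set (reg:DI %r1) ...)           <- DEP_INSN
     ... (mem:DI (reg:DI %r1)) ...    <- INSN uses %r1 to form an address

   the second insn needs %r1 in its address generation (agen) unit, so
   s390_agen_dep_p returns 1 and the scheduler can account for the
   additional latency.  */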
7831
7832
7833 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
7834 Increase the priority to execute INSN earlier, reduce the priority
7835 to execute it later. This is the s390 implementation of the
7836 scheduler's adjust_priority hook.
7837
7838 A STD instruction should be scheduled earlier,
7839 in order to use the bypass. */
7840 static int
7841 s390_adjust_priority (rtx_insn *insn, int priority)
7842 {
7843 if (! INSN_P (insn))
7844 return priority;
7845
7846 if (s390_tune <= PROCESSOR_2064_Z900)
7847 return priority;
7848
7849 switch (s390_safe_attr_type (insn))
7850 {
7851 case TYPE_FSTOREDF:
7852 case TYPE_FSTORESF:
7853 priority = priority << 3;
7854 break;
7855 case TYPE_STORE:
7856 case TYPE_STM:
7857 priority = priority << 1;
7858 break;
7859 default:
7860 break;
7861 }
7862 return priority;
7863 }
7864
7865
7866 /* The number of instructions that can be issued per cycle. */
7867
7868 static int
7869 s390_issue_rate (void)
7870 {
7871 switch (s390_tune)
7872 {
7873 case PROCESSOR_2084_Z990:
7874 case PROCESSOR_2094_Z9_109:
7875 case PROCESSOR_2094_Z9_EC:
7876 case PROCESSOR_2817_Z196:
7877 return 3;
7878 case PROCESSOR_2097_Z10:
7879 return 2;
7880 case PROCESSOR_9672_G5:
7881 case PROCESSOR_9672_G6:
7882 case PROCESSOR_2064_Z900:
7883 /* Starting with EC12 we use the sched_reorder hook to take care
7884 of instruction dispatch constraints. The algorithm only
7885 picks the best instruction and assumes only a single
7886 instruction gets issued per cycle. */
7887 case PROCESSOR_2827_ZEC12:
7888 case PROCESSOR_2964_Z13:
7889 default:
7890 return 1;
7891 }
7892 }
7893
7894 static int
7895 s390_first_cycle_multipass_dfa_lookahead (void)
7896 {
7897 return 4;
7898 }
7899
7900 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7901 Fix up MEMs as required. */
7902
7903 static void
7904 annotate_constant_pool_refs (rtx *x)
7905 {
7906 int i, j;
7907 const char *fmt;
7908
7909 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7910 || !CONSTANT_POOL_ADDRESS_P (*x));
7911
7912 /* Literal pool references can only occur inside a MEM ... */
7913 if (GET_CODE (*x) == MEM)
7914 {
7915 rtx memref = XEXP (*x, 0);
7916
7917 if (GET_CODE (memref) == SYMBOL_REF
7918 && CONSTANT_POOL_ADDRESS_P (memref))
7919 {
7920 rtx base = cfun->machine->base_reg;
7921 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7922 UNSPEC_LTREF);
7923
7924 *x = replace_equiv_address (*x, addr);
7925 return;
7926 }
7927
7928 if (GET_CODE (memref) == CONST
7929 && GET_CODE (XEXP (memref, 0)) == PLUS
7930 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7931 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7932 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7933 {
7934 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7935 rtx sym = XEXP (XEXP (memref, 0), 0);
7936 rtx base = cfun->machine->base_reg;
7937 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7938 UNSPEC_LTREF);
7939
7940 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7941 return;
7942 }
7943 }
7944
7945 /* ... or a load-address type pattern. */
7946 if (GET_CODE (*x) == SET)
7947 {
7948 rtx addrref = SET_SRC (*x);
7949
7950 if (GET_CODE (addrref) == SYMBOL_REF
7951 && CONSTANT_POOL_ADDRESS_P (addrref))
7952 {
7953 rtx base = cfun->machine->base_reg;
7954 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7955 UNSPEC_LTREF);
7956
7957 SET_SRC (*x) = addr;
7958 return;
7959 }
7960
7961 if (GET_CODE (addrref) == CONST
7962 && GET_CODE (XEXP (addrref, 0)) == PLUS
7963 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7964 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7965 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7966 {
7967 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7968 rtx sym = XEXP (XEXP (addrref, 0), 0);
7969 rtx base = cfun->machine->base_reg;
7970 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7971 UNSPEC_LTREF);
7972
7973 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7974 return;
7975 }
7976 }
7977
7978 /* Annotate LTREL_BASE as well. */
7979 if (GET_CODE (*x) == UNSPEC
7980 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7981 {
7982 rtx base = cfun->machine->base_reg;
7983 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7984 UNSPEC_LTREL_BASE);
7985 return;
7986 }
7987
7988 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7989 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7990 {
7991 if (fmt[i] == 'e')
7992 {
7993 annotate_constant_pool_refs (&XEXP (*x, i));
7994 }
7995 else if (fmt[i] == 'E')
7996 {
7997 for (j = 0; j < XVECLEN (*x, i); j++)
7998 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7999 }
8000 }
8001 }
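
/* Sketch of the transformation performed above: a literal pool access

     (mem (symbol_ref LC0))

   is rewritten into

     (mem (unspec [(symbol_ref LC0) (base_reg)] UNSPEC_LTREF))

   so that later passes can see which base register the reference relies
   on (plus a CONST_INT offset where applicable).  */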
8002
8003 /* Split all branches that exceed the maximum distance.
8004 Returns true if this created a new literal pool entry. */
8005
8006 static int
8007 s390_split_branches (void)
8008 {
8009 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8010 int new_literal = 0, ret;
8011 rtx_insn *insn;
8012 rtx pat, target;
8013 rtx *label;
8014
8015 /* We need correct insn addresses. */
8016
8017 shorten_branches (get_insns ());
8018
8019 /* Find all branches that exceed 64KB, and split them. */
8020
8021 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8022 {
8023 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8024 continue;
8025
8026 pat = PATTERN (insn);
8027 if (GET_CODE (pat) == PARALLEL)
8028 pat = XVECEXP (pat, 0, 0);
8029 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8030 continue;
8031
8032 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8033 {
8034 label = &SET_SRC (pat);
8035 }
8036 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8037 {
8038 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8039 label = &XEXP (SET_SRC (pat), 1);
8040 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8041 label = &XEXP (SET_SRC (pat), 2);
8042 else
8043 continue;
8044 }
8045 else
8046 continue;
8047
8048 if (get_attr_length (insn) <= 4)
8049 continue;
8050
8051 /* We are going to use the return register as a scratch register;
8052 make sure it will be saved/restored by the prologue/epilogue. */
8053 cfun_frame_layout.save_return_addr_p = 1;
8054
8055 if (!flag_pic)
8056 {
8057 new_literal = 1;
8058 rtx mem = force_const_mem (Pmode, *label);
8059 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8060 insn);
8061 INSN_ADDRESSES_NEW (set_insn, -1);
8062 annotate_constant_pool_refs (&PATTERN (set_insn));
8063
8064 target = temp_reg;
8065 }
8066 else
8067 {
8068 new_literal = 1;
8069 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8070 UNSPEC_LTREL_OFFSET);
8071 target = gen_rtx_CONST (Pmode, target);
8072 target = force_const_mem (Pmode, target);
8073 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8074 insn);
8075 INSN_ADDRESSES_NEW (set_insn, -1);
8076 annotate_constant_pool_refs (&PATTERN (set_insn));
8077
8078 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8079 cfun->machine->base_reg),
8080 UNSPEC_LTREL_BASE);
8081 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8082 }
8083
8084 ret = validate_change (insn, label, target, 0);
8085 gcc_assert (ret);
8086 }
8087
8088 return new_literal;
8089 }
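
/* Rough sketch of the splitting above: a branch whose target is out of
   range is changed so that the target address is first loaded into the
   return register (from the literal pool directly, or via an
   UNSPEC_LTREL_OFFSET/UNSPEC_LTREL_BASE pair when generating PIC code)
   and the branch then uses that register, at the price of clobbering
   r14 and adding a literal pool entry.  */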
8090
8091
8092 /* Find an annotated literal pool symbol referenced in RTX X,
8093 and store it at REF. Will abort if X contains references to
8094 more than one such pool symbol; multiple references to the same
8095 symbol are allowed, however.
8096
8097 The rtx pointed to by REF must be initialized to NULL_RTX
8098 by the caller before calling this routine. */
8099
8100 static void
8101 find_constant_pool_ref (rtx x, rtx *ref)
8102 {
8103 int i, j;
8104 const char *fmt;
8105
8106 /* Ignore LTREL_BASE references. */
8107 if (GET_CODE (x) == UNSPEC
8108 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8109 return;
8110 /* Likewise POOL_ENTRY insns. */
8111 if (GET_CODE (x) == UNSPEC_VOLATILE
8112 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8113 return;
8114
8115 gcc_assert (GET_CODE (x) != SYMBOL_REF
8116 || !CONSTANT_POOL_ADDRESS_P (x));
8117
8118 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8119 {
8120 rtx sym = XVECEXP (x, 0, 0);
8121 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8122 && CONSTANT_POOL_ADDRESS_P (sym));
8123
8124 if (*ref == NULL_RTX)
8125 *ref = sym;
8126 else
8127 gcc_assert (*ref == sym);
8128
8129 return;
8130 }
8131
8132 fmt = GET_RTX_FORMAT (GET_CODE (x));
8133 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8134 {
8135 if (fmt[i] == 'e')
8136 {
8137 find_constant_pool_ref (XEXP (x, i), ref);
8138 }
8139 else if (fmt[i] == 'E')
8140 {
8141 for (j = 0; j < XVECLEN (x, i); j++)
8142 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8143 }
8144 }
8145 }
8146
8147 /* Replace every reference to the annotated literal pool
8148 symbol REF in X by its base plus OFFSET. */
8149
8150 static void
8151 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8152 {
8153 int i, j;
8154 const char *fmt;
8155
8156 gcc_assert (*x != ref);
8157
8158 if (GET_CODE (*x) == UNSPEC
8159 && XINT (*x, 1) == UNSPEC_LTREF
8160 && XVECEXP (*x, 0, 0) == ref)
8161 {
8162 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8163 return;
8164 }
8165
8166 if (GET_CODE (*x) == PLUS
8167 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8168 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8169 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8170 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8171 {
8172 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8173 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8174 return;
8175 }
8176
8177 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8178 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8179 {
8180 if (fmt[i] == 'e')
8181 {
8182 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8183 }
8184 else if (fmt[i] == 'E')
8185 {
8186 for (j = 0; j < XVECLEN (*x, i); j++)
8187 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8188 }
8189 }
8190 }
8191
8192 /* Check whether X contains an UNSPEC_LTREL_BASE.
8193 Return its constant pool symbol if found, NULL_RTX otherwise. */
8194
8195 static rtx
8196 find_ltrel_base (rtx x)
8197 {
8198 int i, j;
8199 const char *fmt;
8200
8201 if (GET_CODE (x) == UNSPEC
8202 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8203 return XVECEXP (x, 0, 0);
8204
8205 fmt = GET_RTX_FORMAT (GET_CODE (x));
8206 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8207 {
8208 if (fmt[i] == 'e')
8209 {
8210 rtx fnd = find_ltrel_base (XEXP (x, i));
8211 if (fnd)
8212 return fnd;
8213 }
8214 else if (fmt[i] == 'E')
8215 {
8216 for (j = 0; j < XVECLEN (x, i); j++)
8217 {
8218 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8219 if (fnd)
8220 return fnd;
8221 }
8222 }
8223 }
8224
8225 return NULL_RTX;
8226 }
8227
8228 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8229
8230 static void
8231 replace_ltrel_base (rtx *x)
8232 {
8233 int i, j;
8234 const char *fmt;
8235
8236 if (GET_CODE (*x) == UNSPEC
8237 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8238 {
8239 *x = XVECEXP (*x, 0, 1);
8240 return;
8241 }
8242
8243 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8244 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8245 {
8246 if (fmt[i] == 'e')
8247 {
8248 replace_ltrel_base (&XEXP (*x, i));
8249 }
8250 else if (fmt[i] == 'E')
8251 {
8252 for (j = 0; j < XVECLEN (*x, i); j++)
8253 replace_ltrel_base (&XVECEXP (*x, i, j));
8254 }
8255 }
8256 }
8257
8258
8259 /* We keep a list of constants which we have to add to internal
8260 constant tables in the middle of large functions. */
8261
8262 #define NR_C_MODES 32
8263 machine_mode constant_modes[NR_C_MODES] =
8264 {
8265 TFmode, TImode, TDmode,
8266 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8267 V4SFmode, V2DFmode, V1TFmode,
8268 DFmode, DImode, DDmode,
8269 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8270 SFmode, SImode, SDmode,
8271 V4QImode, V2HImode, V1SImode, V1SFmode,
8272 HImode,
8273 V2QImode, V1HImode,
8274 QImode,
8275 V1QImode
8276 };
8277
8278 struct constant
8279 {
8280 struct constant *next;
8281 rtx value;
8282 rtx_code_label *label;
8283 };
8284
8285 struct constant_pool
8286 {
8287 struct constant_pool *next;
8288 rtx_insn *first_insn;
8289 rtx_insn *pool_insn;
8290 bitmap insns;
8291 rtx_insn *emit_pool_after;
8292
8293 struct constant *constants[NR_C_MODES];
8294 struct constant *execute;
8295 rtx_code_label *label;
8296 int size;
8297 };
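
/* A rough sketch of how these structures are used by the routines
   below: a pool is created with s390_alloc_pool / s390_start_pool,
   constants and execute templates are collected into it via
   s390_add_constant and s390_add_execute, the placeholder insn is
   replaced by the actual pool contents in s390_dump_pool, and finally
   the memory is released with s390_free_pool.  */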
8298
8299 /* Allocate new constant_pool structure. */
8300
8301 static struct constant_pool *
8302 s390_alloc_pool (void)
8303 {
8304 struct constant_pool *pool;
8305 int i;
8306
8307 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8308 pool->next = NULL;
8309 for (i = 0; i < NR_C_MODES; i++)
8310 pool->constants[i] = NULL;
8311
8312 pool->execute = NULL;
8313 pool->label = gen_label_rtx ();
8314 pool->first_insn = NULL;
8315 pool->pool_insn = NULL;
8316 pool->insns = BITMAP_ALLOC (NULL);
8317 pool->size = 0;
8318 pool->emit_pool_after = NULL;
8319
8320 return pool;
8321 }
8322
8323 /* Create new constant pool covering instructions starting at INSN
8324 and chain it to the end of POOL_LIST. */
8325
8326 static struct constant_pool *
8327 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8328 {
8329 struct constant_pool *pool, **prev;
8330
8331 pool = s390_alloc_pool ();
8332 pool->first_insn = insn;
8333
8334 for (prev = pool_list; *prev; prev = &(*prev)->next)
8335 ;
8336 *prev = pool;
8337
8338 return pool;
8339 }
8340
8341 /* End range of instructions covered by POOL at INSN and emit
8342 placeholder insn representing the pool. */
8343
8344 static void
8345 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8346 {
8347 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8348
8349 if (!insn)
8350 insn = get_last_insn ();
8351
8352 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8353 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8354 }
8355
8356 /* Add INSN to the list of insns covered by POOL. */
8357
8358 static void
8359 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8360 {
8361 bitmap_set_bit (pool->insns, INSN_UID (insn));
8362 }
8363
8364 /* Return pool out of POOL_LIST that covers INSN. */
8365
8366 static struct constant_pool *
8367 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8368 {
8369 struct constant_pool *pool;
8370
8371 for (pool = pool_list; pool; pool = pool->next)
8372 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8373 break;
8374
8375 return pool;
8376 }
8377
8378 /* Add constant VAL of mode MODE to the constant pool POOL. */
8379
8380 static void
8381 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8382 {
8383 struct constant *c;
8384 int i;
8385
8386 for (i = 0; i < NR_C_MODES; i++)
8387 if (constant_modes[i] == mode)
8388 break;
8389 gcc_assert (i != NR_C_MODES);
8390
8391 for (c = pool->constants[i]; c != NULL; c = c->next)
8392 if (rtx_equal_p (val, c->value))
8393 break;
8394
8395 if (c == NULL)
8396 {
8397 c = (struct constant *) xmalloc (sizeof *c);
8398 c->value = val;
8399 c->label = gen_label_rtx ();
8400 c->next = pool->constants[i];
8401 pool->constants[i] = c;
8402 pool->size += GET_MODE_SIZE (mode);
8403 }
8404 }
8405
8406 /* Return an rtx that represents the offset of X from the start of
8407 pool POOL. */
8408
8409 static rtx
8410 s390_pool_offset (struct constant_pool *pool, rtx x)
8411 {
8412 rtx label;
8413
8414 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8415 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8416 UNSPEC_POOL_OFFSET);
8417 return gen_rtx_CONST (GET_MODE (x), x);
8418 }
8419
8420 /* Find constant VAL of mode MODE in the constant pool POOL.
8421 Return an RTX describing the distance from the start of
8422 the pool to the location of the new constant. */
8423
8424 static rtx
8425 s390_find_constant (struct constant_pool *pool, rtx val,
8426 machine_mode mode)
8427 {
8428 struct constant *c;
8429 int i;
8430
8431 for (i = 0; i < NR_C_MODES; i++)
8432 if (constant_modes[i] == mode)
8433 break;
8434 gcc_assert (i != NR_C_MODES);
8435
8436 for (c = pool->constants[i]; c != NULL; c = c->next)
8437 if (rtx_equal_p (val, c->value))
8438 break;
8439
8440 gcc_assert (c);
8441
8442 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8443 }
8444
8445 /* Check whether INSN is an execute. Return the label_ref to its
8446 execute target template if so, NULL_RTX otherwise. */
8447
8448 static rtx
8449 s390_execute_label (rtx insn)
8450 {
8451 if (NONJUMP_INSN_P (insn)
8452 && GET_CODE (PATTERN (insn)) == PARALLEL
8453 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8454 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8455 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8456
8457 return NULL_RTX;
8458 }
8459
8460 /* Add execute target for INSN to the constant pool POOL. */
8461
8462 static void
8463 s390_add_execute (struct constant_pool *pool, rtx insn)
8464 {
8465 struct constant *c;
8466
8467 for (c = pool->execute; c != NULL; c = c->next)
8468 if (INSN_UID (insn) == INSN_UID (c->value))
8469 break;
8470
8471 if (c == NULL)
8472 {
8473 c = (struct constant *) xmalloc (sizeof *c);
8474 c->value = insn;
8475 c->label = gen_label_rtx ();
8476 c->next = pool->execute;
8477 pool->execute = c;
8478 pool->size += 6;
8479 }
8480 }
8481
8482 /* Find execute target for INSN in the constant pool POOL.
8483 Return an RTX describing the distance from the start of
8484 the pool to the location of the execute target. */
8485
8486 static rtx
8487 s390_find_execute (struct constant_pool *pool, rtx insn)
8488 {
8489 struct constant *c;
8490
8491 for (c = pool->execute; c != NULL; c = c->next)
8492 if (INSN_UID (insn) == INSN_UID (c->value))
8493 break;
8494
8495 gcc_assert (c);
8496
8497 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8498 }
8499
8500 /* For an execute INSN, extract the execute target template. */
8501
8502 static rtx
8503 s390_execute_target (rtx insn)
8504 {
8505 rtx pattern = PATTERN (insn);
8506 gcc_assert (s390_execute_label (insn));
8507
8508 if (XVECLEN (pattern, 0) == 2)
8509 {
8510 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8511 }
8512 else
8513 {
8514 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8515 int i;
8516
8517 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8518 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8519
8520 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8521 }
8522
8523 return pattern;
8524 }
8525
8526 /* Indicate that INSN cannot be duplicated. This is the case for
8527 execute insns that carry a unique label. */
8528
8529 static bool
8530 s390_cannot_copy_insn_p (rtx_insn *insn)
8531 {
8532 rtx label = s390_execute_label (insn);
8533 return label && label != const0_rtx;
8534 }
8535
8536 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8537 do not emit the pool base label. */
8538
8539 static void
8540 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8541 {
8542 struct constant *c;
8543 rtx_insn *insn = pool->pool_insn;
8544 int i;
8545
8546 /* Switch to rodata section. */
8547 if (TARGET_CPU_ZARCH)
8548 {
8549 insn = emit_insn_after (gen_pool_section_start (), insn);
8550 INSN_ADDRESSES_NEW (insn, -1);
8551 }
8552
8553 /* Ensure minimum pool alignment. */
8554 if (TARGET_CPU_ZARCH)
8555 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8556 else
8557 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8558 INSN_ADDRESSES_NEW (insn, -1);
8559
8560 /* Emit pool base label. */
8561 if (!remote_label)
8562 {
8563 insn = emit_label_after (pool->label, insn);
8564 INSN_ADDRESSES_NEW (insn, -1);
8565 }
8566
8567 /* Dump constants in descending alignment requirement order,
8568 ensuring proper alignment for every constant. */
8569 for (i = 0; i < NR_C_MODES; i++)
8570 for (c = pool->constants[i]; c; c = c->next)
8571 {
8572 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8573 rtx value = copy_rtx (c->value);
8574 if (GET_CODE (value) == CONST
8575 && GET_CODE (XEXP (value, 0)) == UNSPEC
8576 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8577 && XVECLEN (XEXP (value, 0), 0) == 1)
8578 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8579
8580 insn = emit_label_after (c->label, insn);
8581 INSN_ADDRESSES_NEW (insn, -1);
8582
8583 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8584 gen_rtvec (1, value),
8585 UNSPECV_POOL_ENTRY);
8586 insn = emit_insn_after (value, insn);
8587 INSN_ADDRESSES_NEW (insn, -1);
8588 }
8589
8590 /* Ensure minimum alignment for instructions. */
8591 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8592 INSN_ADDRESSES_NEW (insn, -1);
8593
8594 /* Output in-pool execute template insns. */
8595 for (c = pool->execute; c; c = c->next)
8596 {
8597 insn = emit_label_after (c->label, insn);
8598 INSN_ADDRESSES_NEW (insn, -1);
8599
8600 insn = emit_insn_after (s390_execute_target (c->value), insn);
8601 INSN_ADDRESSES_NEW (insn, -1);
8602 }
8603
8604 /* Switch back to previous section. */
8605 if (TARGET_CPU_ZARCH)
8606 {
8607 insn = emit_insn_after (gen_pool_section_end (), insn);
8608 INSN_ADDRESSES_NEW (insn, -1);
8609 }
8610
8611 insn = emit_barrier_after (insn);
8612 INSN_ADDRESSES_NEW (insn, -1);
8613
8614 /* Remove placeholder insn. */
8615 remove_insn (pool->pool_insn);
8616 }
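
/* The resulting pool layout is roughly (a sketch; details depend on
   TARGET_CPU_ZARCH): an optional switch to the read-only data section,
   an alignment directive, the pool base label (unless REMOTE_LABEL is
   set), the constants ordered by decreasing alignment requirement, the
   in-pool execute templates, and a switch back to the previous
   section.  */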
8617
8618 /* Free all memory used by POOL. */
8619
8620 static void
8621 s390_free_pool (struct constant_pool *pool)
8622 {
8623 struct constant *c, *next;
8624 int i;
8625
8626 for (i = 0; i < NR_C_MODES; i++)
8627 for (c = pool->constants[i]; c; c = next)
8628 {
8629 next = c->next;
8630 free (c);
8631 }
8632
8633 for (c = pool->execute; c; c = next)
8634 {
8635 next = c->next;
8636 free (c);
8637 }
8638
8639 BITMAP_FREE (pool->insns);
8640 free (pool);
8641 }
8642
8643
8644 /* Collect main literal pool. Return NULL on overflow. */
8645
8646 static struct constant_pool *
8647 s390_mainpool_start (void)
8648 {
8649 struct constant_pool *pool;
8650 rtx_insn *insn;
8651
8652 pool = s390_alloc_pool ();
8653
8654 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8655 {
8656 if (NONJUMP_INSN_P (insn)
8657 && GET_CODE (PATTERN (insn)) == SET
8658 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8659 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8660 {
8661 /* There might be two main_pool instructions if base_reg
8662 is call-clobbered; one for shrink-wrapped code and one
8663 for the rest. We want to keep the first. */
8664 if (pool->pool_insn)
8665 {
8666 insn = PREV_INSN (insn);
8667 delete_insn (NEXT_INSN (insn));
8668 continue;
8669 }
8670 pool->pool_insn = insn;
8671 }
8672
8673 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8674 {
8675 s390_add_execute (pool, insn);
8676 }
8677 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8678 {
8679 rtx pool_ref = NULL_RTX;
8680 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8681 if (pool_ref)
8682 {
8683 rtx constant = get_pool_constant (pool_ref);
8684 machine_mode mode = get_pool_mode (pool_ref);
8685 s390_add_constant (pool, constant, mode);
8686 }
8687 }
8688
8689 /* If hot/cold partitioning is enabled we have to make sure that
8690 the literal pool is emitted in the same section where the
8691 initialization of the literal pool base pointer takes place.
8692 emit_pool_after is only used in the non-overflow case on non-Z
8693 CPUs, where we can emit the literal pool at the end of the
8694 function body within the text section. */
8695 if (NOTE_P (insn)
8696 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8697 && !pool->emit_pool_after)
8698 pool->emit_pool_after = PREV_INSN (insn);
8699 }
8700
8701 gcc_assert (pool->pool_insn || pool->size == 0);
8702
8703 if (pool->size >= 4096)
8704 {
8705 /* We're going to chunkify the pool, so remove the main
8706 pool placeholder insn. */
8707 remove_insn (pool->pool_insn);
8708
8709 s390_free_pool (pool);
8710 pool = NULL;
8711 }
8712
8713 /* If the function ends with the section where the literal pool
8714 should be emitted, set the marker to its end. */
8715 if (pool && !pool->emit_pool_after)
8716 pool->emit_pool_after = get_last_insn ();
8717
8718 return pool;
8719 }
8720
8721 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8722 Modify the current function to output the pool constants as well as
8723 the pool register setup instruction. */
8724
8725 static void
8726 s390_mainpool_finish (struct constant_pool *pool)
8727 {
8728 rtx base_reg = cfun->machine->base_reg;
8729
8730 /* If the pool is empty, we're done. */
8731 if (pool->size == 0)
8732 {
8733 /* We don't actually need a base register after all. */
8734 cfun->machine->base_reg = NULL_RTX;
8735
8736 if (pool->pool_insn)
8737 remove_insn (pool->pool_insn);
8738 s390_free_pool (pool);
8739 return;
8740 }
8741
8742 /* We need correct insn addresses. */
8743 shorten_branches (get_insns ());
8744
8745 /* On zSeries, we use a LARL to load the pool register. The pool is
8746 located in the .rodata section, so we emit it after the function. */
8747 if (TARGET_CPU_ZARCH)
8748 {
8749 rtx set = gen_main_base_64 (base_reg, pool->label);
8750 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8751 INSN_ADDRESSES_NEW (insn, -1);
8752 remove_insn (pool->pool_insn);
8753
8754 insn = get_last_insn ();
8755 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8756 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8757
8758 s390_dump_pool (pool, 0);
8759 }
8760
8761 /* On S/390, if the total size of the function's code plus literal pool
8762 does not exceed 4096 bytes, we use BASR to set up a function base
8763 pointer, and emit the literal pool at the end of the function. */
8764 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8765 + pool->size + 8 /* alignment slop */ < 4096)
8766 {
8767 rtx set = gen_main_base_31_small (base_reg, pool->label);
8768 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8769 INSN_ADDRESSES_NEW (insn, -1);
8770 remove_insn (pool->pool_insn);
8771
8772 insn = emit_label_after (pool->label, insn);
8773 INSN_ADDRESSES_NEW (insn, -1);
8774
8775 /* emit_pool_after will be set by s390_mainpool_start to the
8776 last insn of the section where the literal pool should be
8777 emitted. */
8778 insn = pool->emit_pool_after;
8779
8780 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8781 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8782
8783 s390_dump_pool (pool, 1);
8784 }
8785
8786 /* Otherwise, we emit an inline literal pool and use BASR to branch
8787 over it, setting up the pool register at the same time. */
8788 else
8789 {
8790 rtx_code_label *pool_end = gen_label_rtx ();
8791
8792 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8793 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8794 JUMP_LABEL (insn) = pool_end;
8795 INSN_ADDRESSES_NEW (insn, -1);
8796 remove_insn (pool->pool_insn);
8797
8798 insn = emit_label_after (pool->label, insn);
8799 INSN_ADDRESSES_NEW (insn, -1);
8800
8801 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8802 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8803
8804 insn = emit_label_after (pool_end, pool->pool_insn);
8805 INSN_ADDRESSES_NEW (insn, -1);
8806
8807 s390_dump_pool (pool, 1);
8808 }
8809
8810
8811 /* Replace all literal pool references. */
8812
8813 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8814 {
8815 if (INSN_P (insn))
8816 replace_ltrel_base (&PATTERN (insn));
8817
8818 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8819 {
8820 rtx addr, pool_ref = NULL_RTX;
8821 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8822 if (pool_ref)
8823 {
8824 if (s390_execute_label (insn))
8825 addr = s390_find_execute (pool, insn);
8826 else
8827 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8828 get_pool_mode (pool_ref));
8829
8830 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8831 INSN_CODE (insn) = -1;
8832 }
8833 }
8834 }
8835
8836
8837 /* Free the pool. */
8838 s390_free_pool (pool);
8839 }
8840
8841 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8842 We have decided we cannot use this pool, so revert all changes
8843 to the current function that were done by s390_mainpool_start. */
8844 static void
8845 s390_mainpool_cancel (struct constant_pool *pool)
8846 {
8847 /* We didn't actually change the instruction stream, so simply
8848 free the pool memory. */
8849 s390_free_pool (pool);
8850 }
8851
8852
8853 /* Chunkify the literal pool. */
8854
8855 #define S390_POOL_CHUNK_MIN 0xc00
8856 #define S390_POOL_CHUNK_MAX 0xe00
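
/* Both limits stay below the 4 KB range reachable with a 12-bit
   base + displacement address; the gap between S390_POOL_CHUNK_MAX and
   4096 presumably leaves slack for alignment padding and the base
   register reload insns inserted later, while the lower
   S390_POOL_CHUNK_MIN threshold allows waiting for a convenient BARRIER
   before actually ending a chunk.  */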
8857
8858 static struct constant_pool *
8859 s390_chunkify_start (void)
8860 {
8861 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8862 int extra_size = 0;
8863 bitmap far_labels;
8864 rtx pending_ltrel = NULL_RTX;
8865 rtx_insn *insn;
8866
8867 rtx (*gen_reload_base) (rtx, rtx) =
8868 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8869
8870
8871 /* We need correct insn addresses. */
8872
8873 shorten_branches (get_insns ());
8874
8875 /* Scan all insns and move literals to pool chunks. */
8876
8877 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8878 {
8879 bool section_switch_p = false;
8880
8881 /* Check for pending LTREL_BASE. */
8882 if (INSN_P (insn))
8883 {
8884 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8885 if (ltrel_base)
8886 {
8887 gcc_assert (ltrel_base == pending_ltrel);
8888 pending_ltrel = NULL_RTX;
8889 }
8890 }
8891
8892 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8893 {
8894 if (!curr_pool)
8895 curr_pool = s390_start_pool (&pool_list, insn);
8896
8897 s390_add_execute (curr_pool, insn);
8898 s390_add_pool_insn (curr_pool, insn);
8899 }
8900 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8901 {
8902 rtx pool_ref = NULL_RTX;
8903 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8904 if (pool_ref)
8905 {
8906 rtx constant = get_pool_constant (pool_ref);
8907 machine_mode mode = get_pool_mode (pool_ref);
8908
8909 if (!curr_pool)
8910 curr_pool = s390_start_pool (&pool_list, insn);
8911
8912 s390_add_constant (curr_pool, constant, mode);
8913 s390_add_pool_insn (curr_pool, insn);
8914
8915 /* Don't split the pool chunk between a LTREL_OFFSET load
8916 and the corresponding LTREL_BASE. */
8917 if (GET_CODE (constant) == CONST
8918 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8919 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8920 {
8921 gcc_assert (!pending_ltrel);
8922 pending_ltrel = pool_ref;
8923 }
8924 }
8925 }
8926
8927 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8928 {
8929 if (curr_pool)
8930 s390_add_pool_insn (curr_pool, insn);
8931 /* An LTREL_BASE must follow within the same basic block. */
8932 gcc_assert (!pending_ltrel);
8933 }
8934
8935 if (NOTE_P (insn))
8936 switch (NOTE_KIND (insn))
8937 {
8938 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8939 section_switch_p = true;
8940 break;
8941 case NOTE_INSN_VAR_LOCATION:
8942 case NOTE_INSN_CALL_ARG_LOCATION:
8943 continue;
8944 default:
8945 break;
8946 }
8947
8948 if (!curr_pool
8949 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8950 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8951 continue;
8952
8953 if (TARGET_CPU_ZARCH)
8954 {
8955 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8956 continue;
8957
8958 s390_end_pool (curr_pool, NULL);
8959 curr_pool = NULL;
8960 }
8961 else
8962 {
8963 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8964 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8965 + extra_size;
8966
8967 /* We will later have to insert base register reload insns.
8968 Those will have an effect on code size, which we need to
8969 consider here. This calculation makes rather pessimistic
8970 worst-case assumptions. */
8971 if (LABEL_P (insn))
8972 extra_size += 6;
8973
8974 if (chunk_size < S390_POOL_CHUNK_MIN
8975 && curr_pool->size < S390_POOL_CHUNK_MIN
8976 && !section_switch_p)
8977 continue;
8978
8979 /* Pool chunks can only be inserted after BARRIERs ... */
8980 if (BARRIER_P (insn))
8981 {
8982 s390_end_pool (curr_pool, insn);
8983 curr_pool = NULL;
8984 extra_size = 0;
8985 }
8986
8987 /* ... so if we don't find one in time, create one. */
8988 else if (chunk_size > S390_POOL_CHUNK_MAX
8989 || curr_pool->size > S390_POOL_CHUNK_MAX
8990 || section_switch_p)
8991 {
8992 rtx_insn *label, *jump, *barrier, *next, *prev;
8993
8994 if (!section_switch_p)
8995 {
8996 /* We can insert the barrier only after a 'real' insn. */
8997 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8998 continue;
8999 if (get_attr_length (insn) == 0)
9000 continue;
9001 /* Don't separate LTREL_BASE from the corresponding
9002 LTREL_OFFSET load. */
9003 if (pending_ltrel)
9004 continue;
9005 next = insn;
9006 do
9007 {
9008 insn = next;
9009 next = NEXT_INSN (insn);
9010 }
9011 while (next
9012 && NOTE_P (next)
9013 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9014 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9015 }
9016 else
9017 {
9018 gcc_assert (!pending_ltrel);
9019
9020 /* The old pool has to end before the section switch
9021 note in order to make it part of the current
9022 section. */
9023 insn = PREV_INSN (insn);
9024 }
9025
9026 label = gen_label_rtx ();
9027 prev = insn;
9028 if (prev && NOTE_P (prev))
9029 prev = prev_nonnote_insn (prev);
9030 if (prev)
9031 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9032 INSN_LOCATION (prev));
9033 else
9034 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9035 barrier = emit_barrier_after (jump);
9036 insn = emit_label_after (label, barrier);
9037 JUMP_LABEL (jump) = label;
9038 LABEL_NUSES (label) = 1;
9039
9040 INSN_ADDRESSES_NEW (jump, -1);
9041 INSN_ADDRESSES_NEW (barrier, -1);
9042 INSN_ADDRESSES_NEW (insn, -1);
9043
9044 s390_end_pool (curr_pool, barrier);
9045 curr_pool = NULL;
9046 extra_size = 0;
9047 }
9048 }
9049 }
9050
9051 if (curr_pool)
9052 s390_end_pool (curr_pool, NULL);
9053 gcc_assert (!pending_ltrel);
9054
9055 /* Find all labels that are branched into
9056 from an insn belonging to a different chunk. */
9057
9058 far_labels = BITMAP_ALLOC (NULL);
9059
9060 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9061 {
9062 rtx_jump_table_data *table;
9063
9064 /* Labels marked with LABEL_PRESERVE_P can be the target
9065 of non-local jumps, so we have to mark them.
9066 The same holds for named labels.
9067
9068 Don't do that, however, if it is the label before
9069 a jump table. */
9070
9071 if (LABEL_P (insn)
9072 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9073 {
9074 rtx_insn *vec_insn = NEXT_INSN (insn);
9075 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9076 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9077 }
9078 /* Check potential targets in a table jump (casesi_jump). */
9079 else if (tablejump_p (insn, NULL, &table))
9080 {
9081 rtx vec_pat = PATTERN (table);
9082 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9083
9084 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9085 {
9086 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9087
9088 if (s390_find_pool (pool_list, label)
9089 != s390_find_pool (pool_list, insn))
9090 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9091 }
9092 }
9093 /* If we have a direct jump (conditional or unconditional),
9094 check all potential targets. */
9095 else if (JUMP_P (insn))
9096 {
9097 rtx pat = PATTERN (insn);
9098
9099 if (GET_CODE (pat) == PARALLEL)
9100 pat = XVECEXP (pat, 0, 0);
9101
9102 if (GET_CODE (pat) == SET)
9103 {
9104 rtx label = JUMP_LABEL (insn);
9105 if (label && !ANY_RETURN_P (label))
9106 {
9107 if (s390_find_pool (pool_list, label)
9108 != s390_find_pool (pool_list, insn))
9109 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9110 }
9111 }
9112 }
9113 }
9114
9115 /* Insert base register reload insns before every pool. */
9116
9117 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9118 {
9119 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9120 curr_pool->label);
9121 rtx_insn *insn = curr_pool->first_insn;
9122 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9123 }
9124
9125 /* Insert base register reload insns at every far label. */
9126
9127 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9128 if (LABEL_P (insn)
9129 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9130 {
9131 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9132 if (pool)
9133 {
9134 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9135 pool->label);
9136 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9137 }
9138 }
9139
9140
9141 BITMAP_FREE (far_labels);
9142
9143
9144 /* Recompute insn addresses. */
9145
9146 init_insn_lengths ();
9147 shorten_branches (get_insns ());
9148
9149 return pool_list;
9150 }
9151
9152 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9153 After we have decided to use this list, finish implementing
9154 all changes to the current function as required. */
9155
9156 static void
9157 s390_chunkify_finish (struct constant_pool *pool_list)
9158 {
9159 struct constant_pool *curr_pool = NULL;
9160 rtx_insn *insn;
9161
9162
9163 /* Replace all literal pool references. */
9164
9165 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9166 {
9167 if (INSN_P (insn))
9168 replace_ltrel_base (&PATTERN (insn));
9169
9170 curr_pool = s390_find_pool (pool_list, insn);
9171 if (!curr_pool)
9172 continue;
9173
9174 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9175 {
9176 rtx addr, pool_ref = NULL_RTX;
9177 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9178 if (pool_ref)
9179 {
9180 if (s390_execute_label (insn))
9181 addr = s390_find_execute (curr_pool, insn);
9182 else
9183 addr = s390_find_constant (curr_pool,
9184 get_pool_constant (pool_ref),
9185 get_pool_mode (pool_ref));
9186
9187 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9188 INSN_CODE (insn) = -1;
9189 }
9190 }
9191 }
9192
9193 /* Dump out all literal pools. */
9194
9195 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9196 s390_dump_pool (curr_pool, 0);
9197
9198 /* Free pool list. */
9199
9200 while (pool_list)
9201 {
9202 struct constant_pool *next = pool_list->next;
9203 s390_free_pool (pool_list);
9204 pool_list = next;
9205 }
9206 }
9207
9208 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9209 We have decided we cannot use this list, so revert all changes
9210 to the current function that were done by s390_chunkify_start. */
9211
9212 static void
9213 s390_chunkify_cancel (struct constant_pool *pool_list)
9214 {
9215 struct constant_pool *curr_pool = NULL;
9216 rtx_insn *insn;
9217
9218 /* Remove all pool placeholder insns. */
9219
9220 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9221 {
9222 /* Did we insert an extra barrier? Remove it. */
9223 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9224 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9225 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9226
9227 if (jump && JUMP_P (jump)
9228 && barrier && BARRIER_P (barrier)
9229 && label && LABEL_P (label)
9230 && GET_CODE (PATTERN (jump)) == SET
9231 && SET_DEST (PATTERN (jump)) == pc_rtx
9232 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9233 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9234 {
9235 remove_insn (jump);
9236 remove_insn (barrier);
9237 remove_insn (label);
9238 }
9239
9240 remove_insn (curr_pool->pool_insn);
9241 }
9242
9243 /* Remove all base register reload insns. */
9244
9245 for (insn = get_insns (); insn; )
9246 {
9247 rtx_insn *next_insn = NEXT_INSN (insn);
9248
9249 if (NONJUMP_INSN_P (insn)
9250 && GET_CODE (PATTERN (insn)) == SET
9251 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9252 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9253 remove_insn (insn);
9254
9255 insn = next_insn;
9256 }
9257
9258 /* Free pool list. */
9259
9260 while (pool_list)
9261 {
9262 struct constant_pool *next = pool_list->next;
9263 s390_free_pool (pool_list);
9264 pool_list = next;
9265 }
9266 }
9267
9268 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9269
9270 void
9271 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9272 {
9273 switch (GET_MODE_CLASS (mode))
9274 {
9275 case MODE_FLOAT:
9276 case MODE_DECIMAL_FLOAT:
9277 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9278
9279 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9280 break;
9281
9282 case MODE_INT:
9283 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9284 mark_symbol_refs_as_used (exp);
9285 break;
9286
9287 case MODE_VECTOR_INT:
9288 case MODE_VECTOR_FLOAT:
9289 {
9290 int i;
9291 machine_mode inner_mode;
9292 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9293
9294 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9295 for (i = 0; i < XVECLEN (exp, 0); i++)
9296 s390_output_pool_entry (XVECEXP (exp, 0, i),
9297 inner_mode,
9298 i == 0
9299 ? align
9300 : GET_MODE_BITSIZE (inner_mode));
9301 }
9302 break;
9303
9304 default:
9305 gcc_unreachable ();
9306 }
9307 }
9308
9309
9310 /* Return an RTL expression representing the value of the return address
9311 for the frame COUNT steps up from the current frame. FRAME is the
9312 frame pointer of that frame. */
9313
9314 rtx
9315 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9316 {
9317 int offset;
9318 rtx addr;
9319
9320 /* Without backchain, we fail for all but the current frame. */
9321
9322 if (!TARGET_BACKCHAIN && count > 0)
9323 return NULL_RTX;
9324
9325 /* For the current frame, we need to make sure the initial
9326 value of RETURN_REGNUM is actually saved. */
9327
9328 if (count == 0)
9329 {
9330 /* On non-z architectures branch splitting could overwrite r14. */
9331 if (TARGET_CPU_ZARCH)
9332 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9333 else
9334 {
9335 cfun_frame_layout.save_return_addr_p = true;
9336 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9337 }
9338 }
9339
9340 if (TARGET_PACKED_STACK)
9341 offset = -2 * UNITS_PER_LONG;
9342 else
9343 offset = RETURN_REGNUM * UNITS_PER_LONG;
9344
9345 addr = plus_constant (Pmode, frame, offset);
9346 addr = memory_address (Pmode, addr);
9347 return gen_rtx_MEM (Pmode, addr);
9348 }
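
/* For example (a sketch, assuming the usual s390 layouts): with the
   default stack layout the return address of an outer frame is read
   from FRAME + RETURN_REGNUM * UNITS_PER_LONG, i.e. the r14 slot of
   that frame's register save area, while with a packed stack it is
   read from FRAME - 2 * UNITS_PER_LONG.  */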
9349
9350 /* Return an RTL expression representing the back chain stored in
9351 the current stack frame. */
9352
9353 rtx
9354 s390_back_chain_rtx (void)
9355 {
9356 rtx chain;
9357
9358 gcc_assert (TARGET_BACKCHAIN);
9359
9360 if (TARGET_PACKED_STACK)
9361 chain = plus_constant (Pmode, stack_pointer_rtx,
9362 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9363 else
9364 chain = stack_pointer_rtx;
9365
9366 chain = gen_rtx_MEM (Pmode, chain);
9367 return chain;
9368 }
9369
9370 /* Find the first call-clobbered register that is unused in the current
9371 function. It could be used as base register in a leaf function
9372 or for holding the return address before the epilogue. */
9373
9374 static int
9375 find_unused_clobbered_reg (void)
9376 {
9377 int i;
9378 for (i = 0; i < 6; i++)
9379 if (!df_regs_ever_live_p (i))
9380 return i;
9381 return 0;
9382 }
9383
9384
9385 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9386 clobbered hard regs in SETREG. */
9387
9388 static void
9389 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9390 {
9391 char *regs_ever_clobbered = (char *)data;
9392 unsigned int i, regno;
9393 machine_mode mode = GET_MODE (setreg);
9394
9395 if (GET_CODE (setreg) == SUBREG)
9396 {
9397 rtx inner = SUBREG_REG (setreg);
9398 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9399 return;
9400 regno = subreg_regno (setreg);
9401 }
9402 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9403 regno = REGNO (setreg);
9404 else
9405 return;
9406
9407 for (i = regno;
9408 i < regno + HARD_REGNO_NREGS (regno, mode);
9409 i++)
9410 regs_ever_clobbered[i] = 1;
9411 }
9412
9413 /* Walks through all basic blocks of the current function looking
9414 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9415 of the passed char array REGS_EVER_CLOBBERED are set to one for
9416 each of those regs. */
9417
9418 static void
9419 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9420 {
9421 basic_block cur_bb;
9422 rtx_insn *cur_insn;
9423 unsigned int i;
9424
9425 memset (regs_ever_clobbered, 0, 32);
9426
9427 /* For non-leaf functions we have to consider all call clobbered regs to be
9428 clobbered. */
9429 if (!crtl->is_leaf)
9430 {
9431 for (i = 0; i < 32; i++)
9432 regs_ever_clobbered[i] = call_really_used_regs[i];
9433 }
9434
9435 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9436 this work is done by liveness analysis (mark_regs_live_at_end).
9437 Special care is needed for functions containing landing pads. Landing pads
9438 may use the eh registers, but the code which sets these registers is not
9439 contained in that function. Hence s390_regs_ever_clobbered is not able to
9440 deal with this automatically. */
9441 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9442 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9443 if (crtl->calls_eh_return
9444 || (cfun->machine->has_landing_pad_p
9445 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9446 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9447
9448 /* For nonlocal gotos all call-saved registers have to be saved.
9449 This flag is also set for the unwinding code in libgcc.
9450 See expand_builtin_unwind_init. For regs_ever_live this is done by
9451 reload. */
9452 if (crtl->saves_all_registers)
9453 for (i = 0; i < 32; i++)
9454 if (!call_really_used_regs[i])
9455 regs_ever_clobbered[i] = 1;
9456
9457 FOR_EACH_BB_FN (cur_bb, cfun)
9458 {
9459 FOR_BB_INSNS (cur_bb, cur_insn)
9460 {
9461 rtx pat;
9462
9463 if (!INSN_P (cur_insn))
9464 continue;
9465
9466 pat = PATTERN (cur_insn);
9467
9468 /* Ignore GPR restore insns. */
9469 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9470 {
9471 if (GET_CODE (pat) == SET
9472 && GENERAL_REG_P (SET_DEST (pat)))
9473 {
9474 /* lgdr */
9475 if (GET_MODE (SET_SRC (pat)) == DImode
9476 && FP_REG_P (SET_SRC (pat)))
9477 continue;
9478
9479 /* l / lg */
9480 if (GET_CODE (SET_SRC (pat)) == MEM)
9481 continue;
9482 }
9483
9484 /* lm / lmg */
9485 if (GET_CODE (pat) == PARALLEL
9486 && load_multiple_operation (pat, VOIDmode))
9487 continue;
9488 }
9489
9490 note_stores (pat,
9491 s390_reg_clobbered_rtx,
9492 regs_ever_clobbered);
9493 }
9494 }
9495 }
9496
9497 /* Determine the frame area which actually has to be accessed
9498 in the function epilogue. The values are stored at the
9499 given pointers AREA_BOTTOM (address of the lowest used stack
9500 address) and AREA_TOP (address of the first item which does
9501 not belong to the stack frame). */
9502
9503 static void
9504 s390_frame_area (int *area_bottom, int *area_top)
9505 {
9506 int b, t;
9507
9508 b = INT_MAX;
9509 t = INT_MIN;
9510
9511 if (cfun_frame_layout.first_restore_gpr != -1)
9512 {
9513 b = (cfun_frame_layout.gprs_offset
9514 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9515 t = b + (cfun_frame_layout.last_restore_gpr
9516 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9517 }
9518
9519 if (TARGET_64BIT && cfun_save_high_fprs_p)
9520 {
9521 b = MIN (b, cfun_frame_layout.f8_offset);
9522 t = MAX (t, (cfun_frame_layout.f8_offset
9523 + cfun_frame_layout.high_fprs * 8));
9524 }
9525
9526 if (!TARGET_64BIT)
9527 {
9528 if (cfun_fpr_save_p (FPR4_REGNUM))
9529 {
9530 b = MIN (b, cfun_frame_layout.f4_offset);
9531 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9532 }
9533 if (cfun_fpr_save_p (FPR6_REGNUM))
9534 {
9535 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9536 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9537 }
9538 }
9539 *area_bottom = b;
9540 *area_top = t;
9541 }

9542 /* Update gpr_save_slots in the frame layout trying to make use of
9543 FPRs as GPR save slots.
9544 This is a helper routine of s390_register_info. */
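/* Illustrative sketch only: on z10 or later in a leaf function, a
   save/restore pair such as
       stmg %r14,%r15,112(%r15) ... lmg %r14,%r15,112(%r15)
   can instead become register-to-register moves into otherwise unused
   call-clobbered FPRs, roughly
       ldgr %f0,%r14 ... lgdr %r14,%f0
       ldgr %f2,%r15 ... lgdr %r15,%f2
   avoiding the stack accesses entirely.  Which FPRs get picked depends
   on which ones are free.  */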
9545
9546 static void
9547 s390_register_info_gprtofpr ()
9548 {
9549 int save_reg_slot = FPR0_REGNUM;
9550 int i, j;
9551
9552 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9553 return;
9554
9555 for (i = 15; i >= 6; i--)
9556 {
9557 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9558 continue;
9559
9560 /* Advance to the next FP register which can be used as a
9561 GPR save slot. */
9562 while ((!call_really_used_regs[save_reg_slot]
9563 || df_regs_ever_live_p (save_reg_slot)
9564 || cfun_fpr_save_p (save_reg_slot))
9565 && FP_REGNO_P (save_reg_slot))
9566 save_reg_slot++;
9567 if (!FP_REGNO_P (save_reg_slot))
9568 {
9569 /* We only want to use ldgr/lgdr if we can get rid of
9570 stm/lm entirely. So undo the gpr slot allocation in
9571 case we ran out of FPR save slots. */
9572 for (j = 6; j <= 15; j++)
9573 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9574 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9575 break;
9576 }
9577 cfun_gpr_save_slot (i) = save_reg_slot++;
9578 }
9579 }
9580
9581 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9582 stdarg.
9583 This is a helper routine for s390_register_info. */
9584
9585 static void
9586 s390_register_info_stdarg_fpr ()
9587 {
9588 int i;
9589 int min_fpr;
9590 int max_fpr;
9591
9592 /* Save the FP argument regs for stdarg. f0 and f2 for 31 bit;
9593 f0, f2, f4 and f6 for 64 bit. */
9594 if (!cfun->stdarg
9595 || !TARGET_HARD_FLOAT
9596 || !cfun->va_list_fpr_size
9597 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9598 return;
9599
9600 min_fpr = crtl->args.info.fprs;
9601 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9602 if (max_fpr >= FP_ARG_NUM_REG)
9603 max_fpr = FP_ARG_NUM_REG - 1;
9604
9605 /* FPR argument regs start at f0. */
9606 min_fpr += FPR0_REGNUM;
9607 max_fpr += FPR0_REGNUM;
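/* Note: the FP argument registers (f0, f2 on 31 bit; f0, f2, f4, f6 on
   64 bit) occupy consecutive hard register numbers starting at
   FPR0_REGNUM, which is what makes the simple additions above work.  */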
9608
9609 for (i = min_fpr; i <= max_fpr; i++)
9610 cfun_set_fpr_save (i);
9611 }
9612
9613 /* Reserve the GPR save slots for GPRs which need to be saved due to
9614 stdarg.
9615 This is a helper routine for s390_register_info. */
9616
9617 static void
9618 s390_register_info_stdarg_gpr ()
9619 {
9620 int i;
9621 int min_gpr;
9622 int max_gpr;
9623
9624 if (!cfun->stdarg
9625 || !cfun->va_list_gpr_size
9626 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9627 return;
9628
9629 min_gpr = crtl->args.info.gprs;
9630 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9631 if (max_gpr >= GP_ARG_NUM_REG)
9632 max_gpr = GP_ARG_NUM_REG - 1;
9633
9634 /* GPR argument regs start at r2. */
9635 min_gpr += GPR2_REGNUM;
9636 max_gpr += GPR2_REGNUM;
9637
9638 /* If r6 was supposed to be saved into an FPR and now needs to go to
9639 the stack for varargs, we have to adjust the restore range to make
9640 sure that the restore is done from the stack as well. */
9641 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9642 && min_gpr <= GPR6_REGNUM
9643 && max_gpr >= GPR6_REGNUM)
9644 {
9645 if (cfun_frame_layout.first_restore_gpr == -1
9646 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9647 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9648 if (cfun_frame_layout.last_restore_gpr == -1
9649 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9650 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9651 }
9652
9653 if (cfun_frame_layout.first_save_gpr == -1
9654 || cfun_frame_layout.first_save_gpr > min_gpr)
9655 cfun_frame_layout.first_save_gpr = min_gpr;
9656
9657 if (cfun_frame_layout.last_save_gpr == -1
9658 || cfun_frame_layout.last_save_gpr < max_gpr)
9659 cfun_frame_layout.last_save_gpr = max_gpr;
9660
9661 for (i = min_gpr; i <= max_gpr; i++)
9662 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9663 }
9664
9665 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9666 prologue and epilogue. */
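/* Example: if only r9 and r14 end up with a stack save slot, the
   resulting range is r9..r14.  stm(g)/lm(g) always operate on a
   contiguous block of registers, so the registers in between are
   saved and restored as well even if they would not strictly need
   to be.  */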
9667
9668 static void
9669 s390_register_info_set_ranges ()
9670 {
9671 int i, j;
9672
9673 /* Find the first and the last save slot supposed to use the stack
9674 to set the restore range.
9675 Vararg regs might be marked to be saved to the stack, but only the
9676 call-saved regs (i.e. r6) really need restoring. This code
9677 assumes that the vararg regs have not yet been recorded in
9678 cfun_gpr_save_slot. */
9679 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9680 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9681 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9682 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9683 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9684 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9685 }
9686
9687 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9688 for registers which need to be saved in function prologue.
9689 This function can be used until the insns emitted for save/restore
9690 of the regs are visible in the RTL stream. */
9691
9692 static void
9693 s390_register_info ()
9694 {
9695 int i;
9696 char clobbered_regs[32];
9697
9698 gcc_assert (!epilogue_completed);
9699
9700 if (reload_completed)
9701 /* After reload we rely on our own routine to determine which
9702 registers need saving. */
9703 s390_regs_ever_clobbered (clobbered_regs);
9704 else
9705 /* During reload we use regs_ever_live as a base since reload
9706 makes changes there which we would otherwise not be aware
9707 of. */
9708 for (i = 0; i < 32; i++)
9709 clobbered_regs[i] = df_regs_ever_live_p (i);
9710
9711 for (i = 0; i < 32; i++)
9712 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9713
9714 /* Mark the call-saved FPRs which need to be saved.
9715 This needs to be done before checking the special GPRs since the
9716 stack pointer usage depends on whether high FPRs have to be saved
9717 or not. */
9718 cfun_frame_layout.fpr_bitmap = 0;
9719 cfun_frame_layout.high_fprs = 0;
9720 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9721 if (clobbered_regs[i] && !call_really_used_regs[i])
9722 {
9723 cfun_set_fpr_save (i);
9724 if (i >= FPR8_REGNUM)
9725 cfun_frame_layout.high_fprs++;
9726 }
9727
9728 /* Register 12 is used for the GOT address, but also as a temp in the
9729 prologue for split-stack stdarg functions (unless r14 is available). */
9730 clobbered_regs[12]
9731 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9732 || (flag_split_stack && cfun->stdarg
9733 && (crtl->is_leaf || TARGET_TPF_PROFILING
9734 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9735
9736 clobbered_regs[BASE_REGNUM]
9737 |= (cfun->machine->base_reg
9738 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9739
9740 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9741 |= !!frame_pointer_needed;
9742
9743 /* On pre-z900 machines this decision might not be made until
9744 machine dependent reorg.
9745 save_return_addr_p will only be set on non-zarch machines so
9746 there is no risk that r14 goes into an FPR instead of a stack
9747 slot. */
9748 clobbered_regs[RETURN_REGNUM]
9749 |= (!crtl->is_leaf
9750 || TARGET_TPF_PROFILING
9751 || cfun->machine->split_branches_pending_p
9752 || cfun_frame_layout.save_return_addr_p
9753 || crtl->calls_eh_return);
9754
9755 clobbered_regs[STACK_POINTER_REGNUM]
9756 |= (!crtl->is_leaf
9757 || TARGET_TPF_PROFILING
9758 || cfun_save_high_fprs_p
9759 || get_frame_size () > 0
9760 || (reload_completed && cfun_frame_layout.frame_size > 0)
9761 || cfun->calls_alloca);
9762
9763 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9764
9765 for (i = 6; i < 16; i++)
9766 if (clobbered_regs[i])
9767 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9768
9769 s390_register_info_stdarg_fpr ();
9770 s390_register_info_gprtofpr ();
9771 s390_register_info_set_ranges ();
9772 /* stdarg functions might need to save GPRs 2 to 6. This might
9773 override the GPR->FPR save decision made by
9774 s390_register_info_gprtofpr for r6 since vararg regs must go to
9775 the stack. */
9776 s390_register_info_stdarg_gpr ();
9777 }
9778
9779 /* This function is called by s390_optimize_prologue in order to get
9780 rid of unnecessary GPR save/restore instructions. The register info
9781 for the GPRs is re-computed and the ranges are re-calculated. */
9782
9783 static void
9784 s390_optimize_register_info ()
9785 {
9786 char clobbered_regs[32];
9787 int i;
9788
9789 gcc_assert (epilogue_completed);
9790 gcc_assert (!cfun->machine->split_branches_pending_p);
9791
9792 s390_regs_ever_clobbered (clobbered_regs);
9793
9794 for (i = 0; i < 32; i++)
9795 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9796
9797 /* There is still special treatment needed for cases invisible to
9798 s390_regs_ever_clobbered. */
9799 clobbered_regs[RETURN_REGNUM]
9800 |= (TARGET_TPF_PROFILING
9801 /* When expanding builtin_return_addr in ESA mode we do not
9802 know whether r14 will later be needed as scratch reg when
9803 doing branch splitting. So the builtin always accesses the
9804 r14 save slot and we need to stick to the save/restore
9805 decision for r14 even if it turns out that it didn't get
9806 clobbered. */
9807 || cfun_frame_layout.save_return_addr_p
9808 || crtl->calls_eh_return);
9809
9810 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9811
9812 for (i = 6; i < 16; i++)
9813 if (!clobbered_regs[i])
9814 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9815
9816 s390_register_info_set_ranges ();
9817 s390_register_info_stdarg_gpr ();
9818 }
9819
9820 /* Fill cfun->machine with info about frame of current function. */
9821
9822 static void
9823 s390_frame_info (void)
9824 {
9825 HOST_WIDE_INT lowest_offset;
9826
9827 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9828 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9829
9830 /* The va_arg builtin uses a constant distance of 16 *
9831 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9832 pointer. So even if we are going to save the stack pointer in an
9833 FPR we need the stack space in order to keep the offsets
9834 correct. */
9835 if (cfun->stdarg && cfun_save_arg_fprs_p)
9836 {
9837 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9838
9839 if (cfun_frame_layout.first_save_gpr_slot == -1)
9840 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9841 }
9842
9843 cfun_frame_layout.frame_size = get_frame_size ();
9844 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9845 fatal_error (input_location,
9846 "total size of local variables exceeds architecture limit");
9847
9848 if (!TARGET_PACKED_STACK)
9849 {
9850 /* Fixed stack layout. */
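/* Sketch for the common 64-bit case (UNITS_PER_LONG == 8,
   STACK_POINTER_OFFSET == 160): the save slots end up in the
   caller-provided register save area at the incoming stack pointer,
   i.e. the backchain at offset 0, the rN save slot at N * 8 (r6 at
   48, r15 at 120), f0/f2 at 128/136, f4/f6 at 144/152, while f8-f15
   (f8_offset) go into the callee's own frame below.  */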
9851 cfun_frame_layout.backchain_offset = 0;
9852 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9853 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9854 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9855 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9856 * UNITS_PER_LONG);
9857 }
9858 else if (TARGET_BACKCHAIN)
9859 {
9860 /* Kernel stack layout - packed stack, backchain, no float */
9861 gcc_assert (TARGET_SOFT_FLOAT);
9862 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9863 - UNITS_PER_LONG);
9864
9865 /* The distance between the backchain and the return address
9866 save slot must not change. So we always need a slot for the
9867 stack pointer which resides in between. */
9868 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9869
9870 cfun_frame_layout.gprs_offset
9871 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9872
9873 /* FPRs will not be saved. Nevertheless pick sane values to
9874 keep area calculations valid. */
9875 cfun_frame_layout.f0_offset =
9876 cfun_frame_layout.f4_offset =
9877 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9878 }
9879 else
9880 {
9881 int num_fprs;
9882
9883 /* Packed stack layout without backchain. */
9884
9885 /* With stdarg FPRs need their dedicated slots. */
9886 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9887 : (cfun_fpr_save_p (FPR4_REGNUM) +
9888 cfun_fpr_save_p (FPR6_REGNUM)));
9889 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9890
9891 num_fprs = (cfun->stdarg ? 2
9892 : (cfun_fpr_save_p (FPR0_REGNUM)
9893 + cfun_fpr_save_p (FPR2_REGNUM)));
9894 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9895
9896 cfun_frame_layout.gprs_offset
9897 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9898
9899 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9900 - cfun_frame_layout.high_fprs * 8);
9901 }
9902
9903 if (cfun_save_high_fprs_p)
9904 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9905
9906 if (!crtl->is_leaf)
9907 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9908
9909 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9910 sized area at the bottom of the stack. This is required also for
9911 leaf functions. When GCC generates a local stack reference it
9912 will always add STACK_POINTER_OFFSET to all these references. */
9913 if (crtl->is_leaf
9914 && !TARGET_TPF_PROFILING
9915 && cfun_frame_layout.frame_size == 0
9916 && !cfun->calls_alloca)
9917 return;
9918
9919 /* Calculate the number of bytes we have used in our own register
9920 save area. With the packed stack layout we can re-use the
9921 remaining bytes for normal stack elements. */
9922
9923 if (TARGET_PACKED_STACK)
9924 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9925 cfun_frame_layout.f4_offset),
9926 cfun_frame_layout.gprs_offset);
9927 else
9928 lowest_offset = 0;
9929
9930 if (TARGET_BACKCHAIN)
9931 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9932
9933 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9934
9935 /* If, under 31 bit, an odd number of GPRs has to be saved, we have to
9936 adjust the frame size to sustain 8-byte alignment of stack
9937 frames. */
9938 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9939 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9940 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9941 }
9942
9943 /* Generate frame layout. Fills in register and frame data for the current
9944 function in cfun->machine. This routine can be called multiple times;
9945 it will re-do the complete frame layout every time. */
9946
9947 static void
9948 s390_init_frame_layout (void)
9949 {
9950 HOST_WIDE_INT frame_size;
9951 int base_used;
9952
9953 /* After LRA the frame layout is supposed to be read-only and should
9954 not be re-computed. */
9955 if (reload_completed)
9956 return;
9957
9958 /* On S/390 machines, we may need to perform branch splitting, which
9959 will require both base and return address register. We have no
9960 choice but to assume we're going to need them until right at the
9961 end of the machine dependent reorg phase. */
9962 if (!TARGET_CPU_ZARCH)
9963 cfun->machine->split_branches_pending_p = true;
9964
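/* The register info and the frame info depend on each other: which
   registers (including the base register) need saving influences the
   frame size, and the frame size in turn can make the base register
   necessary (displacement out of range).  Iterate until the frame
   size no longer changes.  */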
9965 do
9966 {
9967 frame_size = cfun_frame_layout.frame_size;
9968
9969 /* Try to predict whether we'll need the base register. */
9970 base_used = cfun->machine->split_branches_pending_p
9971 || crtl->uses_const_pool
9972 || (!DISP_IN_RANGE (frame_size)
9973 && !CONST_OK_FOR_K (frame_size));
9974
9975 /* Decide which register to use as literal pool base. In small
9976 leaf functions, try to use an unused call-clobbered register
9977 as base register to avoid save/restore overhead. */
9978 if (!base_used)
9979 cfun->machine->base_reg = NULL_RTX;
9980 else
9981 {
9982 int br = 0;
9983
9984 if (crtl->is_leaf)
9985 /* Prefer r5 (most likely to be free). */
9986 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9987 ;
9988 cfun->machine->base_reg =
9989 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9990 }
9991
9992 s390_register_info ();
9993 s390_frame_info ();
9994 }
9995 while (frame_size != cfun_frame_layout.frame_size);
9996 }
9997
9998 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9999 the TX is nonescaping. A transaction is considered escaping if
10000 there is at least one path from tbegin returning CC0 to the
10001 function exit block without a tend.
10002
10003 The check so far has some limitations:
10004 - only single tbegin/tend BBs are supported
10005 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10006 - when CC is copied to a GPR and the CC0 check is done with the GPR
10007 this is not supported
10008 */
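/* Simplified sketch of the shape being recognized (illustration only):

       tbegin            ; start transaction, result in CC
       branch on CC0  -> block containing the transaction body ... tend
       otherwise      -> abort/fallback path

   If every path on which the tbegin produced CC0 (transaction started)
   reaches a tend before leaving the function, the transaction cannot
   escape and the conservative FPR clobbers on the tbegin pattern can be
   dropped.  */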
10009
10010 static void
10011 s390_optimize_nonescaping_tx (void)
10012 {
10013 const unsigned int CC0 = 1 << 3;
10014 basic_block tbegin_bb = NULL;
10015 basic_block tend_bb = NULL;
10016 basic_block bb;
10017 rtx_insn *insn;
10018 bool result = true;
10019 int bb_index;
10020 rtx_insn *tbegin_insn = NULL;
10021
10022 if (!cfun->machine->tbegin_p)
10023 return;
10024
10025 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10026 {
10027 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10028
10029 if (!bb)
10030 continue;
10031
10032 FOR_BB_INSNS (bb, insn)
10033 {
10034 rtx ite, cc, pat, target;
10035 unsigned HOST_WIDE_INT mask;
10036
10037 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10038 continue;
10039
10040 pat = PATTERN (insn);
10041
10042 if (GET_CODE (pat) == PARALLEL)
10043 pat = XVECEXP (pat, 0, 0);
10044
10045 if (GET_CODE (pat) != SET
10046 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10047 continue;
10048
10049 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10050 {
10051 rtx_insn *tmp;
10052
10053 tbegin_insn = insn;
10054
10055 /* Just return if the tbegin doesn't have clobbers. */
10056 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10057 return;
10058
10059 if (tbegin_bb != NULL)
10060 return;
10061
10062 /* Find the next conditional jump. */
10063 for (tmp = NEXT_INSN (insn);
10064 tmp != NULL_RTX;
10065 tmp = NEXT_INSN (tmp))
10066 {
10067 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10068 return;
10069 if (!JUMP_P (tmp))
10070 continue;
10071
10072 ite = SET_SRC (PATTERN (tmp));
10073 if (GET_CODE (ite) != IF_THEN_ELSE)
10074 continue;
10075
10076 cc = XEXP (XEXP (ite, 0), 0);
10077 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10078 || GET_MODE (cc) != CCRAWmode
10079 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10080 return;
10081
10082 if (bb->succs->length () != 2)
10083 return;
10084
10085 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10086 if (GET_CODE (XEXP (ite, 0)) == NE)
10087 mask ^= 0xf;
10088
10089 if (mask == CC0)
10090 target = XEXP (ite, 1);
10091 else if (mask == (CC0 ^ 0xf))
10092 target = XEXP (ite, 2);
10093 else
10094 return;
10095
10096 {
10097 edge_iterator ei;
10098 edge e1, e2;
10099
10100 ei = ei_start (bb->succs);
10101 e1 = ei_safe_edge (ei);
10102 ei_next (&ei);
10103 e2 = ei_safe_edge (ei);
10104
10105 if (e2->flags & EDGE_FALLTHRU)
10106 {
10107 e2 = e1;
10108 e1 = ei_safe_edge (ei);
10109 }
10110
10111 if (!(e1->flags & EDGE_FALLTHRU))
10112 return;
10113
10114 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10115 }
10116 if (tmp == BB_END (bb))
10117 break;
10118 }
10119 }
10120
10121 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10122 {
10123 if (tend_bb != NULL)
10124 return;
10125 tend_bb = bb;
10126 }
10127 }
10128 }
10129
10130 /* Either we successfully remove the FPR clobbers here or we are not
10131 able to do anything for this TX. Neither case qualifies for
10132 another look. */
10133 cfun->machine->tbegin_p = false;
10134
10135 if (tbegin_bb == NULL || tend_bb == NULL)
10136 return;
10137
10138 calculate_dominance_info (CDI_POST_DOMINATORS);
10139 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10140 free_dominance_info (CDI_POST_DOMINATORS);
10141
10142 if (!result)
10143 return;
10144
10145 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10146 gen_rtvec (2,
10147 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10148 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10149 INSN_CODE (tbegin_insn) = -1;
10150 df_insn_rescan (tbegin_insn);
10151
10152 return;
10153 }
10154
10155 /* Return true if it is legal to put a value with MODE into REGNO. */
10156
10157 bool
10158 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10159 {
10160 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10161 return false;
10162
10163 switch (REGNO_REG_CLASS (regno))
10164 {
10165 case VEC_REGS:
10166 return ((GET_MODE_CLASS (mode) == MODE_INT
10167 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10168 || mode == DFmode
10169 || s390_vector_mode_supported_p (mode));
10170 break;
10171 case FP_REGS:
10172 if (TARGET_VX
10173 && ((GET_MODE_CLASS (mode) == MODE_INT
10174 && s390_class_max_nregs (FP_REGS, mode) == 1)
10175 || mode == DFmode
10176 || s390_vector_mode_supported_p (mode)))
10177 return true;
10178
10179 if (REGNO_PAIR_OK (regno, mode))
10180 {
10181 if (mode == SImode || mode == DImode)
10182 return true;
10183
10184 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10185 return true;
10186 }
10187 break;
10188 case ADDR_REGS:
10189 if (FRAME_REGNO_P (regno) && mode == Pmode)
10190 return true;
10191
10192 /* fallthrough */
10193 case GENERAL_REGS:
10194 if (REGNO_PAIR_OK (regno, mode))
10195 {
10196 if (TARGET_ZARCH
10197 || (mode != TFmode && mode != TCmode && mode != TDmode))
10198 return true;
10199 }
10200 break;
10201 case CC_REGS:
10202 if (GET_MODE_CLASS (mode) == MODE_CC)
10203 return true;
10204 break;
10205 case ACCESS_REGS:
10206 if (REGNO_PAIR_OK (regno, mode))
10207 {
10208 if (mode == SImode || mode == Pmode)
10209 return true;
10210 }
10211 break;
10212 default:
10213 return false;
10214 }
10215
10216 return false;
10217 }
10218
10219 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10220
10221 bool
10222 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10223 {
10224 /* Once we've decided upon a register to use as base register, it must
10225 no longer be used for any other purpose. */
10226 if (cfun->machine->base_reg)
10227 if (REGNO (cfun->machine->base_reg) == old_reg
10228 || REGNO (cfun->machine->base_reg) == new_reg)
10229 return false;
10230
10231 /* Prevent regrename from using call-saved regs which haven't
10232 actually been saved. This is necessary since regrename assumes
10233 the backend save/restore decisions are based on
10234 df_regs_ever_live. Since we have our own routine we have to tell
10235 regrename manually about it. */
10236 if (GENERAL_REGNO_P (new_reg)
10237 && !call_really_used_regs[new_reg]
10238 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10239 return false;
10240
10241 return true;
10242 }
10243
10244 /* Return nonzero if register REGNO can be used as a scratch register
10245 in peephole2. */
10246
10247 static bool
10248 s390_hard_regno_scratch_ok (unsigned int regno)
10249 {
10250 /* See s390_hard_regno_rename_ok. */
10251 if (GENERAL_REGNO_P (regno)
10252 && !call_really_used_regs[regno]
10253 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10254 return false;
10255
10256 return true;
10257 }
10258
10259 /* Maximum number of registers to represent a value of mode MODE
10260 in a register of class RCLASS. */
10261
10262 int
10263 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10264 {
10265 int reg_size;
10266 bool reg_pair_required_p = false;
10267
10268 switch (rclass)
10269 {
10270 case FP_REGS:
10271 case VEC_REGS:
10272 reg_size = TARGET_VX ? 16 : 8;
10273
10274 /* TF and TD modes would fit into a VR but we put them into a
10275 register pair since we do not have 128bit FP instructions on
10276 full VRs. */
10277 if (TARGET_VX
10278 && SCALAR_FLOAT_MODE_P (mode)
10279 && GET_MODE_SIZE (mode) >= 16)
10280 reg_pair_required_p = true;
10281
10282 /* Even if complex types would fit into a single FPR/VR we force
10283 them into a register pair to deal with the parts more easily.
10284 (FIXME: What about complex ints?) */
10285 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10286 reg_pair_required_p = true;
10287 break;
10288 case ACCESS_REGS:
10289 reg_size = 4;
10290 break;
10291 default:
10292 reg_size = UNITS_PER_WORD;
10293 break;
10294 }
10295
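/* Worked examples, assuming 64 bit and the vector facility: TFmode
   (16 bytes) in FP_REGS/VEC_REGS sets reg_pair_required_p, giving
   2 * ((16/2 + 16 - 1) / 16) = 2 registers; TImode in GENERAL_REGS
   gives (16 + 8 - 1) / 8 = 2 registers.  */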
10296 if (reg_pair_required_p)
10297 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10298
10299 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10300 }
10301
10302 /* Return TRUE if changing mode from FROM_MODE to TO_MODE should not be
10303 allowed for register class RCLASS. */
10304
10305 int
10306 s390_cannot_change_mode_class (machine_mode from_mode,
10307 machine_mode to_mode,
10308 enum reg_class rclass)
10309 {
10310 machine_mode small_mode;
10311 machine_mode big_mode;
10312
10313 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10314 return 0;
10315
10316 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10317 {
10318 small_mode = from_mode;
10319 big_mode = to_mode;
10320 }
10321 else
10322 {
10323 small_mode = to_mode;
10324 big_mode = from_mode;
10325 }
10326
10327 /* Values residing in VRs are little-endian style. All modes are
10328 placed left-aligned in a VR. This means that we cannot allow
10329 switching between modes with differing sizes. Also if the vector
10330 facility is available we still place TFmode values in VR register
10331 pairs, since the only instructions we have operating on TFmodes
10332 only deal with register pairs. Therefore we have to allow DFmode
10333 subregs of TFmodes to enable the TFmode splitters. */
10334 if (reg_classes_intersect_p (VEC_REGS, rclass)
10335 && (GET_MODE_SIZE (small_mode) < 8
10336 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10337 return 1;
10338
10339 /* Likewise for access registers, since they have only half the
10340 word size on 64-bit. */
10341 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10342 return 1;
10343
10344 return 0;
10345 }
10346
10347 /* Return true if we use LRA instead of reload pass. */
10348 static bool
10349 s390_lra_p (void)
10350 {
10351 return s390_lra_flag;
10352 }
10353
10354 /* Return true if register FROM can be eliminated via register TO. */
10355
10356 static bool
10357 s390_can_eliminate (const int from, const int to)
10358 {
10359 /* On zSeries machines, we have not marked the base register as fixed.
10360 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10361 If a function requires the base register, we say here that this
10362 elimination cannot be performed. This will cause reload to free
10363 up the base register (as if it were fixed). On the other hand,
10364 if the current function does *not* require the base register, we
10365 say here the elimination succeeds, which in turn allows reload
10366 to allocate the base register for any other purpose. */
10367 if (from == BASE_REGNUM && to == BASE_REGNUM)
10368 {
10369 if (TARGET_CPU_ZARCH)
10370 {
10371 s390_init_frame_layout ();
10372 return cfun->machine->base_reg == NULL_RTX;
10373 }
10374
10375 return false;
10376 }
10377
10378 /* Everything else must point into the stack frame. */
10379 gcc_assert (to == STACK_POINTER_REGNUM
10380 || to == HARD_FRAME_POINTER_REGNUM);
10381
10382 gcc_assert (from == FRAME_POINTER_REGNUM
10383 || from == ARG_POINTER_REGNUM
10384 || from == RETURN_ADDRESS_POINTER_REGNUM);
10385
10386 /* Make sure we actually saved the return address. */
10387 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10388 if (!crtl->calls_eh_return
10389 && !cfun->stdarg
10390 && !cfun_frame_layout.save_return_addr_p)
10391 return false;
10392
10393 return true;
10394 }
10395
10396 /* Return the offset between registers FROM and TO immediately after the prologue. */
10397
10398 HOST_WIDE_INT
10399 s390_initial_elimination_offset (int from, int to)
10400 {
10401 HOST_WIDE_INT offset;
10402
10403 /* ??? Why are we called for non-eliminable pairs? */
10404 if (!s390_can_eliminate (from, to))
10405 return 0;
10406
10407 switch (from)
10408 {
10409 case FRAME_POINTER_REGNUM:
10410 offset = (get_frame_size()
10411 + STACK_POINTER_OFFSET
10412 + crtl->outgoing_args_size);
10413 break;
10414
10415 case ARG_POINTER_REGNUM:
10416 s390_init_frame_layout ();
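/* The frame size plus the register save area provided by the caller
   (STACK_POINTER_OFFSET, typically 160 bytes on 64 bit and 96 bytes
   on 31 bit).  */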
10417 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10418 break;
10419
10420 case RETURN_ADDRESS_POINTER_REGNUM:
10421 s390_init_frame_layout ();
10422
10423 if (cfun_frame_layout.first_save_gpr_slot == -1)
10424 {
10425 /* If it turns out that for stdarg nothing went into the reg
10426 save area we also do not need the return address
10427 pointer. */
10428 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10429 return 0;
10430
10431 gcc_unreachable ();
10432 }
10433
10434 /* In order to make the following work it is not necessary for
10435 r14 to have a save slot. It is sufficient if one other GPR
10436 got one. Since the GPRs are always stored without gaps we
10437 are able to calculate where the r14 save slot would
10438 reside. */
10439 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10440 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10441 UNITS_PER_LONG);
10442 break;
10443
10444 case BASE_REGNUM:
10445 offset = 0;
10446 break;
10447
10448 default:
10449 gcc_unreachable ();
10450 }
10451
10452 return offset;
10453 }
10454
10455 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10456 to register BASE. Return generated insn. */
10457
10458 static rtx
10459 save_fpr (rtx base, int offset, int regnum)
10460 {
10461 rtx addr;
10462 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10463
10464 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10465 set_mem_alias_set (addr, get_varargs_alias_set ());
10466 else
10467 set_mem_alias_set (addr, get_frame_alias_set ());
10468
10469 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10470 }
10471
10472 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10473 to register BASE. Return generated insn. */
10474
10475 static rtx
10476 restore_fpr (rtx base, int offset, int regnum)
10477 {
10478 rtx addr;
10479 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10480 set_mem_alias_set (addr, get_frame_alias_set ());
10481
10482 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10483 }
10484
10485 /* Return true if REGNO is a global register, but not one
10486 of the special ones that need to be saved/restored anyway. */
10487
10488 static inline bool
10489 global_not_special_regno_p (int regno)
10490 {
10491 return (global_regs[regno]
10492 /* These registers are special and need to be
10493 restored in any case. */
10494 && !(regno == STACK_POINTER_REGNUM
10495 || regno == RETURN_REGNUM
10496 || regno == BASE_REGNUM
10497 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10498 }
10499
10500 /* Generate insn to save registers FIRST to LAST into
10501 the register save area located at offset OFFSET
10502 relative to register BASE. */
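/* For instance, saving r6-r15 at the usual 64-bit GPR save area offset
   comes out as something like
       stmg %r6,%r15,48(%r15)
   while a single register uses a plain store (stg/st), see below.  */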
10503
10504 static rtx
10505 save_gprs (rtx base, int offset, int first, int last)
10506 {
10507 rtx addr, insn, note;
10508 int i;
10509
10510 addr = plus_constant (Pmode, base, offset);
10511 addr = gen_rtx_MEM (Pmode, addr);
10512
10513 set_mem_alias_set (addr, get_frame_alias_set ());
10514
10515 /* Special-case single register. */
10516 if (first == last)
10517 {
10518 if (TARGET_64BIT)
10519 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10520 else
10521 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10522
10523 if (!global_not_special_regno_p (first))
10524 RTX_FRAME_RELATED_P (insn) = 1;
10525 return insn;
10526 }
10527
10528
10529 insn = gen_store_multiple (addr,
10530 gen_rtx_REG (Pmode, first),
10531 GEN_INT (last - first + 1));
10532
10533 if (first <= 6 && cfun->stdarg)
10534 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10535 {
10536 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10537
10538 if (first + i <= 6)
10539 set_mem_alias_set (mem, get_varargs_alias_set ());
10540 }
10541
10542 /* We need to set the FRAME_RELATED flag on all SETs
10543 inside the store-multiple pattern.
10544
10545 However, we must not emit DWARF records for registers 2..5
10546 if they are stored for use by variable arguments ...
10547
10548 ??? Unfortunately, it is not enough to simply not set the
10549 FRAME_RELATED flags for those SETs, because the first SET
10550 of the PARALLEL is always treated as if it had the flag
10551 set, even if it does not. Therefore we emit a new pattern
10552 without those registers as a REG_FRAME_RELATED_EXPR note. */
10553
10554 if (first >= 6 && !global_not_special_regno_p (first))
10555 {
10556 rtx pat = PATTERN (insn);
10557
10558 for (i = 0; i < XVECLEN (pat, 0); i++)
10559 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10560 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10561 0, i)))))
10562 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10563
10564 RTX_FRAME_RELATED_P (insn) = 1;
10565 }
10566 else if (last >= 6)
10567 {
10568 int start;
10569
10570 for (start = first >= 6 ? first : 6; start <= last; start++)
10571 if (!global_not_special_regno_p (start))
10572 break;
10573
10574 if (start > last)
10575 return insn;
10576
10577 addr = plus_constant (Pmode, base,
10578 offset + (start - first) * UNITS_PER_LONG);
10579
10580 if (start == last)
10581 {
10582 if (TARGET_64BIT)
10583 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10584 gen_rtx_REG (Pmode, start));
10585 else
10586 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10587 gen_rtx_REG (Pmode, start));
10588 note = PATTERN (note);
10589
10590 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10591 RTX_FRAME_RELATED_P (insn) = 1;
10592
10593 return insn;
10594 }
10595
10596 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10597 gen_rtx_REG (Pmode, start),
10598 GEN_INT (last - start + 1));
10599 note = PATTERN (note);
10600
10601 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10602
10603 for (i = 0; i < XVECLEN (note, 0); i++)
10604 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10605 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10606 0, i)))))
10607 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10608
10609 RTX_FRAME_RELATED_P (insn) = 1;
10610 }
10611
10612 return insn;
10613 }
10614
10615 /* Generate insn to restore registers FIRST to LAST from
10616 the register save area located at offset OFFSET
10617 relative to register BASE. */
10618
10619 static rtx
10620 restore_gprs (rtx base, int offset, int first, int last)
10621 {
10622 rtx addr, insn;
10623
10624 addr = plus_constant (Pmode, base, offset);
10625 addr = gen_rtx_MEM (Pmode, addr);
10626 set_mem_alias_set (addr, get_frame_alias_set ());
10627
10628 /* Special-case single register. */
10629 if (first == last)
10630 {
10631 if (TARGET_64BIT)
10632 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10633 else
10634 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10635
10636 RTX_FRAME_RELATED_P (insn) = 1;
10637 return insn;
10638 }
10639
10640 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10641 addr,
10642 GEN_INT (last - first + 1));
10643 RTX_FRAME_RELATED_P (insn) = 1;
10644 return insn;
10645 }
10646
10647 /* Return insn sequence to load the GOT register. */
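/* On TARGET_CPU_ZARCH this typically ends up as a single larl of
   _GLOBAL_OFFSET_TABLE_; on older CPUs the GOT address has to be
   computed from a literal pool entry plus the literal pool base
   (the UNSPEC_LTREL_* path below).  */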
10648
10649 static GTY(()) rtx got_symbol;
10650 rtx_insn *
10651 s390_load_got (void)
10652 {
10653 rtx_insn *insns;
10654
10655 /* We cannot use pic_offset_table_rtx here since we also use this
10656 function for non-PIC code if __tls_get_offset is called; in
10657 that case neither PIC_OFFSET_TABLE_REGNUM nor pic_offset_table_rtx
10658 is usable. */
10659 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10660
10661 if (!got_symbol)
10662 {
10663 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10664 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10665 }
10666
10667 start_sequence ();
10668
10669 if (TARGET_CPU_ZARCH)
10670 {
10671 emit_move_insn (got_rtx, got_symbol);
10672 }
10673 else
10674 {
10675 rtx offset;
10676
10677 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10678 UNSPEC_LTREL_OFFSET);
10679 offset = gen_rtx_CONST (Pmode, offset);
10680 offset = force_const_mem (Pmode, offset);
10681
10682 emit_move_insn (got_rtx, offset);
10683
10684 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10685 UNSPEC_LTREL_BASE);
10686 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10687
10688 emit_move_insn (got_rtx, offset);
10689 }
10690
10691 insns = get_insns ();
10692 end_sequence ();
10693 return insns;
10694 }
10695
10696 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10697 and the change to the stack pointer. */
10698
10699 static void
10700 s390_emit_stack_tie (void)
10701 {
10702 rtx mem = gen_frame_mem (BLKmode,
10703 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10704
10705 emit_insn (gen_stack_tie (mem));
10706 }
10707
10708 /* Copy GPRs into their FPR save slots. */
10709
10710 static void
10711 s390_save_gprs_to_fprs (void)
10712 {
10713 int i;
10714
10715 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10716 return;
10717
10718 for (i = 6; i < 16; i++)
10719 {
10720 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10721 {
10722 rtx_insn *insn =
10723 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10724 gen_rtx_REG (DImode, i));
10725 RTX_FRAME_RELATED_P (insn) = 1;
10726 /* This prevents dwarf2cfi from interpreting the set. Otherwise
10727 it might emit def_cfa_register notes setting an FPR as the
10728 new CFA. */
10729 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
10730 }
10731 }
10732 }
10733
10734 /* Restore GPRs from FPR save slots. */
10735
10736 static void
10737 s390_restore_gprs_from_fprs (void)
10738 {
10739 int i;
10740
10741 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10742 return;
10743
10744 for (i = 6; i < 16; i++)
10745 {
10746 rtx_insn *insn;
10747
10748 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10749 continue;
10750
10751 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10752
10753 if (i == STACK_POINTER_REGNUM)
10754 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10755 else
10756 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10757
10758 df_set_regs_ever_live (i, true);
10759 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10760 if (i == STACK_POINTER_REGNUM)
10761 add_reg_note (insn, REG_CFA_DEF_CFA,
10762 plus_constant (Pmode, stack_pointer_rtx,
10763 STACK_POINTER_OFFSET));
10764 RTX_FRAME_RELATED_P (insn) = 1;
10765 }
10766 }
10767
10768
10769 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10770 generation. */
10771
10772 namespace {
10773
10774 const pass_data pass_data_s390_early_mach =
10775 {
10776 RTL_PASS, /* type */
10777 "early_mach", /* name */
10778 OPTGROUP_NONE, /* optinfo_flags */
10779 TV_MACH_DEP, /* tv_id */
10780 0, /* properties_required */
10781 0, /* properties_provided */
10782 0, /* properties_destroyed */
10783 0, /* todo_flags_start */
10784 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10785 };
10786
10787 class pass_s390_early_mach : public rtl_opt_pass
10788 {
10789 public:
10790 pass_s390_early_mach (gcc::context *ctxt)
10791 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10792 {}
10793
10794 /* opt_pass methods: */
10795 virtual unsigned int execute (function *);
10796
10797 }; // class pass_s390_early_mach
10798
10799 unsigned int
10800 pass_s390_early_mach::execute (function *fun)
10801 {
10802 rtx_insn *insn;
10803
10804 /* Try to get rid of the FPR clobbers. */
10805 s390_optimize_nonescaping_tx ();
10806
10807 /* Re-compute register info. */
10808 s390_register_info ();
10809
10810 /* If we're using a base register, ensure that it is always valid for
10811 the first non-prologue instruction. */
10812 if (fun->machine->base_reg)
10813 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10814
10815 /* Annotate all constant pool references to let the scheduler know
10816 they implicitly use the base register. */
10817 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10818 if (INSN_P (insn))
10819 {
10820 annotate_constant_pool_refs (&PATTERN (insn));
10821 df_insn_rescan (insn);
10822 }
10823 return 0;
10824 }
10825
10826 } // anon namespace
10827
10828 /* Expand the prologue into a bunch of separate insns. */
10829
10830 void
10831 s390_emit_prologue (void)
10832 {
10833 rtx insn, addr;
10834 rtx temp_reg;
10835 int i;
10836 int offset;
10837 int next_fpr = 0;
10838
10839 /* Choose the best register to use as a temporary within the prologue.
10840 TPF with profiling must avoid register 14 - the tracing function
10841 needs the original contents of r14 to be preserved. */
10842
10843 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10844 && !crtl->is_leaf
10845 && !TARGET_TPF_PROFILING)
10846 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10847 else if (flag_split_stack && cfun->stdarg)
10848 temp_reg = gen_rtx_REG (Pmode, 12);
10849 else
10850 temp_reg = gen_rtx_REG (Pmode, 1);
10851
10852 s390_save_gprs_to_fprs ();
10853
10854 /* Save call saved gprs. */
10855 if (cfun_frame_layout.first_save_gpr != -1)
10856 {
10857 insn = save_gprs (stack_pointer_rtx,
10858 cfun_frame_layout.gprs_offset +
10859 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10860 - cfun_frame_layout.first_save_gpr_slot),
10861 cfun_frame_layout.first_save_gpr,
10862 cfun_frame_layout.last_save_gpr);
10863 emit_insn (insn);
10864 }
10865
10866 /* Dummy insn to mark literal pool slot. */
10867
10868 if (cfun->machine->base_reg)
10869 emit_insn (gen_main_pool (cfun->machine->base_reg));
10870
10871 offset = cfun_frame_layout.f0_offset;
10872
10873 /* Save f0 and f2. */
10874 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10875 {
10876 if (cfun_fpr_save_p (i))
10877 {
10878 save_fpr (stack_pointer_rtx, offset, i);
10879 offset += 8;
10880 }
10881 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10882 offset += 8;
10883 }
10884
10885 /* Save f4 and f6. */
10886 offset = cfun_frame_layout.f4_offset;
10887 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10888 {
10889 if (cfun_fpr_save_p (i))
10890 {
10891 insn = save_fpr (stack_pointer_rtx, offset, i);
10892 offset += 8;
10893
10894 /* If f4 and f6 are call clobbered they are saved due to
10895 stdarg and therefore are not frame related. */
10896 if (!call_really_used_regs[i])
10897 RTX_FRAME_RELATED_P (insn) = 1;
10898 }
10899 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10900 offset += 8;
10901 }
10902
10903 if (TARGET_PACKED_STACK
10904 && cfun_save_high_fprs_p
10905 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10906 {
10907 offset = (cfun_frame_layout.f8_offset
10908 + (cfun_frame_layout.high_fprs - 1) * 8);
10909
10910 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10911 if (cfun_fpr_save_p (i))
10912 {
10913 insn = save_fpr (stack_pointer_rtx, offset, i);
10914
10915 RTX_FRAME_RELATED_P (insn) = 1;
10916 offset -= 8;
10917 }
10918 if (offset >= cfun_frame_layout.f8_offset)
10919 next_fpr = i;
10920 }
10921
10922 if (!TARGET_PACKED_STACK)
10923 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10924
10925 if (flag_stack_usage_info)
10926 current_function_static_stack_size = cfun_frame_layout.frame_size;
10927
10928 /* Decrement stack pointer. */
10929
10930 if (cfun_frame_layout.frame_size > 0)
10931 {
10932 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10933 rtx real_frame_off;
10934
10935 if (s390_stack_size)
10936 {
10937 HOST_WIDE_INT stack_guard;
10938
10939 if (s390_stack_guard)
10940 stack_guard = s390_stack_guard;
10941 else
10942 {
10943 /* If no value for the stack guard is provided, the smallest power of 2
10944 no smaller than the current frame size is chosen. */
10945 stack_guard = 1;
10946 while (stack_guard < cfun_frame_layout.frame_size)
10947 stack_guard <<= 1;
10948 }
10949
10950 if (cfun_frame_layout.frame_size >= s390_stack_size)
10951 {
10952 warning (0, "frame size of function %qs is %wd"
10953 " bytes exceeding user provided stack limit of "
10954 "%d bytes. "
10955 "An unconditional trap is added.",
10956 current_function_name(), cfun_frame_layout.frame_size,
10957 s390_stack_size);
10958 emit_insn (gen_trap ());
10959 emit_barrier ();
10960 }
10961 else
10962 {
10963 /* stack_guard has to be smaller than s390_stack_size.
10964 Otherwise we would emit an AND with zero which would
10965 not match the test under mask pattern. */
10966 if (stack_guard >= s390_stack_size)
10967 {
10968 warning (0, "frame size of function %qs is %wd"
10969 " bytes which is more than half the stack size. "
10970 "The dynamic check would not be reliable. "
10971 "No check emitted for this function.",
10972 current_function_name(),
10973 cfun_frame_layout.frame_size);
10974 }
10975 else
10976 {
10977 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10978 & ~(stack_guard - 1));
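/* Worked example: with -mstack-size=65536 and -mstack-guard=4096 the
   mask is 0xffff & ~0xfff = 0xf000.  The conditional trap below fires
   when all of these stack pointer bits are zero, i.e. once the stack
   pointer has entered the lowest guard-sized chunk of the stack
   area.  */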
10979
10980 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10981 GEN_INT (stack_check_mask));
10982 if (TARGET_64BIT)
10983 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10984 t, const0_rtx),
10985 t, const0_rtx, const0_rtx));
10986 else
10987 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10988 t, const0_rtx),
10989 t, const0_rtx, const0_rtx));
10990 }
10991 }
10992 }
10993
10994 if (s390_warn_framesize > 0
10995 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10996 warning (0, "frame size of %qs is %wd bytes",
10997 current_function_name (), cfun_frame_layout.frame_size);
10998
10999 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11000 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11001
11002 /* Save incoming stack pointer into temp reg. */
11003 if (TARGET_BACKCHAIN || next_fpr)
11004 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
11005
11006 /* Subtract frame size from stack pointer. */
11007
11008 if (DISP_IN_RANGE (INTVAL (frame_off)))
11009 {
11010 insn = gen_rtx_SET (stack_pointer_rtx,
11011 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11012 frame_off));
11013 insn = emit_insn (insn);
11014 }
11015 else
11016 {
11017 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11018 frame_off = force_const_mem (Pmode, frame_off);
11019
11020 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
11021 annotate_constant_pool_refs (&PATTERN (insn));
11022 }
11023
11024 RTX_FRAME_RELATED_P (insn) = 1;
11025 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11026 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11027 gen_rtx_SET (stack_pointer_rtx,
11028 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11029 real_frame_off)));
11030
11031 /* Set backchain. */
11032
11033 if (TARGET_BACKCHAIN)
11034 {
11035 if (cfun_frame_layout.backchain_offset)
11036 addr = gen_rtx_MEM (Pmode,
11037 plus_constant (Pmode, stack_pointer_rtx,
11038 cfun_frame_layout.backchain_offset));
11039 else
11040 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11041 set_mem_alias_set (addr, get_frame_alias_set ());
11042 insn = emit_insn (gen_move_insn (addr, temp_reg));
11043 }
11044
11045 /* If we support non-call exceptions (e.g. for Java),
11046 we need to make sure the backchain pointer is set up
11047 before any possibly trapping memory access. */
11048 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11049 {
11050 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11051 emit_clobber (addr);
11052 }
11053 }
11054
11055 /* Save fprs 8 - 15 (64 bit ABI). */
11056
11057 if (cfun_save_high_fprs_p && next_fpr)
11058 {
11059 /* If the stack might be accessed through a different register
11060 we have to make sure that the stack pointer decrement is not
11061 moved below the use of the stack slots. */
11062 s390_emit_stack_tie ();
11063
11064 insn = emit_insn (gen_add2_insn (temp_reg,
11065 GEN_INT (cfun_frame_layout.f8_offset)));
11066
11067 offset = 0;
11068
11069 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11070 if (cfun_fpr_save_p (i))
11071 {
11072 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11073 cfun_frame_layout.frame_size
11074 + cfun_frame_layout.f8_offset
11075 + offset);
11076
11077 insn = save_fpr (temp_reg, offset, i);
11078 offset += 8;
11079 RTX_FRAME_RELATED_P (insn) = 1;
11080 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11081 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11082 gen_rtx_REG (DFmode, i)));
11083 }
11084 }
11085
11086 /* Set frame pointer, if needed. */
11087
11088 if (frame_pointer_needed)
11089 {
11090 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11091 RTX_FRAME_RELATED_P (insn) = 1;
11092 }
11093
11094 /* Set up got pointer, if needed. */
11095
11096 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11097 {
11098 rtx_insn *insns = s390_load_got ();
11099
11100 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11101 annotate_constant_pool_refs (&PATTERN (insn));
11102
11103 emit_insn (insns);
11104 }
11105
11106 if (TARGET_TPF_PROFILING)
11107 {
11108 /* Generate a BAS instruction to serve as a function
11109 entry intercept to facilitate the use of tracing
11110 algorithms located at the branch target. */
11111 emit_insn (gen_prologue_tpf ());
11112
11113 /* Emit a blockage here so that all code
11114 lies between the profiling mechanisms. */
11115 emit_insn (gen_blockage ());
11116 }
11117 }
11118
11119 /* Expand the epilogue into a bunch of separate insns. */
11120
11121 void
11122 s390_emit_epilogue (bool sibcall)
11123 {
11124 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11125 int area_bottom, area_top, offset = 0;
11126 int next_offset;
11127 rtvec p;
11128 int i;
11129
11130 if (TARGET_TPF_PROFILING)
11131 {
11132
11133 /* Generate a BAS instruction to serve as a function
11134 entry intercept to facilitate the use of tracing
11135 algorithms located at the branch target. */
11136
11137 /* Emit a blockage here so that all code
11138 lies between the profiling mechanisms. */
11139 emit_insn (gen_blockage ());
11140
11141 emit_insn (gen_epilogue_tpf ());
11142 }
11143
11144 /* Check whether to use frame or stack pointer for restore. */
11145
11146 frame_pointer = (frame_pointer_needed
11147 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11148
11149 s390_frame_area (&area_bottom, &area_top);
11150
11151 /* Check whether we can access the register save area.
11152 If not, increment the frame pointer as required. */
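/* The restore instructions use base + displacement addressing and the
   displacement range is limited (DISP_IN_RANGE).  For very large frames
   the save area at frame_size + area offset is not directly reachable,
   so the frame/stack pointer is advanced first in the else branch
   below.  */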
11153
11154 if (area_top <= area_bottom)
11155 {
11156 /* Nothing to restore. */
11157 }
11158 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11159 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11160 {
11161 /* Area is in range. */
11162 offset = cfun_frame_layout.frame_size;
11163 }
11164 else
11165 {
11166 rtx insn, frame_off, cfa;
11167
11168 offset = area_bottom < 0 ? -area_bottom : 0;
11169 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11170
11171 cfa = gen_rtx_SET (frame_pointer,
11172 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11173 if (DISP_IN_RANGE (INTVAL (frame_off)))
11174 {
11175 insn = gen_rtx_SET (frame_pointer,
11176 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11177 insn = emit_insn (insn);
11178 }
11179 else
11180 {
11181 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11182 frame_off = force_const_mem (Pmode, frame_off);
11183
11184 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11185 annotate_constant_pool_refs (&PATTERN (insn));
11186 }
11187 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11188 RTX_FRAME_RELATED_P (insn) = 1;
11189 }
11190
11191 /* Restore call saved fprs. */
11192
11193 if (TARGET_64BIT)
11194 {
11195 if (cfun_save_high_fprs_p)
11196 {
11197 next_offset = cfun_frame_layout.f8_offset;
11198 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11199 {
11200 if (cfun_fpr_save_p (i))
11201 {
11202 restore_fpr (frame_pointer,
11203 offset + next_offset, i);
11204 cfa_restores
11205 = alloc_reg_note (REG_CFA_RESTORE,
11206 gen_rtx_REG (DFmode, i), cfa_restores);
11207 next_offset += 8;
11208 }
11209 }
11210 }
11211
11212 }
11213 else
11214 {
11215 next_offset = cfun_frame_layout.f4_offset;
11216 /* f4, f6 */
11217 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11218 {
11219 if (cfun_fpr_save_p (i))
11220 {
11221 restore_fpr (frame_pointer,
11222 offset + next_offset, i);
11223 cfa_restores
11224 = alloc_reg_note (REG_CFA_RESTORE,
11225 gen_rtx_REG (DFmode, i), cfa_restores);
11226 next_offset += 8;
11227 }
11228 else if (!TARGET_PACKED_STACK)
11229 next_offset += 8;
11230 }
11231
11232 }
11233
11234 /* Return register. */
11235
11236 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11237
11238 /* Restore call saved gprs. */
11239
11240 if (cfun_frame_layout.first_restore_gpr != -1)
11241 {
11242 rtx insn, addr;
11243 int i;
11244
11245 /* Check for global registers and store their current values to the
11246 stack locations from which they will be reloaded, so the restore leaves them unchanged. */
11247
11248 for (i = cfun_frame_layout.first_restore_gpr;
11249 i <= cfun_frame_layout.last_restore_gpr;
11250 i++)
11251 {
11252 if (global_not_special_regno_p (i))
11253 {
11254 addr = plus_constant (Pmode, frame_pointer,
11255 offset + cfun_frame_layout.gprs_offset
11256 + (i - cfun_frame_layout.first_save_gpr_slot)
11257 * UNITS_PER_LONG);
11258 addr = gen_rtx_MEM (Pmode, addr);
11259 set_mem_alias_set (addr, get_frame_alias_set ());
11260 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11261 }
11262 else
11263 cfa_restores
11264 = alloc_reg_note (REG_CFA_RESTORE,
11265 gen_rtx_REG (Pmode, i), cfa_restores);
11266 }
11267
11268 if (! sibcall)
11269 {
11270 /* Fetch the return address from the stack before the load multiple;
11271 this helps scheduling.
11272
11273 Only do this if we already decided that r14 needs to be
11274 saved to a stack slot. (And not just because r14 happens to
11275 be in between two GPRs which need saving.) Otherwise it
11276 would be difficult to take that decision back in
11277 s390_optimize_prologue. */
11278 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11279 {
11280 int return_regnum = find_unused_clobbered_reg();
11281 if (!return_regnum)
11282 return_regnum = 4;
11283 return_reg = gen_rtx_REG (Pmode, return_regnum);
11284
11285 addr = plus_constant (Pmode, frame_pointer,
11286 offset + cfun_frame_layout.gprs_offset
11287 + (RETURN_REGNUM
11288 - cfun_frame_layout.first_save_gpr_slot)
11289 * UNITS_PER_LONG);
11290 addr = gen_rtx_MEM (Pmode, addr);
11291 set_mem_alias_set (addr, get_frame_alias_set ());
11292 emit_move_insn (return_reg, addr);
11293
11294 /* Once we have done that optimization we have to make sure
11295 s390_optimize_prologue does not try to remove the
11296 store of r14 since it would not be able to find the
11297 load issued here. */
11298 cfun_frame_layout.save_return_addr_p = true;
11299 }
11300 }
11301
11302 insn = restore_gprs (frame_pointer,
11303 offset + cfun_frame_layout.gprs_offset
11304 + (cfun_frame_layout.first_restore_gpr
11305 - cfun_frame_layout.first_save_gpr_slot)
11306 * UNITS_PER_LONG,
11307 cfun_frame_layout.first_restore_gpr,
11308 cfun_frame_layout.last_restore_gpr);
11309 insn = emit_insn (insn);
11310 REG_NOTES (insn) = cfa_restores;
11311 add_reg_note (insn, REG_CFA_DEF_CFA,
11312 plus_constant (Pmode, stack_pointer_rtx,
11313 STACK_POINTER_OFFSET));
11314 RTX_FRAME_RELATED_P (insn) = 1;
11315 }
11316
11317 s390_restore_gprs_from_fprs ();
11318
11319 if (! sibcall)
11320 {
11321
11322 /* Return to caller. */
11323
11324 p = rtvec_alloc (2);
11325
11326 RTVEC_ELT (p, 0) = ret_rtx;
11327 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11328 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11329 }
11330 }
11331
11332 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11333
11334 static void
11335 s300_set_up_by_prologue (hard_reg_set_container *regs)
11336 {
11337 if (cfun->machine->base_reg
11338 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11339 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11340 }
11341
11342 /* -fsplit-stack support. */
11343
11344 /* A SYMBOL_REF for __morestack. */
11345 static GTY(()) rtx morestack_ref;
11346
11347 /* When using -fsplit-stack, the allocation routines set a field in
11348 the TCB to the bottom of the stack plus this much space, measured
11349 in bytes. */
11350
11351 #define SPLIT_STACK_AVAILABLE 1024
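/* An illustrative sketch (not part of the generated code) of the check
   that the split-stack prologue below performs, with guard standing for
   the __private_ss value read from the TCB:

     guard = *(char **) (thread_pointer + psso);
     if (frame_size > SPLIT_STACK_AVAILABLE)
       guard += frame_size;
     if (stack_pointer < guard)
       __morestack ();        /* switch to a freshly allocated segment */

   For frames of up to SPLIT_STACK_AVAILABLE bytes the addition is
   skipped, because the allocation routines already left that much slack
   below __private_ss; if frame_size does not fit an add immediate, the
   compare is skipped and __morestack is called unconditionally.  */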
11352
11353 /* Emit -fsplit-stack prologue, which goes before the regular function
11354 prologue. */
11355
11356 void
11357 s390_expand_split_stack_prologue (void)
11358 {
11359 rtx r1, guard, cc = NULL;
11360 rtx_insn *insn;
11361 /* Offset from thread pointer to __private_ss. */
11362 int psso = TARGET_64BIT ? 0x38 : 0x20;
11363 /* Pointer size in bytes. */
11364 /* Frame size and argument size - the two parameters to __morestack. */
11365 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11366 /* Align argument size to 8 bytes - simplifies __morestack code. */
11367 HOST_WIDE_INT args_size = crtl->args.size >= 0
11368 ? ((crtl->args.size + 7) & ~7)
11369 : 0;
11370 /* Label to be called by __morestack. */
11371 rtx_code_label *call_done = NULL;
11372 rtx_code_label *parm_base = NULL;
11373 rtx tmp;
11374
11375 gcc_assert (flag_split_stack && reload_completed);
11376 if (!TARGET_CPU_ZARCH)
11377 {
11378 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11379 return;
11380 }
11381
11382 r1 = gen_rtx_REG (Pmode, 1);
11383
11384 /* If no stack frame will be allocated, don't do anything. */
11385 if (!frame_size)
11386 {
11387 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11388 {
11389 /* If va_start is used, just use r15. */
11390 emit_move_insn (r1,
11391 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11392 GEN_INT (STACK_POINTER_OFFSET)));
11393
11394 }
11395 return;
11396 }
11397
11398 if (morestack_ref == NULL_RTX)
11399 {
11400 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11401 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11402 | SYMBOL_FLAG_FUNCTION);
11403 }
11404
11405 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11406 {
11407 /* If frame_size will fit in an add instruction, do a stack space
11408 check, and only call __morestack if there's not enough space. */
11409
11410 /* Get thread pointer. r1 is the only register we can always destroy - r0
11411 could contain a static chain (and cannot be used to address memory
11412 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11413 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11414 /* Aim at __private_ss. */
11415 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11416
11417 /* If less than 1 KiB is used, skip the addition and compare directly
11418 with __private_ss. */
11419 if (frame_size > SPLIT_STACK_AVAILABLE)
11420 {
11421 emit_move_insn (r1, guard);
11422 if (TARGET_64BIT)
11423 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11424 else
11425 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11426 guard = r1;
11427 }
11428
11429 /* Compare the (maybe adjusted) guard with the stack pointer. */
11430 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11431 }
11432
11433 call_done = gen_label_rtx ();
11434 parm_base = gen_label_rtx ();
11435
11436 /* Emit the parameter block. */
11437 tmp = gen_split_stack_data (parm_base, call_done,
11438 GEN_INT (frame_size),
11439 GEN_INT (args_size));
11440 insn = emit_insn (tmp);
11441 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11442 LABEL_NUSES (call_done)++;
11443 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11444 LABEL_NUSES (parm_base)++;
11445
11446 /* %r1 = litbase. */
11447 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11448 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11449 LABEL_NUSES (parm_base)++;
11450
11451 /* Now, we need to call __morestack. It has very special calling
11452 conventions: it preserves the parameter/return/static chain registers for
11453 calling the main function body, and looks for its own parameters at %r1. */
11454
11455 if (cc != NULL)
11456 {
11457 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11458
11459 insn = emit_jump_insn (tmp);
11460 JUMP_LABEL (insn) = call_done;
11461 LABEL_NUSES (call_done)++;
11462
11463 /* Mark the jump as very unlikely to be taken. */
11464 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11465
11466 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11467 {
11468 /* If va_start is used, and __morestack was not called, just use
11469 r15. */
11470 emit_move_insn (r1,
11471 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11472 GEN_INT (STACK_POINTER_OFFSET)));
11473 }
11474 }
11475 else
11476 {
11477 tmp = gen_split_stack_call (morestack_ref, call_done);
11478 insn = emit_jump_insn (tmp);
11479 JUMP_LABEL (insn) = call_done;
11480 LABEL_NUSES (call_done)++;
11481 emit_barrier ();
11482 }
11483
11484 /* __morestack will call us here. */
11485
11486 emit_label (call_done);
11487 }
11488
11489 /* We may have to tell the dataflow pass that the split stack prologue
11490 is initializing a register. */
11491
11492 static void
11493 s390_live_on_entry (bitmap regs)
11494 {
11495 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11496 {
11497 gcc_assert (flag_split_stack);
11498 bitmap_set_bit (regs, 1);
11499 }
11500 }
11501
11502 /* Return true if the function can use simple_return to return outside
11503 of a shrink-wrapped region. At present shrink-wrapping is supported
11504 in all cases. */
11505
11506 bool
11507 s390_can_use_simple_return_insn (void)
11508 {
11509 return true;
11510 }
11511
11512 /* Return true if the epilogue is guaranteed to contain only a return
11513 instruction and if a direct return can therefore be used instead.
11514 One of the main advantages of using direct return instructions
11515 is that we can then use conditional returns. */
11516
11517 bool
11518 s390_can_use_return_insn (void)
11519 {
11520 int i;
11521
11522 if (!reload_completed)
11523 return false;
11524
11525 if (crtl->profile)
11526 return false;
11527
11528 if (TARGET_TPF_PROFILING)
11529 return false;
11530
11531 for (i = 0; i < 16; i++)
11532 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11533 return false;
11534
11535 /* For 31 bit this is not covered by the frame_size check below
11536 since f4, f6 are saved in the register save area without needing
11537 additional stack space. */
11538 if (!TARGET_64BIT
11539 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11540 return false;
11541
11542 if (cfun->machine->base_reg
11543 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11544 return false;
11545
11546 return cfun_frame_layout.frame_size == 0;
11547 }
11548
11549 /* The VX ABI differs for vararg functions. Therefore we need the
11550 prototype of the callee to be available when passing vector type
11551 values. */
11552 static const char *
11553 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11554 {
11555 return ((TARGET_VX_ABI
11556 && typelist == 0
11557 && VECTOR_TYPE_P (TREE_TYPE (val))
11558 && (funcdecl == NULL_TREE
11559 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11560 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11561 ? N_("vector argument passed to unprototyped function")
11562 : NULL);
11563 }
11564
11565
11566 /* Return the size in bytes of a function argument of
11567 type TYPE and/or mode MODE. At least one of TYPE or
11568 MODE must be specified. */
11569
11570 static int
11571 s390_function_arg_size (machine_mode mode, const_tree type)
11572 {
11573 if (type)
11574 return int_size_in_bytes (type);
11575
11576 /* No type info available for some library calls ... */
11577 if (mode != BLKmode)
11578 return GET_MODE_SIZE (mode);
11579
11580 /* If we have neither type nor mode, abort. */
11581 gcc_unreachable ();
11582 }
11583
11584 /* Return true if a function argument of type TYPE and mode MODE
11585 is to be passed in a vector register, if available. */
11586
11587 bool
11588 s390_function_arg_vector (machine_mode mode, const_tree type)
11589 {
11590 if (!TARGET_VX_ABI)
11591 return false;
11592
11593 if (s390_function_arg_size (mode, type) > 16)
11594 return false;
11595
11596 /* No type info available for some library calls ... */
11597 if (!type)
11598 return VECTOR_MODE_P (mode);
11599
11600 /* The ABI says that record types with a single member are treated
11601 just like that member would be. */
11602 while (TREE_CODE (type) == RECORD_TYPE)
11603 {
11604 tree field, single = NULL_TREE;
11605
11606 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11607 {
11608 if (TREE_CODE (field) != FIELD_DECL)
11609 continue;
11610
11611 if (single == NULL_TREE)
11612 single = TREE_TYPE (field);
11613 else
11614 return false;
11615 }
11616
11617 if (single == NULL_TREE)
11618 return false;
11619 else
11620 {
11621 /* If the field declaration adds extra bytes due to
11622 e.g. padding, this is not accepted as a vector type. */
11623 if (int_size_in_bytes (single) <= 0
11624 || int_size_in_bytes (single) != int_size_in_bytes (type))
11625 return false;
11626 type = single;
11627 }
11628 }
11629
11630 return VECTOR_TYPE_P (type);
11631 }
11632
11633 /* Return true if a function argument of type TYPE and mode MODE
11634 is to be passed in a floating-point register, if available. */
11635
11636 static bool
11637 s390_function_arg_float (machine_mode mode, const_tree type)
11638 {
11639 if (s390_function_arg_size (mode, type) > 8)
11640 return false;
11641
11642 /* Soft-float changes the ABI: no floating-point registers are used. */
11643 if (TARGET_SOFT_FLOAT)
11644 return false;
11645
11646 /* No type info available for some library calls ... */
11647 if (!type)
11648 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11649
11650 /* The ABI says that record types with a single member are treated
11651 just like that member would be. */
11652 while (TREE_CODE (type) == RECORD_TYPE)
11653 {
11654 tree field, single = NULL_TREE;
11655
11656 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11657 {
11658 if (TREE_CODE (field) != FIELD_DECL)
11659 continue;
11660
11661 if (single == NULL_TREE)
11662 single = TREE_TYPE (field);
11663 else
11664 return false;
11665 }
11666
11667 if (single == NULL_TREE)
11668 return false;
11669 else
11670 type = single;
11671 }
11672
11673 return TREE_CODE (type) == REAL_TYPE;
11674 }
11675
11676 /* Return true if a function argument of type TYPE and mode MODE
11677 is to be passed in an integer register, or a pair of integer
11678 registers, if available. */
11679
11680 static bool
11681 s390_function_arg_integer (machine_mode mode, const_tree type)
11682 {
11683 int size = s390_function_arg_size (mode, type);
11684 if (size > 8)
11685 return false;
11686
11687 /* No type info available for some library calls ... */
11688 if (!type)
11689 return GET_MODE_CLASS (mode) == MODE_INT
11690 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11691
11692 /* We accept small integral (and similar) types. */
11693 if (INTEGRAL_TYPE_P (type)
11694 || POINTER_TYPE_P (type)
11695 || TREE_CODE (type) == NULLPTR_TYPE
11696 || TREE_CODE (type) == OFFSET_TYPE
11697 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11698 return true;
11699
11700 /* We also accept structs of size 1, 2, 4, 8 that are not
11701 passed in floating-point registers. */
11702 if (AGGREGATE_TYPE_P (type)
11703 && exact_log2 (size) >= 0
11704 && !s390_function_arg_float (mode, type))
11705 return true;
11706
11707 return false;
11708 }
11709
11710 /* Return 1 if a function argument of type TYPE and mode MODE
11711 is to be passed by reference. The ABI specifies that only
11712 structures of size 1, 2, 4, or 8 bytes are passed by value,
11713 all other structures (and complex numbers) are passed by
11714 reference. */
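/* For illustration only, with assumed example types that do not appear
   in this file:

     struct { int i; }                 4 bytes, power of two
                                       -> passed by value in a GPR
     struct { int i; int j; int k; }   12 bytes
                                       -> passed by reference
     _Complex double                   -> passed by reference
     vector accepted by the VX ABI     -> not passed by reference here
     (size <= 16 bytes)                                               */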
11715
11716 static bool
11717 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11718 machine_mode mode, const_tree type,
11719 bool named ATTRIBUTE_UNUSED)
11720 {
11721 int size = s390_function_arg_size (mode, type);
11722
11723 if (s390_function_arg_vector (mode, type))
11724 return false;
11725
11726 if (size > 8)
11727 return true;
11728
11729 if (type)
11730 {
11731 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11732 return true;
11733
11734 if (TREE_CODE (type) == COMPLEX_TYPE
11735 || TREE_CODE (type) == VECTOR_TYPE)
11736 return true;
11737 }
11738
11739 return false;
11740 }
11741
11742 /* Update the data in CUM to advance over an argument of mode MODE and
11743 data type TYPE. (TYPE is null for libcalls where that information
11744 may not be available.) The boolean NAMED specifies whether the
11745 argument is a named argument (as opposed to an unnamed argument
11746 matching an ellipsis). */
11747
11748 static void
11749 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11750 const_tree type, bool named)
11751 {
11752 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11753
11754 if (s390_function_arg_vector (mode, type))
11755 {
11756 /* We are called for unnamed vector stdarg arguments which are
11757 passed on the stack. In this case this hook does not have to
11758 do anything since stack arguments are tracked by common
11759 code. */
11760 if (!named)
11761 return;
11762 cum->vrs += 1;
11763 }
11764 else if (s390_function_arg_float (mode, type))
11765 {
11766 cum->fprs += 1;
11767 }
11768 else if (s390_function_arg_integer (mode, type))
11769 {
11770 int size = s390_function_arg_size (mode, type);
11771 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11772 }
11773 else
11774 gcc_unreachable ();
11775 }
11776
11777 /* Define where to put the arguments to a function.
11778 Value is zero to push the argument on the stack,
11779 or a hard register in which to store the argument.
11780
11781 MODE is the argument's machine mode.
11782 TYPE is the data type of the argument (as a tree).
11783 This is null for libcalls where that information may
11784 not be available.
11785 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11786 the preceding args and about the function being called.
11787 NAMED is nonzero if this argument is a named parameter
11788 (otherwise it is an extra parameter matching an ellipsis).
11789
11790 On S/390, we use general purpose registers 2 through 6 to
11791 pass integer, pointer, and certain structure arguments, and
11792 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11793 to pass floating point arguments. All remaining arguments
11794 are pushed to the stack. */
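/* A worked example (hypothetical prototype, 64-bit ABI, vector ABI
   disabled) of how the rules below assign registers; the GPR and FPR
   counters advance independently:

     void f (int a, double b, long c, double d, long e);

       a -> %r2    b -> %f0    c -> %r3    d -> %f2    e -> %r4

   Further integer arguments would use %r5 and %r6, further doubles %f4
   and %f6; anything beyond that goes to the stack.  */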
11795
11796 static rtx
11797 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11798 const_tree type, bool named)
11799 {
11800 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11801
11802 if (!named)
11803 s390_check_type_for_vector_abi (type, true, false);
11804
11805 if (s390_function_arg_vector (mode, type))
11806 {
11807 /* Vector arguments being part of the ellipsis are passed on the
11808 stack. */
11809 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11810 return NULL_RTX;
11811
11812 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11813 }
11814 else if (s390_function_arg_float (mode, type))
11815 {
11816 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11817 return NULL_RTX;
11818 else
11819 return gen_rtx_REG (mode, cum->fprs + 16);
11820 }
11821 else if (s390_function_arg_integer (mode, type))
11822 {
11823 int size = s390_function_arg_size (mode, type);
11824 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11825
11826 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11827 return NULL_RTX;
11828 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11829 return gen_rtx_REG (mode, cum->gprs + 2);
11830 else if (n_gprs == 2)
11831 {
11832 rtvec p = rtvec_alloc (2);
11833
11834 RTVEC_ELT (p, 0)
11835 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11836 const0_rtx);
11837 RTVEC_ELT (p, 1)
11838 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11839 GEN_INT (4));
11840
11841 return gen_rtx_PARALLEL (mode, p);
11842 }
11843 }
11844
11845 /* After the real arguments, expand_call calls us once again
11846 with a void_type_node type. Whatever we return here is
11847 passed as operand 2 to the call expanders.
11848
11849 We don't need this feature ... */
11850 else if (type == void_type_node)
11851 return const0_rtx;
11852
11853 gcc_unreachable ();
11854 }
11855
11856 /* Return true if return values of type TYPE should be returned
11857 in a memory buffer whose address is passed by the caller as
11858 hidden first argument. */
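/* Some assumed examples: a long, a double or any pointer is returned in
   a register; a struct, a _Complex value or an integer wider than
   8 bytes is returned through the hidden buffer; a vector of at most
   16 bytes is returned in a vector register when the VX ABI is in
   effect.  */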
11859
11860 static bool
11861 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11862 {
11863 /* We accept small integral (and similar) types. */
11864 if (INTEGRAL_TYPE_P (type)
11865 || POINTER_TYPE_P (type)
11866 || TREE_CODE (type) == OFFSET_TYPE
11867 || TREE_CODE (type) == REAL_TYPE)
11868 return int_size_in_bytes (type) > 8;
11869
11870 /* vector types which fit into a VR. */
11871 if (TARGET_VX_ABI
11872 && VECTOR_TYPE_P (type)
11873 && int_size_in_bytes (type) <= 16)
11874 return false;
11875
11876 /* Aggregates and similar constructs are always returned
11877 in memory. */
11878 if (AGGREGATE_TYPE_P (type)
11879 || TREE_CODE (type) == COMPLEX_TYPE
11880 || VECTOR_TYPE_P (type))
11881 return true;
11882
11883 /* ??? We get called on all sorts of random stuff from
11884 aggregate_value_p. We can't abort, but it's not clear
11885 what's safe to return. Pretend it's a struct I guess. */
11886 return true;
11887 }
11888
11889 /* Function arguments and return values are promoted to word size. */
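/* For illustration: on a 64-bit target a QImode, HImode or SImode
   scalar is widened to DImode (Pmode), and pointer arguments are
   additionally marked for extension according to
   POINTERS_EXTEND_UNSIGNED.  */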
11890
11891 static machine_mode
11892 s390_promote_function_mode (const_tree type, machine_mode mode,
11893 int *punsignedp,
11894 const_tree fntype ATTRIBUTE_UNUSED,
11895 int for_return ATTRIBUTE_UNUSED)
11896 {
11897 if (INTEGRAL_MODE_P (mode)
11898 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11899 {
11900 if (type != NULL_TREE && POINTER_TYPE_P (type))
11901 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11902 return Pmode;
11903 }
11904
11905 return mode;
11906 }
11907
11908 /* Define where to return a (scalar) value of type RET_TYPE.
11909 If RET_TYPE is null, define where to return a (scalar)
11910 value of mode MODE from a libcall. */
11911
11912 static rtx
11913 s390_function_and_libcall_value (machine_mode mode,
11914 const_tree ret_type,
11915 const_tree fntype_or_decl,
11916 bool outgoing ATTRIBUTE_UNUSED)
11917 {
11918 /* For vector return types it is important to use the RET_TYPE
11919 argument whenever available since the middle-end might have
11920 changed the mode to a scalar mode. */
11921 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11922 || (!ret_type && VECTOR_MODE_P (mode)));
11923
11924 /* For normal functions perform the promotion as
11925 promote_function_mode would do. */
11926 if (ret_type)
11927 {
11928 int unsignedp = TYPE_UNSIGNED (ret_type);
11929 mode = promote_function_mode (ret_type, mode, &unsignedp,
11930 fntype_or_decl, 1);
11931 }
11932
11933 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11934 || SCALAR_FLOAT_MODE_P (mode)
11935 || (TARGET_VX_ABI && vector_ret_type_p));
11936 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11937
11938 if (TARGET_VX_ABI && vector_ret_type_p)
11939 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11940 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11941 return gen_rtx_REG (mode, 16);
11942 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11943 || UNITS_PER_LONG == UNITS_PER_WORD)
11944 return gen_rtx_REG (mode, 2);
11945 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11946 {
11947 /* This case is triggered when returning a 64 bit value with
11948 -m31 -mzarch. Although the value would fit into a single
11949 register it has to be forced into a 32 bit register pair in
11950 order to match the ABI. */
11951 rtvec p = rtvec_alloc (2);
11952
11953 RTVEC_ELT (p, 0)
11954 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11955 RTVEC_ELT (p, 1)
11956 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11957
11958 return gen_rtx_PARALLEL (mode, p);
11959 }
11960
11961 gcc_unreachable ();
11962 }
11963
11964 /* Define where to return a scalar return value of type RET_TYPE. */
11965
11966 static rtx
11967 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11968 bool outgoing)
11969 {
11970 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11971 fn_decl_or_type, outgoing);
11972 }
11973
11974 /* Define where to return a scalar libcall return value of mode
11975 MODE. */
11976
11977 static rtx
11978 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11979 {
11980 return s390_function_and_libcall_value (mode, NULL_TREE,
11981 NULL_TREE, true);
11982 }
11983
11984
11985 /* Create and return the va_list datatype.
11986
11987 On S/390, va_list is an array type equivalent to
11988
11989 typedef struct __va_list_tag
11990 {
11991 long __gpr;
11992 long __fpr;
11993 void *__overflow_arg_area;
11994 void *__reg_save_area;
11995 } va_list[1];
11996
11997 where __gpr and __fpr hold the number of general purpose
11998 or floating point arguments used up to now, respectively,
11999 __overflow_arg_area points to the stack location of the
12000 next argument passed on the stack, and __reg_save_area
12001 always points to the start of the register area in the
12002 call frame of the current function. The function prologue
12003 saves all registers used for argument passing into this
12004 area if the function uses variable arguments. */
12005
12006 static tree
12007 s390_build_builtin_va_list (void)
12008 {
12009 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12010
12011 record = lang_hooks.types.make_type (RECORD_TYPE);
12012
12013 type_decl =
12014 build_decl (BUILTINS_LOCATION,
12015 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12016
12017 f_gpr = build_decl (BUILTINS_LOCATION,
12018 FIELD_DECL, get_identifier ("__gpr"),
12019 long_integer_type_node);
12020 f_fpr = build_decl (BUILTINS_LOCATION,
12021 FIELD_DECL, get_identifier ("__fpr"),
12022 long_integer_type_node);
12023 f_ovf = build_decl (BUILTINS_LOCATION,
12024 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12025 ptr_type_node);
12026 f_sav = build_decl (BUILTINS_LOCATION,
12027 FIELD_DECL, get_identifier ("__reg_save_area"),
12028 ptr_type_node);
12029
12030 va_list_gpr_counter_field = f_gpr;
12031 va_list_fpr_counter_field = f_fpr;
12032
12033 DECL_FIELD_CONTEXT (f_gpr) = record;
12034 DECL_FIELD_CONTEXT (f_fpr) = record;
12035 DECL_FIELD_CONTEXT (f_ovf) = record;
12036 DECL_FIELD_CONTEXT (f_sav) = record;
12037
12038 TYPE_STUB_DECL (record) = type_decl;
12039 TYPE_NAME (record) = type_decl;
12040 TYPE_FIELDS (record) = f_gpr;
12041 DECL_CHAIN (f_gpr) = f_fpr;
12042 DECL_CHAIN (f_fpr) = f_ovf;
12043 DECL_CHAIN (f_ovf) = f_sav;
12044
12045 layout_type (record);
12046
12047 /* The correct type is an array type of one element. */
12048 return build_array_type (record, build_index_type (size_zero_node));
12049 }
12050
12051 /* Implement va_start by filling the va_list structure VALIST.
12052 STDARG_P is always true, and ignored.
12053 NEXTARG points to the first anonymous stack argument.
12054
12055 The following global variables are used to initialize
12056 the va_list structure:
12057
12058 crtl->args.info:
12059 holds number of gprs and fprs used for named arguments.
12060 crtl->args.arg_offset_rtx:
12061 holds the offset of the first anonymous stack argument
12062 (relative to the virtual arg pointer). */
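/* A simplified sketch of the assignments built below (not the exact
   GENERIC trees); ap stands for the user-visible va_list object:

     ap->__gpr = crtl->args.info.gprs;     // # of named GPR arguments
     ap->__fpr = crtl->args.info.fprs;     // # of named FPR arguments
     ap->__overflow_arg_area
       = incoming_args + MAX (INTVAL (crtl->args.arg_offset_rtx), 0);
     ap->__reg_save_area
       = return_address_pointer - RETURN_REGNUM * UNITS_PER_LONG;

   where incoming_args is virtual_incoming_args_rtx or, with
   -fsplit-stack, the saved varargs pointer.  Assignments whose area can
   never be referenced are omitted.  */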
12063
12064 static void
12065 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12066 {
12067 HOST_WIDE_INT n_gpr, n_fpr;
12068 int off;
12069 tree f_gpr, f_fpr, f_ovf, f_sav;
12070 tree gpr, fpr, ovf, sav, t;
12071
12072 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12073 f_fpr = DECL_CHAIN (f_gpr);
12074 f_ovf = DECL_CHAIN (f_fpr);
12075 f_sav = DECL_CHAIN (f_ovf);
12076
12077 valist = build_simple_mem_ref (valist);
12078 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12079 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12080 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12081 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12082
12083 /* Count number of gp and fp argument registers used. */
12084
12085 n_gpr = crtl->args.info.gprs;
12086 n_fpr = crtl->args.info.fprs;
12087
12088 if (cfun->va_list_gpr_size)
12089 {
12090 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12091 build_int_cst (NULL_TREE, n_gpr));
12092 TREE_SIDE_EFFECTS (t) = 1;
12093 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12094 }
12095
12096 if (cfun->va_list_fpr_size)
12097 {
12098 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12099 build_int_cst (NULL_TREE, n_fpr));
12100 TREE_SIDE_EFFECTS (t) = 1;
12101 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12102 }
12103
12104 if (flag_split_stack
12105 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12106 == NULL)
12107 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12108 {
12109 rtx reg;
12110 rtx_insn *seq;
12111
12112 reg = gen_reg_rtx (Pmode);
12113 cfun->machine->split_stack_varargs_pointer = reg;
12114
12115 start_sequence ();
12116 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12117 seq = get_insns ();
12118 end_sequence ();
12119
12120 push_topmost_sequence ();
12121 emit_insn_after (seq, entry_of_function ());
12122 pop_topmost_sequence ();
12123 }
12124
12125 /* Find the overflow area.
12126 FIXME: This currently is too pessimistic when the vector ABI is
12127 enabled. In that case we *always* set up the overflow area
12128 pointer. */
12129 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12130 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12131 || TARGET_VX_ABI)
12132 {
12133 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12134 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12135 else
12136 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12137
12138 off = INTVAL (crtl->args.arg_offset_rtx);
12139 off = off < 0 ? 0 : off;
12140 if (TARGET_DEBUG_ARG)
12141 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12142 (int)n_gpr, (int)n_fpr, off);
12143
12144 t = fold_build_pointer_plus_hwi (t, off);
12145
12146 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12147 TREE_SIDE_EFFECTS (t) = 1;
12148 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12149 }
12150
12151 /* Find the register save area. */
12152 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12153 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12154 {
12155 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12156 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12157
12158 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12159 TREE_SIDE_EFFECTS (t) = 1;
12160 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12161 }
12162 }
12163
12164 /* Implement va_arg by updating the va_list structure
12165 VALIST as required to retrieve an argument of type
12166 TYPE, and returning that argument.
12167
12168 Generates code equivalent to:
12169
12170 if (integral value) {
12171 if (size <= 4 && args.gpr < 5 ||
12172 size > 4 && args.gpr < 4 )
12173 ret = args.reg_save_area[args.gpr+8]
12174 else
12175 ret = *args.overflow_arg_area++;
12176 } else if (vector value) {
12177 ret = *args.overflow_arg_area;
12178 args.overflow_arg_area += size / 8;
12179 } else if (float value) {
12180 if (args.fpr < 2)
12181 ret = args.reg_save_area[args.fpr+64]
12182 else
12183 ret = *args.overflow_arg_area++;
12184 } else if (aggregate value) {
12185 if (args.gpr < 5)
12186 ret = *args.reg_save_area[args.gpr]
12187 else
12188 ret = **args.overflow_arg_area++;
12189 } */
12190
12191 static tree
12192 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12193 gimple_seq *post_p ATTRIBUTE_UNUSED)
12194 {
12195 tree f_gpr, f_fpr, f_ovf, f_sav;
12196 tree gpr, fpr, ovf, sav, reg, t, u;
12197 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12198 tree lab_false, lab_over;
12199 tree addr = create_tmp_var (ptr_type_node, "addr");
12200 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12201 a stack slot. */
12202
12203 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12204 f_fpr = DECL_CHAIN (f_gpr);
12205 f_ovf = DECL_CHAIN (f_fpr);
12206 f_sav = DECL_CHAIN (f_ovf);
12207
12208 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12209 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12210 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12211
12212 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12213 both appear on a lhs. */
12214 valist = unshare_expr (valist);
12215 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12216
12217 size = int_size_in_bytes (type);
12218
12219 s390_check_type_for_vector_abi (type, true, false);
12220
12221 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12222 {
12223 if (TARGET_DEBUG_ARG)
12224 {
12225 fprintf (stderr, "va_arg: aggregate type");
12226 debug_tree (type);
12227 }
12228
12229 /* Aggregates are passed by reference. */
12230 indirect_p = 1;
12231 reg = gpr;
12232 n_reg = 1;
12233
12234 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12235 will be added by s390_frame_info because for va_args an even
12236 number of GPRs always has to be saved (r15-r2 = 14 regs). */
12237 sav_ofs = 2 * UNITS_PER_LONG;
12238 sav_scale = UNITS_PER_LONG;
12239 size = UNITS_PER_LONG;
12240 max_reg = GP_ARG_NUM_REG - n_reg;
12241 left_align_p = false;
12242 }
12243 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12244 {
12245 if (TARGET_DEBUG_ARG)
12246 {
12247 fprintf (stderr, "va_arg: vector type");
12248 debug_tree (type);
12249 }
12250
12251 indirect_p = 0;
12252 reg = NULL_TREE;
12253 n_reg = 0;
12254 sav_ofs = 0;
12255 sav_scale = 8;
12256 max_reg = 0;
12257 left_align_p = true;
12258 }
12259 else if (s390_function_arg_float (TYPE_MODE (type), type))
12260 {
12261 if (TARGET_DEBUG_ARG)
12262 {
12263 fprintf (stderr, "va_arg: float type");
12264 debug_tree (type);
12265 }
12266
12267 /* FP args go in FP registers, if present. */
12268 indirect_p = 0;
12269 reg = fpr;
12270 n_reg = 1;
12271 sav_ofs = 16 * UNITS_PER_LONG;
12272 sav_scale = 8;
12273 max_reg = FP_ARG_NUM_REG - n_reg;
12274 left_align_p = false;
12275 }
12276 else
12277 {
12278 if (TARGET_DEBUG_ARG)
12279 {
12280 fprintf (stderr, "va_arg: other type");
12281 debug_tree (type);
12282 }
12283
12284 /* Otherwise into GP registers. */
12285 indirect_p = 0;
12286 reg = gpr;
12287 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12288
12289 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12290 will be added by s390_frame_info because for va_args an even
12291 number of GPRs always has to be saved (r15-r2 = 14 regs). */
12292 sav_ofs = 2 * UNITS_PER_LONG;
12293
12294 if (size < UNITS_PER_LONG)
12295 sav_ofs += UNITS_PER_LONG - size;
12296
12297 sav_scale = UNITS_PER_LONG;
12298 max_reg = GP_ARG_NUM_REG - n_reg;
12299 left_align_p = false;
12300 }
12301
12302 /* Pull the value out of the saved registers ... */
12303
12304 if (reg != NULL_TREE)
12305 {
12306 /*
12307 if (reg > ((typeof (reg))max_reg))
12308 goto lab_false;
12309
12310 addr = sav + sav_ofs + reg * sav_scale;
12311
12312 goto lab_over;
12313
12314 lab_false:
12315 */
12316
12317 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12318 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12319
12320 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12321 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12322 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12323 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12324 gimplify_and_add (t, pre_p);
12325
12326 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12327 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12328 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12329 t = fold_build_pointer_plus (t, u);
12330
12331 gimplify_assign (addr, t, pre_p);
12332
12333 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12334
12335 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12336 }
12337
12338 /* ... Otherwise out of the overflow area. */
12339
12340 t = ovf;
12341 if (size < UNITS_PER_LONG && !left_align_p)
12342 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12343
12344 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12345
12346 gimplify_assign (addr, t, pre_p);
12347
12348 if (size < UNITS_PER_LONG && left_align_p)
12349 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12350 else
12351 t = fold_build_pointer_plus_hwi (t, size);
12352
12353 gimplify_assign (ovf, t, pre_p);
12354
12355 if (reg != NULL_TREE)
12356 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12357
12358
12359 /* Increment register save count. */
12360
12361 if (n_reg > 0)
12362 {
12363 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12364 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12365 gimplify_and_add (u, pre_p);
12366 }
12367
12368 if (indirect_p)
12369 {
12370 t = build_pointer_type_for_mode (build_pointer_type (type),
12371 ptr_mode, true);
12372 addr = fold_convert (t, addr);
12373 addr = build_va_arg_indirect_ref (addr);
12374 }
12375 else
12376 {
12377 t = build_pointer_type_for_mode (type, ptr_mode, true);
12378 addr = fold_convert (t, addr);
12379 }
12380
12381 return build_va_arg_indirect_ref (addr);
12382 }
12383
12384 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12385 expanders.
12386 DEST - Register location where CC will be stored.
12387 TDB - Pointer to a 256 byte area where to store the transaction
12388 diagnostic block. NULL if TDB is not needed.
12389 RETRY - Retry count value. If non-NULL, a retry loop for CC2
12390 is emitted.
12391 CLOBBER_FPRS_P - If true, clobbers for all FPRs are emitted as part
12392 of the tbegin instruction pattern. */
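/* When RETRY is given, the emitted code corresponds roughly to the
   following control flow (an illustrative sketch only):

     retry_reg = RETRY + 1;
     retry_plus_two = RETRY + 2;
   retry_label:
     DEST = condition code of TBEGIN;
     if (CC is 0, 1 or 3)                // success or permanent failure
       goto leave_label;
     PPA (retry_plus_two - retry_reg);   // transaction-abort assist
     if (--retry_reg != 0)
       goto retry_label;
   leave_label:                                                       */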
12393
12394 void
12395 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12396 {
12397 rtx retry_plus_two = gen_reg_rtx (SImode);
12398 rtx retry_reg = gen_reg_rtx (SImode);
12399 rtx_code_label *retry_label = NULL;
12400
12401 if (retry != NULL_RTX)
12402 {
12403 emit_move_insn (retry_reg, retry);
12404 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12405 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12406 retry_label = gen_label_rtx ();
12407 emit_label (retry_label);
12408 }
12409
12410 if (clobber_fprs_p)
12411 {
12412 if (TARGET_VX)
12413 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12414 tdb));
12415 else
12416 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12417 tdb));
12418 }
12419 else
12420 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12421 tdb));
12422
12423 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12424 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12425 CC_REGNUM)),
12426 UNSPEC_CC_TO_INT));
12427 if (retry != NULL_RTX)
12428 {
12429 const int CC0 = 1 << 3;
12430 const int CC1 = 1 << 2;
12431 const int CC3 = 1 << 0;
12432 rtx jump;
12433 rtx count = gen_reg_rtx (SImode);
12434 rtx_code_label *leave_label = gen_label_rtx ();
12435
12436 /* Exit for success and permanent failures. */
12437 jump = s390_emit_jump (leave_label,
12438 gen_rtx_EQ (VOIDmode,
12439 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12440 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12441 LABEL_NUSES (leave_label) = 1;
12442
12443 /* CC2 - transient failure. Perform retry with ppa. */
12444 emit_move_insn (count, retry_plus_two);
12445 emit_insn (gen_subsi3 (count, count, retry_reg));
12446 emit_insn (gen_tx_assist (count));
12447 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12448 retry_reg,
12449 retry_reg));
12450 JUMP_LABEL (jump) = retry_label;
12451 LABEL_NUSES (retry_label) = 1;
12452 emit_label (leave_label);
12453 }
12454 }
12455
12456
12457 /* Return the decl for the target specific builtin with the function
12458 code FCODE. */
12459
12460 static tree
12461 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12462 {
12463 if (fcode >= S390_BUILTIN_MAX)
12464 return error_mark_node;
12465
12466 return s390_builtin_decls[fcode];
12467 }
12468
12469 /* We call mcount before the function prologue. So a profiled leaf
12470 function should stay a leaf function. */
12471
12472 static bool
12473 s390_keep_leaf_when_profiled ()
12474 {
12475 return true;
12476 }
12477
12478 /* Output assembly code for the trampoline template to
12479 stdio stream FILE.
12480
12481 On S/390, we use gpr 1 internally in the trampoline code;
12482 gpr 0 is used to hold the static chain. */
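/* Sketch of the resulting 64-bit trampoline (offsets in bytes; the
   31-bit variant is analogous with 4-byte slots and an lm instruction):

      0:  basr %r1,0            %r1 = address of the following lmg
      2:  lmg  %r0,%r1,14(%r1)  %r0 = static chain, %r1 = target address
      8:  br   %r1              jump to the target function
     10:  padding up to 2 * UNITS_PER_LONG
     16:  static chain value    written by s390_trampoline_init
     24:  target function address

   The displacement of 14 reaches offset 16 because %r1 points to
   offset 2 after the basr.  */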
12483
12484 static void
12485 s390_asm_trampoline_template (FILE *file)
12486 {
12487 rtx op[2];
12488 op[0] = gen_rtx_REG (Pmode, 0);
12489 op[1] = gen_rtx_REG (Pmode, 1);
12490
12491 if (TARGET_64BIT)
12492 {
12493 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12494 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12495 output_asm_insn ("br\t%1", op); /* 2 byte */
12496 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12497 }
12498 else
12499 {
12500 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12501 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12502 output_asm_insn ("br\t%1", op); /* 2 byte */
12503 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12504 }
12505 }
12506
12507 /* Emit RTL insns to initialize the variable parts of a trampoline.
12508 FNADDR is an RTX for the address of the function's pure code.
12509 CXT is an RTX for the static chain value for the function. */
12510
12511 static void
12512 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12513 {
12514 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12515 rtx mem;
12516
12517 emit_block_move (m_tramp, assemble_trampoline_template (),
12518 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12519
12520 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12521 emit_move_insn (mem, cxt);
12522 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12523 emit_move_insn (mem, fnaddr);
12524 }
12525
12526 /* Output assembler code to FILE to increment profiler label # LABELNO
12527 for profiling a function entry. */
12528
12529 void
12530 s390_function_profiler (FILE *file, int labelno)
12531 {
12532 rtx op[7];
12533
12534 char label[128];
12535 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12536
12537 fprintf (file, "# function profiler \n");
12538
12539 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12540 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12541 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12542
12543 op[2] = gen_rtx_REG (Pmode, 1);
12544 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12545 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12546
12547 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12548 if (flag_pic)
12549 {
12550 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12551 op[4] = gen_rtx_CONST (Pmode, op[4]);
12552 }
12553
12554 if (TARGET_64BIT)
12555 {
12556 output_asm_insn ("stg\t%0,%1", op);
12557 output_asm_insn ("larl\t%2,%3", op);
12558 output_asm_insn ("brasl\t%0,%4", op);
12559 output_asm_insn ("lg\t%0,%1", op);
12560 }
12561 else if (TARGET_CPU_ZARCH)
12562 {
12563 output_asm_insn ("st\t%0,%1", op);
12564 output_asm_insn ("larl\t%2,%3", op);
12565 output_asm_insn ("brasl\t%0,%4", op);
12566 output_asm_insn ("l\t%0,%1", op);
12567 }
12568 else if (!flag_pic)
12569 {
12570 op[6] = gen_label_rtx ();
12571
12572 output_asm_insn ("st\t%0,%1", op);
12573 output_asm_insn ("bras\t%2,%l6", op);
12574 output_asm_insn (".long\t%4", op);
12575 output_asm_insn (".long\t%3", op);
12576 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12577 output_asm_insn ("l\t%0,0(%2)", op);
12578 output_asm_insn ("l\t%2,4(%2)", op);
12579 output_asm_insn ("basr\t%0,%0", op);
12580 output_asm_insn ("l\t%0,%1", op);
12581 }
12582 else
12583 {
12584 op[5] = gen_label_rtx ();
12585 op[6] = gen_label_rtx ();
12586
12587 output_asm_insn ("st\t%0,%1", op);
12588 output_asm_insn ("bras\t%2,%l6", op);
12589 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12590 output_asm_insn (".long\t%4-%l5", op);
12591 output_asm_insn (".long\t%3-%l5", op);
12592 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12593 output_asm_insn ("lr\t%0,%2", op);
12594 output_asm_insn ("a\t%0,0(%2)", op);
12595 output_asm_insn ("a\t%2,4(%2)", op);
12596 output_asm_insn ("basr\t%0,%0", op);
12597 output_asm_insn ("l\t%0,%1", op);
12598 }
12599 }
12600
12601 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12602 into its SYMBOL_REF_FLAGS. */
12603
12604 static void
12605 s390_encode_section_info (tree decl, rtx rtl, int first)
12606 {
12607 default_encode_section_info (decl, rtl, first);
12608
12609 if (TREE_CODE (decl) == VAR_DECL)
12610 {
12611 /* Store the alignment to be able to check if we can use
12612 a larl/load-relative instruction. We only handle the cases
12613 that can go wrong (i.e. no FUNC_DECLs). */
12614 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12615 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12616 else if (DECL_ALIGN (decl) % 32)
12617 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12618 else if (DECL_ALIGN (decl) % 64)
12619 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12620 }
12621
12622 /* Literal pool references don't have a decl so they are handled
12623 differently here. We rely on the information in the MEM_ALIGN
12624 entry to decide upon the alignment. */
12625 if (MEM_P (rtl)
12626 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12627 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12628 {
12629 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12630 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12631 else if (MEM_ALIGN (rtl) % 32)
12632 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12633 else if (MEM_ALIGN (rtl) % 64)
12634 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12635 }
12636 }
12637
12638 /* Output thunk to FILE that implements a C++ virtual function call (with
12639 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12640 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12641 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12642 relative to the resulting this pointer. */
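/* In C-like pseudo code the emitted thunk behaves as follows (a sketch
   only; the actual output below is hand-written assembly):

     this += DELTA;
     if (VCALL_OFFSET != 0)
       this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
     goto FUNCTION;   // tail call, argument registers left untouched  */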
12643
12644 static void
12645 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12646 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12647 tree function)
12648 {
12649 rtx op[10];
12650 int nonlocal = 0;
12651
12652 /* Make sure unwind info is emitted for the thunk if needed. */
12653 final_start_function (emit_barrier (), file, 1);
12654
12655 /* Operand 0 is the target function. */
12656 op[0] = XEXP (DECL_RTL (function), 0);
12657 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12658 {
12659 nonlocal = 1;
12660 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12661 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12662 op[0] = gen_rtx_CONST (Pmode, op[0]);
12663 }
12664
12665 /* Operand 1 is the 'this' pointer. */
12666 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12667 op[1] = gen_rtx_REG (Pmode, 3);
12668 else
12669 op[1] = gen_rtx_REG (Pmode, 2);
12670
12671 /* Operand 2 is the delta. */
12672 op[2] = GEN_INT (delta);
12673
12674 /* Operand 3 is the vcall_offset. */
12675 op[3] = GEN_INT (vcall_offset);
12676
12677 /* Operand 4 is the temporary register. */
12678 op[4] = gen_rtx_REG (Pmode, 1);
12679
12680 /* Operands 5 to 8 can be used as labels. */
12681 op[5] = NULL_RTX;
12682 op[6] = NULL_RTX;
12683 op[7] = NULL_RTX;
12684 op[8] = NULL_RTX;
12685
12686 /* Operand 9 can be used for temporary register. */
12687 op[9] = NULL_RTX;
12688
12689 /* Generate code. */
12690 if (TARGET_64BIT)
12691 {
12692 /* Setup literal pool pointer if required. */
12693 if ((!DISP_IN_RANGE (delta)
12694 && !CONST_OK_FOR_K (delta)
12695 && !CONST_OK_FOR_Os (delta))
12696 || (!DISP_IN_RANGE (vcall_offset)
12697 && !CONST_OK_FOR_K (vcall_offset)
12698 && !CONST_OK_FOR_Os (vcall_offset)))
12699 {
12700 op[5] = gen_label_rtx ();
12701 output_asm_insn ("larl\t%4,%5", op);
12702 }
12703
12704 /* Add DELTA to this pointer. */
12705 if (delta)
12706 {
12707 if (CONST_OK_FOR_J (delta))
12708 output_asm_insn ("la\t%1,%2(%1)", op);
12709 else if (DISP_IN_RANGE (delta))
12710 output_asm_insn ("lay\t%1,%2(%1)", op);
12711 else if (CONST_OK_FOR_K (delta))
12712 output_asm_insn ("aghi\t%1,%2", op);
12713 else if (CONST_OK_FOR_Os (delta))
12714 output_asm_insn ("agfi\t%1,%2", op);
12715 else
12716 {
12717 op[6] = gen_label_rtx ();
12718 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12719 }
12720 }
12721
12722 /* Perform vcall adjustment. */
12723 if (vcall_offset)
12724 {
12725 if (DISP_IN_RANGE (vcall_offset))
12726 {
12727 output_asm_insn ("lg\t%4,0(%1)", op);
12728 output_asm_insn ("ag\t%1,%3(%4)", op);
12729 }
12730 else if (CONST_OK_FOR_K (vcall_offset))
12731 {
12732 output_asm_insn ("lghi\t%4,%3", op);
12733 output_asm_insn ("ag\t%4,0(%1)", op);
12734 output_asm_insn ("ag\t%1,0(%4)", op);
12735 }
12736 else if (CONST_OK_FOR_Os (vcall_offset))
12737 {
12738 output_asm_insn ("lgfi\t%4,%3", op);
12739 output_asm_insn ("ag\t%4,0(%1)", op);
12740 output_asm_insn ("ag\t%1,0(%4)", op);
12741 }
12742 else
12743 {
12744 op[7] = gen_label_rtx ();
12745 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12746 output_asm_insn ("ag\t%4,0(%1)", op);
12747 output_asm_insn ("ag\t%1,0(%4)", op);
12748 }
12749 }
12750
12751 /* Jump to target. */
12752 output_asm_insn ("jg\t%0", op);
12753
12754 /* Output literal pool if required. */
12755 if (op[5])
12756 {
12757 output_asm_insn (".align\t4", op);
12758 targetm.asm_out.internal_label (file, "L",
12759 CODE_LABEL_NUMBER (op[5]));
12760 }
12761 if (op[6])
12762 {
12763 targetm.asm_out.internal_label (file, "L",
12764 CODE_LABEL_NUMBER (op[6]));
12765 output_asm_insn (".long\t%2", op);
12766 }
12767 if (op[7])
12768 {
12769 targetm.asm_out.internal_label (file, "L",
12770 CODE_LABEL_NUMBER (op[7]));
12771 output_asm_insn (".long\t%3", op);
12772 }
12773 }
12774 else
12775 {
12776 /* Setup base pointer if required. */
12777 if (!vcall_offset
12778 || (!DISP_IN_RANGE (delta)
12779 && !CONST_OK_FOR_K (delta)
12780 && !CONST_OK_FOR_Os (delta))
12781 || (!DISP_IN_RANGE (delta)
12782 && !CONST_OK_FOR_K (vcall_offset)
12783 && !CONST_OK_FOR_Os (vcall_offset)))
12784 {
12785 op[5] = gen_label_rtx ();
12786 output_asm_insn ("basr\t%4,0", op);
12787 targetm.asm_out.internal_label (file, "L",
12788 CODE_LABEL_NUMBER (op[5]));
12789 }
12790
12791 /* Add DELTA to this pointer. */
12792 if (delta)
12793 {
12794 if (CONST_OK_FOR_J (delta))
12795 output_asm_insn ("la\t%1,%2(%1)", op);
12796 else if (DISP_IN_RANGE (delta))
12797 output_asm_insn ("lay\t%1,%2(%1)", op);
12798 else if (CONST_OK_FOR_K (delta))
12799 output_asm_insn ("ahi\t%1,%2", op);
12800 else if (CONST_OK_FOR_Os (delta))
12801 output_asm_insn ("afi\t%1,%2", op);
12802 else
12803 {
12804 op[6] = gen_label_rtx ();
12805 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12806 }
12807 }
12808
12809 /* Perform vcall adjustment. */
12810 if (vcall_offset)
12811 {
12812 if (CONST_OK_FOR_J (vcall_offset))
12813 {
12814 output_asm_insn ("l\t%4,0(%1)", op);
12815 output_asm_insn ("a\t%1,%3(%4)", op);
12816 }
12817 else if (DISP_IN_RANGE (vcall_offset))
12818 {
12819 output_asm_insn ("l\t%4,0(%1)", op);
12820 output_asm_insn ("ay\t%1,%3(%4)", op);
12821 }
12822 else if (CONST_OK_FOR_K (vcall_offset))
12823 {
12824 output_asm_insn ("lhi\t%4,%3", op);
12825 output_asm_insn ("a\t%4,0(%1)", op);
12826 output_asm_insn ("a\t%1,0(%4)", op);
12827 }
12828 else if (CONST_OK_FOR_Os (vcall_offset))
12829 {
12830 output_asm_insn ("iilf\t%4,%3", op);
12831 output_asm_insn ("a\t%4,0(%1)", op);
12832 output_asm_insn ("a\t%1,0(%4)", op);
12833 }
12834 else
12835 {
12836 op[7] = gen_label_rtx ();
12837 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12838 output_asm_insn ("a\t%4,0(%1)", op);
12839 output_asm_insn ("a\t%1,0(%4)", op);
12840 }
12841
12842 /* We had to clobber the base pointer register.
12843 Re-setup the base pointer (with a different base). */
12844 op[5] = gen_label_rtx ();
12845 output_asm_insn ("basr\t%4,0", op);
12846 targetm.asm_out.internal_label (file, "L",
12847 CODE_LABEL_NUMBER (op[5]));
12848 }
12849
12850 /* Jump to target. */
12851 op[8] = gen_label_rtx ();
12852
12853 if (!flag_pic)
12854 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12855 else if (!nonlocal)
12856 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12857 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12858 else if (flag_pic == 1)
12859 {
12860 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12861 output_asm_insn ("l\t%4,%0(%4)", op);
12862 }
12863 else if (flag_pic == 2)
12864 {
12865 op[9] = gen_rtx_REG (Pmode, 0);
12866 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12867 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12868 output_asm_insn ("ar\t%4,%9", op);
12869 output_asm_insn ("l\t%4,0(%4)", op);
12870 }
12871
12872 output_asm_insn ("br\t%4", op);
12873
12874 /* Output literal pool. */
12875 output_asm_insn (".align\t4", op);
12876
12877 if (nonlocal && flag_pic == 2)
12878 output_asm_insn (".long\t%0", op);
12879 if (nonlocal)
12880 {
12881 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12882 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12883 }
12884
12885 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12886 if (!flag_pic)
12887 output_asm_insn (".long\t%0", op);
12888 else
12889 output_asm_insn (".long\t%0-%5", op);
12890
12891 if (op[6])
12892 {
12893 targetm.asm_out.internal_label (file, "L",
12894 CODE_LABEL_NUMBER (op[6]));
12895 output_asm_insn (".long\t%2", op);
12896 }
12897 if (op[7])
12898 {
12899 targetm.asm_out.internal_label (file, "L",
12900 CODE_LABEL_NUMBER (op[7]));
12901 output_asm_insn (".long\t%3", op);
12902 }
12903 }
12904 final_end_function ();
12905 }
12906
12907 static bool
12908 s390_valid_pointer_mode (machine_mode mode)
12909 {
12910 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12911 }
12912
12913 /* Checks whether the given CALL_EXPR would use a caller
12914 saved register. This is used to decide whether sibling call
12915 optimization could be performed on the respective function
12916 call. */
12917
12918 static bool
12919 s390_call_saved_register_used (tree call_expr)
12920 {
12921 CUMULATIVE_ARGS cum_v;
12922 cumulative_args_t cum;
12923 tree parameter;
12924 machine_mode mode;
12925 tree type;
12926 rtx parm_rtx;
12927 int reg, i;
12928
12929 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12930 cum = pack_cumulative_args (&cum_v);
12931
12932 for (i = 0; i < call_expr_nargs (call_expr); i++)
12933 {
12934 parameter = CALL_EXPR_ARG (call_expr, i);
12935 gcc_assert (parameter);
12936
12937 /* For an undeclared variable passed as parameter we will get
12938 an ERROR_MARK node here. */
12939 if (TREE_CODE (parameter) == ERROR_MARK)
12940 return true;
12941
12942 type = TREE_TYPE (parameter);
12943 gcc_assert (type);
12944
12945 mode = TYPE_MODE (type);
12946 gcc_assert (mode);
12947
12948 /* We assume that in the target function all parameters are
12949 named. This only has an impact on vector argument register
12950 usage, none of which is call-saved. */
12951 if (pass_by_reference (&cum_v, mode, type, true))
12952 {
12953 mode = Pmode;
12954 type = build_pointer_type (type);
12955 }
12956
12957 parm_rtx = s390_function_arg (cum, mode, type, true);
12958
12959 s390_function_arg_advance (cum, mode, type, true);
12960
12961 if (!parm_rtx)
12962 continue;
12963
12964 if (REG_P (parm_rtx))
12965 {
12966 for (reg = 0;
12967 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12968 reg++)
12969 if (!call_used_regs[reg + REGNO (parm_rtx)])
12970 return true;
12971 }
12972
12973 if (GET_CODE (parm_rtx) == PARALLEL)
12974 {
12975 int i;
12976
12977 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12978 {
12979 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12980
12981 gcc_assert (REG_P (r));
12982
12983 for (reg = 0;
12984 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12985 reg++)
12986 if (!call_used_regs[reg + REGNO (r)])
12987 return true;
12988 }
12989 }
12990
12991 }
12992 return false;
12993 }
12994
12995 /* Return true if the given call expression can be
12996 turned into a sibling call.
12997 DECL holds the declaration of the function to be called whereas
12998 EXP is the call expression itself. */
12999
13000 static bool
13001 s390_function_ok_for_sibcall (tree decl, tree exp)
13002 {
13003 /* The TPF epilogue uses register 1. */
13004 if (TARGET_TPF_PROFILING)
13005 return false;
13006
13007 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13008 which would have to be restored before the sibcall. */
13009 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13010 return false;
13011
13012 /* Register 6 on s390 is available as an argument register but is
13013 unfortunately call-saved. This makes functions needing this register for
13014 arguments not suitable for sibcalls. */
13015 return !s390_call_saved_register_used (exp);
13016 }
13017
13018 /* Return the fixed registers used for condition codes. */
13019
13020 static bool
13021 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13022 {
13023 *p1 = CC_REGNUM;
13024 *p2 = INVALID_REGNUM;
13025
13026 return true;
13027 }
13028
13029 /* This function is used by the call expanders of the machine description.
13030 It emits the call insn itself together with the necessary operations
13031 to adjust the target address and returns the emitted insn.
13032 ADDR_LOCATION is the target address rtx
13033 TLS_CALL the location of the thread-local symbol
13034 RESULT_REG the register where the result of the call should be stored
13035 RETADDR_REG the register where the return address should be stored
13036 If this parameter is NULL_RTX the call is considered
13037 to be a sibling call. */
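/* The insn finally emitted has roughly this shape for an ordinary call
   with a return value (an illustrative sketch, not a pattern taken from
   the machine description):

     (parallel
       [(set RESULT_REG (call (mem:QI ADDR) (const_int 0)))
        (clobber RETADDR_REG)])

   with an additional (use TLS_CALL) element for TLS calls; for sibcalls
   (RETADDR_REG == NULL_RTX) the parallel and the clobber are omitted.  */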
13038
13039 rtx_insn *
13040 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13041 rtx retaddr_reg)
13042 {
13043 bool plt_call = false;
13044 rtx_insn *insn;
13045 rtx call;
13046 rtx clobber;
13047 rtvec vec;
13048
13049 /* Direct function calls need special treatment. */
13050 if (GET_CODE (addr_location) == SYMBOL_REF)
13051 {
13052 /* When calling a global routine in PIC mode, we must
13053 replace the symbol itself with the PLT stub. */
13054 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13055 {
13056 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13057 {
13058 addr_location = gen_rtx_UNSPEC (Pmode,
13059 gen_rtvec (1, addr_location),
13060 UNSPEC_PLT);
13061 addr_location = gen_rtx_CONST (Pmode, addr_location);
13062 plt_call = true;
13063 }
13064 else
13065 /* For -fpic code the PLT entries might use r12 which is
13066 call-saved. Therefore we cannot do a sibcall when
13067 calling directly using a symbol ref. When reaching
13068 this point we decided (in s390_function_ok_for_sibcall)
13069 to do a sibcall for a function pointer but one of the
13070 optimizers was able to get rid of the function pointer
13071 by propagating the symbol ref into the call. This
13072 optimization is illegal for S/390 so we turn the direct
13073 call into an indirect call again. */
13074 addr_location = force_reg (Pmode, addr_location);
13075 }
13076
13077 /* Unless we can use the bras(l) insn, force the
13078 routine address into a register. */
13079 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13080 {
13081 if (flag_pic)
13082 addr_location = legitimize_pic_address (addr_location, 0);
13083 else
13084 addr_location = force_reg (Pmode, addr_location);
13085 }
13086 }
13087
13088 /* If it is already an indirect call or the code above moved the
13089 SYMBOL_REF to somewhere else, make sure the address can be found in
13090 register 1. */
13091 if (retaddr_reg == NULL_RTX
13092 && GET_CODE (addr_location) != SYMBOL_REF
13093 && !plt_call)
13094 {
13095 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13096 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13097 }
13098
13099 addr_location = gen_rtx_MEM (QImode, addr_location);
13100 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13101
13102 if (result_reg != NULL_RTX)
13103 call = gen_rtx_SET (result_reg, call);
13104
13105 if (retaddr_reg != NULL_RTX)
13106 {
13107 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13108
13109 if (tls_call != NULL_RTX)
13110 vec = gen_rtvec (3, call, clobber,
13111 gen_rtx_USE (VOIDmode, tls_call));
13112 else
13113 vec = gen_rtvec (2, call, clobber);
13114
13115 call = gen_rtx_PARALLEL (VOIDmode, vec);
13116 }
13117
13118 insn = emit_call_insn (call);
13119
13120 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13121 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13122 {
13123 /* s390_function_ok_for_sibcall should
13124 have denied sibcalls in this case. */
13125 gcc_assert (retaddr_reg != NULL_RTX);
13126 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13127 }
13128 return insn;
13129 }
13130
13131 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13132
13133 static void
13134 s390_conditional_register_usage (void)
13135 {
13136 int i;
13137
13138 if (flag_pic)
13139 {
13140 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13141 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13142 }
13143 if (TARGET_CPU_ZARCH)
13144 {
13145 fixed_regs[BASE_REGNUM] = 0;
13146 call_used_regs[BASE_REGNUM] = 0;
13147 fixed_regs[RETURN_REGNUM] = 0;
13148 call_used_regs[RETURN_REGNUM] = 0;
13149 }
13150 if (TARGET_64BIT)
13151 {
13152 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13153 call_used_regs[i] = call_really_used_regs[i] = 0;
13154 }
13155 else
13156 {
13157 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13158 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13159 }
13160
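  /* With -msoft-float none of the FPRs may be used.  */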
13161 if (TARGET_SOFT_FLOAT)
13162 {
13163 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13164 call_used_regs[i] = fixed_regs[i] = 1;
13165 }
13166
13167 /* Disable v16 - v31 for non-vector target. */
13168 if (!TARGET_VX)
13169 {
13170 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13171 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13172 }
13173 }
13174
13175 /* Function corresponding to the eh_return expander. */
13176
13177 static GTY(()) rtx s390_tpf_eh_return_symbol;
13178 void
13179 s390_emit_tpf_eh_return (rtx target)
13180 {
13181 rtx_insn *insn;
13182 rtx reg, orig_ra;
13183
13184 if (!s390_tpf_eh_return_symbol)
13185 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13186
13187 reg = gen_rtx_REG (Pmode, 2);
13188 orig_ra = gen_rtx_REG (Pmode, 3);
13189
13190 emit_move_insn (reg, target);
13191 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13192 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13193 gen_rtx_REG (Pmode, RETURN_REGNUM));
13194 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13195 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13196
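  /* The call returns the handler address in r2; install it as the EH
     return handler.  */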
13197 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13198 }
13199
13200 /* Rework the prologue/epilogue to avoid saving/restoring
13201 registers unnecessarily. */
13202
13203 static void
13204 s390_optimize_prologue (void)
13205 {
13206 rtx_insn *insn, *new_insn, *next_insn;
13207
13208 /* Do a final recompute of the frame-related data. */
13209 s390_optimize_register_info ();
13210
13211 /* If all special registers are in fact used, there's nothing we
13212 can do, so no point in walking the insn list. */
13213
13214 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13215 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13216 && (TARGET_CPU_ZARCH
13217 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13218 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13219 return;
13220
13221 /* Search for prologue/epilogue insns and replace them. */
13222
13223 for (insn = get_insns (); insn; insn = next_insn)
13224 {
13225 int first, last, off;
13226 rtx set, base, offset;
13227 rtx pat;
13228
13229 next_insn = NEXT_INSN (insn);
13230
13231 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13232 continue;
13233
13234 pat = PATTERN (insn);
13235
13236 /* Remove ldgr/lgdr instructions used for saving and restoring
13237 GPRs if possible. */
13238 if (TARGET_Z10)
13239 {
13240 rtx tmp_pat = pat;
13241
13242 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13243 tmp_pat = XVECEXP (pat, 0, 0);
13244
13245 if (GET_CODE (tmp_pat) == SET
13246 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13247 && REG_P (SET_SRC (tmp_pat))
13248 && REG_P (SET_DEST (tmp_pat)))
13249 {
13250 int src_regno = REGNO (SET_SRC (tmp_pat));
13251 int dest_regno = REGNO (SET_DEST (tmp_pat));
13252 int gpr_regno;
13253 int fpr_regno;
13254
13255 if (!((GENERAL_REGNO_P (src_regno)
13256 && FP_REGNO_P (dest_regno))
13257 || (FP_REGNO_P (src_regno)
13258 && GENERAL_REGNO_P (dest_regno))))
13259 continue;
13260
13261 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13262 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13263
13264 /* GPR must be call-saved, FPR must be call-clobbered. */
13265 if (!call_really_used_regs[fpr_regno]
13266 || call_really_used_regs[gpr_regno])
13267 continue;
13268
13269 /* It must not happen that what we once saved in an FPR now
13270 needs a stack slot. */
13271 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13272
13273 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13274 {
13275 remove_insn (insn);
13276 continue;
13277 }
13278 }
13279 }
13280
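      /* Replace a prologue store multiple with one covering only the GPRs
	 that actually need to be saved, or remove it if none do.  */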
13281 if (GET_CODE (pat) == PARALLEL
13282 && store_multiple_operation (pat, VOIDmode))
13283 {
13284 set = XVECEXP (pat, 0, 0);
13285 first = REGNO (SET_SRC (set));
13286 last = first + XVECLEN (pat, 0) - 1;
13287 offset = const0_rtx;
13288 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13289 off = INTVAL (offset);
13290
13291 if (GET_CODE (base) != REG || off < 0)
13292 continue;
13293 if (cfun_frame_layout.first_save_gpr != -1
13294 && (cfun_frame_layout.first_save_gpr < first
13295 || cfun_frame_layout.last_save_gpr > last))
13296 continue;
13297 if (REGNO (base) != STACK_POINTER_REGNUM
13298 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13299 continue;
13300 if (first > BASE_REGNUM || last < BASE_REGNUM)
13301 continue;
13302
13303 if (cfun_frame_layout.first_save_gpr != -1)
13304 {
13305 rtx s_pat = save_gprs (base,
13306 off + (cfun_frame_layout.first_save_gpr
13307 - first) * UNITS_PER_LONG,
13308 cfun_frame_layout.first_save_gpr,
13309 cfun_frame_layout.last_save_gpr);
13310 new_insn = emit_insn_before (s_pat, insn);
13311 INSN_ADDRESSES_NEW (new_insn, -1);
13312 }
13313
13314 remove_insn (insn);
13315 continue;
13316 }
13317
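      /* Remove a single GPR save if no GPR needs to be saved at all.  */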
13318 if (cfun_frame_layout.first_save_gpr == -1
13319 && GET_CODE (pat) == SET
13320 && GENERAL_REG_P (SET_SRC (pat))
13321 && GET_CODE (SET_DEST (pat)) == MEM)
13322 {
13323 set = pat;
13324 first = REGNO (SET_SRC (set));
13325 offset = const0_rtx;
13326 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13327 off = INTVAL (offset);
13328
13329 if (GET_CODE (base) != REG || off < 0)
13330 continue;
13331 if (REGNO (base) != STACK_POINTER_REGNUM
13332 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13333 continue;
13334
13335 remove_insn (insn);
13336 continue;
13337 }
13338
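      /* Likewise shrink an epilogue load multiple to the GPRs that
	 actually need to be restored.  */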
13339 if (GET_CODE (pat) == PARALLEL
13340 && load_multiple_operation (pat, VOIDmode))
13341 {
13342 set = XVECEXP (pat, 0, 0);
13343 first = REGNO (SET_DEST (set));
13344 last = first + XVECLEN (pat, 0) - 1;
13345 offset = const0_rtx;
13346 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13347 off = INTVAL (offset);
13348
13349 if (GET_CODE (base) != REG || off < 0)
13350 continue;
13351
13352 if (cfun_frame_layout.first_restore_gpr != -1
13353 && (cfun_frame_layout.first_restore_gpr < first
13354 || cfun_frame_layout.last_restore_gpr > last))
13355 continue;
13356 if (REGNO (base) != STACK_POINTER_REGNUM
13357 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13358 continue;
13359 if (first > BASE_REGNUM || last < BASE_REGNUM)
13360 continue;
13361
13362 if (cfun_frame_layout.first_restore_gpr != -1)
13363 {
13364 rtx rpat = restore_gprs (base,
13365 off + (cfun_frame_layout.first_restore_gpr
13366 - first) * UNITS_PER_LONG,
13367 cfun_frame_layout.first_restore_gpr,
13368 cfun_frame_layout.last_restore_gpr);
13369
13370 /* Remove REG_CFA_RESTOREs for registers that we no
13371 longer need to save. */
13372 REG_NOTES (rpat) = REG_NOTES (insn);
13373 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13374 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13375 && ((int) REGNO (XEXP (*ptr, 0))
13376 < cfun_frame_layout.first_restore_gpr))
13377 *ptr = XEXP (*ptr, 1);
13378 else
13379 ptr = &XEXP (*ptr, 1);
13380 new_insn = emit_insn_before (rpat, insn);
13381 RTX_FRAME_RELATED_P (new_insn) = 1;
13382 INSN_ADDRESSES_NEW (new_insn, -1);
13383 }
13384
13385 remove_insn (insn);
13386 continue;
13387 }
13388
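      /* Remove a single GPR restore if no GPR needs to be restored.  */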
13389 if (cfun_frame_layout.first_restore_gpr == -1
13390 && GET_CODE (pat) == SET
13391 && GENERAL_REG_P (SET_DEST (pat))
13392 && GET_CODE (SET_SRC (pat)) == MEM)
13393 {
13394 set = pat;
13395 first = REGNO (SET_DEST (set));
13396 offset = const0_rtx;
13397 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13398 off = INTVAL (offset);
13399
13400 if (GET_CODE (base) != REG || off < 0)
13401 continue;
13402
13403 if (REGNO (base) != STACK_POINTER_REGNUM
13404 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13405 continue;
13406
13407 remove_insn (insn);
13408 continue;
13409 }
13410 }
13411 }
13412
13413 /* On z10 and later the dynamic branch prediction must see the
13414 backward jump within a certain window. If not, it falls back to
13415 the static prediction. This function rearranges the loop backward
13416 branch in a way which makes the static prediction always correct.
13417 The function returns true if it added an instruction. */
13418 static bool
13419 s390_fix_long_loop_prediction (rtx_insn *insn)
13420 {
13421 rtx set = single_set (insn);
13422 rtx code_label, label_ref;
13423 rtx_insn *uncond_jump;
13424 rtx_insn *cur_insn;
13425 rtx tmp;
13426 int distance;
13427
13428 /* This will exclude branch on count and branch on index patterns
13429 since these are correctly statically predicted. */
13430 if (!set
13431 || SET_DEST (set) != pc_rtx
13432 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13433 return false;
13434
13435 /* Skip conditional returns. */
13436 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13437 && XEXP (SET_SRC (set), 2) == pc_rtx)
13438 return false;
13439
13440 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13441 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13442
13443 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13444
13445 code_label = XEXP (label_ref, 0);
13446
13447 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13448 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13449 || (INSN_ADDRESSES (INSN_UID (insn))
13450 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13451 return false;
13452
13453 for (distance = 0, cur_insn = PREV_INSN (insn);
13454 distance < PREDICT_DISTANCE - 6;
13455 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13456 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13457 return false;
13458
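  /* Emit an unconditional jump back to the loop label right after INSN,
     followed by a new label, and invert INSN so that it conditionally
     jumps forward over that unconditional jump.  */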
13459 rtx_code_label *new_label = gen_label_rtx ();
13460 uncond_jump = emit_jump_insn_after (
13461 gen_rtx_SET (pc_rtx,
13462 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13463 insn);
13464 emit_label_after (new_label, uncond_jump);
13465
13466 tmp = XEXP (SET_SRC (set), 1);
13467 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13468 XEXP (SET_SRC (set), 2) = tmp;
13469 INSN_CODE (insn) = -1;
13470
13471 XEXP (label_ref, 0) = new_label;
13472 JUMP_LABEL (insn) = new_label;
13473 JUMP_LABEL (uncond_jump) = code_label;
13474
13475 return true;
13476 }
13477
13478 /* Returns 1 if INSN reads the value of REG for purposes not related
13479 to addressing of memory, and 0 otherwise. */
13480 static int
13481 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13482 {
13483 return reg_referenced_p (reg, PATTERN (insn))
13484 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13485 }
13486
13487 /* Starting from INSN, find_cond_jump looks downwards in the insn
13488 stream for a single jump insn which is the last user of the
13489 condition code set in INSN. */
13490 static rtx_insn *
13491 find_cond_jump (rtx_insn *insn)
13492 {
13493 for (; insn; insn = NEXT_INSN (insn))
13494 {
13495 rtx ite, cc;
13496
13497 if (LABEL_P (insn))
13498 break;
13499
13500 if (!JUMP_P (insn))
13501 {
13502 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13503 break;
13504 continue;
13505 }
13506
13507 /* This will be triggered by a return. */
13508 if (GET_CODE (PATTERN (insn)) != SET)
13509 break;
13510
13511 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13512 ite = SET_SRC (PATTERN (insn));
13513
13514 if (GET_CODE (ite) != IF_THEN_ELSE)
13515 break;
13516
13517 cc = XEXP (XEXP (ite, 0), 0);
13518 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13519 break;
13520
13521 if (find_reg_note (insn, REG_DEAD, cc))
13522 return insn;
13523 break;
13524 }
13525
13526 return NULL;
13527 }
13528
13529 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13530 the semantics does not change. If NULL_RTX is passed as COND the
13531 function tries to find the conditional jump starting with INSN. */
13532 static void
13533 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13534 {
13535 rtx tmp = *op0;
13536
13537 if (cond == NULL_RTX)
13538 {
13539 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13540 rtx set = jump ? single_set (jump) : NULL_RTX;
13541
13542 if (set == NULL_RTX)
13543 return;
13544
13545 cond = XEXP (SET_SRC (set), 0);
13546 }
13547
13548 *op0 = *op1;
13549 *op1 = tmp;
13550 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13551 }
13552
13553 /* On z10, instructions of the compare-and-branch family have the
13554 property of accessing the register occurring as the second operand with
13555 its bits complemented. If such a compare is grouped with a second
13556 instruction that accesses the same register non-complemented, and
13557 if that register's value is delivered via a bypass, then the
13558 pipeline recycles, thereby causing significant performance decline.
13559 This function locates such situations and exchanges the two
13560 operands of the compare. The function returns true whenever it
13561 added an insn. */
13562 static bool
13563 s390_z10_optimize_cmp (rtx_insn *insn)
13564 {
13565 rtx_insn *prev_insn, *next_insn;
13566 bool insn_added_p = false;
13567 rtx cond, *op0, *op1;
13568
13569 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13570 {
13571 /* Handle compare and branch and branch on count
13572 instructions. */
13573 rtx pattern = single_set (insn);
13574
13575 if (!pattern
13576 || SET_DEST (pattern) != pc_rtx
13577 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13578 return false;
13579
13580 cond = XEXP (SET_SRC (pattern), 0);
13581 op0 = &XEXP (cond, 0);
13582 op1 = &XEXP (cond, 1);
13583 }
13584 else if (GET_CODE (PATTERN (insn)) == SET)
13585 {
13586 rtx src, dest;
13587
13588 /* Handle normal compare instructions. */
13589 src = SET_SRC (PATTERN (insn));
13590 dest = SET_DEST (PATTERN (insn));
13591
13592 if (!REG_P (dest)
13593 || !CC_REGNO_P (REGNO (dest))
13594 || GET_CODE (src) != COMPARE)
13595 return false;
13596
13597 /* s390_swap_cmp will try to find the conditional
13598 jump when passing NULL_RTX as condition. */
13599 cond = NULL_RTX;
13600 op0 = &XEXP (src, 0);
13601 op1 = &XEXP (src, 1);
13602 }
13603 else
13604 return false;
13605
13606 if (!REG_P (*op0) || !REG_P (*op1))
13607 return false;
13608
13609 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13610 return false;
13611
13612 /* Swap the COMPARE arguments and its mask if there is a
13613 conflicting access in the previous insn. */
13614 prev_insn = prev_active_insn (insn);
13615 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13616 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13617 s390_swap_cmp (cond, op0, op1, insn);
13618
13619 /* Check if there is a conflict with the next insn. If there
13620 was no conflict with the previous insn, then swap the
13621 COMPARE arguments and its mask. If we already swapped
13622 the operands, or if swapping them would cause a conflict
13623 with the previous insn, issue a NOP after the COMPARE in
13624 order to separate the two instructions. */
13625 next_insn = next_active_insn (insn);
13626 if (next_insn != NULL_RTX && INSN_P (next_insn)
13627 && s390_non_addr_reg_read_p (*op1, next_insn))
13628 {
13629 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13630 && s390_non_addr_reg_read_p (*op0, prev_insn))
13631 {
13632 if (REGNO (*op1) == 0)
13633 emit_insn_after (gen_nop1 (), insn);
13634 else
13635 emit_insn_after (gen_nop (), insn);
13636 insn_added_p = true;
13637 }
13638 else
13639 s390_swap_cmp (cond, op0, op1, insn);
13640 }
13641 return insn_added_p;
13642 }
13643
13644 /* Number of INSNs to be scanned backward in the last BB of the loop
13645 and forward in the first BB of the loop. This usually should be a
13646 bit more than the number of INSNs which could go into one
13647 group. */
13648 #define S390_OSC_SCAN_INSN_NUM 5
13649
13650 /* Scan LOOP for static OSC collisions and return true if an osc_break
13651 should be issued for this loop. */
13652 static bool
13653 s390_adjust_loop_scan_osc (struct loop* loop)
13654
13655 {
13656 HARD_REG_SET modregs, newregs;
13657 rtx_insn *insn, *store_insn = NULL;
13658 rtx set;
13659 struct s390_address addr_store, addr_load;
13660 subrtx_iterator::array_type array;
13661 int insn_count;
13662
13663 CLEAR_HARD_REG_SET (modregs);
13664
13665 insn_count = 0;
13666 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13667 {
13668 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13669 continue;
13670
13671 insn_count++;
13672 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13673 return false;
13674
13675 find_all_hard_reg_sets (insn, &newregs, true);
13676 IOR_HARD_REG_SET (modregs, newregs);
13677
13678 set = single_set (insn);
13679 if (!set)
13680 continue;
13681
13682 if (MEM_P (SET_DEST (set))
13683 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13684 {
13685 store_insn = insn;
13686 break;
13687 }
13688 }
13689
13690 if (store_insn == NULL_RTX)
13691 return false;
13692
13693 insn_count = 0;
13694 FOR_BB_INSNS (loop->header, insn)
13695 {
13696 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13697 continue;
13698
13699 if (insn == store_insn)
13700 return false;
13701
13702 insn_count++;
13703 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13704 return false;
13705
13706 find_all_hard_reg_sets (insn, &newregs, true);
13707 IOR_HARD_REG_SET (modregs, newregs);
13708
13709 set = single_set (insn);
13710 if (!set)
13711 continue;
13712
13713 /* An intermediate store disrupts static OSC checking
13714 anyway. */
13715 if (MEM_P (SET_DEST (set))
13716 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13717 return false;
13718
13719 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13720 if (MEM_P (*iter)
13721 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13722 && rtx_equal_p (addr_load.base, addr_store.base)
13723 && rtx_equal_p (addr_load.indx, addr_store.indx)
13724 && rtx_equal_p (addr_load.disp, addr_store.disp))
13725 {
13726 if ((addr_load.base != NULL_RTX
13727 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13728 || (addr_load.indx != NULL_RTX
13729 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13730 return true;
13731 }
13732 }
13733 return false;
13734 }
13735
13736 /* Look for adjustments which can be done on simple innermost
13737 loops. */
13738 static void
13739 s390_adjust_loops ()
13740 {
13741 struct loop *loop = NULL;
13742
13743 df_analyze ();
13744 compute_bb_for_insn ();
13745
13746 /* Find the loops. */
13747 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13748
13749 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13750 {
13751 if (dump_file)
13752 {
13753 flow_loop_dump (loop, dump_file, NULL, 0);
13754 fprintf (dump_file, ";; OSC loop scan Loop: ");
13755 }
13756 if (loop->latch == NULL
13757 || pc_set (BB_END (loop->latch)) == NULL_RTX
13758 || !s390_adjust_loop_scan_osc (loop))
13759 {
13760 if (dump_file)
13761 {
13762 if (loop->latch == NULL)
13763 fprintf (dump_file, " multiple backward jumps\n");
13764 else
13765 {
13766 fprintf (dump_file, " header insn: %d latch insn: %d ",
13767 INSN_UID (BB_HEAD (loop->header)),
13768 INSN_UID (BB_END (loop->latch)));
13769 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
13770 fprintf (dump_file, " loop does not end with jump\n");
13771 else
13772 fprintf (dump_file, " not instrumented\n");
13773 }
13774 }
13775 }
13776 else
13777 {
13778 rtx_insn *new_insn;
13779
13780 if (dump_file)
13781 fprintf (dump_file, " adding OSC break insn: ");
13782 new_insn = emit_insn_before (gen_osc_break (),
13783 BB_END (loop->latch));
13784 INSN_ADDRESSES_NEW (new_insn, -1);
13785 }
13786 }
13787
13788 loop_optimizer_finalize ();
13789
13790 df_finish_pass (false);
13791 }
13792
13793 /* Perform machine-dependent processing. */
13794
13795 static void
13796 s390_reorg (void)
13797 {
13798 bool pool_overflow = false;
13799 int hw_before, hw_after;
13800
13801 if (s390_tune == PROCESSOR_2964_Z13)
13802 s390_adjust_loops ();
13803
13804 /* Make sure all splits have been performed; splits after
13805 machine_dependent_reorg might confuse insn length counts. */
13806 split_all_insns_noflow ();
13807
13808 /* Install the main literal pool and the associated base
13809 register load insns.
13810
13811 In addition, there are two problematic situations we need
13812 to correct:
13813
13814 - the literal pool might be > 4096 bytes in size, so that
13815 some of its elements cannot be directly accessed
13816
13817 - a branch target might be > 64K away from the branch, so that
13818 it is not possible to use a PC-relative instruction.
13819
13820 To fix those, we split the single literal pool into multiple
13821 pool chunks, reloading the pool base register at various
13822 points throughout the function to ensure it always points to
13823 the pool chunk the following code expects, and / or replace
13824 PC-relative branches by absolute branches.
13825
13826 However, the two problems are interdependent: splitting the
13827 literal pool can move a branch further away from its target,
13828 causing the 64K limit to overflow, and on the other hand,
13829 replacing a PC-relative branch by an absolute branch means
13830 we need to put the branch target address into the literal
13831 pool, possibly causing it to overflow.
13832
13833 So, we loop trying to fix up both problems until we manage
13834 to satisfy both conditions at the same time. Note that the
13835 loop is guaranteed to terminate as every pass of the loop
13836 strictly decreases the total number of PC-relative branches
13837 in the function. (This is not completely true as there
13838 might be branch-over-pool insns introduced by chunkify_start.
13839 Those never need to be split however.) */
13840
13841 for (;;)
13842 {
13843 struct constant_pool *pool = NULL;
13844
13845 /* Collect the literal pool. */
13846 if (!pool_overflow)
13847 {
13848 pool = s390_mainpool_start ();
13849 if (!pool)
13850 pool_overflow = true;
13851 }
13852
13853 /* If literal pool overflowed, start to chunkify it. */
13854 if (pool_overflow)
13855 pool = s390_chunkify_start ();
13856
13857 /* Split out-of-range branches. If this has created new
13858 literal pool entries, cancel current chunk list and
13859 recompute it. zSeries machines have large branch
13860 instructions, so we never need to split a branch. */
13861 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13862 {
13863 if (pool_overflow)
13864 s390_chunkify_cancel (pool);
13865 else
13866 s390_mainpool_cancel (pool);
13867
13868 continue;
13869 }
13870
13871 /* If we made it up to here, both conditions are satisfied.
13872 Finish up literal pool related changes. */
13873 if (pool_overflow)
13874 s390_chunkify_finish (pool);
13875 else
13876 s390_mainpool_finish (pool);
13877
13878 /* We're done splitting branches. */
13879 cfun->machine->split_branches_pending_p = false;
13880 break;
13881 }
13882
13883 /* Generate out-of-pool execute target insns. */
13884 if (TARGET_CPU_ZARCH)
13885 {
13886 rtx_insn *insn, *target;
13887 rtx label;
13888
13889 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13890 {
13891 label = s390_execute_label (insn);
13892 if (!label)
13893 continue;
13894
13895 gcc_assert (label != const0_rtx);
13896
13897 target = emit_label (XEXP (label, 0));
13898 INSN_ADDRESSES_NEW (target, -1);
13899
13900 target = emit_insn (s390_execute_target (insn));
13901 INSN_ADDRESSES_NEW (target, -1);
13902 }
13903 }
13904
13905 /* Try to optimize prologue and epilogue further. */
13906 s390_optimize_prologue ();
13907
13908 /* Walk over the insns and do some >=z10 specific changes. */
13909 if (s390_tune >= PROCESSOR_2097_Z10)
13910 {
13911 rtx_insn *insn;
13912 bool insn_added_p = false;
13913
13914 /* The insn lengths and addresses have to be up to date for the
13915 following manipulations. */
13916 shorten_branches (get_insns ());
13917
13918 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13919 {
13920 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13921 continue;
13922
13923 if (JUMP_P (insn))
13924 insn_added_p |= s390_fix_long_loop_prediction (insn);
13925
13926 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13927 || GET_CODE (PATTERN (insn)) == SET)
13928 && s390_tune == PROCESSOR_2097_Z10)
13929 insn_added_p |= s390_z10_optimize_cmp (insn);
13930 }
13931
13932 /* Adjust branches if we added new instructions. */
13933 if (insn_added_p)
13934 shorten_branches (get_insns ());
13935 }
13936
13937 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13938 if (hw_after > 0)
13939 {
13940 rtx_insn *insn;
13941
13942 /* Insert NOPs for hotpatching. */
13943 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13944 /* Emit NOPs
13945 1. inside the area covered by debug information to allow setting
13946 breakpoints at the NOPs,
13947 2. before any insn which results in an asm instruction,
13948 3. before in-function labels to avoid jumping to the NOPs, for
13949 example as part of a loop,
13950 4. before any barrier in case the function is completely empty
13951 (__builtin_unreachable ()) and has neither internal labels nor
13952 active insns.
13953 */
13954 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13955 break;
13956 /* Output a series of NOPs before the first active insn. */
13957 while (insn && hw_after > 0)
13958 {
13959 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13960 {
13961 emit_insn_before (gen_nop_6_byte (), insn);
13962 hw_after -= 3;
13963 }
13964 else if (hw_after >= 2)
13965 {
13966 emit_insn_before (gen_nop_4_byte (), insn);
13967 hw_after -= 2;
13968 }
13969 else
13970 {
13971 emit_insn_before (gen_nop_2_byte (), insn);
13972 hw_after -= 1;
13973 }
13974 }
13975 }
13976 }
13977
13978 /* Return true if INSN is an fp load insn writing register REGNO. */
13979 static inline bool
13980 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13981 {
13982 rtx set;
13983 enum attr_type flag = s390_safe_attr_type (insn);
13984
13985 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13986 return false;
13987
13988 set = single_set (insn);
13989
13990 if (set == NULL_RTX)
13991 return false;
13992
13993 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13994 return false;
13995
13996 if (REGNO (SET_DEST (set)) != regno)
13997 return false;
13998
13999 return true;
14000 }
14001
14002 /* This value describes the distance to be avoided between an
14003 arithmetic fp instruction and an fp load writing the same register.
14004 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
14005 fine, but the exact value has to be avoided. Otherwise the FP
14006 pipeline will throw an exception causing a major penalty. */
14007 #define Z10_EARLYLOAD_DISTANCE 7
14008
14009 /* Rearrange the ready list in order to avoid the situation described
14010 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14011 moved to the very end of the ready list. */
14012 static void
14013 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14014 {
14015 unsigned int regno;
14016 int nready = *nready_p;
14017 rtx_insn *tmp;
14018 int i;
14019 rtx_insn *insn;
14020 rtx set;
14021 enum attr_type flag;
14022 int distance;
14023
14024 /* Skip DISTANCE - 1 active insns. */
14025 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14026 distance > 0 && insn != NULL_RTX;
14027 distance--, insn = prev_active_insn (insn))
14028 if (CALL_P (insn) || JUMP_P (insn))
14029 return;
14030
14031 if (insn == NULL_RTX)
14032 return;
14033
14034 set = single_set (insn);
14035
14036 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14037 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14038 return;
14039
14040 flag = s390_safe_attr_type (insn);
14041
14042 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14043 return;
14044
14045 regno = REGNO (SET_DEST (set));
14046 i = nready - 1;
14047
14048 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14049 i--;
14050
14051 if (!i)
14052 return;
14053
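  /* Move the load down to slot 0 of the ready list (lowest priority),
     shifting the other entries up by one.  */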
14054 tmp = ready[i];
14055 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14056 ready[0] = tmp;
14057 }
14058
14059
14060 /* The s390_sched_state variable tracks the state of the current or
14061 the last instruction group.
14062
14063 0,1,2 number of instructions scheduled in the current group
14064 3 the last group is complete - normal insns
14065 4 the last group was a cracked/expanded insn */
14066
14067 static int s390_sched_state;
14068
14069 #define S390_SCHED_STATE_NORMAL 3
14070 #define S390_SCHED_STATE_CRACKED 4
14071
14072 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14073 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14074 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14075 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14076
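/* Return a mask of the scheduling attributes (cracked, expanded,
   endgroup, groupalone) of INSN for the current tuning target.  */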
14077 static unsigned int
14078 s390_get_sched_attrmask (rtx_insn *insn)
14079 {
14080 unsigned int mask = 0;
14081
14082 switch (s390_tune)
14083 {
14084 case PROCESSOR_2827_ZEC12:
14085 if (get_attr_zEC12_cracked (insn))
14086 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14087 if (get_attr_zEC12_expanded (insn))
14088 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14089 if (get_attr_zEC12_endgroup (insn))
14090 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14091 if (get_attr_zEC12_groupalone (insn))
14092 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14093 break;
14094 case PROCESSOR_2964_Z13:
14095 if (get_attr_z13_cracked (insn))
14096 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14097 if (get_attr_z13_expanded (insn))
14098 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14099 if (get_attr_z13_endgroup (insn))
14100 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14101 if (get_attr_z13_groupalone (insn))
14102 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14103 break;
14104 default:
14105 gcc_unreachable ();
14106 }
14107 return mask;
14108 }
14109
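/* Return a bit mask of the execution units INSN makes use of on the
   current tuning target and store the number of units in *UNITS.  */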
14110 static unsigned int
14111 s390_get_unit_mask (rtx_insn *insn, int *units)
14112 {
14113 unsigned int mask = 0;
14114
14115 switch (s390_tune)
14116 {
14117 case PROCESSOR_2964_Z13:
14118 *units = 3;
14119 if (get_attr_z13_unit_lsu (insn))
14120 mask |= 1 << 0;
14121 if (get_attr_z13_unit_fxu (insn))
14122 mask |= 1 << 1;
14123 if (get_attr_z13_unit_vfu (insn))
14124 mask |= 1 << 2;
14125 break;
14126 default:
14127 gcc_unreachable ();
14128 }
14129 return mask;
14130 }
14131
14132 /* Return the scheduling score for INSN. The higher the score the
14133 better. The score is calculated from the OOO scheduling attributes
14134 of INSN and the scheduling state s390_sched_state. */
14135 static int
14136 s390_sched_score (rtx_insn *insn)
14137 {
14138 unsigned int mask = s390_get_sched_attrmask (insn);
14139 int score = 0;
14140
14141 switch (s390_sched_state)
14142 {
14143 case 0:
14144 /* Try to put insns into the first slot which would otherwise
14145 break a group. */
14146 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14147 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14148 score += 5;
14149 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14150 score += 10;
14151 /* fallthrough */
14152 case 1:
14153 /* Prefer not cracked insns while trying to put together a
14154 group. */
14155 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14156 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14157 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14158 score += 10;
14159 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14160 score += 5;
14161 break;
14162 case 2:
14163 /* Prefer not cracked insns while trying to put together a
14164 group. */
14165 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14166 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14167 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14168 score += 10;
14169 /* Prefer endgroup insns in the last slot. */
14170 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14171 score += 10;
14172 break;
14173 case S390_SCHED_STATE_NORMAL:
14174 /* Prefer not cracked insns if the last was not cracked. */
14175 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14176 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14177 score += 5;
14178 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14179 score += 10;
14180 break;
14181 case S390_SCHED_STATE_CRACKED:
14182 /* Try to keep cracked insns together to prevent them from
14183 interrupting groups. */
14184 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14185 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14186 score += 5;
14187 break;
14188 }
14189
14190 if (s390_tune == PROCESSOR_2964_Z13)
14191 {
14192 int units, i;
14193 unsigned unit_mask, m = 1;
14194
14195 unit_mask = s390_get_unit_mask (insn, &units);
14196 gcc_assert (units <= MAX_SCHED_UNITS);
14197
14198 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14199 ago the last insn of this unit type got scheduled. This is
14200 supposed to help provide a proper instruction mix to the
14201 CPU. */
14202 for (i = 0; i < units; i++, m <<= 1)
14203 if (m & unit_mask)
14204 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14205 MAX_SCHED_MIX_DISTANCE);
14206 }
14207 return score;
14208 }
14209
14210 /* This function is called via hook TARGET_SCHED_REORDER before
14211 issuing one insn from list READY which contains *NREADYP entries.
14212 For target z10 it reorders load instructions to avoid early load
14213 conflicts in the floating point pipeline. */
14214 static int
14215 s390_sched_reorder (FILE *file, int verbose,
14216 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14217 {
14218 if (s390_tune == PROCESSOR_2097_Z10
14219 && reload_completed
14220 && *nreadyp > 1)
14221 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14222
14223 if (s390_tune >= PROCESSOR_2827_ZEC12
14224 && reload_completed
14225 && *nreadyp > 1)
14226 {
14227 int i;
14228 int last_index = *nreadyp - 1;
14229 int max_index = -1;
14230 int max_score = -1;
14231 rtx_insn *tmp;
14232
14233 /* Just move the insn with the highest score to the top (the
14234 end) of the list. A full sort is not needed since a conflict
14235 in the hazard recognition cannot happen. So the top insn in
14236 the ready list will always be taken. */
14237 for (i = last_index; i >= 0; i--)
14238 {
14239 int score;
14240
14241 if (recog_memoized (ready[i]) < 0)
14242 continue;
14243
14244 score = s390_sched_score (ready[i]);
14245 if (score > max_score)
14246 {
14247 max_score = score;
14248 max_index = i;
14249 }
14250 }
14251
14252 if (max_index != -1)
14253 {
14254 if (max_index != last_index)
14255 {
14256 tmp = ready[max_index];
14257 ready[max_index] = ready[last_index];
14258 ready[last_index] = tmp;
14259
14260 if (verbose > 5)
14261 fprintf (file,
14262 ";;\t\tBACKEND: move insn %d to the top of list\n",
14263 INSN_UID (ready[last_index]));
14264 }
14265 else if (verbose > 5)
14266 fprintf (file,
14267 ";;\t\tBACKEND: best insn %d already on top\n",
14268 INSN_UID (ready[last_index]));
14269 }
14270
14271 if (verbose > 5)
14272 {
14273 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14274 s390_sched_state);
14275
14276 for (i = last_index; i >= 0; i--)
14277 {
14278 unsigned int sched_mask;
14279 rtx_insn *insn = ready[i];
14280
14281 if (recog_memoized (insn) < 0)
14282 continue;
14283
14284 sched_mask = s390_get_sched_attrmask (insn);
14285 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14286 INSN_UID (insn),
14287 s390_sched_score (insn));
14288 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14289 ((M) & sched_mask) ? #ATTR : "");
14290 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14291 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14292 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14293 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14294 #undef PRINT_SCHED_ATTR
14295 if (s390_tune == PROCESSOR_2964_Z13)
14296 {
14297 unsigned int unit_mask, m = 1;
14298 int units, j;
14299
14300 unit_mask = s390_get_unit_mask (insn, &units);
14301 fprintf (file, "(units:");
14302 for (j = 0; j < units; j++, m <<= 1)
14303 if (m & unit_mask)
14304 fprintf (file, " u%d", j);
14305 fprintf (file, ")");
14306 }
14307 fprintf (file, "\n");
14308 }
14309 }
14310 }
14311
14312 return s390_issue_rate ();
14313 }
14314
14315
14316 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14317 the scheduler has issued INSN. It stores the last issued insn into
14318 last_scheduled_insn in order to make it available for
14319 s390_sched_reorder. */
14320 static int
14321 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14322 {
14323 last_scheduled_insn = insn;
14324
14325 if (s390_tune >= PROCESSOR_2827_ZEC12
14326 && reload_completed
14327 && recog_memoized (insn) >= 0)
14328 {
14329 unsigned int mask = s390_get_sched_attrmask (insn);
14330
14331 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14332 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14333 s390_sched_state = S390_SCHED_STATE_CRACKED;
14334 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14335 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14336 s390_sched_state = S390_SCHED_STATE_NORMAL;
14337 else
14338 {
14339 /* Only normal insns are left (mask == 0). */
14340 switch (s390_sched_state)
14341 {
14342 case 0:
14343 case 1:
14344 case 2:
14345 case S390_SCHED_STATE_NORMAL:
14346 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14347 s390_sched_state = 1;
14348 else
14349 s390_sched_state++;
14350
14351 break;
14352 case S390_SCHED_STATE_CRACKED:
14353 s390_sched_state = S390_SCHED_STATE_NORMAL;
14354 break;
14355 }
14356 }
14357
14358 if (s390_tune == PROCESSOR_2964_Z13)
14359 {
14360 int units, i;
14361 unsigned unit_mask, m = 1;
14362
14363 unit_mask = s390_get_unit_mask (insn, &units);
14364 gcc_assert (units <= MAX_SCHED_UNITS);
14365
14366 for (i = 0; i < units; i++, m <<= 1)
14367 if (m & unit_mask)
14368 last_scheduled_unit_distance[i] = 0;
14369 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14370 last_scheduled_unit_distance[i]++;
14371 }
14372
14373 if (verbose > 5)
14374 {
14375 unsigned int sched_mask;
14376
14377 sched_mask = s390_get_sched_attrmask (insn);
14378
14379 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14380 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14381 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14382 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14383 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14384 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14385 #undef PRINT_SCHED_ATTR
14386
14387 if (s390_tune == PROCESSOR_2964_Z13)
14388 {
14389 unsigned int unit_mask, m = 1;
14390 int units, j;
14391
14392 unit_mask = s390_get_unit_mask (insn, &units);
14393 fprintf (file, "(units:");
14394 for (j = 0; j < units; j++, m <<= 1)
14395 if (m & unit_mask)
14396 fprintf (file, " %d", j);
14397 fprintf (file, ")");
14398 }
14399 fprintf (file, " sched state: %d\n", s390_sched_state);
14400
14401 if (s390_tune == PROCESSOR_2964_Z13)
14402 {
14403 int units, j;
14404
14405 s390_get_unit_mask (insn, &units);
14406
14407 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14408 for (j = 0; j < units; j++)
14409 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14410 fprintf (file, "\n");
14411 }
14412 }
14413 }
14414
14415 if (GET_CODE (PATTERN (insn)) != USE
14416 && GET_CODE (PATTERN (insn)) != CLOBBER)
14417 return more - 1;
14418 else
14419 return more;
14420 }
14421
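/* Reset the scheduling state: the last scheduled insn, the per-unit
   distances, and the instruction group state.  */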
14422 static void
14423 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14424 int verbose ATTRIBUTE_UNUSED,
14425 int max_ready ATTRIBUTE_UNUSED)
14426 {
14427 last_scheduled_insn = NULL;
14428 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14429 s390_sched_state = 0;
14430 }
14431
14432 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14433 how many times struct loop *loop should be unrolled when tuning for cpus
14434 with a built-in stride prefetcher.
14435 The loop body is analyzed for memory accesses by counting the MEM
14436 references it contains. Depending on the loop depth and the number of
14437 memory accesses a new number <= nunroll is returned to improve the
14438 behavior of the hardware prefetch unit. */
14439 static unsigned
14440 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14441 {
14442 basic_block *bbs;
14443 rtx_insn *insn;
14444 unsigned i;
14445 unsigned mem_count = 0;
14446
14447 if (s390_tune < PROCESSOR_2097_Z10)
14448 return nunroll;
14449
14450 /* Count the number of memory references within the loop body. */
14451 bbs = get_loop_body (loop);
14452 subrtx_iterator::array_type array;
14453 for (i = 0; i < loop->num_nodes; i++)
14454 FOR_BB_INSNS (bbs[i], insn)
14455 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14456 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14457 if (MEM_P (*iter))
14458 mem_count += 1;
14459 free (bbs);
14460
14461 /* If there are no memory references there is nothing to adjust, and we would divide by zero below. */
14462 if (mem_count == 0)
14463 return nunroll;
14464
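  /* Cap the unroll factor so that the unrolled loop body contains at
     most roughly 28, 22 or 16 memory references, depending on the loop
     depth.  */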
14465 switch (loop_depth(loop))
14466 {
14467 case 1:
14468 return MIN (nunroll, 28 / mem_count);
14469 case 2:
14470 return MIN (nunroll, 22 / mem_count);
14471 default:
14472 return MIN (nunroll, 16 / mem_count);
14473 }
14474 }
14475
14476 /* Restore the current options. This is a hook function and also called
14477 internally. */
14478
14479 static void
14480 s390_function_specific_restore (struct gcc_options *opts,
14481 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14482 {
14483 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14484 }
14485
14486 static void
14487 s390_option_override_internal (bool main_args_p,
14488 struct gcc_options *opts,
14489 const struct gcc_options *opts_set)
14490 {
14491 const char *prefix;
14492 const char *suffix;
14493
14494 /* Set up prefix/suffix so the error messages refer to either the command
14495 line argument, or the attribute(target). */
14496 if (main_args_p)
14497 {
14498 prefix = "-m";
14499 suffix = "";
14500 }
14501 else
14502 {
14503 prefix = "option(\"";
14504 suffix = "\")";
14505 }
14506
14507
14508 /* Architecture mode defaults according to ABI. */
14509 if (!(opts_set->x_target_flags & MASK_ZARCH))
14510 {
14511 if (TARGET_64BIT)
14512 opts->x_target_flags |= MASK_ZARCH;
14513 else
14514 opts->x_target_flags &= ~MASK_ZARCH;
14515 }
14516
14517 /* Set the march default in case it hasn't been specified on cmdline. */
14518 if (!opts_set->x_s390_arch)
14519 opts->x_s390_arch = PROCESSOR_2064_Z900;
14520 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14521 || opts->x_s390_arch == PROCESSOR_9672_G6)
14522 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14523 "in future releases; use at least %sarch=z900%s",
14524 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14525 suffix, prefix, suffix);
14526
14527 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14528
14529 /* Determine processor to tune for. */
14530 if (!opts_set->x_s390_tune)
14531 opts->x_s390_tune = opts->x_s390_arch;
14532 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14533 || opts->x_s390_tune == PROCESSOR_9672_G6)
14534 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14535 "in future releases; use at least %stune=z900%s",
14536 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14537 suffix, prefix, suffix);
14538
14539 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14540
14541 /* Sanity checks. */
14542 if (opts->x_s390_arch == PROCESSOR_NATIVE
14543 || opts->x_s390_tune == PROCESSOR_NATIVE)
14544 gcc_unreachable ();
14545 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14546 error ("z/Architecture mode not supported on %s",
14547 processor_table[(int)opts->x_s390_arch].name);
14548 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14549 error ("64-bit ABI not supported in ESA/390 mode");
14550
14551 /* Enable hardware transactions if available and not explicitly
14552 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14553 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14554 {
14555 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14556 opts->x_target_flags |= MASK_OPT_HTM;
14557 else
14558 opts->x_target_flags &= ~MASK_OPT_HTM;
14559 }
14560
14561 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14562 {
14563 if (TARGET_OPT_VX_P (opts->x_target_flags))
14564 {
14565 if (!TARGET_CPU_VX_P (opts))
14566 error ("hardware vector support not available on %s",
14567 processor_table[(int)opts->x_s390_arch].name);
14568 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14569 error ("hardware vector support not available with -msoft-float");
14570 }
14571 }
14572 else
14573 {
14574 if (TARGET_CPU_VX_P (opts))
14575 /* Enable vector support if available and not explicitly disabled
14576 by user. E.g. with -m31 -march=z13 -mzarch */
14577 opts->x_target_flags |= MASK_OPT_VX;
14578 else
14579 opts->x_target_flags &= ~MASK_OPT_VX;
14580 }
14581
14582 /* Use hardware DFP if available and not explicitly disabled by
14583 user. E.g. with -m31 -march=z10 -mzarch */
14584 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14585 {
14586 if (TARGET_DFP_P (opts))
14587 opts->x_target_flags |= MASK_HARD_DFP;
14588 else
14589 opts->x_target_flags &= ~MASK_HARD_DFP;
14590 }
14591
14592 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14593 {
14594 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14595 {
14596 if (!TARGET_CPU_DFP_P (opts))
14597 error ("hardware decimal floating point instructions"
14598 " not available on %s",
14599 processor_table[(int)opts->x_s390_arch].name);
14600 if (!TARGET_ZARCH_P (opts->x_target_flags))
14601 error ("hardware decimal floating point instructions"
14602 " not available in ESA/390 mode");
14603 }
14604 else
14605 opts->x_target_flags &= ~MASK_HARD_DFP;
14606 }
14607
14608 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14609 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14610 {
14611 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14612 && TARGET_HARD_DFP_P (opts->x_target_flags))
14613 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14614
14615 opts->x_target_flags &= ~MASK_HARD_DFP;
14616 }
14617
14618 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14619 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14620 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14621 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14622 "in combination");
14623
14624 if (opts->x_s390_stack_size)
14625 {
14626 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14627 error ("stack size must be greater than the stack guard value");
14628 else if (opts->x_s390_stack_size > 1 << 16)
14629 error ("stack size must not be greater than 64k");
14630 }
14631 else if (opts->x_s390_stack_guard)
14632 error ("-mstack-guard implies use of -mstack-size");
14633
14634 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14635 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14636 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14637 #endif
14638
14639 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14640 {
14641 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14642 opts->x_param_values,
14643 opts_set->x_param_values);
14644 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14645 opts->x_param_values,
14646 opts_set->x_param_values);
14647 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14648 opts->x_param_values,
14649 opts_set->x_param_values);
14650 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14651 opts->x_param_values,
14652 opts_set->x_param_values);
14653 }
14654
14655 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14656 opts->x_param_values,
14657 opts_set->x_param_values);
14658 /* Values for loop prefetching. */
14659 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14660 opts->x_param_values,
14661 opts_set->x_param_values);
14662 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14663 opts->x_param_values,
14664 opts_set->x_param_values);
14665 /* s390 has more than 2 cache levels and the sizes are much larger. Since
14666 we are always running virtualized, assume that we only get a small
14667 part of the caches above L1. */
14668 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14669 opts->x_param_values,
14670 opts_set->x_param_values);
14671 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14672 opts->x_param_values,
14673 opts_set->x_param_values);
14674 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14675 opts->x_param_values,
14676 opts_set->x_param_values);
14677
14678 /* Use the alternative scheduling-pressure algorithm by default. */
14679 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14680 opts->x_param_values,
14681 opts_set->x_param_values);
14682
14683 /* Call target specific restore function to do post-init work. At the moment,
14684 this just sets opts->x_s390_cost_pointer. */
14685 s390_function_specific_restore (opts, NULL);
14686 }
14687
14688 static void
14689 s390_option_override (void)
14690 {
14691 unsigned int i;
14692 cl_deferred_option *opt;
14693 vec<cl_deferred_option> *v =
14694 (vec<cl_deferred_option> *) s390_deferred_options;
14695
14696 if (v)
14697 FOR_EACH_VEC_ELT (*v, i, opt)
14698 {
14699 switch (opt->opt_index)
14700 {
14701 case OPT_mhotpatch_:
14702 {
14703 int val1;
14704 int val2;
14705 char s[256];
14706 char *t;
14707
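	    /* Split the "n,m" argument of -mhotpatch= into the two
	       halfword counts.  */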
14708 strncpy (s, opt->arg, 256);
14709 s[255] = 0;
14710 t = strchr (s, ',');
14711 if (t != NULL)
14712 {
14713 *t = 0;
14714 t++;
14715 val1 = integral_argument (s);
14716 val2 = integral_argument (t);
14717 }
14718 else
14719 {
14720 val1 = -1;
14721 val2 = -1;
14722 }
14723 if (val1 == -1 || val2 == -1)
14724 {
14725 /* argument is not a plain number */
14726 error ("arguments to %qs should be non-negative integers",
14727 "-mhotpatch=n,m");
14728 break;
14729 }
14730 else if (val1 > s390_hotpatch_hw_max
14731 || val2 > s390_hotpatch_hw_max)
14732 {
14733 error ("argument to %qs is too large (max. %d)",
14734 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14735 break;
14736 }
14737 s390_hotpatch_hw_before_label = val1;
14738 s390_hotpatch_hw_after_label = val2;
14739 break;
14740 }
14741 default:
14742 gcc_unreachable ();
14743 }
14744 }
14745
14746 /* Set up function hooks. */
14747 init_machine_status = s390_init_machine_status;
14748
14749 s390_option_override_internal (true, &global_options, &global_options_set);
14750
14751 /* Save the initial options in case the user uses function specific
14752 options later. */
14753 target_option_default_node = build_target_option_node (&global_options);
14754 target_option_current_node = target_option_default_node;
14755
14756 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14757 requires the arch flags to be evaluated already. Since prefetching
14758 is beneficial on s390, we enable it if available. */
14759 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14760 flag_prefetch_loop_arrays = 1;
14761
14762 if (TARGET_TPF)
14763 {
14764 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14765 debuggers do not yet support DWARF 3/4. */
14766 if (!global_options_set.x_dwarf_strict)
14767 dwarf_strict = 1;
14768 if (!global_options_set.x_dwarf_version)
14769 dwarf_version = 2;
14770 }
14771
14772 /* Register a target-specific optimization-and-lowering pass
14773 to run immediately before prologue and epilogue generation.
14774
14775 Registering the pass must be done at start up. It's
14776 convenient to do it here. */
14777 opt_pass *new_pass = new pass_s390_early_mach (g);
14778 struct register_pass_info insert_pass_s390_early_mach =
14779 {
14780 new_pass, /* pass */
14781 "pro_and_epilogue", /* reference_pass_name */
14782 1, /* ref_pass_instance_number */
14783 PASS_POS_INSERT_BEFORE /* po_op */
14784 };
14785 register_pass (&insert_pass_s390_early_mach);
14786 }
14787
14788 #if S390_USE_TARGET_ATTRIBUTE
14789 /* Inner function to process the attribute((target(...))), take an argument and
14790 set the current options from the argument. If we have a list, recursively go
14791 over the list. */
14792
14793 static bool
14794 s390_valid_target_attribute_inner_p (tree args,
14795 struct gcc_options *opts,
14796 struct gcc_options *new_opts_set,
14797 bool force_pragma)
14798 {
14799 char *next_optstr;
14800 bool ret = true;
14801
14802 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14803 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14804 static const struct
14805 {
14806 const char *string;
14807 size_t len;
14808 int opt;
14809 int has_arg;
14810 int only_as_pragma;
14811 } attrs[] = {
14812 /* enum options */
14813 S390_ATTRIB ("arch=", OPT_march_, 1),
14814 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14815 /* uinteger options */
14816 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14817 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14818 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14819 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14820 /* flag options */
14821 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14822 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14823 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14824 S390_ATTRIB ("htm", OPT_mhtm, 0),
14825 S390_ATTRIB ("vx", OPT_mvx, 0),
14826 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14827 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14828 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14829 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14830 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14831 /* boolean options */
14832 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14833 };
14834 #undef S390_ATTRIB
14835 #undef S390_PRAGMA
14836
14837 /* If this is a list, recurse to get the options. */
14838 if (TREE_CODE (args) == TREE_LIST)
14839 {
14840 bool ret = true;
14841 int num_pragma_values;
14842 int i;
14843
14844 /* Note: attribs.c:decl_attributes prepends the values from
14845 current_target_pragma to the list of target attributes. To determine
14846 whether we're looking at a value of the attribute or the pragma we
14847 assume that the first [list_length (current_target_pragma)] values in
14848 the list are the values from the pragma. */
14849 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14850 ? list_length (current_target_pragma) : 0;
14851 for (i = 0; args; args = TREE_CHAIN (args), i++)
14852 {
14853 bool is_pragma;
14854
14855 is_pragma = (force_pragma || i < num_pragma_values);
14856 if (TREE_VALUE (args)
14857 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14858 opts, new_opts_set,
14859 is_pragma))
14860 {
14861 ret = false;
14862 }
14863 }
14864 return ret;
14865 }
14866
14867 else if (TREE_CODE (args) != STRING_CST)
14868 {
14869 error ("attribute %<target%> argument not a string");
14870 return false;
14871 }
14872
14873 /* Handle multiple arguments separated by commas. */
14874 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14875
14876 while (next_optstr && *next_optstr != '\0')
14877 {
14878 char *p = next_optstr;
14879 char *orig_p = p;
14880 char *comma = strchr (next_optstr, ',');
14881 size_t len, opt_len;
14882 int opt;
14883 bool opt_set_p;
14884 char ch;
14885 unsigned i;
14886 int mask = 0;
14887 enum cl_var_type var_type;
14888 bool found;
14889
14890 if (comma)
14891 {
14892 *comma = '\0';
14893 len = comma - next_optstr;
14894 next_optstr = comma + 1;
14895 }
14896 else
14897 {
14898 len = strlen (p);
14899 next_optstr = NULL;
14900 }
14901
14902 /* Recognize no-xxx. */
14903 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14904 {
14905 opt_set_p = false;
14906 p += 3;
14907 len -= 3;
14908 }
14909 else
14910 opt_set_p = true;
14911
14912 /* Find the option. */
14913 ch = *p;
14914 found = false;
14915 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14916 {
14917 opt_len = attrs[i].len;
14918 if (ch == attrs[i].string[0]
14919 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14920 && memcmp (p, attrs[i].string, opt_len) == 0)
14921 {
14922 opt = attrs[i].opt;
14923 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14924 continue;
14925 mask = cl_options[opt].var_value;
14926 var_type = cl_options[opt].var_type;
14927 found = true;
14928 break;
14929 }
14930 }
14931
14932 /* Process the option. */
14933 if (!found)
14934 {
14935 error ("attribute(target(\"%s\")) is unknown", orig_p);
14936 return false;
14937 }
14938 else if (attrs[i].only_as_pragma && !force_pragma)
14939 {
14940 /* Value is not allowed for the target attribute. */
14941 error ("value %qs is not supported by attribute %<target%>",
14942 attrs[i].string);
14943 return false;
14944 }
14945
14946 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14947 {
14948 if (var_type == CLVC_BIT_CLEAR)
14949 opt_set_p = !opt_set_p;
14950
14951 if (opt_set_p)
14952 opts->x_target_flags |= mask;
14953 else
14954 opts->x_target_flags &= ~mask;
14955 new_opts_set->x_target_flags |= mask;
14956 }
14957
14958 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14959 {
14960 int value;
14961
14962 if (cl_options[opt].cl_uinteger)
14963 {
14964 /* Unsigned integer argument. Code based on the function
14965 decode_cmdline_option () in opts-common.c. */
14966 value = integral_argument (p + opt_len);
14967 }
14968 else
14969 value = (opt_set_p) ? 1 : 0;
14970
14971 if (value != -1)
14972 {
14973 struct cl_decoded_option decoded;
14974
14975 /* Value range check; only implemented for numeric and boolean
14976 options at the moment. */
14977 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14978 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14979 set_option (opts, new_opts_set, opt, value,
14980 p + opt_len, DK_UNSPECIFIED, input_location,
14981 global_dc);
14982 }
14983 else
14984 {
14985 error ("attribute(target(\"%s\")) is unknown", orig_p);
14986 ret = false;
14987 }
14988 }
14989
14990 else if (cl_options[opt].var_type == CLVC_ENUM)
14991 {
14992 bool arg_ok;
14993 int value;
14994
14995 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14996 if (arg_ok)
14997 set_option (opts, new_opts_set, opt, value,
14998 p + opt_len, DK_UNSPECIFIED, input_location,
14999 global_dc);
15000 else
15001 {
15002 error ("attribute(target(\"%s\")) is unknown", orig_p);
15003 ret = false;
15004 }
15005 }
15006
15007 else
15008 gcc_unreachable ();
15009 }
15010 return ret;
15011 }
15012
15013 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15014
15015 tree
15016 s390_valid_target_attribute_tree (tree args,
15017 struct gcc_options *opts,
15018 const struct gcc_options *opts_set,
15019 bool force_pragma)
15020 {
15021 tree t = NULL_TREE;
15022 struct gcc_options new_opts_set;
15023
15024 memset (&new_opts_set, 0, sizeof (new_opts_set));
15025
15026 /* Process each of the options on the chain. */
15027 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15028 force_pragma))
15029 return error_mark_node;
15030
15031 /* If some option was set (even if it has not changed), rerun
15032 s390_option_override_internal, and then save the options away. */
15033 if (new_opts_set.x_target_flags
15034 || new_opts_set.x_s390_arch
15035 || new_opts_set.x_s390_tune
15036 || new_opts_set.x_s390_stack_guard
15037 || new_opts_set.x_s390_stack_size
15038 || new_opts_set.x_s390_branch_cost
15039 || new_opts_set.x_s390_warn_framesize
15040 || new_opts_set.x_s390_warn_dynamicstack_p)
15041 {
15042 const unsigned char *src = (const unsigned char *)opts_set;
15043 unsigned char *dest = (unsigned char *)&new_opts_set;
15044 unsigned int i;
15045
15046 /* Merge the original option flags into the new ones. */
15047       for (i = 0; i < sizeof (*opts_set); i++)
15048 dest[i] |= src[i];
15049
15050       /* Apply any overrides, such as arch=xxx or tune=xxx.  */
15051 s390_option_override_internal (false, opts, &new_opts_set);
15052 /* Save the current options unless we are validating options for
15053 #pragma. */
15054 t = build_target_option_node (opts);
15055 }
15056 return t;
15057 }
15058
15059 /* Hook to validate attribute((target("string"))). */
15060
15061 static bool
15062 s390_valid_target_attribute_p (tree fndecl,
15063 tree ARG_UNUSED (name),
15064 tree args,
15065 int ARG_UNUSED (flags))
15066 {
15067 struct gcc_options func_options;
15068 tree new_target, new_optimize;
15069 bool ret = true;
15070
15071 /* attribute((target("default"))) does nothing, beyond
15072 affecting multi-versioning. */
15073 if (TREE_VALUE (args)
15074 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15075 && TREE_CHAIN (args) == NULL_TREE
15076 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15077 return true;
15078
15079 tree old_optimize = build_optimization_node (&global_options);
15080
15081 /* Get the optimization options of the current function. */
15082 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15083
15084 if (!func_optimize)
15085 func_optimize = old_optimize;
15086
15087 /* Init func_options. */
15088 memset (&func_options, 0, sizeof (func_options));
15089 init_options_struct (&func_options, NULL);
15090 lang_hooks.init_options_struct (&func_options);
15091
15092 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15093
15094 /* Initialize func_options to the default before its target options can
15095 be set. */
15096 cl_target_option_restore (&func_options,
15097 TREE_TARGET_OPTION (target_option_default_node));
15098
15099 new_target = s390_valid_target_attribute_tree (args, &func_options,
15100 &global_options_set,
15101 (args ==
15102 current_target_pragma));
15103 new_optimize = build_optimization_node (&func_options);
15104 if (new_target == error_mark_node)
15105 ret = false;
15106 else if (fndecl && new_target)
15107 {
15108 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15109 if (old_optimize != new_optimize)
15110 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15111 }
15112 return ret;
15113 }
15114
15115 /* Restore target globals from NEW_TREE and invalidate the s390_previous_fndecl
15116 cache. */
15117
15118 void
15119 s390_activate_target_options (tree new_tree)
15120 {
15121 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15122 if (TREE_TARGET_GLOBALS (new_tree))
15123 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15124 else if (new_tree == target_option_default_node)
15125 restore_target_globals (&default_target_globals);
15126 else
15127 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15128 s390_previous_fndecl = NULL_TREE;
15129 }
15130
15131 /* Establish appropriate back-end context for processing the function
15132 FNDECL. The argument might be NULL to indicate processing at top
15133 level, outside of any function scope. */
15134 static void
15135 s390_set_current_function (tree fndecl)
15136 {
15137 /* Only change the context if the function changes. This hook is called
15138 several times in the course of compiling a function, and we don't want to
15139 slow things down too much or call target_reinit when it isn't safe. */
15140 if (fndecl == s390_previous_fndecl)
15141 return;
15142
15143 tree old_tree;
15144 if (s390_previous_fndecl == NULL_TREE)
15145 old_tree = target_option_current_node;
15146 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15147 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15148 else
15149 old_tree = target_option_default_node;
15150
15151 if (fndecl == NULL_TREE)
15152 {
15153 if (old_tree != target_option_current_node)
15154 s390_activate_target_options (target_option_current_node);
15155 return;
15156 }
15157
15158 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15159 if (new_tree == NULL_TREE)
15160 new_tree = target_option_default_node;
15161
15162 if (old_tree != new_tree)
15163 s390_activate_target_options (new_tree);
15164 s390_previous_fndecl = fndecl;
15165 }
15166 #endif
15167
15168 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
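/* For example, a fixed-size copy such as __builtin_memcpy (dst, src, 8)
   is expanded inline through the by-pieces machinery when TARGET_ZARCH
   is in effect, while sizes not listed below go through the usual
   block-operation expanders instead (an illustrative reading of the
   size check, not an exhaustive description).  */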
15169
15170 static bool
15171 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15172 unsigned int align ATTRIBUTE_UNUSED,
15173 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15174 bool speed_p ATTRIBUTE_UNUSED)
15175 {
15176 return (size == 1 || size == 2
15177 || size == 4 || (TARGET_ZARCH && size == 8));
15178 }
15179
15180 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
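/* For illustration (a sketch of the intended use, not a guarantee of the
   exact expansion): this hook is used when expanding C11 atomic compound
   assignments on floating-point operands, e.g.

     _Atomic double d;
     void f (void) { d += 1.0; }

   The three sequences built below surround the compare-and-swap retry
   loop so that FPU exception flags raised by discarded iterations are
   held back (*hold), cleared before a retry (*clear), and merged back in
   once the update succeeds (*update).  */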
15181
15182 static void
15183 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15184 {
15185 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15186 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15187 tree call_efpc = build_call_expr (efpc, 0);
15188 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15189
15190 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15191 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15192 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15193 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15194 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15195 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15196
15197 /* Generates the equivalent of feholdexcept (&fenv_var)
15198
15199 fenv_var = __builtin_s390_efpc ();
15200 __builtin_s390_sfpc (fenv_var & mask) */
15201 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15202 tree new_fpc =
15203 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15204 build_int_cst (unsigned_type_node,
15205 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15206 FPC_EXCEPTION_MASK)));
15207 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15208 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15209
15210 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15211
15212 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15213 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15214 build_int_cst (unsigned_type_node,
15215 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15216 *clear = build_call_expr (sfpc, 1, new_fpc);
15217
15218 /* Generates the equivalent of feupdateenv (fenv_var)
15219
15220 old_fpc = __builtin_s390_efpc ();
15221 __builtin_s390_sfpc (fenv_var);
15222 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15223
15224 old_fpc = create_tmp_var_raw (unsigned_type_node);
15225 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15226 old_fpc, call_efpc);
15227
15228 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15229
15230 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15231 build_int_cst (unsigned_type_node,
15232 FPC_FLAGS_MASK));
15233 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15234 build_int_cst (unsigned_type_node,
15235 FPC_FLAGS_SHIFT));
15236 tree atomic_feraiseexcept
15237 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15238 raise_old_except = build_call_expr (atomic_feraiseexcept,
15239 1, raise_old_except);
15240
15241 *update = build2 (COMPOUND_EXPR, void_type_node,
15242 build2 (COMPOUND_EXPR, void_type_node,
15243 store_old_fpc, set_new_fpc),
15244 raise_old_except);
15245
15246 #undef FPC_EXCEPTION_MASK
15247 #undef FPC_FLAGS_MASK
15248 #undef FPC_DXC_MASK
15249 #undef FPC_EXCEPTION_MASK_SHIFT
15250 #undef FPC_FLAGS_SHIFT
15251 #undef FPC_DXC_SHIFT
15252 }
15253
15254 /* Return the vector mode to be used for inner mode MODE when doing
15255 vectorization. */
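/* For example, when vectorizing

     void f (double *restrict a, double *restrict b, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] += b[i];
     }

   with the vector extension facility available, the DFmode elements are
   vectorized using V2DFmode, i.e. two doubles per 128-bit vector
   register; without TARGET_VX, word_mode is returned.  */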
15256 static machine_mode
15257 s390_preferred_simd_mode (machine_mode mode)
15258 {
15259 if (TARGET_VX)
15260 switch (mode)
15261 {
15262 case DFmode:
15263 return V2DFmode;
15264 case DImode:
15265 return V2DImode;
15266 case SImode:
15267 return V4SImode;
15268 case HImode:
15269 return V8HImode;
15270 case QImode:
15271 return V16QImode;
15272 default:;
15273 }
15274 return word_mode;
15275 }
15276
15277 /* Our hardware does not require vectors to be strictly aligned. */
15278 static bool
15279 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15280 const_tree type ATTRIBUTE_UNUSED,
15281 int misalignment ATTRIBUTE_UNUSED,
15282 bool is_packed ATTRIBUTE_UNUSED)
15283 {
15284 if (TARGET_VX)
15285 return true;
15286
15287 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15288 is_packed);
15289 }
15290
15291 /* The vector ABI requires vector types to be aligned on an 8 byte
15292 boundary (our stack alignment). However, we allow this to be
15293 overridden by the user, although this definitely breaks the ABI. */
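/* For example, under the vector ABI

     typedef int v4si __attribute__ ((vector_size (16)));

   ends up with 8 byte alignment (the MIN below caps the alignment at 64
   bits), whereas

     typedef int v4si_a16 __attribute__ ((vector_size (16), aligned (16)));

   keeps its user-requested 16 byte alignment via TYPE_USER_ALIGN.  */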
15294 static HOST_WIDE_INT
15295 s390_vector_alignment (const_tree type)
15296 {
15297 if (!TARGET_VX_ABI)
15298 return default_vector_alignment (type);
15299
15300 if (TYPE_USER_ALIGN (type))
15301 return TYPE_ALIGN (type);
15302
15303 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
15304 }
15305
15306 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15307 /* Implement TARGET_ASM_FILE_START. */
15308 static void
15309 s390_asm_file_start (void)
15310 {
15311 default_file_start ();
15312 s390_asm_output_machine_for_arch (asm_out_file);
15313 }
15314 #endif
15315
15316 /* Implement TARGET_ASM_FILE_END. */
15317 static void
15318 s390_asm_file_end (void)
15319 {
15320 #ifdef HAVE_AS_GNU_ATTRIBUTE
15321 varpool_node *vnode;
15322 cgraph_node *cnode;
15323
15324 FOR_EACH_VARIABLE (vnode)
15325 if (TREE_PUBLIC (vnode->decl))
15326 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15327
15328 FOR_EACH_FUNCTION (cnode)
15329 if (TREE_PUBLIC (cnode->decl))
15330 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15331
15333 if (s390_vector_abi != 0)
15334 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15335 s390_vector_abi);
15336 #endif
15337 file_end_indicate_exec_stack ();
15338
15339 if (flag_split_stack)
15340 file_end_indicate_split_stack ();
15341 }
15342
15343 /* Return true if TYPE is a vector bool type. */
15344 static inline bool
15345 s390_vector_bool_type_p (const_tree type)
15346 {
15347 return TYPE_VECTOR_OPAQUE (type);
15348 }
15349
15350 /* Return the diagnostic message string if the binary operation OP is
15351 not permitted on TYPE1 and TYPE2, NULL otherwise. */
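/* For example, with the zvector language extension (-mzvector):

     vector bool int b1, b2;
     vector signed int s;
     vector unsigned int u;

     s + u    -> "types differ in signedness"
     b1 * b2  -> "binary operator does not support two vector bool operands"
     b1 < s   -> "binary operator does not support vector bool operand"

   while, as far as this hook is concerned, s + s and b1 == b2 are
   accepted.  */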
15352 static const char*
15353 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15354 {
15355 bool bool1_p, bool2_p;
15356 bool plusminus_p;
15357 bool muldiv_p;
15358 bool compare_p;
15359 machine_mode mode1, mode2;
15360
15361 if (!TARGET_ZVECTOR)
15362 return NULL;
15363
15364 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15365 return NULL;
15366
15367 bool1_p = s390_vector_bool_type_p (type1);
15368 bool2_p = s390_vector_bool_type_p (type2);
15369
15370 /* Mixing signed and unsigned types is forbidden for all
15371 operators. */
15372 if (!bool1_p && !bool2_p
15373 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15374 return N_("types differ in signedness");
15375
15376 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15377 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15378 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15379 || op == ROUND_DIV_EXPR);
15380 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15381 || op == EQ_EXPR || op == NE_EXPR);
15382
15383 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15384 return N_("binary operator does not support two vector bool operands");
15385
15386 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15387 return N_("binary operator does not support vector bool operand");
15388
15389 mode1 = TYPE_MODE (type1);
15390 mode2 = TYPE_MODE (type2);
15391
15392 if (bool1_p != bool2_p && plusminus_p
15393 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15394 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15395 return N_("binary operator does not support mixing vector "
15396 "bool with floating point vector operands");
15397
15398 return NULL;
15399 }
15400
15401 /* Implement TARGET_C_EXCESS_PRECISION.
15402
15403 FIXME: For historical reasons, float_t and double_t are typedef'ed to
15404 double on s390, causing operations on float_t to operate in a higher
15405 precision than is necessary. However, it is not the case that SFmode
15406 operations have implicit excess precision, and we generate better
15407 code if we let the compiler know that no implicit extra precision is added.
15408
15409 That means when we are compiling with -fexcess-precision=fast, the value
15410 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
15411 float_t (though they would be correct for -fexcess-precision=standard).
15412
15413 A complete fix would modify glibc to remove the unnecessary typedef
15414 of float_t to double. */
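/* Concretely, for

     float x, y, z;
     z = x * y + z;

   -fexcess-precision=standard evaluates the expression in double
   (FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE, matching glibc's float_t typedef),
   while -fexcess-precision=fast keeps it in single precision
   (FLT_EVAL_METHOD_PROMOTE_TO_FLOAT).  */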
15415
15416 static enum flt_eval_method
15417 s390_excess_precision (enum excess_precision_type type)
15418 {
15419 switch (type)
15420 {
15421 case EXCESS_PRECISION_TYPE_IMPLICIT:
15422 case EXCESS_PRECISION_TYPE_FAST:
15423 /* The fastest type to promote to will always be the native type,
15424 whether that occurs with implicit excess precision or
15425 otherwise. */
15426 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
15427 case EXCESS_PRECISION_TYPE_STANDARD:
15428 /* Otherwise, when we are in a standards compliant mode, to
15429 ensure consistency with the implementation in glibc, report that
15430 float is evaluated to the range and precision of double. */
15431 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
15432 default:
15433 gcc_unreachable ();
15434 }
15435 return FLT_EVAL_METHOD_UNPREDICTABLE;
15436 }
15437
15438 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
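/* For reference: AddressSanitizer maps an application address to its
   shadow byte as (addr >> ASAN_SHADOW_SHIFT) + offset, so the constants
   returned below (1 << 52 for 64-bit, 0x20000000 for 31-bit) have to
   agree with what libasan expects on the target.  */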
15439
15440 static unsigned HOST_WIDE_INT
15441 s390_asan_shadow_offset (void)
15442 {
15443 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
15444 }
15445
15446 /* Initialize GCC target structure. */
15447
15448 #undef TARGET_ASM_ALIGNED_HI_OP
15449 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15450 #undef TARGET_ASM_ALIGNED_DI_OP
15451 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15452 #undef TARGET_ASM_INTEGER
15453 #define TARGET_ASM_INTEGER s390_assemble_integer
15454
15455 #undef TARGET_ASM_OPEN_PAREN
15456 #define TARGET_ASM_OPEN_PAREN ""
15457
15458 #undef TARGET_ASM_CLOSE_PAREN
15459 #define TARGET_ASM_CLOSE_PAREN ""
15460
15461 #undef TARGET_OPTION_OVERRIDE
15462 #define TARGET_OPTION_OVERRIDE s390_option_override
15463
15464 #ifdef TARGET_THREAD_SSP_OFFSET
15465 #undef TARGET_STACK_PROTECT_GUARD
15466 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15467 #endif
15468
15469 #undef TARGET_ENCODE_SECTION_INFO
15470 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15471
15472 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15473 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15474
15475 #ifdef HAVE_AS_TLS
15476 #undef TARGET_HAVE_TLS
15477 #define TARGET_HAVE_TLS true
15478 #endif
15479 #undef TARGET_CANNOT_FORCE_CONST_MEM
15480 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15481
15482 #undef TARGET_DELEGITIMIZE_ADDRESS
15483 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15484
15485 #undef TARGET_LEGITIMIZE_ADDRESS
15486 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15487
15488 #undef TARGET_RETURN_IN_MEMORY
15489 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15490
15491 #undef TARGET_INIT_BUILTINS
15492 #define TARGET_INIT_BUILTINS s390_init_builtins
15493 #undef TARGET_EXPAND_BUILTIN
15494 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15495 #undef TARGET_BUILTIN_DECL
15496 #define TARGET_BUILTIN_DECL s390_builtin_decl
15497
15498 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15499 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15500
15501 #undef TARGET_ASM_OUTPUT_MI_THUNK
15502 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15503 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15504 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15505
15506 #undef TARGET_C_EXCESS_PRECISION
15507 #define TARGET_C_EXCESS_PRECISION s390_excess_precision
15508
15509 #undef TARGET_SCHED_ADJUST_PRIORITY
15510 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15511 #undef TARGET_SCHED_ISSUE_RATE
15512 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15513 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15514 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15515
15516 #undef TARGET_SCHED_VARIABLE_ISSUE
15517 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15518 #undef TARGET_SCHED_REORDER
15519 #define TARGET_SCHED_REORDER s390_sched_reorder
15520 #undef TARGET_SCHED_INIT
15521 #define TARGET_SCHED_INIT s390_sched_init
15522
15523 #undef TARGET_CANNOT_COPY_INSN_P
15524 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15525 #undef TARGET_RTX_COSTS
15526 #define TARGET_RTX_COSTS s390_rtx_costs
15527 #undef TARGET_ADDRESS_COST
15528 #define TARGET_ADDRESS_COST s390_address_cost
15529 #undef TARGET_REGISTER_MOVE_COST
15530 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15531 #undef TARGET_MEMORY_MOVE_COST
15532 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15533 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
15534 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
15535 s390_builtin_vectorization_cost
15536
15537 #undef TARGET_MACHINE_DEPENDENT_REORG
15538 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15539
15540 #undef TARGET_VALID_POINTER_MODE
15541 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15542
15543 #undef TARGET_BUILD_BUILTIN_VA_LIST
15544 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15545 #undef TARGET_EXPAND_BUILTIN_VA_START
15546 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15547 #undef TARGET_ASAN_SHADOW_OFFSET
15548 #define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
15549 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15550 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15551
15552 #undef TARGET_PROMOTE_FUNCTION_MODE
15553 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15554 #undef TARGET_PASS_BY_REFERENCE
15555 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15556
15557 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15558 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15559 #undef TARGET_FUNCTION_ARG
15560 #define TARGET_FUNCTION_ARG s390_function_arg
15561 #undef TARGET_FUNCTION_ARG_ADVANCE
15562 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15563 #undef TARGET_FUNCTION_VALUE
15564 #define TARGET_FUNCTION_VALUE s390_function_value
15565 #undef TARGET_LIBCALL_VALUE
15566 #define TARGET_LIBCALL_VALUE s390_libcall_value
15567 #undef TARGET_STRICT_ARGUMENT_NAMING
15568 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15569
15570 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15571 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15572
15573 #undef TARGET_FIXED_CONDITION_CODE_REGS
15574 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15575
15576 #undef TARGET_CC_MODES_COMPATIBLE
15577 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15578
15579 #undef TARGET_INVALID_WITHIN_DOLOOP
15580 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15581
15582 #ifdef HAVE_AS_TLS
15583 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15584 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15585 #endif
15586
15587 #undef TARGET_DWARF_FRAME_REG_MODE
15588 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15589
15590 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15591 #undef TARGET_MANGLE_TYPE
15592 #define TARGET_MANGLE_TYPE s390_mangle_type
15593 #endif
15594
15598 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15599 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15600
15601 #undef TARGET_PREFERRED_RELOAD_CLASS
15602 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15603
15604 #undef TARGET_SECONDARY_RELOAD
15605 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15606
15607 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15608 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15609
15610 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15611 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15612
15613 #undef TARGET_LEGITIMATE_ADDRESS_P
15614 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15615
15616 #undef TARGET_LEGITIMATE_CONSTANT_P
15617 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15618
15619 #undef TARGET_LRA_P
15620 #define TARGET_LRA_P s390_lra_p
15621
15622 #undef TARGET_CAN_ELIMINATE
15623 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15624
15625 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15626 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15627
15628 #undef TARGET_LOOP_UNROLL_ADJUST
15629 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15630
15631 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15632 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15633 #undef TARGET_TRAMPOLINE_INIT
15634 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15635
15636 #undef TARGET_UNWIND_WORD_MODE
15637 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15638
15639 #undef TARGET_CANONICALIZE_COMPARISON
15640 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15641
15642 #undef TARGET_HARD_REGNO_SCRATCH_OK
15643 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15644
15645 #undef TARGET_ATTRIBUTE_TABLE
15646 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15647
15648 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15649 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15650
15651 #undef TARGET_SET_UP_BY_PROLOGUE
15652 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15653
15654 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15655 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15656
15657 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15658 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15659 s390_use_by_pieces_infrastructure_p
15660
15661 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15662 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15663
15664 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15665 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15666
15667 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15668 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15669
15670 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15671 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15672
15673 #undef TARGET_VECTOR_ALIGNMENT
15674 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15675
15676 #undef TARGET_INVALID_BINARY_OP
15677 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15678
15679 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15680 #undef TARGET_ASM_FILE_START
15681 #define TARGET_ASM_FILE_START s390_asm_file_start
15682 #endif
15683
15684 #undef TARGET_ASM_FILE_END
15685 #define TARGET_ASM_FILE_END s390_asm_file_end
15686
15687 #if S390_USE_TARGET_ATTRIBUTE
15688 #undef TARGET_SET_CURRENT_FUNCTION
15689 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15690
15691 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15692 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15693 #endif
15694
15695 #undef TARGET_OPTION_RESTORE
15696 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15697
15698 struct gcc_target targetm = TARGET_INITIALIZER;
15699
15700 #include "gt-s390.h"