4673c1a0 1/* Subroutines used for code generation on IBM S/390 and zSeries
aad93da1 2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
4673c1a0 3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
e68d6a13 4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
4673c1a0 6
1e98c8f3 7This file is part of GCC.
4673c1a0 8
1e98c8f3 9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
038d1e19 11Software Foundation; either version 3, or (at your option) any later
1e98c8f3 12version.
4673c1a0 13
1e98c8f3 14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17for more details.
4673c1a0 18
19You should have received a copy of the GNU General Public License
038d1e19 20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
4673c1a0 22
23#include "config.h"
4673c1a0 24#include "system.h"
805e22b2 25#include "coretypes.h"
9ef16211 26#include "backend.h"
c1eb80de 27#include "target.h"
7a0cee35 28#include "target-globals.h"
c1eb80de 29#include "rtl.h"
9ef16211 30#include "tree.h"
31#include "gimple.h"
c1eb80de 32#include "cfghooks.h"
33#include "cfgloop.h"
9ef16211 34#include "df.h"
ad7b10a2 35#include "memmodel.h"
c1eb80de 36#include "tm_p.h"
37#include "stringpool.h"
30a86690 38#include "attribs.h"
c1eb80de 39#include "expmed.h"
40#include "optabs.h"
41#include "regs.h"
42#include "emit-rtl.h"
43#include "recog.h"
44#include "cgraph.h"
45#include "diagnostic-core.h"
7a0cee35 46#include "diagnostic.h"
b20a8bb4 47#include "alias.h"
b20a8bb4 48#include "fold-const.h"
9ed99284 49#include "print-tree.h"
9ed99284 50#include "stor-layout.h"
51#include "varasm.h"
52#include "calls.h"
4673c1a0 53#include "conditions.h"
54#include "output.h"
55#include "insn-attr.h"
56#include "flags.h"
57#include "except.h"
d53441c8 58#include "dojump.h"
59#include "explow.h"
d53441c8 60#include "stmt.h"
4673c1a0 61#include "expr.h"
c10847b9 62#include "reload.h"
94ea8568 63#include "cfgrtl.h"
64#include "cfganal.h"
65#include "lcm.h"
66#include "cfgbuild.h"
67#include "cfgcleanup.h"
7baa5366 68#include "debug.h"
a1f71e15 69#include "langhooks.h"
bc61cadb 70#include "internal-fn.h"
71#include "gimple-fold.h"
72#include "tree-eh.h"
a8783bee 73#include "gimplify.h"
c0717306 74#include "params.h"
fba5dd52 75#include "opts.h"
0b8be04c 76#include "tree-pass.h"
77#include "context.h"
f7715905 78#include "builtins.h"
15e472ec 79#include "rtl-iter.h"
76a4c804 80#include "intl.h"
80fc7f56 81#include "tm-constrs.h"
d5a90e99 82#include "tree-vrp.h"
83#include "symbol-summary.h"
84#include "ipa-prop.h"
85#include "ipa-fnsummary.h"
b3041173 86#include "sched-int.h"
4673c1a0 87
0c71fb4f 88/* This file should be included last. */
4b498588 89#include "target-def.h"
90
b395382f 91static bool s390_hard_regno_mode_ok (unsigned int, machine_mode);
92
7a0cee35 93/* Remember the last target of s390_set_current_function. */
94static GTY(()) tree s390_previous_fndecl;
95
18925d38 96/* Define the specific costs for a given cpu. */
97
ffead1ca 98struct processor_costs
18925d38 99{
260075cc 100 /* multiplication */
18925d38 101 const int m; /* cost of an M instruction. */
102 const int mghi; /* cost of an MGHI instruction. */
103 const int mh; /* cost of an MH instruction. */
104 const int mhi; /* cost of an MHI instruction. */
9cd3f3e6 105 const int ml; /* cost of an ML instruction. */
18925d38 106 const int mr; /* cost of an MR instruction. */
107 const int ms; /* cost of an MS instruction. */
108 const int msg; /* cost of an MSG instruction. */
109 const int msgf; /* cost of an MSGF instruction. */
110 const int msgfr; /* cost of an MSGFR instruction. */
111 const int msgr; /* cost of an MSGR instruction. */
112 const int msr; /* cost of an MSR instruction. */
113 const int mult_df; /* cost of multiplication in DFmode. */
429f9fdb 114 const int mxbr; /* cost of an MXBR instruction. */
260075cc 115 /* square root */
429f9fdb 116 const int sqxbr; /* cost of square root in TFmode. */
9cd3f3e6 117 const int sqdbr; /* cost of square root in DFmode. */
118 const int sqebr; /* cost of square root in SFmode. */
260075cc 119 /* multiply and add */
d95e38cf 120 const int madbr; /* cost of multiply and add in DFmode. */
121 const int maebr; /* cost of multiply and add in SFmode. */
260075cc 122 /* division */
429f9fdb 123 const int dxbr; /* cost of a DXBR instruction. */
260075cc 124 const int ddbr; /* cost of a DDBR instruction. */
260075cc 125 const int debr; /* cost of a DEBR instruction. */
3f074425 126 const int dlgr; /* cost of a DLGR instruction. */
 127 const int dlr; /* cost of a DLR instruction. */
 128 const int dr; /* cost of a DR instruction. */
 129 const int dsgfr; /* cost of a DSGFR instruction. */
 130 const int dsgr; /* cost of a DSGR instruction. */
18925d38 131};
132
7a0cee35 133#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
18925d38 134
135static const
ffead1ca 136struct processor_costs z900_cost =
18925d38 137{
138 COSTS_N_INSNS (5), /* M */
139 COSTS_N_INSNS (10), /* MGHI */
140 COSTS_N_INSNS (5), /* MH */
141 COSTS_N_INSNS (4), /* MHI */
9cd3f3e6 142 COSTS_N_INSNS (5), /* ML */
18925d38 143 COSTS_N_INSNS (5), /* MR */
144 COSTS_N_INSNS (4), /* MS */
145 COSTS_N_INSNS (15), /* MSG */
146 COSTS_N_INSNS (7), /* MSGF */
147 COSTS_N_INSNS (7), /* MSGFR */
148 COSTS_N_INSNS (10), /* MSGR */
149 COSTS_N_INSNS (4), /* MSR */
150 COSTS_N_INSNS (7), /* multiplication in DFmode */
429f9fdb 151 COSTS_N_INSNS (13), /* MXBR */
152 COSTS_N_INSNS (136), /* SQXBR */
9cd3f3e6 153 COSTS_N_INSNS (44), /* SQDBR */
154 COSTS_N_INSNS (35), /* SQEBR */
d95e38cf 155 COSTS_N_INSNS (18), /* MADBR */
156 COSTS_N_INSNS (13), /* MAEBR */
429f9fdb 157 COSTS_N_INSNS (134), /* DXBR */
260075cc 158 COSTS_N_INSNS (30), /* DDBR */
260075cc 159 COSTS_N_INSNS (27), /* DEBR */
3f074425 160 COSTS_N_INSNS (220), /* DLGR */
161 COSTS_N_INSNS (34), /* DLR */
162 COSTS_N_INSNS (34), /* DR */
163 COSTS_N_INSNS (32), /* DSGFR */
164 COSTS_N_INSNS (32), /* DSGR */
18925d38 165};
166
167static const
ffead1ca 168struct processor_costs z990_cost =
18925d38 169{
170 COSTS_N_INSNS (4), /* M */
171 COSTS_N_INSNS (2), /* MGHI */
172 COSTS_N_INSNS (2), /* MH */
173 COSTS_N_INSNS (2), /* MHI */
9cd3f3e6 174 COSTS_N_INSNS (4), /* ML */
18925d38 175 COSTS_N_INSNS (4), /* MR */
176 COSTS_N_INSNS (5), /* MS */
177 COSTS_N_INSNS (6), /* MSG */
178 COSTS_N_INSNS (4), /* MSGF */
179 COSTS_N_INSNS (4), /* MSGFR */
180 COSTS_N_INSNS (4), /* MSGR */
181 COSTS_N_INSNS (4), /* MSR */
182 COSTS_N_INSNS (1), /* multiplication in DFmode */
429f9fdb 183 COSTS_N_INSNS (28), /* MXBR */
184 COSTS_N_INSNS (130), /* SQXBR */
9cd3f3e6 185 COSTS_N_INSNS (66), /* SQDBR */
186 COSTS_N_INSNS (38), /* SQEBR */
d95e38cf 187 COSTS_N_INSNS (1), /* MADBR */
188 COSTS_N_INSNS (1), /* MAEBR */
429f9fdb 189 COSTS_N_INSNS (60), /* DXBR */
260075cc 190 COSTS_N_INSNS (40), /* DDBR */
095798e3 191 COSTS_N_INSNS (26), /* DEBR */
3f074425 192 COSTS_N_INSNS (176), /* DLGR */
193 COSTS_N_INSNS (31), /* DLR */
194 COSTS_N_INSNS (31), /* DR */
195 COSTS_N_INSNS (31), /* DSGFR */
196 COSTS_N_INSNS (31), /* DSGR */
18925d38 197};
198
163277cf 199static const
ffead1ca 200struct processor_costs z9_109_cost =
163277cf 201{
202 COSTS_N_INSNS (4), /* M */
203 COSTS_N_INSNS (2), /* MGHI */
204 COSTS_N_INSNS (2), /* MH */
205 COSTS_N_INSNS (2), /* MHI */
206 COSTS_N_INSNS (4), /* ML */
207 COSTS_N_INSNS (4), /* MR */
208 COSTS_N_INSNS (5), /* MS */
209 COSTS_N_INSNS (6), /* MSG */
210 COSTS_N_INSNS (4), /* MSGF */
211 COSTS_N_INSNS (4), /* MSGFR */
212 COSTS_N_INSNS (4), /* MSGR */
213 COSTS_N_INSNS (4), /* MSR */
214 COSTS_N_INSNS (1), /* multiplication in DFmode */
429f9fdb 215 COSTS_N_INSNS (28), /* MXBR */
216 COSTS_N_INSNS (130), /* SQXBR */
163277cf 217 COSTS_N_INSNS (66), /* SQDBR */
218 COSTS_N_INSNS (38), /* SQEBR */
219 COSTS_N_INSNS (1), /* MADBR */
220 COSTS_N_INSNS (1), /* MAEBR */
429f9fdb 221 COSTS_N_INSNS (60), /* DXBR */
163277cf 222 COSTS_N_INSNS (40), /* DDBR */
095798e3 223 COSTS_N_INSNS (26), /* DEBR */
163277cf 224 COSTS_N_INSNS (30), /* DLGR */
225 COSTS_N_INSNS (23), /* DLR */
226 COSTS_N_INSNS (23), /* DR */
227 COSTS_N_INSNS (24), /* DSGFR */
228 COSTS_N_INSNS (24), /* DSGR */
229};
18925d38 230
a850370e 231static const
232struct processor_costs z10_cost =
233{
510c2327 234 COSTS_N_INSNS (10), /* M */
235 COSTS_N_INSNS (10), /* MGHI */
236 COSTS_N_INSNS (10), /* MH */
237 COSTS_N_INSNS (10), /* MHI */
238 COSTS_N_INSNS (10), /* ML */
239 COSTS_N_INSNS (10), /* MR */
240 COSTS_N_INSNS (10), /* MS */
241 COSTS_N_INSNS (10), /* MSG */
242 COSTS_N_INSNS (10), /* MSGF */
243 COSTS_N_INSNS (10), /* MSGFR */
244 COSTS_N_INSNS (10), /* MSGR */
245 COSTS_N_INSNS (10), /* MSR */
b0eacf26 246 COSTS_N_INSNS (1) , /* multiplication in DFmode */
510c2327 247 COSTS_N_INSNS (50), /* MXBR */
248 COSTS_N_INSNS (120), /* SQXBR */
249 COSTS_N_INSNS (52), /* SQDBR */
a850370e 250 COSTS_N_INSNS (38), /* SQEBR */
b0eacf26 251 COSTS_N_INSNS (1), /* MADBR */
252 COSTS_N_INSNS (1), /* MAEBR */
510c2327 253 COSTS_N_INSNS (111), /* DXBR */
254 COSTS_N_INSNS (39), /* DDBR */
255 COSTS_N_INSNS (32), /* DEBR */
256 COSTS_N_INSNS (160), /* DLGR */
257 COSTS_N_INSNS (71), /* DLR */
258 COSTS_N_INSNS (71), /* DR */
259 COSTS_N_INSNS (71), /* DSGFR */
260 COSTS_N_INSNS (71), /* DSGR */
a850370e 261};
262
33d033da 263static const
264struct processor_costs z196_cost =
265{
266 COSTS_N_INSNS (7), /* M */
267 COSTS_N_INSNS (5), /* MGHI */
268 COSTS_N_INSNS (5), /* MH */
269 COSTS_N_INSNS (5), /* MHI */
270 COSTS_N_INSNS (7), /* ML */
271 COSTS_N_INSNS (7), /* MR */
272 COSTS_N_INSNS (6), /* MS */
273 COSTS_N_INSNS (8), /* MSG */
274 COSTS_N_INSNS (6), /* MSGF */
275 COSTS_N_INSNS (6), /* MSGFR */
276 COSTS_N_INSNS (8), /* MSGR */
277 COSTS_N_INSNS (6), /* MSR */
278 COSTS_N_INSNS (1) , /* multiplication in DFmode */
279 COSTS_N_INSNS (40), /* MXBR B+40 */
280 COSTS_N_INSNS (100), /* SQXBR B+100 */
281 COSTS_N_INSNS (42), /* SQDBR B+42 */
282 COSTS_N_INSNS (28), /* SQEBR B+28 */
283 COSTS_N_INSNS (1), /* MADBR B */
284 COSTS_N_INSNS (1), /* MAEBR B */
285 COSTS_N_INSNS (101), /* DXBR B+101 */
286 COSTS_N_INSNS (29), /* DDBR */
287 COSTS_N_INSNS (22), /* DEBR */
288 COSTS_N_INSNS (160), /* DLGR cracked */
289 COSTS_N_INSNS (160), /* DLR cracked */
290 COSTS_N_INSNS (160), /* DR expanded */
291 COSTS_N_INSNS (160), /* DSGFR cracked */
292 COSTS_N_INSNS (160), /* DSGR cracked */
293};
294
81769881 295static const
296struct processor_costs zEC12_cost =
297{
298 COSTS_N_INSNS (7), /* M */
299 COSTS_N_INSNS (5), /* MGHI */
300 COSTS_N_INSNS (5), /* MH */
301 COSTS_N_INSNS (5), /* MHI */
302 COSTS_N_INSNS (7), /* ML */
303 COSTS_N_INSNS (7), /* MR */
304 COSTS_N_INSNS (6), /* MS */
305 COSTS_N_INSNS (8), /* MSG */
306 COSTS_N_INSNS (6), /* MSGF */
307 COSTS_N_INSNS (6), /* MSGFR */
308 COSTS_N_INSNS (8), /* MSGR */
309 COSTS_N_INSNS (6), /* MSR */
310 COSTS_N_INSNS (1) , /* multiplication in DFmode */
311 COSTS_N_INSNS (40), /* MXBR B+40 */
312 COSTS_N_INSNS (100), /* SQXBR B+100 */
313 COSTS_N_INSNS (42), /* SQDBR B+42 */
314 COSTS_N_INSNS (28), /* SQEBR B+28 */
315 COSTS_N_INSNS (1), /* MADBR B */
316 COSTS_N_INSNS (1), /* MAEBR B */
317 COSTS_N_INSNS (131), /* DXBR B+131 */
318 COSTS_N_INSNS (29), /* DDBR */
319 COSTS_N_INSNS (22), /* DEBR */
320 COSTS_N_INSNS (160), /* DLGR cracked */
321 COSTS_N_INSNS (160), /* DLR cracked */
322 COSTS_N_INSNS (160), /* DR expanded */
323 COSTS_N_INSNS (160), /* DSGFR cracked */
324 COSTS_N_INSNS (160), /* DSGR cracked */
325};
326
7a0cee35 327static struct
328{
a168a775 329 /* The preferred name to be used in user visible output. */
7a0cee35 330 const char *const name;
a168a775 331 /* CPU name as it should be passed to Binutils via .machine */
332 const char *const binutils_name;
7a0cee35 333 const enum processor_type processor;
334 const struct processor_costs *cost;
335}
336const processor_table[] =
337{
a168a775 338 { "g5", "g5", PROCESSOR_9672_G5, &z900_cost },
339 { "g6", "g6", PROCESSOR_9672_G6, &z900_cost },
340 { "z900", "z900", PROCESSOR_2064_Z900, &z900_cost },
341 { "z990", "z990", PROCESSOR_2084_Z990, &z990_cost },
342 { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
343 { "z9-ec", "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
344 { "z10", "z10", PROCESSOR_2097_Z10, &z10_cost },
345 { "z196", "z196", PROCESSOR_2817_Z196, &z196_cost },
346 { "zEC12", "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
347 { "z13", "z13", PROCESSOR_2964_Z13, &zEC12_cost },
348 { "z14", "arch12", PROCESSOR_3906_Z14, &zEC12_cost },
349 { "native", "", PROCESSOR_NATIVE, NULL }
7a0cee35 350};
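/* Editor's note (illustration, not part of the original source): the
   second column of the table above is the spelling handed to the
   assembler via the .machine directive, so it can differ from the
   user-visible -march= name.  Given the rows above, -march=z14 would be
   announced to binutils as arch12 while -march=z13 keeps the name z13;
   the "native" row has an empty binutils name, presumably because
   -march=native is resolved to a concrete CPU elsewhere.  */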
351
4673c1a0 352extern int reload_completed;
353
8a2a84e3 354/* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
93e0956b 355static rtx_insn *last_scheduled_insn;
0cb69051 356#define MAX_SCHED_UNITS 3
357static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
358
9f3ec181 359#define NUM_SIDES 2
360static int current_side = 1;
361#define LONGRUNNING_THRESHOLD 5
362
 363/* Estimate of the number of cycles a long-running insn occupies an
364 execution unit. */
365static unsigned fxu_longrunning[NUM_SIDES];
366static unsigned vfu_longrunning[NUM_SIDES];
367
368/* Factor to scale latencies by, determined by measurements. */
369#define LATENCY_FACTOR 4
370
0cb69051 371/* The maximum score added for an instruction whose unit hasn't been
372 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
373 give instruction mix scheduling more priority over instruction
374 grouping. */
375#define MAX_SCHED_MIX_SCORE 8
376
377/* The maximum distance up to which individual scores will be
378 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
 379 Increase this with the OOO window size of the machine. */
380#define MAX_SCHED_MIX_DISTANCE 100
8a2a84e3 381
56769981 382/* Structure used to hold the components of an S/390 memory
383 address. A legitimate address on S/390 is of the general
384 form
385 base + index + displacement
386 where any of the components is optional.
387
388 base and index are registers of the class ADDR_REGS,
389 displacement is an unsigned 12-bit immediate constant. */
4673c1a0 390
391struct s390_address
392{
393 rtx base;
394 rtx indx;
395 rtx disp;
e5537457 396 bool pointer;
05b58257 397 bool literal_pool;
4673c1a0 398};
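/* Editor's illustration (an assumption, not part of the original
   source): for a memory reference such as

       (mem:SI (plus:SI (plus:SI (reg:SI %r2) (reg:SI %r3))
                        (const_int 40)))

   the decomposition described above would yield, say, base = %r2,
   indx = %r3 and disp = (const_int 40); a component that is absent from
   the address is presumably left as NULL_RTX, and the unsigned 12-bit
   displacement restricts disp to the range 0..4095.  */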
399
ffead1ca 400/* The following structure is embedded in the machine
67928721 401 specific part of struct function. */
402
fb1e4f4a 403struct GTY (()) s390_frame_layout
67928721 404{
405 /* Offset within stack frame. */
406 HOST_WIDE_INT gprs_offset;
407 HOST_WIDE_INT f0_offset;
408 HOST_WIDE_INT f4_offset;
409 HOST_WIDE_INT f8_offset;
410 HOST_WIDE_INT backchain_offset;
5214e6ae 411
 412 /* Numbers of the first and last GPRs for which slots in the
 413 register save area are reserved. */
414 int first_save_gpr_slot;
415 int last_save_gpr_slot;
416
ff4ce128 417 /* Location where each GPR (r0-r15) should be saved to:
 418 0 - does not need to be saved at all
 419 -1 - stack slot
 420 otherwise - number of the FPR to save the GPR to. */
1d3cea74 421#define SAVE_SLOT_NONE 0
422#define SAVE_SLOT_STACK -1
ff4ce128 423 signed char gpr_save_slots[16];
424
5a5e802f 425 /* Number of first and last gpr to be saved, restored. */
8b4a4127 426 int first_save_gpr;
427 int first_restore_gpr;
428 int last_save_gpr;
beee1f75 429 int last_restore_gpr;
8b4a4127 430
ffead1ca 431 /* Bits standing for floating point registers. Set if the
432 respective register has to be saved. Starting with reg 16 (f0)
67928721 433 at the rightmost bit.
6a2469fe 434 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
435 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
436 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
67928721 437 unsigned int fpr_bitmap;
438
439 /* Number of floating point registers f8-f15 which must be saved. */
440 int high_fprs;
441
9bee2845 442 /* Set if return address needs to be saved.
443 This flag is set by s390_return_addr_rtx if it could not use
 444 the initial value of r14 and therefore depends on r14 being
 445 saved to the stack. */
67928721 446 bool save_return_addr_p;
447
5a5e802f 448 /* Size of stack frame. */
8b4a4127 449 HOST_WIDE_INT frame_size;
67928721 450};
451
452/* Define the structure for the machine field in struct function. */
453
fb1e4f4a 454struct GTY(()) machine_function
67928721 455{
456 struct s390_frame_layout frame_layout;
be00aaa8 457
20074f87 458 /* Literal pool base register. */
459 rtx base_reg;
460
4fed3f99 461 /* True if we may need to perform branch splitting. */
462 bool split_branches_pending_p;
463
1e639cb0 464 bool has_landing_pad_p;
5ada7a14 465
466 /* True if the current function may contain a tbegin clobbering
467 FPRs. */
468 bool tbegin_p;
c6d481f7 469
470 /* For -fsplit-stack support: A stack local which holds a pointer to
471 the stack arguments for a function with a variable number of
472 arguments. This is set at the start of the function and is used
473 to initialize the overflow_arg_area field of the va_list
474 structure. */
475 rtx split_stack_varargs_pointer;
8b4a4127 476};
477
67928721 478/* Few accessor macros for struct cfun->machine->s390_frame_layout. */
479
480#define cfun_frame_layout (cfun->machine->frame_layout)
481#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
ff4ce128 482#define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
483 ? cfun_frame_layout.fpr_bitmap & 0x0f \
484 : cfun_frame_layout.fpr_bitmap & 0x03))
485#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
b5fdc416 486 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
29439367 487#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
6a2469fe 488 (1 << (REGNO - FPR0_REGNUM)))
29439367 489#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
6a2469fe 490 (1 << (REGNO - FPR0_REGNUM))))
ff4ce128 491#define cfun_gpr_save_slot(REGNO) \
492 cfun->machine->frame_layout.gpr_save_slots[REGNO]
67928721 493
6902d973 494/* Number of GPRs and FPRs used for argument passing. */
495#define GP_ARG_NUM_REG 5
496#define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
76a4c804 497#define VEC_ARG_NUM_REG 8
6902d973 498
cb888f33 499/* A couple of shortcuts. */
500#define CONST_OK_FOR_J(x) \
501 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
502#define CONST_OK_FOR_K(x) \
503 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
163277cf 504#define CONST_OK_FOR_Os(x) \
505 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
506#define CONST_OK_FOR_Op(x) \
507 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
508#define CONST_OK_FOR_On(x) \
509 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
cb888f33 510
8f1128bb 511#define REGNO_PAIR_OK(REGNO, MODE) \
74f68e49 512 (s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
8f1128bb 513
73df8a45 514/* That's the read ahead of the dynamic branch prediction unit in
33d033da 515 bytes on a z10 (or higher) CPU. */
516#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
73df8a45 517
07f32359 518
6b7cfb9c 519/* Indicate which ABI has been used for passing vector args.
520 0 - no vector type arguments have been passed where the ABI is relevant
521 1 - the old ABI has been used
522 2 - a vector type argument has been passed either in a vector register
523 or on the stack by value */
524static int s390_vector_abi = 0;
525
526/* Set the vector ABI marker if TYPE is subject to the vector ABI
527 switch. The vector ABI affects only vector data types. There are
528 two aspects of the vector ABI relevant here:
529
530 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
531 ABI and natural alignment with the old.
532
 533 2. vectors <= 16 bytes are passed in VRs or by value on the stack
534 with the new ABI but by reference on the stack with the old.
535
536 If ARG_P is true TYPE is used for a function argument or return
537 value. The ABI marker then is set for all vector data types. If
538 ARG_P is false only type 1 vectors are being checked. */
539
540static void
541s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
542{
543 static hash_set<const_tree> visited_types_hash;
544
545 if (s390_vector_abi)
546 return;
547
548 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
549 return;
550
551 if (visited_types_hash.contains (type))
552 return;
553
554 visited_types_hash.add (type);
555
556 if (VECTOR_TYPE_P (type))
557 {
558 int type_size = int_size_in_bytes (type);
559
560 /* Outside arguments only the alignment is changing and this
561 only happens for vector types >= 16 bytes. */
562 if (!arg_p && type_size < 16)
563 return;
564
 565 /* In arguments vector types > 16 bytes are passed as before (GCC
566 never enforced the bigger alignment for arguments which was
567 required by the old vector ABI). However, it might still be
568 ABI relevant due to the changed alignment if it is a struct
569 member. */
570 if (arg_p && type_size > 16 && !in_struct_p)
571 return;
572
573 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
574 }
575 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
576 {
577 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
578 natural alignment there will never be ABI dependent padding
579 in an array type. That's why we do not set in_struct_p to
580 true here. */
581 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
582 }
583 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
584 {
585 tree arg_chain;
586
587 /* Check the return type. */
588 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
589
590 for (arg_chain = TYPE_ARG_TYPES (type);
591 arg_chain;
592 arg_chain = TREE_CHAIN (arg_chain))
593 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
594 }
595 else if (RECORD_OR_UNION_TYPE_P (type))
596 {
597 tree field;
598
599 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
600 {
601 if (TREE_CODE (field) != FIELD_DECL)
602 continue;
603
604 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
605 }
606 }
607}
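/* Editor's illustration (an assumption, not part of the original
   source): a function such as

       typedef double v2df __attribute__ ((vector_size (16)));
       extern void bar (v2df);
       void foo (v2df x) { bar (x); }

   passes a 16-byte vector as an argument, so the check above sets the
   marker: to 2 when the new vector ABI (TARGET_VX_ABI) is in effect,
   where the value travels in a vector register or by value on the
   stack, and to 1 for the old ABI, where it would have been passed by
   reference on the stack.  */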
608
609
07f32359 610/* System z builtins. */
611
612#include "s390-builtins.h"
613
a8aefbef 614const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
07f32359 615 {
616#undef B_DEF
617#undef OB_DEF
618#undef OB_DEF_VAR
a8aefbef 619#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
07f32359 620#define OB_DEF(...)
621#define OB_DEF_VAR(...)
622#include "s390-builtins.def"
623 0
624 };
625
a8aefbef 626const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
627 {
628#undef B_DEF
629#undef OB_DEF
630#undef OB_DEF_VAR
631#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
632#define OB_DEF(...)
633#define OB_DEF_VAR(...)
634#include "s390-builtins.def"
635 0
636 };
637
638const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
639 {
640#undef B_DEF
641#undef OB_DEF
642#undef OB_DEF_VAR
643#define B_DEF(...)
644#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
645#define OB_DEF_VAR(...)
646#include "s390-builtins.def"
647 0
648 };
649
063ed3cf 650const unsigned int
651bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
652 {
653#undef B_DEF
654#undef OB_DEF
655#undef OB_DEF_VAR
656#define B_DEF(...)
657#define OB_DEF(...)
658#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
659#include "s390-builtins.def"
660 0
661 };
662
a8aefbef 663const unsigned int
664opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
07f32359 665 {
666#undef B_DEF
667#undef OB_DEF
668#undef OB_DEF_VAR
669#define B_DEF(...)
670#define OB_DEF(...)
063ed3cf 671#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
07f32359 672#include "s390-builtins.def"
673 0
674 };
675
676tree s390_builtin_types[BT_MAX];
677tree s390_builtin_fn_types[BT_FN_MAX];
678tree s390_builtin_decls[S390_BUILTIN_MAX +
679 S390_OVERLOADED_BUILTIN_MAX +
680 S390_OVERLOADED_BUILTIN_VAR_MAX];
681
682static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
683#undef B_DEF
684#undef OB_DEF
685#undef OB_DEF_VAR
686#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
687#define OB_DEF(...)
688#define OB_DEF_VAR(...)
689
690#include "s390-builtins.def"
691 CODE_FOR_nothing
692};
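/* Editor's sketch of the X-macro scheme above (every name in the entry
   below is hypothetical, not taken from s390-builtins.def): a line like

       B_DEF (s390_example, example_pattern, 0, B_VX, O1_U4, BT_FN_INT_INT)

   is expanded several times with different B_DEF definitions: the
   bflags_builtin table keeps only the fourth argument (B_VX), the
   opflags_builtin table keeps only the fifth (O1_U4), and
   code_for_builtin turns the pattern name into
   CODE_FOR_example_pattern, so all of the tables stay in sync from a
   single .def entry.  */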
693
694static void
695s390_init_builtins (void)
696{
697 /* These definitions are being used in s390-builtins.def. */
698 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
699 NULL, NULL);
700 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
701 tree c_uint64_type_node;
702
 703 /* The uint64_type_node from tree.c is not compatible with the C99
704 uint64_t data type. What we want is c_uint64_type_node from
705 c-common.c. But since backend code is not supposed to interface
706 with the frontend we recreate it here. */
707 if (TARGET_64BIT)
708 c_uint64_type_node = long_unsigned_type_node;
709 else
710 c_uint64_type_node = long_long_unsigned_type_node;
711
712#undef DEF_TYPE
f9378734 713#define DEF_TYPE(INDEX, NODE, CONST_P) \
7a0cee35 714 if (s390_builtin_types[INDEX] == NULL) \
a8aefbef 715 s390_builtin_types[INDEX] = (!CONST_P) ? \
716 (NODE) : build_type_variant ((NODE), 1, 0);
07f32359 717
718#undef DEF_POINTER_TYPE
f9378734 719#define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
7a0cee35 720 if (s390_builtin_types[INDEX] == NULL) \
a8aefbef 721 s390_builtin_types[INDEX] = \
722 build_pointer_type (s390_builtin_types[INDEX_BASE]);
07f32359 723
724#undef DEF_DISTINCT_TYPE
f9378734 725#define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
7a0cee35 726 if (s390_builtin_types[INDEX] == NULL) \
a8aefbef 727 s390_builtin_types[INDEX] = \
728 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
07f32359 729
730#undef DEF_VECTOR_TYPE
f9378734 731#define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
7a0cee35 732 if (s390_builtin_types[INDEX] == NULL) \
a8aefbef 733 s390_builtin_types[INDEX] = \
734 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
07f32359 735
736#undef DEF_OPAQUE_VECTOR_TYPE
f9378734 737#define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
7a0cee35 738 if (s390_builtin_types[INDEX] == NULL) \
a8aefbef 739 s390_builtin_types[INDEX] = \
740 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
07f32359 741
742#undef DEF_FN_TYPE
f9378734 743#define DEF_FN_TYPE(INDEX, args...) \
7a0cee35 744 if (s390_builtin_fn_types[INDEX] == NULL) \
a8aefbef 745 s390_builtin_fn_types[INDEX] = \
7a0cee35 746 build_function_type_list (args, NULL_TREE);
07f32359 747#undef DEF_OV_TYPE
748#define DEF_OV_TYPE(...)
749#include "s390-builtin-types.def"
750
751#undef B_DEF
a8aefbef 752#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
7a0cee35 753 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
a8aefbef 754 s390_builtin_decls[S390_BUILTIN_##NAME] = \
755 add_builtin_function ("__builtin_" #NAME, \
756 s390_builtin_fn_types[FNTYPE], \
757 S390_BUILTIN_##NAME, \
758 BUILT_IN_MD, \
759 NULL, \
760 ATTRS);
07f32359 761#undef OB_DEF
a8aefbef 762#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
7a0cee35 763 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
764 == NULL) \
a8aefbef 765 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
766 add_builtin_function ("__builtin_" #NAME, \
767 s390_builtin_fn_types[FNTYPE], \
768 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
769 BUILT_IN_MD, \
770 NULL, \
771 0);
07f32359 772#undef OB_DEF_VAR
773#define OB_DEF_VAR(...)
774#include "s390-builtins.def"
775
776}
777
778/* Return true if ARG is appropriate as argument number ARGNUM of
779 builtin DECL. The operand flags from s390-builtins.def have to
 780 be passed as OP_FLAGS. */
781bool
782s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
783{
784 if (O_UIMM_P (op_flags))
785 {
786 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
787 int bitwidth = bitwidths[op_flags - O_U1];
788
789 if (!tree_fits_uhwi_p (arg)
b422d8c0 790 || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
07f32359 791 {
792 error("constant argument %d for builtin %qF is out of range (0.."
793 HOST_WIDE_INT_PRINT_UNSIGNED ")",
794 argnum, decl,
b422d8c0 795 (HOST_WIDE_INT_1U << bitwidth) - 1);
07f32359 796 return false;
797 }
798 }
799
800 if (O_SIMM_P (op_flags))
801 {
802 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
803 int bitwidth = bitwidths[op_flags - O_S2];
804
805 if (!tree_fits_shwi_p (arg)
b422d8c0 806 || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
807 || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
07f32359 808 {
809 error("constant argument %d for builtin %qF is out of range ("
810 HOST_WIDE_INT_PRINT_DEC ".."
811 HOST_WIDE_INT_PRINT_DEC ")",
812 argnum, decl,
b422d8c0 813 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
814 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
07f32359 815 return false;
816 }
817 }
818 return true;
819}
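/* Editor's illustration (derived from the checks above, not part of the
   original source): an operand flagged as a 4-bit unsigned immediate
   (bitwidth 4 in the first table) must satisfy 0 <= arg <= 15, and an
   8-bit signed immediate (bitwidth 8 in the second table) must satisfy
   -128 <= arg <= 127; anything else triggers the "out of range"
   diagnostic, and the caller in s390_expand_builtin then returns
   const0_rtx.  */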
820
821/* Expand an expression EXP that calls a built-in function,
822 with result going to TARGET if that's convenient
823 (and in mode MODE if that's convenient).
824 SUBTARGET may be used as the target for computing one of EXP's operands.
825 IGNORE is nonzero if the value is to be ignored. */
826
827static rtx
828s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
829 machine_mode mode ATTRIBUTE_UNUSED,
830 int ignore ATTRIBUTE_UNUSED)
831{
674b3578 832#define MAX_ARGS 6
07f32359 833
834 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
835 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
836 enum insn_code icode;
837 rtx op[MAX_ARGS], pat;
838 int arity;
839 bool nonvoid;
840 tree arg;
841 call_expr_arg_iterator iter;
a8aefbef 842 unsigned int all_op_flags = opflags_for_builtin (fcode);
07f32359 843 machine_mode last_vec_mode = VOIDmode;
844
845 if (TARGET_DEBUG_ARG)
846 {
847 fprintf (stderr,
7a0cee35 848 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
849 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
850 bflags_for_builtin (fcode));
07f32359 851 }
852
7a0cee35 853 if (S390_USE_TARGET_ATTRIBUTE)
854 {
855 unsigned int bflags;
856
857 bflags = bflags_for_builtin (fcode);
858 if ((bflags & B_HTM) && !TARGET_HTM)
859 {
19abb0ad 860 error ("builtin %qF is not supported without -mhtm "
7a0cee35 861 "(default with -march=zEC12 and higher).", fndecl);
862 return const0_rtx;
863 }
c9213ca0 864 if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
7a0cee35 865 {
063ed3cf 866 error ("builtin %qF requires -mvx "
7a0cee35 867 "(default with -march=z13 and higher).", fndecl);
868 return const0_rtx;
869 }
c9213ca0 870
871 if ((bflags & B_VXE) && !TARGET_VXE)
872 {
a168a775 873 error ("Builtin %qF requires z14 or higher.", fndecl);
c9213ca0 874 return const0_rtx;
875 }
7a0cee35 876 }
07f32359 877 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
878 && fcode < S390_ALL_BUILTIN_MAX)
879 {
880 gcc_unreachable ();
881 }
882 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
883 {
884 icode = code_for_builtin[fcode];
885 /* Set a flag in the machine specific cfun part in order to support
886 saving/restoring of FPRs. */
887 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
888 cfun->machine->tbegin_p = true;
889 }
890 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
891 {
19abb0ad 892 error ("unresolved overloaded builtin");
07f32359 893 return const0_rtx;
894 }
895 else
896 internal_error ("bad builtin fcode");
897
898 if (icode == 0)
899 internal_error ("bad builtin icode");
900
901 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
902
903 if (nonvoid)
904 {
905 machine_mode tmode = insn_data[icode].operand[0].mode;
906 if (!target
907 || GET_MODE (target) != tmode
908 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
909 target = gen_reg_rtx (tmode);
910
911 /* There are builtins (e.g. vec_promote) with no vector
912 arguments but an element selector. So we have to also look
913 at the vector return type when emitting the modulo
914 operation. */
915 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
916 last_vec_mode = insn_data[icode].operand[0].mode;
917 }
918
919 arity = 0;
920 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
921 {
b0c401ca 922 rtx tmp_rtx;
07f32359 923 const struct insn_operand_data *insn_op;
924 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
925
926 all_op_flags = all_op_flags >> O_SHIFT;
927
928 if (arg == error_mark_node)
929 return NULL_RTX;
930 if (arity >= MAX_ARGS)
931 return NULL_RTX;
932
933 if (O_IMM_P (op_flags)
934 && TREE_CODE (arg) != INTEGER_CST)
935 {
936 error ("constant value required for builtin %qF argument %d",
937 fndecl, arity + 1);
938 return const0_rtx;
939 }
940
941 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
942 return const0_rtx;
943
944 insn_op = &insn_data[icode].operand[arity + nonvoid];
945 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
946
0570ddf1 947 /* expand_expr truncates constants to the target mode only if it
948 is "convenient". However, our checks below rely on this
949 being done. */
950 if (CONST_INT_P (op[arity])
951 && SCALAR_INT_MODE_P (insn_op->mode)
952 && GET_MODE (op[arity]) != insn_op->mode)
953 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
954 insn_op->mode));
955
07f32359 956 /* Wrap the expanded RTX for pointer types into a MEM expr with
957 the proper mode. This allows us to use e.g. (match_operand
958 "memory_operand"..) in the insn patterns instead of (mem
959 (match_operand "address_operand)). This is helpful for
960 patterns not just accepting MEMs. */
961 if (POINTER_TYPE_P (TREE_TYPE (arg))
962 && insn_op->predicate != address_operand)
963 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
964
 965 /* Expand the modulo operation required on element selectors. */
966 if (op_flags == O_ELEM)
967 {
968 gcc_assert (last_vec_mode != VOIDmode);
969 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
970 op[arity],
971 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
972 NULL_RTX, 1, OPTAB_DIRECT);
973 }
974
975 /* Record the vector mode used for an element selector. This assumes:
976 1. There is no builtin with two different vector modes and an element selector
977 2. The element selector comes after the vector type it is referring to.
 978 This is currently true for all the builtins, but FIXME: we
 979 should check for that explicitly. */
980 if (VECTOR_MODE_P (insn_op->mode))
981 last_vec_mode = insn_op->mode;
982
983 if (insn_op->predicate (op[arity], insn_op->mode))
984 {
985 arity++;
986 continue;
987 }
988
989 if (MEM_P (op[arity])
990 && insn_op->predicate == memory_operand
991 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
992 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
993 {
994 op[arity] = replace_equiv_address (op[arity],
995 copy_to_mode_reg (Pmode,
996 XEXP (op[arity], 0)));
997 }
b0c401ca 998 /* Some of the builtins require different modes/types than the
999 pattern in order to implement a specific API. Instead of
1000 adding many expanders which do the mode change we do it here.
 1001 E.g. s390_vec_add_u128, which is required to have vector unsigned
 1002 char arguments, is mapped to addti3. */
1003 else if (insn_op->mode != VOIDmode
1004 && GET_MODE (op[arity]) != VOIDmode
1005 && GET_MODE (op[arity]) != insn_op->mode
1006 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
1007 GET_MODE (op[arity]), 0))
1008 != NULL_RTX))
1009 {
1010 op[arity] = tmp_rtx;
1011 }
07f32359 1012 else if (GET_MODE (op[arity]) == insn_op->mode
1013 || GET_MODE (op[arity]) == VOIDmode
1014 || (insn_op->predicate == address_operand
1015 && GET_MODE (op[arity]) == Pmode))
1016 {
1017 /* An address_operand usually has VOIDmode in the expander
1018 so we cannot use this. */
1019 machine_mode target_mode =
1020 (insn_op->predicate == address_operand
af8303fa 1021 ? (machine_mode) Pmode : insn_op->mode);
07f32359 1022 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
1023 }
1024
1025 if (!insn_op->predicate (op[arity], insn_op->mode))
1026 {
19abb0ad 1027 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
07f32359 1028 return const0_rtx;
1029 }
1030 arity++;
1031 }
1032
07f32359 1033 switch (arity)
1034 {
1035 case 0:
1036 pat = GEN_FCN (icode) (target);
1037 break;
1038 case 1:
1039 if (nonvoid)
1040 pat = GEN_FCN (icode) (target, op[0]);
1041 else
1042 pat = GEN_FCN (icode) (op[0]);
1043 break;
1044 case 2:
1045 if (nonvoid)
1046 pat = GEN_FCN (icode) (target, op[0], op[1]);
1047 else
1048 pat = GEN_FCN (icode) (op[0], op[1]);
1049 break;
1050 case 3:
1051 if (nonvoid)
1052 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1053 else
1054 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1055 break;
1056 case 4:
1057 if (nonvoid)
1058 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1059 else
1060 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1061 break;
1062 case 5:
1063 if (nonvoid)
1064 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1065 else
1066 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1067 break;
1068 case 6:
1069 if (nonvoid)
1070 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1071 else
1072 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1073 break;
1074 default:
1075 gcc_unreachable ();
1076 }
1077 if (!pat)
1078 return NULL_RTX;
1079 emit_insn (pat);
1080
1081 if (nonvoid)
1082 return target;
1083 else
1084 return const0_rtx;
1085}
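/* Editor's illustration (an assumption, not part of the original
   source): the O_ELEM handling above wraps element selectors by ANDing
   them with the element count minus one.  For a V4SI operand, a
   selector of 5 is reduced to 5 & 3 == 1, so something along the lines
   of vec_promote (x, 5) would end up addressing element 1 instead of
   producing an out-of-range access.  */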
1086
1087
11762b83 1088static const int s390_hotpatch_hw_max = 1000000;
1089static int s390_hotpatch_hw_before_label = 0;
1090static int s390_hotpatch_hw_after_label = 0;
77bc9912 1091
1092/* Check whether the hotpatch attribute is applied to a function and, if it has
 1093 arguments, whether the arguments are valid. */
1094
1095static tree
1096s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1097 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1098{
11762b83 1099 tree expr;
1100 tree expr2;
1101 int err;
1102
77bc9912 1103 if (TREE_CODE (*node) != FUNCTION_DECL)
1104 {
1105 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1106 name);
1107 *no_add_attrs = true;
1108 }
11762b83 1109 if (args != NULL && TREE_CHAIN (args) != NULL)
1110 {
1111 expr = TREE_VALUE (args);
1112 expr2 = TREE_VALUE (TREE_CHAIN (args));
1113 }
1114 if (args == NULL || TREE_CHAIN (args) == NULL)
1115 err = 1;
1116 else if (TREE_CODE (expr) != INTEGER_CST
1117 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
e3d0f65c 1118 || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
11762b83 1119 err = 1;
1120 else if (TREE_CODE (expr2) != INTEGER_CST
1121 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
e3d0f65c 1122 || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
11762b83 1123 err = 1;
1124 else
1125 err = 0;
1126 if (err)
77bc9912 1127 {
11762b83 1128 error ("requested %qE attribute is not a comma separated pair of"
1129 " non-negative integer constants or too large (max. %d)", name,
1130 s390_hotpatch_hw_max);
1131 *no_add_attrs = true;
77bc9912 1132 }
1133
1134 return NULL_TREE;
1135}
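/* Editor's usage sketch (an assumption, not part of the original
   source):

       void __attribute__ ((hotpatch (1, 2))) foo (void);

   supplies the comma separated pair of non-negative integer constants
   the handler above checks for; judging by the variables
   s390_hotpatch_hw_before_label and s390_hotpatch_hw_after_label, the
   two values give the number of halfwords reserved before and after
   the function label, each limited to s390_hotpatch_hw_max.  */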
1136
07f32359 1137/* Expand the s390_vector_bool type attribute. */
1138
1139static tree
1140s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1141 tree args ATTRIBUTE_UNUSED,
1142 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1143{
1144 tree type = *node, result = NULL_TREE;
1145 machine_mode mode;
1146
1147 while (POINTER_TYPE_P (type)
1148 || TREE_CODE (type) == FUNCTION_TYPE
1149 || TREE_CODE (type) == METHOD_TYPE
1150 || TREE_CODE (type) == ARRAY_TYPE)
1151 type = TREE_TYPE (type);
1152
1153 mode = TYPE_MODE (type);
1154 switch (mode)
1155 {
916ace94 1156 case E_DImode: case E_V2DImode:
1157 result = s390_builtin_types[BT_BV2DI];
1158 break;
1159 case E_SImode: case E_V4SImode:
1160 result = s390_builtin_types[BT_BV4SI];
1161 break;
1162 case E_HImode: case E_V8HImode:
1163 result = s390_builtin_types[BT_BV8HI];
1164 break;
1165 case E_QImode: case E_V16QImode:
1166 result = s390_builtin_types[BT_BV16QI];
1167 break;
1168 default:
1169 break;
07f32359 1170 }
1171
1172 *no_add_attrs = true; /* No need to hang on to the attribute. */
1173
1174 if (result)
1175 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1176
1177 return NULL_TREE;
1178}
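/* Editor's illustration (an assumption, not part of the original
   source): with the mode switch above, a type along the lines of

       typedef unsigned int bv4si
         __attribute__ ((vector_size (16), s390_vector_bool));

   has V4SImode and is therefore rewritten to the BT_BV4SI boolean
   vector type; for modes not listed in the switch, result stays
   NULL_TREE and the type is kept unchanged apart from dropping the
   attribute.  */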
1179
77bc9912 1180static const struct attribute_spec s390_attribute_table[] = {
672bc44d 1181 { "hotpatch", 2, 2, true, false, false, false,
1182 s390_handle_hotpatch_attribute, NULL },
1183 { "s390_vector_bool", 0, 0, false, true, false, true,
1184 s390_handle_vectorbool_attribute, NULL },
77bc9912 1185 /* End element. */
672bc44d 1186 { NULL, 0, 0, false, false, false, false, NULL, NULL }
77bc9912 1187};
1188
6d0afa28 1189/* Return the alignment for LABEL. We default to the -falign-labels
1190 value except for the literal pool base label. */
1191int
56d95ea5 1192s390_label_align (rtx_insn *label)
6d0afa28 1193{
50fc2d35 1194 rtx_insn *prev_insn = prev_active_insn (label);
1195 rtx set, src;
6d0afa28 1196
1197 if (prev_insn == NULL_RTX)
1198 goto old;
1199
50fc2d35 1200 set = single_set (prev_insn);
6d0afa28 1201
50fc2d35 1202 if (set == NULL_RTX)
6d0afa28 1203 goto old;
1204
50fc2d35 1205 src = SET_SRC (set);
6d0afa28 1206
1207 /* Don't align literal pool base labels. */
50fc2d35 1208 if (GET_CODE (src) == UNSPEC
1209 && XINT (src, 1) == UNSPEC_MAIN_BASE)
6d0afa28 1210 return 0;
1211
1212 old:
1213 return align_labels_log;
1214}
1215
9852c8ae 1216static GTY(()) rtx got_symbol;
1217
1218/* Return the GOT table symbol. The symbol will be created when the
1219 function is invoked for the first time. */
1220
1221static rtx
1222s390_got_symbol (void)
1223{
1224 if (!got_symbol)
1225 {
1226 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
1227 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
1228 }
1229
1230 return got_symbol;
1231}
1232
f77c4496 1233static scalar_int_mode
0ef89dfd 1234s390_libgcc_cmp_return_mode (void)
1235{
1236 return TARGET_64BIT ? DImode : SImode;
1237}
1238
f77c4496 1239static scalar_int_mode
0ef89dfd 1240s390_libgcc_shift_count_mode (void)
1241{
1242 return TARGET_64BIT ? DImode : SImode;
1243}
1244
f77c4496 1245static scalar_int_mode
b5fdc416 1246s390_unwind_word_mode (void)
1247{
1248 return TARGET_64BIT ? DImode : SImode;
1249}
1250
36868490 1251/* Return true if the back end supports mode MODE. */
1252static bool
8aec1ebb 1253s390_scalar_mode_supported_p (scalar_mode mode)
36868490 1254{
b5fdc416 1255 /* In contrast to the default implementation reject TImode constants on 31bit
1256 TARGET_ZARCH for ABI compliance. */
1257 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1258 return false;
1259
36868490 1260 if (DECIMAL_FLOAT_MODE_P (mode))
8e72d11d 1261 return default_decimal_float_supported_p ();
b5fdc416 1262
1263 return default_scalar_mode_supported_p (mode);
36868490 1264}
1265
76a4c804 1266/* Return true if the back end supports vector mode MODE. */
1267static bool
1268s390_vector_mode_supported_p (machine_mode mode)
1269{
1270 machine_mode inner;
1271
1272 if (!VECTOR_MODE_P (mode)
1273 || !TARGET_VX
1274 || GET_MODE_SIZE (mode) > 16)
1275 return false;
1276
1277 inner = GET_MODE_INNER (mode);
1278
1279 switch (inner)
1280 {
916ace94 1281 case E_QImode:
1282 case E_HImode:
1283 case E_SImode:
1284 case E_DImode:
1285 case E_TImode:
1286 case E_SFmode:
1287 case E_DFmode:
1288 case E_TFmode:
76a4c804 1289 return true;
1290 default:
1291 return false;
1292 }
1293}
1294
1e639cb0 1295/* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1296
1297void
1298s390_set_has_landing_pad_p (bool value)
1299{
1300 cfun->machine->has_landing_pad_p = value;
1301}
6902d973 1302
9c93d843 1303/* If two condition code modes are compatible, return a condition code
1304 mode which is compatible with both. Otherwise, return
1305 VOIDmode. */
1306
3754d046 1307static machine_mode
1308s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
9c93d843 1309{
1310 if (m1 == m2)
1311 return m1;
1312
1313 switch (m1)
1314 {
916ace94 1315 case E_CCZmode:
9c93d843 1316 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1317 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1318 return m2;
1319 return VOIDmode;
1320
916ace94 1321 case E_CCSmode:
1322 case E_CCUmode:
1323 case E_CCTmode:
1324 case E_CCSRmode:
1325 case E_CCURmode:
1326 case E_CCZ1mode:
9c93d843 1327 if (m2 == CCZmode)
1328 return m1;
ffead1ca 1329
9c93d843 1330 return VOIDmode;
1331
1332 default:
1333 return VOIDmode;
1334 }
1335 return VOIDmode;
1336}
1337
56769981 1338/* Return true if SET either doesn't set the CC register, or else
f81e845f 1339 the source and destination have matching CC modes and that
56769981 1340 CC mode is at least as constrained as REQ_MODE. */
f81e845f 1341
e5537457 1342static bool
3754d046 1343s390_match_ccmode_set (rtx set, machine_mode req_mode)
4673c1a0 1344{
3754d046 1345 machine_mode set_mode;
4673c1a0 1346
32eda510 1347 gcc_assert (GET_CODE (set) == SET);
4673c1a0 1348
abc57c35 1349 /* These modes are supposed to be used only in CC consumer
1350 patterns. */
1351 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1352 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1353
4673c1a0 1354 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1355 return 1;
1356
1357 set_mode = GET_MODE (SET_DEST (set));
1358 switch (set_mode)
1359 {
916ace94 1360 case E_CCZ1mode:
1361 case E_CCSmode:
1362 case E_CCSRmode:
1363 case E_CCUmode:
1364 case E_CCURmode:
1365 case E_CCLmode:
1366 case E_CCL1mode:
1367 case E_CCL2mode:
1368 case E_CCL3mode:
1369 case E_CCT1mode:
1370 case E_CCT2mode:
1371 case E_CCT3mode:
1372 case E_CCVEQmode:
1373 case E_CCVIHmode:
1374 case E_CCVIHUmode:
1375 case E_CCVFHmode:
1376 case E_CCVFHEmode:
c6821d1c 1377 if (req_mode != set_mode)
2eb8fe23 1378 return 0;
1379 break;
c6821d1c 1380
916ace94 1381 case E_CCZmode:
c6821d1c 1382 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
d90d26d8 1383 && req_mode != CCSRmode && req_mode != CCURmode
1384 && req_mode != CCZ1mode)
4673c1a0 1385 return 0;
1386 break;
3c482144 1387
916ace94 1388 case E_CCAPmode:
1389 case E_CCANmode:
3c482144 1390 if (req_mode != CCAmode)
1391 return 0;
1392 break;
f81e845f 1393
4673c1a0 1394 default:
32eda510 1395 gcc_unreachable ();
4673c1a0 1396 }
f81e845f 1397
4673c1a0 1398 return (GET_MODE (SET_SRC (set)) == set_mode);
1399}
1400
f81e845f 1401/* Return true if every SET in INSN that sets the CC register
1402 has source and destination with matching CC modes and that
1403 CC mode is at least as constrained as REQ_MODE.
c6821d1c 1404 If REQ_MODE is VOIDmode, always return false. */
f81e845f 1405
e5537457 1406bool
3754d046 1407s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
4673c1a0 1408{
1409 int i;
1410
c6821d1c 1411 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1412 if (req_mode == VOIDmode)
e5537457 1413 return false;
c6821d1c 1414
4673c1a0 1415 if (GET_CODE (PATTERN (insn)) == SET)
1416 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1417
1418 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1419 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1420 {
1421 rtx set = XVECEXP (PATTERN (insn), 0, i);
1422 if (GET_CODE (set) == SET)
1423 if (!s390_match_ccmode_set (set, req_mode))
e5537457 1424 return false;
4673c1a0 1425 }
1426
e5537457 1427 return true;
4673c1a0 1428}
1429
f81e845f 1430/* If a test-under-mask instruction can be used to implement
c6821d1c 1431 (compare (and ... OP1) OP2), return the CC mode required
f81e845f 1432 to do that. Otherwise, return VOIDmode.
c6821d1c 1433 MIXED is true if the instruction can distinguish between
1434 CC1 and CC2 for mixed selected bits (TMxx), it is false
1435 if the instruction cannot (TM). */
1436
3754d046 1437machine_mode
e5537457 1438s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
c6821d1c 1439{
1440 int bit0, bit1;
1441
ba0e61d6 1442 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
c6821d1c 1443 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1444 return VOIDmode;
1445
eeba5f25 1446 /* Selected bits all zero: CC0.
1447 e.g.: int a; if ((a & (16 + 128)) == 0) */
c6821d1c 1448 if (INTVAL (op2) == 0)
1449 return CCTmode;
1450
ffead1ca 1451 /* Selected bits all one: CC3.
eeba5f25 1452 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
c6821d1c 1453 if (INTVAL (op2) == INTVAL (op1))
1454 return CCT3mode;
1455
eeba5f25 1456 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1457 int a;
1458 if ((a & (16 + 128)) == 16) -> CCT1
1459 if ((a & (16 + 128)) == 128) -> CCT2 */
c6821d1c 1460 if (mixed)
1461 {
1462 bit1 = exact_log2 (INTVAL (op2));
1463 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1464 if (bit0 != -1 && bit1 != -1)
1465 return bit0 > bit1 ? CCT1mode : CCT2mode;
1466 }
1467
1468 return VOIDmode;
1469}
1470
f81e845f 1471/* Given a comparison code OP (EQ, NE, etc.) and the operands
1472 OP0 and OP1 of a COMPARE, return the mode to be used for the
2eb8fe23 1473 comparison. */
1474
3754d046 1475machine_mode
b40da9a7 1476s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
2eb8fe23 1477{
1478 switch (code)
1479 {
1480 case EQ:
1481 case NE:
9be33ca2 1482 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1483 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1484 return CCAPmode;
3c482144 1485 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
cb888f33 1486 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
3c482144 1487 return CCAPmode;
e9fd5349 1488 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1489 || GET_CODE (op1) == NEG)
1490 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
2eb8fe23 1491 return CCLmode;
1492
c6821d1c 1493 if (GET_CODE (op0) == AND)
1494 {
1495 /* Check whether we can potentially do it via TM. */
3754d046 1496 machine_mode ccmode;
c6821d1c 1497 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1498 if (ccmode != VOIDmode)
1499 {
1500 /* Relax CCTmode to CCZmode to allow fall-back to AND
1501 if that turns out to be beneficial. */
1502 return ccmode == CCTmode ? CCZmode : ccmode;
1503 }
1504 }
1505
f81e845f 1506 if (register_operand (op0, HImode)
c6821d1c 1507 && GET_CODE (op1) == CONST_INT
1508 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1509 return CCT3mode;
f81e845f 1510 if (register_operand (op0, QImode)
c6821d1c 1511 && GET_CODE (op1) == CONST_INT
1512 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1513 return CCT3mode;
1514
2eb8fe23 1515 return CCZmode;
1516
1517 case LE:
1518 case LT:
1519 case GE:
1520 case GT:
eeba5f25 1521 /* The only overflow condition of NEG and ABS happens when
1522 -INT_MAX is used as parameter, which stays negative. So
ffead1ca 1523 we have an overflow from a positive value to a negative.
eeba5f25 1524 Using CCAP mode the resulting cc can be used for comparisons. */
9be33ca2 1525 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1526 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1527 return CCAPmode;
eeba5f25 1528
1529 /* If constants are involved in an add instruction it is possible to use
1530 the resulting cc for comparisons with zero. Knowing the sign of the
0975351b 1531 constant the overflow behavior gets predictable. e.g.:
ffead1ca 1532 int a, b; if ((b = a + c) > 0)
eeba5f25 1533 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
9be33ca2 1534 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
ea14438e 1535 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1536 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1537 /* Avoid INT32_MIN on 32 bit. */
1538 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
9be33ca2 1539 {
1540 if (INTVAL (XEXP((op0), 1)) < 0)
1541 return CCANmode;
1542 else
1543 return CCAPmode;
1544 }
1545 /* Fall through. */
2eb8fe23 1546 case UNORDERED:
1547 case ORDERED:
1548 case UNEQ:
1549 case UNLE:
1550 case UNLT:
1551 case UNGE:
1552 case UNGT:
1553 case LTGT:
c6821d1c 1554 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1555 && GET_CODE (op1) != CONST_INT)
1556 return CCSRmode;
2eb8fe23 1557 return CCSmode;
1558
2eb8fe23 1559 case LTU:
1560 case GEU:
e9fd5349 1561 if (GET_CODE (op0) == PLUS
1562 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
c6821d1c 1563 return CCL1mode;
1564
1565 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1566 && GET_CODE (op1) != CONST_INT)
1567 return CCURmode;
1568 return CCUmode;
1569
1570 case LEU:
2eb8fe23 1571 case GTU:
e9fd5349 1572 if (GET_CODE (op0) == MINUS
1573 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
c6821d1c 1574 return CCL2mode;
1575
1576 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1577 && GET_CODE (op1) != CONST_INT)
1578 return CCURmode;
2eb8fe23 1579 return CCUmode;
1580
1581 default:
32eda510 1582 gcc_unreachable ();
2eb8fe23 1583 }
1584}
1585
ebe32bb0 1586/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1587 that we can implement more efficiently. */
1588
d5065e6e 1589static void
1590s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1591 bool op0_preserve_value)
ebe32bb0 1592{
d5065e6e 1593 if (op0_preserve_value)
1594 return;
1595
ebe32bb0 1596 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1597 if ((*code == EQ || *code == NE)
1598 && *op1 == const0_rtx
1599 && GET_CODE (*op0) == ZERO_EXTRACT
1600 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1601 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1602 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1603 {
1604 rtx inner = XEXP (*op0, 0);
1605 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1606 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1607 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1608
1609 if (len > 0 && len < modesize
1610 && pos >= 0 && pos + len <= modesize
1611 && modesize <= HOST_BITS_PER_WIDE_INT)
1612 {
1613 unsigned HOST_WIDE_INT block;
b422d8c0 1614 block = (HOST_WIDE_INT_1U << len) - 1;
ebe32bb0 1615 block <<= modesize - pos - len;
1616
1617 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1618 gen_int_mode (block, GET_MODE (inner)));
1619 }
1620 }
1621
1622 /* Narrow AND of memory against immediate to enable TM. */
1623 if ((*code == EQ || *code == NE)
1624 && *op1 == const0_rtx
1625 && GET_CODE (*op0) == AND
1626 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1627 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1628 {
1629 rtx inner = XEXP (*op0, 0);
1630 rtx mask = XEXP (*op0, 1);
1631
1632 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1633 if (GET_CODE (inner) == SUBREG
1634 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1635 && (GET_MODE_SIZE (GET_MODE (inner))
1636 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1637 && ((INTVAL (mask)
1638 & GET_MODE_MASK (GET_MODE (inner))
1639 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1640 == 0))
1641 inner = SUBREG_REG (inner);
1642
1643 /* Do not change volatile MEMs. */
1644 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1645 {
1646 int part = s390_single_part (XEXP (*op0, 1),
1647 GET_MODE (inner), QImode, 0);
1648 if (part >= 0)
1649 {
1650 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1651 inner = adjust_address_nv (inner, QImode, part);
1652 *op0 = gen_rtx_AND (QImode, inner, mask);
1653 }
1654 }
1655 }
1656
1657 /* Narrow comparisons against 0xffff to HImode if possible. */
ebe32bb0 1658 if ((*code == EQ || *code == NE)
1659 && GET_CODE (*op1) == CONST_INT
1660 && INTVAL (*op1) == 0xffff
1661 && SCALAR_INT_MODE_P (GET_MODE (*op0))
ffead1ca 1662 && (nonzero_bits (*op0, GET_MODE (*op0))
b422d8c0 1663 & ~HOST_WIDE_INT_UC (0xffff)) == 0)
ebe32bb0 1664 {
1665 *op0 = gen_lowpart (HImode, *op0);
1666 *op1 = constm1_rtx;
1667 }
80b53886 1668
5ada7a14 1669 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
80b53886 1670 if (GET_CODE (*op0) == UNSPEC
5ada7a14 1671 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
80b53886 1672 && XVECLEN (*op0, 0) == 1
1673 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1674 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1675 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1676 && *op1 == const0_rtx)
1677 {
1678 enum rtx_code new_code = UNKNOWN;
1679 switch (*code)
1680 {
1681 case EQ: new_code = EQ; break;
1682 case NE: new_code = NE; break;
dd16a4bd 1683 case LT: new_code = GTU; break;
1684 case GT: new_code = LTU; break;
1685 case LE: new_code = GEU; break;
1686 case GE: new_code = LEU; break;
80b53886 1687 default: break;
1688 }
1689
1690 if (new_code != UNKNOWN)
1691 {
1692 *op0 = XVECEXP (*op0, 0, 0);
1693 *code = new_code;
1694 }
1695 }
9c93d843 1696
5ada7a14 1697 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
27784c70 1698 if (GET_CODE (*op0) == UNSPEC
5ada7a14 1699 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
27784c70 1700 && XVECLEN (*op0, 0) == 1
27784c70 1701 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1702 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
5ada7a14 1703 && CONST_INT_P (*op1))
27784c70 1704 {
1705 enum rtx_code new_code = UNKNOWN;
5ada7a14 1706 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
27784c70 1707 {
916ace94 1708 case E_CCZmode:
1709 case E_CCRAWmode:
5ada7a14 1710 switch (*code)
1711 {
1712 case EQ: new_code = EQ; break;
1713 case NE: new_code = NE; break;
1714 default: break;
1715 }
1716 break;
1717 default: break;
27784c70 1718 }
1719
1720 if (new_code != UNKNOWN)
1721 {
5ada7a14 1722 /* For CCRAWmode put the required cc mask into the second
1723 operand. */
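/* For illustration: a CC number of 0 in *OP1 becomes the mask
   1 << 3 (CC0), a CC number of 3 becomes 1 << 0 (CC3).  */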
91dfd73e 1724 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1725 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
5ada7a14 1726 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
27784c70 1727 *op0 = XVECEXP (*op0, 0, 0);
1728 *code = new_code;
1729 }
1730 }
1731
9c93d843 1732 /* Simplify cascaded EQ, NE with const0_rtx. */
1733 if ((*code == NE || *code == EQ)
1734 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1735 && GET_MODE (*op0) == SImode
1736 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1737 && REG_P (XEXP (*op0, 0))
1738 && XEXP (*op0, 1) == const0_rtx
1739 && *op1 == const0_rtx)
1740 {
1741 if ((*code == EQ && GET_CODE (*op0) == NE)
1742 || (*code == NE && GET_CODE (*op0) == EQ))
1743 *code = EQ;
1744 else
1745 *code = NE;
1746 *op0 = XEXP (*op0, 0);
1747 }
a0631a8a 1748
1749 /* Prefer register over memory as first operand. */
1750 if (MEM_P (*op0) && REG_P (*op1))
1751 {
1752 rtx tem = *op0; *op0 = *op1; *op1 = tem;
d5065e6e 1753 *code = (int)swap_condition ((enum rtx_code)*code);
a0631a8a 1754 }
26233f43 1755
e17ed6ec 1756 /* A comparison result is compared against zero. Replace it with
1757 the (perhaps inverted) original comparison.
1758 This probably should be done by simplify_relational_operation. */
1759 if ((*code == EQ || *code == NE)
1760 && *op1 == const0_rtx
1761 && COMPARISON_P (*op0)
1762 && CC_REG_P (XEXP (*op0, 0)))
1763 {
1764 enum rtx_code new_code;
1765
1766 if (*code == EQ)
1767 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1768 XEXP (*op0, 0),
 1769 XEXP (*op0, 1), NULL);
1770 else
1771 new_code = GET_CODE (*op0);
1772
1773 if (new_code != UNKNOWN)
1774 {
1775 *code = new_code;
1776 *op1 = XEXP (*op0, 1);
1777 *op0 = XEXP (*op0, 0);
1778 }
1779 }
ebe32bb0 1780}
1781
26233f43 1782
0d656e8b 1783/* Emit a compare instruction suitable to implement the comparison
1784 OP0 CODE OP1. Return the correct condition RTL to be placed in
1785 the IF_THEN_ELSE of the conditional branch testing the result. */
1786
1787rtx
1788s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1789{
3754d046 1790 machine_mode mode = s390_select_ccmode (code, op0, op1);
8e58aded 1791 rtx cc;
0d656e8b 1792
29c098f6 1793 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
8e58aded 1794 {
26233f43 1795 /* Do not output a redundant compare instruction if a
1796 compare_and_swap pattern already computed the result and the
1797 machine modes are compatible. */
8e58aded 1798 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1799 == GET_MODE (op0));
1800 cc = op0;
1801 }
891e3096 1802 else
1803 {
8e58aded 1804 cc = gen_rtx_REG (mode, CC_REGNUM);
d1f9b275 1805 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
891e3096 1806 }
8e58aded 1807
ffead1ca 1808 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
0d656e8b 1809}
1810
8deb3959 1811/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
db1f11e3 1812 matches CMP.
1813 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1814 conditional branch testing the result. */
1815
1816static rtx
8c753480 1817s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
d90d26d8 1818 rtx cmp, rtx new_rtx, machine_mode ccmode)
db1f11e3 1819{
d90d26d8 1820 rtx cc;
1821
1822 cc = gen_rtx_REG (ccmode, CC_REGNUM);
1823 switch (GET_MODE (mem))
1824 {
916ace94 1825 case E_SImode:
d90d26d8 1826 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
1827 new_rtx, cc));
1828 break;
916ace94 1829 case E_DImode:
d90d26d8 1830 emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
1831 new_rtx, cc));
1832 break;
916ace94 1833 case E_TImode:
d90d26d8 1834 emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
1835 new_rtx, cc));
1836 break;
916ace94 1837 case E_QImode:
1838 case E_HImode:
d90d26d8 1839 default:
1840 gcc_unreachable ();
1841 }
1842 return s390_emit_compare (code, cc, const0_rtx);
db1f11e3 1843}
1844
5ada7a14 1845/* Emit a jump instruction to TARGET and return it. If COND is
1846 NULL_RTX, emit an unconditional jump, else a conditional jump under
1847 condition COND. */
0d656e8b 1848
93e0956b 1849rtx_insn *
0d656e8b 1850s390_emit_jump (rtx target, rtx cond)
1851{
1852 rtx insn;
1853
1854 target = gen_rtx_LABEL_REF (VOIDmode, target);
1855 if (cond)
1856 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1857
d1f9b275 1858 insn = gen_rtx_SET (pc_rtx, target);
5ada7a14 1859 return emit_jump_insn (insn);
0d656e8b 1860}
1861
f81e845f 1862/* Return branch condition mask to implement a branch
80b53886 1863 specified by CODE. Return -1 for invalid comparisons. */
2eb8fe23 1864
8cc5de33 1865int
b40da9a7 1866s390_branch_condition_mask (rtx code)
f81e845f 1867{
2eb8fe23 1868 const int CC0 = 1 << 3;
1869 const int CC1 = 1 << 2;
1870 const int CC2 = 1 << 1;
1871 const int CC3 = 1 << 0;
1872
32eda510 1873 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1874 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
5ada7a14 1875 gcc_assert (XEXP (code, 1) == const0_rtx
1876 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1877 && CONST_INT_P (XEXP (code, 1))));
1878
2eb8fe23 1879
1880 switch (GET_MODE (XEXP (code, 0)))
1881 {
916ace94 1882 case E_CCZmode:
1883 case E_CCZ1mode:
2eb8fe23 1884 switch (GET_CODE (code))
1885 {
1886 case EQ: return CC0;
1887 case NE: return CC1 | CC2 | CC3;
80b53886 1888 default: return -1;
2eb8fe23 1889 }
1890 break;
1891
916ace94 1892 case E_CCT1mode:
c6821d1c 1893 switch (GET_CODE (code))
1894 {
1895 case EQ: return CC1;
1896 case NE: return CC0 | CC2 | CC3;
80b53886 1897 default: return -1;
c6821d1c 1898 }
1899 break;
1900
916ace94 1901 case E_CCT2mode:
c6821d1c 1902 switch (GET_CODE (code))
1903 {
1904 case EQ: return CC2;
1905 case NE: return CC0 | CC1 | CC3;
80b53886 1906 default: return -1;
c6821d1c 1907 }
1908 break;
1909
916ace94 1910 case E_CCT3mode:
c6821d1c 1911 switch (GET_CODE (code))
1912 {
1913 case EQ: return CC3;
1914 case NE: return CC0 | CC1 | CC2;
80b53886 1915 default: return -1;
c6821d1c 1916 }
1917 break;
1918
916ace94 1919 case E_CCLmode:
2eb8fe23 1920 switch (GET_CODE (code))
1921 {
1922 case EQ: return CC0 | CC2;
1923 case NE: return CC1 | CC3;
80b53886 1924 default: return -1;
c6821d1c 1925 }
1926 break;
1927
916ace94 1928 case E_CCL1mode:
c6821d1c 1929 switch (GET_CODE (code))
1930 {
1931 case LTU: return CC2 | CC3; /* carry */
1932 case GEU: return CC0 | CC1; /* no carry */
80b53886 1933 default: return -1;
c6821d1c 1934 }
1935 break;
1936
916ace94 1937 case E_CCL2mode:
c6821d1c 1938 switch (GET_CODE (code))
1939 {
1940 case GTU: return CC0 | CC1; /* borrow */
1941 case LEU: return CC2 | CC3; /* no borrow */
80b53886 1942 default: return -1;
2eb8fe23 1943 }
1944 break;
1945
916ace94 1946 case E_CCL3mode:
3b699fc7 1947 switch (GET_CODE (code))
1948 {
1949 case EQ: return CC0 | CC2;
1950 case NE: return CC1 | CC3;
1951 case LTU: return CC1;
1952 case GTU: return CC3;
1953 case LEU: return CC1 | CC2;
1954 case GEU: return CC2 | CC3;
80b53886 1955 default: return -1;
3b699fc7 1956 }
1957
916ace94 1958 case E_CCUmode:
2eb8fe23 1959 switch (GET_CODE (code))
1960 {
1961 case EQ: return CC0;
1962 case NE: return CC1 | CC2 | CC3;
1963 case LTU: return CC1;
1964 case GTU: return CC2;
1965 case LEU: return CC0 | CC1;
1966 case GEU: return CC0 | CC2;
80b53886 1967 default: return -1;
2eb8fe23 1968 }
1969 break;
1970
916ace94 1971 case E_CCURmode:
c6821d1c 1972 switch (GET_CODE (code))
1973 {
1974 case EQ: return CC0;
1975 case NE: return CC2 | CC1 | CC3;
1976 case LTU: return CC2;
1977 case GTU: return CC1;
1978 case LEU: return CC0 | CC2;
1979 case GEU: return CC0 | CC1;
80b53886 1980 default: return -1;
c6821d1c 1981 }
1982 break;
1983
916ace94 1984 case E_CCAPmode:
3c482144 1985 switch (GET_CODE (code))
1986 {
1987 case EQ: return CC0;
1988 case NE: return CC1 | CC2 | CC3;
1989 case LT: return CC1 | CC3;
1990 case GT: return CC2;
1991 case LE: return CC0 | CC1 | CC3;
1992 case GE: return CC0 | CC2;
80b53886 1993 default: return -1;
3c482144 1994 }
1995 break;
1996
916ace94 1997 case E_CCANmode:
3c482144 1998 switch (GET_CODE (code))
1999 {
2000 case EQ: return CC0;
2001 case NE: return CC1 | CC2 | CC3;
2002 case LT: return CC1;
2003 case GT: return CC2 | CC3;
2004 case LE: return CC0 | CC1;
2005 case GE: return CC0 | CC2 | CC3;
80b53886 2006 default: return -1;
3c482144 2007 }
2008 break;
2009
916ace94 2010 case E_CCSmode:
2eb8fe23 2011 switch (GET_CODE (code))
2012 {
2013 case EQ: return CC0;
2014 case NE: return CC1 | CC2 | CC3;
2015 case LT: return CC1;
2016 case GT: return CC2;
2017 case LE: return CC0 | CC1;
2018 case GE: return CC0 | CC2;
2019 case UNORDERED: return CC3;
2020 case ORDERED: return CC0 | CC1 | CC2;
2021 case UNEQ: return CC0 | CC3;
2022 case UNLT: return CC1 | CC3;
2023 case UNGT: return CC2 | CC3;
2024 case UNLE: return CC0 | CC1 | CC3;
2025 case UNGE: return CC0 | CC2 | CC3;
2026 case LTGT: return CC1 | CC2;
80b53886 2027 default: return -1;
2eb8fe23 2028 }
c6821d1c 2029 break;
2030
916ace94 2031 case E_CCSRmode:
c6821d1c 2032 switch (GET_CODE (code))
2033 {
2034 case EQ: return CC0;
2035 case NE: return CC2 | CC1 | CC3;
2036 case LT: return CC2;
2037 case GT: return CC1;
2038 case LE: return CC0 | CC2;
2039 case GE: return CC0 | CC1;
2040 case UNORDERED: return CC3;
2041 case ORDERED: return CC0 | CC2 | CC1;
2042 case UNEQ: return CC0 | CC3;
2043 case UNLT: return CC2 | CC3;
2044 case UNGT: return CC1 | CC3;
2045 case UNLE: return CC0 | CC2 | CC3;
2046 case UNGE: return CC0 | CC1 | CC3;
2047 case LTGT: return CC2 | CC1;
80b53886 2048 default: return -1;
c6821d1c 2049 }
2050 break;
2eb8fe23 2051
26233f43 2052 /* Vector comparison modes. */
abc57c35 2053 /* CC2 will never be set.  It is, however, part of the negated
2054 masks. */
916ace94 2055 case E_CCVIALLmode:
26233f43 2056 switch (GET_CODE (code))
2057 {
abc57c35 2058 case EQ:
2059 case GTU:
2060 case GT:
2061 case GE: return CC0;
2062 /* The inverted modes are in fact *any* modes. */
2063 case NE:
2064 case LEU:
2065 case LE:
2066 case LT: return CC3 | CC1 | CC2;
26233f43 2067 default: return -1;
2068 }
07f32359 2069
916ace94 2070 case E_CCVIANYmode:
07f32359 2071 switch (GET_CODE (code))
2072 {
abc57c35 2073 case EQ:
2074 case GTU:
2075 case GT:
2076 case GE: return CC0 | CC1;
2077 /* The inverted modes are in fact *all* modes. */
2078 case NE:
2079 case LEU:
2080 case LE:
2081 case LT: return CC3 | CC2;
07f32359 2082 default: return -1;
2083 }
916ace94 2084 case E_CCVFALLmode:
26233f43 2085 switch (GET_CODE (code))
2086 {
abc57c35 2087 case EQ:
2088 case GT:
26233f43 2089 case GE: return CC0;
abc57c35 2090 /* The inverted modes are in fact *any* modes. */
2091 case NE:
2092 case UNLE:
2093 case UNLT: return CC3 | CC1 | CC2;
26233f43 2094 default: return -1;
2095 }
07f32359 2096
916ace94 2097 case E_CCVFANYmode:
07f32359 2098 switch (GET_CODE (code))
2099 {
abc57c35 2100 case EQ:
2101 case GT:
07f32359 2102 case GE: return CC0 | CC1;
abc57c35 2103 /* The inverted modes are in fact *all* modes. */
2104 case NE:
2105 case UNLE:
2106 case UNLT: return CC3 | CC2;
07f32359 2107 default: return -1;
2108 }
2109
916ace94 2110 case E_CCRAWmode:
5ada7a14 2111 switch (GET_CODE (code))
2112 {
2113 case EQ:
2114 return INTVAL (XEXP (code, 1));
2115 case NE:
2116 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2117 default:
2118 gcc_unreachable ();
2119 }
2120
2eb8fe23 2121 default:
80b53886 2122 return -1;
2eb8fe23 2123 }
2124}
2125
e68d6a13 2126
2127/* Return branch condition mask to implement a compare and branch
2128 specified by CODE. Return -1 for invalid comparisons. */
2129
2130int
2131s390_compare_and_branch_condition_mask (rtx code)
2132{
2133 const int CC0 = 1 << 3;
2134 const int CC1 = 1 << 2;
2135 const int CC2 = 1 << 1;
2136
2137 switch (GET_CODE (code))
2138 {
2139 case EQ:
2140 return CC0;
2141 case NE:
2142 return CC1 | CC2;
2143 case LT:
2144 case LTU:
2145 return CC1;
2146 case GT:
2147 case GTU:
2148 return CC2;
2149 case LE:
2150 case LEU:
2151 return CC0 | CC1;
2152 case GE:
2153 case GEU:
2154 return CC0 | CC2;
2155 default:
2156 gcc_unreachable ();
2157 }
2158 return -1;
2159}
2160
f81e845f 2161/* If INV is false, return assembler mnemonic string to implement
2162 a branch specified by CODE. If INV is true, return mnemonic
2eb8fe23 2163 for the corresponding inverted branch. */
2164
2165static const char *
b40da9a7 2166s390_branch_condition_mnemonic (rtx code, int inv)
2eb8fe23 2167{
e68d6a13 2168 int mask;
2169
c8834c5f 2170 static const char *const mnemonic[16] =
2eb8fe23 2171 {
2172 NULL, "o", "h", "nle",
2173 "l", "nhe", "lh", "ne",
2174 "e", "nlh", "he", "nl",
2175 "le", "nh", "no", NULL
2176 };
2177
e68d6a13 2178 if (GET_CODE (XEXP (code, 0)) == REG
2179 && REGNO (XEXP (code, 0)) == CC_REGNUM
5ada7a14 2180 && (XEXP (code, 1) == const0_rtx
2181 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2182 && CONST_INT_P (XEXP (code, 1)))))
e68d6a13 2183 mask = s390_branch_condition_mask (code);
2184 else
2185 mask = s390_compare_and_branch_condition_mask (code);
2186
80b53886 2187 gcc_assert (mask >= 0);
2eb8fe23 2188
2189 if (inv)
2190 mask ^= 15;
2191
32eda510 2192 gcc_assert (mask >= 1 && mask <= 14);
2eb8fe23 2193
2194 return mnemonic[mask];
2195}
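/* For illustration: in CCZmode an EQ test maps to the mask CC0 = 1 << 3 = 8,
   for which the table above yields the mnemonic "e"; inverting the mask
   (8 ^ 15 = 7) yields "ne".  */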
2196
64a1078f 2197/* Return the part of op which has a value different from def.
2198 The size of the part is determined by mode.
f588eb9f 2199 Use this function only if you already know that op really
64a1078f 2200 contains such a part. */
8b4a4127 2201
64a1078f 2202unsigned HOST_WIDE_INT
3754d046 2203s390_extract_part (rtx op, machine_mode mode, int def)
8b4a4127 2204{
64a1078f 2205 unsigned HOST_WIDE_INT value = 0;
2206 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2207 int part_bits = GET_MODE_BITSIZE (mode);
b422d8c0 2208 unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
64a1078f 2209 int i;
f588eb9f 2210
64a1078f 2211 for (i = 0; i < max_parts; i++)
8b4a4127 2212 {
64a1078f 2213 if (i == 0)
b422d8c0 2214 value = UINTVAL (op);
8b4a4127 2215 else
64a1078f 2216 value >>= part_bits;
f588eb9f 2217
64a1078f 2218 if ((value & part_mask) != (def & part_mask))
2219 return value & part_mask;
8b4a4127 2220 }
f588eb9f 2221
32eda510 2222 gcc_unreachable ();
8b4a4127 2223}
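/* For illustration: with OP = (const_int 0x12340000), MODE = HImode and
   DEF = 0, the lowest HImode part equals DEF, so the next part 0x1234 is
   the value returned.  */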
2224
2225/* If OP is an integer constant of mode MODE with exactly one
64a1078f 2226 part of mode PART_MODE unequal to DEF, return the number of that
2227 part. Otherwise, return -1. */
8b4a4127 2228
2229int
f588eb9f 2230s390_single_part (rtx op,
3754d046 2231 machine_mode mode,
2232 machine_mode part_mode,
64a1078f 2233 int def)
2234{
2235 unsigned HOST_WIDE_INT value = 0;
2236 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
0451e449 2237 unsigned HOST_WIDE_INT part_mask
b422d8c0 2238 = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
64a1078f 2239 int i, part = -1;
2240
2241 if (GET_CODE (op) != CONST_INT)
2242 return -1;
f588eb9f 2243
64a1078f 2244 for (i = 0; i < n_parts; i++)
2245 {
2246 if (i == 0)
b422d8c0 2247 value = UINTVAL (op);
8b4a4127 2248 else
64a1078f 2249 value >>= GET_MODE_BITSIZE (part_mode);
f588eb9f 2250
64a1078f 2251 if ((value & part_mask) != (def & part_mask))
2252 {
2253 if (part != -1)
2254 return -1;
2255 else
2256 part = i;
2257 }
8b4a4127 2258 }
64a1078f 2259 return part == -1 ? -1 : n_parts - 1 - part;
8b4a4127 2260}
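/* For illustration: OP = (const_int 0x0000ffff00000000) with MODE = DImode,
   PART_MODE = HImode and DEF = 0 has exactly one part unequal to DEF; it is
   found at loop index 2, and 4 - 1 - 2 = 1 is returned, i.e. the part number
   counted from the most significant part.  */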
2261
e68d6a13 2262/* Return true if IN contains a contiguous bitfield in the lower SIZE
e64f5133 2263 bits and no other bits are set in (the lower SIZE bits of) IN.
e68d6a13 2264
e64f5133 2265 PSTART and PEND can be used to obtain the start and end
2266 position (inclusive) of the bitfield relative to 64
2267 bits. *PSTART / *PEND gives the position of the first/last bit
2268 of the bitfield counting from the highest order bit starting
2269 with zero. */
e68d6a13 2270
2271bool
e64f5133 2272s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2273 int *pstart, int *pend)
e68d6a13 2274{
e64f5133 2275 int start;
2276 int end = -1;
b422d8c0 2277 int lowbit = HOST_BITS_PER_WIDE_INT - 1;
2278 int highbit = HOST_BITS_PER_WIDE_INT - size;
2279 unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;
e64f5133 2280
2281 gcc_assert (!!pstart == !!pend);
2282 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2283 if (end == -1)
2284 {
2285 /* Look for the rightmost bit of a contiguous range of ones. */
2286 if (bitmask & in)
2287 /* Found it. */
2288 end = start;
2289 }
2290 else
2291 {
 2292 /* Look for the first zero bit after the range of ones. */
2293 if (! (bitmask & in))
2294 /* Found it. */
2295 break;
2296 }
2297 /* We're one past the last one-bit. */
2298 start++;
e68d6a13 2299
e64f5133 2300 if (end == -1)
2301 /* No one bits found. */
2302 return false;
2303
2304 if (start > highbit)
e68d6a13 2305 {
e64f5133 2306 unsigned HOST_WIDE_INT mask;
2307
2308 /* Calculate a mask for all bits beyond the contiguous bits. */
b422d8c0 2309 mask = ((~HOST_WIDE_INT_0U >> highbit)
2310 & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
e64f5133 2311 if (mask & in)
2312 /* There are more bits set beyond the first range of one bits. */
2313 return false;
e68d6a13 2314 }
2315
e64f5133 2316 if (pstart)
2317 {
2318 *pstart = start;
2319 *pend = end;
2320 }
e68d6a13 2321
e64f5133 2322 return true;
2323}
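/* For illustration: IN = 0xf0 with SIZE = 16 is accepted; the four one bits
   occupy positions 56 .. 59 counted from the most significant bit of a
   64-bit word, so *PSTART = 56 and *PEND = 59.  */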
e68d6a13 2324
e64f5133 2325/* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2326 if ~IN contains a contiguous bitfield. In that case, *END is <
2327 *START.
76a4c804 2328
e64f5133 2329 If WRAP_P is true, a bitmask that wraps around is also tested.
 2330 When a wraparound occurs *START is greater than *END (in
2331 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2332 part of the range. If WRAP_P is false, no wraparound is
2333 tested. */
e68d6a13 2334
e64f5133 2335bool
2336s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2337 int size, int *start, int *end)
2338{
b422d8c0 2339 int bs = HOST_BITS_PER_WIDE_INT;
e64f5133 2340 bool b;
2341
2342 gcc_assert (!!start == !!end);
b422d8c0 2343 if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
e64f5133 2344 /* This cannot be expressed as a contiguous bitmask. Exit early because
2345 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2346 a valid bitmask. */
e68d6a13 2347 return false;
e64f5133 2348 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2349 if (b)
2350 return true;
2351 if (! wrap_p)
2352 return false;
2353 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2354 if (b && start)
2355 {
2356 int s = *start;
2357 int e = *end;
e68d6a13 2358
e64f5133 2359 gcc_assert (s >= 1);
2360 *start = ((e + 1) & (bs - 1));
2361 *end = ((s - 1 + bs) & (bs - 1));
2362 }
e68d6a13 2363
e64f5133 2364 return b;
e68d6a13 2365}
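/* For illustration: IN = 0x8000000000000001 with SIZE = 64 and WRAP_P true
   fails the nowrap test but succeeds via ~IN, giving *START = 63 and
   *END = 0, i.e. a bitmask wrapping around bit 0.  */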
2366
76a4c804 2367/* Return true if OP contains the same contiguous bitfield in *all*
2368 its elements. START and END can be used to obtain the start and
2369 end position of the bitfield.
2370
 2371 START/END give the position of the first/last bit of the bitfield
2372 counting from the lowest order bit starting with zero. In order to
2373 use these values for S/390 instructions this has to be converted to
2374 "bits big endian" style. */
2375
2376bool
2377s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2378{
2379 unsigned HOST_WIDE_INT mask;
e64f5133 2380 int size;
62fdb8e4 2381 rtx elt;
e64f5133 2382 bool b;
76a4c804 2383
e64f5133 2384 gcc_assert (!!start == !!end);
62fdb8e4 2385 if (!const_vec_duplicate_p (op, &elt)
2386 || !CONST_INT_P (elt))
76a4c804 2387 return false;
2388
76a4c804 2389 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
f81e57c4 2390
2391 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2392 if (size > 64)
2393 return false;
2394
62fdb8e4 2395 mask = UINTVAL (elt);
e64f5133 2396
2397 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2398 if (b)
76a4c804 2399 {
e64f5133 2400 if (start)
2401 {
b422d8c0 2402 *start -= (HOST_BITS_PER_WIDE_INT - size);
2403 *end -= (HOST_BITS_PER_WIDE_INT - size);
e64f5133 2404 }
76a4c804 2405 return true;
2406 }
e64f5133 2407 else
2408 return false;
76a4c804 2409}
2410
 2411 /* Return true if OP consists only of byte chunks that are either 0 or
 2412 0xff. If MASK is nonnull, a byte mask is generated which is
 2413 appropriate for the vector generate byte mask instruction. */
2414
2415bool
2416s390_bytemask_vector_p (rtx op, unsigned *mask)
2417{
2418 int i;
2419 unsigned tmp_mask = 0;
2420 int nunit, unit_size;
2421
2422 if (!VECTOR_MODE_P (GET_MODE (op))
2423 || GET_CODE (op) != CONST_VECTOR
2424 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2425 return false;
2426
2427 nunit = GET_MODE_NUNITS (GET_MODE (op));
2428 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2429
2430 for (i = 0; i < nunit; i++)
2431 {
2432 unsigned HOST_WIDE_INT c;
2433 int j;
2434
2435 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2436 return false;
2437
2438 c = UINTVAL (XVECEXP (op, 0, i));
2439 for (j = 0; j < unit_size; j++)
2440 {
2441 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2442 return false;
2443 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2444 c = c >> BITS_PER_UNIT;
2445 }
2446 }
2447
2448 if (mask != NULL)
2449 *mask = tmp_mask;
2450
2451 return true;
2452}
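/* For illustration: in a V16QImode constant vector, element I contributes
   bit (15 - I) of the generated mask; a vector whose first element is 0xff
   and whose remaining elements are 0 therefore yields the mask 0x8000.  */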
2453
6bc28655 2454/* Check whether a rotate of ROTL followed by an AND of CONTIG is
2455 equivalent to a shift followed by the AND. In particular, CONTIG
2456 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2457 for ROTL indicate a rotate to the right. */
2458
2459bool
2460s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2461{
e64f5133 2462 int start, end;
6bc28655 2463 bool ok;
2464
e64f5133 2465 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
6bc28655 2466 gcc_assert (ok);
2467
e64f5133 2468 if (rotl >= 0)
2469 return (64 - end >= rotl);
2470 else
2471 {
2472 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
 2473 DImode. */
2474 rotl = -rotl + (64 - bitsize);
2475 return (start >= rotl);
2476 }
6bc28655 2477}
2478
f81e845f 2479/* Check whether we can (and want to) split a double-word
2480 move in mode MODE from SRC to DST into two single-word
66795431 2481 moves, moving the subword FIRST_SUBWORD first. */
2482
2483bool
3754d046 2484s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
66795431 2485{
76a4c804 2486 /* Floating point and vector registers cannot be split. */
2487 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
66795431 2488 return false;
2489
66795431 2490 /* Non-offsettable memory references cannot be split. */
2491 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2492 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2493 return false;
2494
2495 /* Moving the first subword must not clobber a register
2496 needed to move the second subword. */
2497 if (register_operand (dst, mode))
2498 {
2499 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2500 if (reg_overlap_mentioned_p (subreg, src))
2501 return false;
2502 }
2503
2504 return true;
2505}
2506
74bdf297 2507/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2508 and [MEM2, MEM2 + SIZE] do overlap and false
2509 otherwise. */
2510
2511bool
2512s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2513{
2514 rtx addr1, addr2, addr_delta;
2515 HOST_WIDE_INT delta;
2516
2517 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2518 return true;
2519
2520 if (size == 0)
2521 return false;
2522
2523 addr1 = XEXP (mem1, 0);
2524 addr2 = XEXP (mem2, 0);
2525
2526 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2527
2528 /* This overlapping check is used by peepholes merging memory block operations.
2529 Overlapping operations would otherwise be recognized by the S/390 hardware
ffead1ca 2530 and would fall back to a slower implementation. Allowing overlapping
74bdf297 2531 operations would lead to slow code but not to wrong code. Therefore we are
ffead1ca 2532 somewhat optimistic when we cannot prove that the memory blocks
74bdf297 2533 overlap.
2534 That's why we return false here although this may accept operations on
2535 overlapping memory areas. */
2536 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2537 return false;
2538
2539 delta = INTVAL (addr_delta);
2540
2541 if (delta == 0
2542 || (delta > 0 && delta < size)
2543 || (delta < 0 && -delta < size))
2544 return true;
2545
2546 return false;
2547}
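/* For illustration: MEMs addressed by (reg X) and (plus (reg X) (const_int 4))
   with SIZE = 8 produce an address delta of 4, which is smaller than SIZE,
   so they are reported as overlapping.  */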
2548
9dffd3ff 2549/* Check whether the address of memory reference MEM2 equals exactly
2550 the address of memory reference MEM1 plus DELTA. Return true if
2551 we can prove this to be the case, false otherwise. */
2552
2553bool
2554s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2555{
2556 rtx addr1, addr2, addr_delta;
2557
2558 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2559 return false;
2560
2561 addr1 = XEXP (mem1, 0);
2562 addr2 = XEXP (mem2, 0);
2563
2564 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2565 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2566 return false;
2567
2568 return true;
2569}
2570
3e247a31 2571/* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2572
2573void
3754d046 2574s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
3e247a31 2575 rtx *operands)
2576{
3754d046 2577 machine_mode wmode = mode;
3e247a31 2578 rtx dst = operands[0];
2579 rtx src1 = operands[1];
2580 rtx src2 = operands[2];
2581 rtx op, clob, tem;
2582
2583 /* If we cannot handle the operation directly, use a temp register. */
2584 if (!s390_logical_operator_ok_p (operands))
2585 dst = gen_reg_rtx (mode);
2586
2587 /* QImode and HImode patterns make sense only if we have a destination
2588 in memory. Otherwise perform the operation in SImode. */
2589 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2590 wmode = SImode;
2591
2592 /* Widen operands if required. */
2593 if (mode != wmode)
2594 {
2595 if (GET_CODE (dst) == SUBREG
2596 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2597 dst = tem;
2598 else if (REG_P (dst))
2599 dst = gen_rtx_SUBREG (wmode, dst, 0);
2600 else
2601 dst = gen_reg_rtx (wmode);
2602
2603 if (GET_CODE (src1) == SUBREG
2604 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2605 src1 = tem;
2606 else if (GET_MODE (src1) != VOIDmode)
2607 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2608
2609 if (GET_CODE (src2) == SUBREG
2610 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2611 src2 = tem;
2612 else if (GET_MODE (src2) != VOIDmode)
2613 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2614 }
2615
2616 /* Emit the instruction. */
d1f9b275 2617 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
3e247a31 2618 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2619 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2620
2621 /* Fix up the destination if needed. */
2622 if (dst != operands[0])
2623 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2624}
2625
2626/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2627
2628bool
2629s390_logical_operator_ok_p (rtx *operands)
2630{
2631 /* If the destination operand is in memory, it needs to coincide
2632 with one of the source operands. After reload, it has to be
2633 the first source operand. */
2634 if (GET_CODE (operands[0]) == MEM)
2635 return rtx_equal_p (operands[0], operands[1])
2636 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2637
2638 return true;
2639}
2640
3f56e755 2641/* Narrow logical operation CODE of memory operand MEMOP with immediate
2642 operand IMMOP to switch from SS to SI type instructions. */
2643
2644void
2645s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2646{
2647 int def = code == AND ? -1 : 0;
2648 HOST_WIDE_INT mask;
2649 int part;
2650
2651 gcc_assert (GET_CODE (*memop) == MEM);
2652 gcc_assert (!MEM_VOLATILE_P (*memop));
2653
2654 mask = s390_extract_part (*immop, QImode, def);
2655 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2656 gcc_assert (part >= 0);
2657
2658 *memop = adjust_address (*memop, QImode, part);
2659 *immop = gen_int_mode (mask, QImode);
2660}
2661
2eb8fe23 2662
875862bf 2663/* How to allocate a 'struct machine_function'. */
2664
2665static struct machine_function *
2666s390_init_machine_status (void)
2667{
25a27413 2668 return ggc_cleared_alloc<machine_function> ();
875862bf 2669}
2670
4673c1a0 2671/* Map for smallest class containing reg regno. */
2672
c8834c5f 2673const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
76a4c804 2674{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2675 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2676 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2678 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2679 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2680 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2681 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2682 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2683 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2684 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2685 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2686 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2687 VEC_REGS, VEC_REGS /* 52 */
4673c1a0 2688};
2689
71343e6b 2690/* Return attribute type of insn. */
2691
2692static enum attr_type
ed3e6e5d 2693s390_safe_attr_type (rtx_insn *insn)
71343e6b 2694{
2695 if (recog_memoized (insn) >= 0)
2696 return get_attr_type (insn);
2697 else
2698 return TYPE_NONE;
2699}
4673c1a0 2700
51aa1e9c 2701/* Return true if DISP is a valid short displacement. */
2702
e5537457 2703static bool
b40da9a7 2704s390_short_displacement (rtx disp)
51aa1e9c 2705{
2706 /* No displacement is OK. */
2707 if (!disp)
e5537457 2708 return true;
51aa1e9c 2709
a7b49046 2710 /* Without the long displacement facility we don't need to
 2711 distinguish between long and short displacement. */
2712 if (!TARGET_LONG_DISPLACEMENT)
2713 return true;
2714
51aa1e9c 2715 /* Integer displacement in range. */
2716 if (GET_CODE (disp) == CONST_INT)
2717 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2718
2719 /* GOT offset is not OK, the GOT can be large. */
2720 if (GET_CODE (disp) == CONST
2721 && GET_CODE (XEXP (disp, 0)) == UNSPEC
a6e4e903 2722 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2723 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
e5537457 2724 return false;
51aa1e9c 2725
2726 /* All other symbolic constants are literal pool references,
2727 which are OK as the literal pool must be small. */
2728 if (GET_CODE (disp) == CONST)
e5537457 2729 return true;
51aa1e9c 2730
e5537457 2731 return false;
51aa1e9c 2732}
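/* For illustration: (const_int 4000) is a valid short displacement, while
   (const_int 4096) or a negative displacement requires the long displacement
   facility.  */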
2733
875862bf 2734/* Decompose a RTL expression ADDR for a memory address into
2735 its components, returned in OUT.
a5004c3d 2736
e5537457 2737 Returns false if ADDR is not a valid memory address, true
875862bf 2738 otherwise. If OUT is NULL, don't return the components,
2739 but check for validity only.
a5004c3d 2740
875862bf 2741 Note: Only addresses in canonical form are recognized.
2742 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2743 canonical form so that they will be recognized. */
64a1078f 2744
875862bf 2745static int
edd89d66 2746s390_decompose_address (rtx addr, struct s390_address *out)
875862bf 2747{
2748 HOST_WIDE_INT offset = 0;
2749 rtx base = NULL_RTX;
2750 rtx indx = NULL_RTX;
2751 rtx disp = NULL_RTX;
2752 rtx orig_disp;
e5537457 2753 bool pointer = false;
2754 bool base_ptr = false;
2755 bool indx_ptr = false;
05b58257 2756 bool literal_pool = false;
2757
2758 /* We may need to substitute the literal pool base register into the address
2759 below. However, at this point we do not know which register is going to
2760 be used as base, so we substitute the arg pointer register. This is going
2761 to be treated as holding a pointer below -- it shouldn't be used for any
2762 other purpose. */
2763 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
3f56e755 2764
875862bf 2765 /* Decompose address into base + index + displacement. */
3f56e755 2766
875862bf 2767 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2768 base = addr;
3f56e755 2769
875862bf 2770 else if (GET_CODE (addr) == PLUS)
6b1c8423 2771 {
875862bf 2772 rtx op0 = XEXP (addr, 0);
2773 rtx op1 = XEXP (addr, 1);
2774 enum rtx_code code0 = GET_CODE (op0);
2775 enum rtx_code code1 = GET_CODE (op1);
6b1c8423 2776
875862bf 2777 if (code0 == REG || code0 == UNSPEC)
2778 {
2779 if (code1 == REG || code1 == UNSPEC)
2780 {
2781 indx = op0; /* index + base */
2782 base = op1;
2783 }
6b1c8423 2784
875862bf 2785 else
2786 {
2787 base = op0; /* base + displacement */
2788 disp = op1;
2789 }
2790 }
a5004c3d 2791
875862bf 2792 else if (code0 == PLUS)
51aa1e9c 2793 {
875862bf 2794 indx = XEXP (op0, 0); /* index + base + disp */
2795 base = XEXP (op0, 1);
2796 disp = op1;
51aa1e9c 2797 }
51aa1e9c 2798
875862bf 2799 else
51aa1e9c 2800 {
e5537457 2801 return false;
51aa1e9c 2802 }
875862bf 2803 }
51aa1e9c 2804
875862bf 2805 else
2806 disp = addr; /* displacement */
51aa1e9c 2807
875862bf 2808 /* Extract integer part of displacement. */
2809 orig_disp = disp;
2810 if (disp)
2811 {
2812 if (GET_CODE (disp) == CONST_INT)
51aa1e9c 2813 {
875862bf 2814 offset = INTVAL (disp);
2815 disp = NULL_RTX;
51aa1e9c 2816 }
875862bf 2817 else if (GET_CODE (disp) == CONST
2818 && GET_CODE (XEXP (disp, 0)) == PLUS
2819 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2820 {
2821 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2822 disp = XEXP (XEXP (disp, 0), 0);
2823 }
2824 }
51aa1e9c 2825
875862bf 2826 /* Strip off CONST here to avoid special case tests later. */
2827 if (disp && GET_CODE (disp) == CONST)
2828 disp = XEXP (disp, 0);
63ebd742 2829
875862bf 2830 /* We can convert literal pool addresses to
2831 displacements by basing them off the base register. */
2832 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2833 {
58ad9b54 2834 if (base || indx)
2835 return false;
2836
2837 base = fake_pool_base, literal_pool = true;
875862bf 2838
2839 /* Mark up the displacement. */
2840 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2841 UNSPEC_LTREL_OFFSET);
51aa1e9c 2842 }
a5004c3d 2843
875862bf 2844 /* Validate base register. */
2845 if (base)
2846 {
2847 if (GET_CODE (base) == UNSPEC)
2848 switch (XINT (base, 1))
2849 {
2850 case UNSPEC_LTREF:
2851 if (!disp)
ffead1ca 2852 disp = gen_rtx_UNSPEC (Pmode,
875862bf 2853 gen_rtvec (1, XVECEXP (base, 0, 0)),
2854 UNSPEC_LTREL_OFFSET);
2855 else
e5537457 2856 return false;
a5004c3d 2857
05b58257 2858 base = XVECEXP (base, 0, 1);
875862bf 2859 break;
64a1078f 2860
875862bf 2861 case UNSPEC_LTREL_BASE:
05b58257 2862 if (XVECLEN (base, 0) == 1)
2863 base = fake_pool_base, literal_pool = true;
2864 else
2865 base = XVECEXP (base, 0, 1);
875862bf 2866 break;
64a1078f 2867
875862bf 2868 default:
e5537457 2869 return false;
875862bf 2870 }
64a1078f 2871
a25e52e9 2872 if (!REG_P (base) || GET_MODE (base) != Pmode)
e5537457 2873 return false;
875862bf 2874
05b58257 2875 if (REGNO (base) == STACK_POINTER_REGNUM
875862bf 2876 || REGNO (base) == FRAME_POINTER_REGNUM
2877 || ((reload_completed || reload_in_progress)
2878 && frame_pointer_needed
2879 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2880 || REGNO (base) == ARG_POINTER_REGNUM
2881 || (flag_pic
2882 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
e5537457 2883 pointer = base_ptr = true;
05b58257 2884
2885 if ((reload_completed || reload_in_progress)
2886 && base == cfun->machine->base_reg)
2887 pointer = base_ptr = literal_pool = true;
875862bf 2888 }
2889
2890 /* Validate index register. */
2891 if (indx)
64a1078f 2892 {
875862bf 2893 if (GET_CODE (indx) == UNSPEC)
2894 switch (XINT (indx, 1))
2895 {
2896 case UNSPEC_LTREF:
2897 if (!disp)
ffead1ca 2898 disp = gen_rtx_UNSPEC (Pmode,
875862bf 2899 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2900 UNSPEC_LTREL_OFFSET);
2901 else
e5537457 2902 return false;
64a1078f 2903
05b58257 2904 indx = XVECEXP (indx, 0, 1);
875862bf 2905 break;
64a1078f 2906
875862bf 2907 case UNSPEC_LTREL_BASE:
05b58257 2908 if (XVECLEN (indx, 0) == 1)
2909 indx = fake_pool_base, literal_pool = true;
2910 else
2911 indx = XVECEXP (indx, 0, 1);
875862bf 2912 break;
64a1078f 2913
875862bf 2914 default:
e5537457 2915 return false;
875862bf 2916 }
64a1078f 2917
a25e52e9 2918 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
e5537457 2919 return false;
64a1078f 2920
05b58257 2921 if (REGNO (indx) == STACK_POINTER_REGNUM
875862bf 2922 || REGNO (indx) == FRAME_POINTER_REGNUM
2923 || ((reload_completed || reload_in_progress)
2924 && frame_pointer_needed
2925 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2926 || REGNO (indx) == ARG_POINTER_REGNUM
2927 || (flag_pic
2928 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
e5537457 2929 pointer = indx_ptr = true;
05b58257 2930
2931 if ((reload_completed || reload_in_progress)
2932 && indx == cfun->machine->base_reg)
2933 pointer = indx_ptr = literal_pool = true;
875862bf 2934 }
f588eb9f 2935
875862bf 2936 /* Prefer to use pointer as base, not index. */
2937 if (base && indx && !base_ptr
2938 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2939 {
2940 rtx tmp = base;
2941 base = indx;
2942 indx = tmp;
2943 }
64a1078f 2944
875862bf 2945 /* Validate displacement. */
2946 if (!disp)
2947 {
ffead1ca 2948 /* If virtual registers are involved, the displacement will change later
2949 anyway as the virtual registers get eliminated. This could make a
2950 valid displacement invalid, but it is more likely to make an invalid
2951 displacement valid, because we sometimes access the register save area
119114cb 2952 via negative offsets to one of those registers.
875862bf 2953 Thus we don't check the displacement for validity here. If after
2954 elimination the displacement turns out to be invalid after all,
2955 this is fixed up by reload in any case. */
7b1bda1c 2956 /* LRA maintains always displacements up to date and we need to
2957 know the displacement is right during all LRA not only at the
2958 final elimination. */
2959 if (lra_in_progress
2960 || (base != arg_pointer_rtx
2961 && indx != arg_pointer_rtx
2962 && base != return_address_pointer_rtx
2963 && indx != return_address_pointer_rtx
2964 && base != frame_pointer_rtx
2965 && indx != frame_pointer_rtx
2966 && base != virtual_stack_vars_rtx
2967 && indx != virtual_stack_vars_rtx))
875862bf 2968 if (!DISP_IN_RANGE (offset))
e5537457 2969 return false;
875862bf 2970 }
2971 else
2972 {
2973 /* All the special cases are pointers. */
e5537457 2974 pointer = true;
64a1078f 2975
875862bf 2976 /* In the small-PIC case, the linker converts @GOT
2977 and @GOTNTPOFF offsets to possible displacements. */
2978 if (GET_CODE (disp) == UNSPEC
2979 && (XINT (disp, 1) == UNSPEC_GOT
2980 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
875862bf 2981 && flag_pic == 1)
2982 {
2983 ;
2984 }
64a1078f 2985
1ed7a160 2986 /* Accept pool label offsets. */
2987 else if (GET_CODE (disp) == UNSPEC
2988 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2989 ;
64a1078f 2990
875862bf 2991 /* Accept literal pool references. */
2992 else if (GET_CODE (disp) == UNSPEC
2993 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2994 {
cf8ffe7d 2995 /* In case CSE pulled a non literal pool reference out of
2996 the pool we have to reject the address. This is
2997 especially important when loading the GOT pointer on non
2998 zarch CPUs. In this case the literal pool contains an lt
2999 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3000 will most likely exceed the displacement. */
3001 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3002 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3003 return false;
3004
875862bf 3005 orig_disp = gen_rtx_CONST (Pmode, disp);
3006 if (offset)
3007 {
3008 /* If we have an offset, make sure it does not
3009 exceed the size of the constant pool entry. */
3010 rtx sym = XVECEXP (disp, 0, 0);
3011 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
e5537457 3012 return false;
64a1078f 3013
29c05e22 3014 orig_disp = plus_constant (Pmode, orig_disp, offset);
875862bf 3015 }
3016 }
3017
3018 else
e5537457 3019 return false;
64a1078f 3020 }
3021
875862bf 3022 if (!base && !indx)
e5537457 3023 pointer = true;
875862bf 3024
3025 if (out)
3026 {
3027 out->base = base;
3028 out->indx = indx;
3029 out->disp = orig_disp;
3030 out->pointer = pointer;
05b58257 3031 out->literal_pool = literal_pool;
875862bf 3032 }
3033
e5537457 3034 return true;
64a1078f 3035}
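/* For illustration: the canonical address
     (plus (plus (reg <index>) (reg <base>)) (const_int 8))
   decomposes into indx = (reg <index>), base = (reg <base>) and an integer
   displacement of 8.  */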
3036
2be7449b 3037/* Decompose a RTL expression OP for an address style operand into its
3038 components, and return the base register in BASE and the offset in
3039 OFFSET. While OP looks like an address it is never supposed to be
3040 used as such.
6d6be381 3041
2be7449b 3042 Return true if OP is a valid address operand, false if not. */
6d6be381 3043
3044bool
2be7449b 3045s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3046 HOST_WIDE_INT *offset)
6d6be381 3047{
6191f2a0 3048 rtx off = NULL_RTX;
6d6be381 3049
6d6be381 3050 /* We can have an integer constant, an address register,
3051 or a sum of the two. */
6191f2a0 3052 if (CONST_SCALAR_INT_P (op))
6d6be381 3053 {
6191f2a0 3054 off = op;
6d6be381 3055 op = NULL_RTX;
3056 }
6191f2a0 3057 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
6d6be381 3058 {
6191f2a0 3059 off = XEXP (op, 1);
6d6be381 3060 op = XEXP (op, 0);
3061 }
3062 while (op && GET_CODE (op) == SUBREG)
3063 op = SUBREG_REG (op);
3064
3065 if (op && GET_CODE (op) != REG)
3066 return false;
3067
3068 if (offset)
6191f2a0 3069 {
3070 if (off == NULL_RTX)
3071 *offset = 0;
3072 else if (CONST_INT_P (off))
3073 *offset = INTVAL (off);
3074 else if (CONST_WIDE_INT_P (off))
3075 /* The offset will anyway be cut down to 12 bits so take just
3076 the lowest order chunk of the wide int. */
3077 *offset = CONST_WIDE_INT_ELT (off, 0);
3078 else
3079 gcc_unreachable ();
3080 }
6d6be381 3081 if (base)
3082 *base = op;
3083
3084 return true;
3085}
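/* For illustration: OP = (plus (reg <r>) (const_int 7)) yields
   *BASE = (reg <r>) and *OFFSET = 7; a plain (const_int 7) yields a NULL
   base and the same offset.  */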
3086
3087
875862bf 3088/* Return true if CODE is a valid address without index. */
fab7adbf 3089
875862bf 3090bool
3091s390_legitimate_address_without_index_p (rtx op)
3092{
3093 struct s390_address addr;
3094
3095 if (!s390_decompose_address (XEXP (op, 0), &addr))
3096 return false;
3097 if (addr.indx)
3098 return false;
3099
3100 return true;
3101}
3102
59bc01b3 3103
2a672556 3104/* Return TRUE if ADDR is an operand valid for a load/store relative
3105 instruction. Be aware that the alignment of the operand needs to
3106 be checked separately.
3107 Valid addresses are single references or a sum of a reference and a
3108 constant integer. Return these parts in SYMREF and ADDEND. You can
 3110 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3110 values. Literal pool references are *not* considered symbol
3111 references. */
875862bf 3112
a7b49046 3113static bool
2a672556 3114s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
875862bf 3115{
a7b49046 3116 HOST_WIDE_INT tmpaddend = 0;
875862bf 3117
a7b49046 3118 if (GET_CODE (addr) == CONST)
3119 addr = XEXP (addr, 0);
3120
3121 if (GET_CODE (addr) == PLUS)
875862bf 3122 {
2a672556 3123 if (!CONST_INT_P (XEXP (addr, 1)))
a7b49046 3124 return false;
875862bf 3125
2a672556 3126 tmpaddend = INTVAL (XEXP (addr, 1));
3127 addr = XEXP (addr, 0);
3128 }
62cb5855 3129
2a672556 3130 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3131 || (GET_CODE (addr) == UNSPEC
3132 && (XINT (addr, 1) == UNSPEC_GOTENT
3133 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3134 {
3135 if (symref)
3136 *symref = addr;
3137 if (addend)
3138 *addend = tmpaddend;
62cb5855 3139
2a672556 3140 return true;
3141 }
3142 return false;
62cb5855 3143}
a7b49046 3144
3145/* Return true if the address in OP is valid for constraint letter C
 3146 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3147 pool MEMs should be accepted. Only the Q, R, S, T constraint
3148 letters are allowed for C. */
875862bf 3149
a7b49046 3150static int
3151s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3152{
3153 struct s390_address addr;
3154 bool decomposed = false;
3155
ff03121f 3156 if (!address_operand (op, GET_MODE (op)))
3157 return 0;
3158
a7b49046 3159 /* This check makes sure that no symbolic address (except literal
3160 pool references) are accepted by the R or T constraints. */
2a672556 3161 if (s390_loadrelative_operand_p (op, NULL, NULL))
f3959569 3162 return 0;
3163
3164 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3165 if (!lit_pool_ok)
875862bf 3166 {
a7b49046 3167 if (!s390_decompose_address (op, &addr))
875862bf 3168 return 0;
f3959569 3169 if (addr.literal_pool)
875862bf 3170 return 0;
a7b49046 3171 decomposed = true;
875862bf 3172 }
3173
7396c35d 3174 /* With reload, we sometimes get intermediate address forms that are
3175 actually invalid as-is, but we need to accept them in the most
3176 generic cases below ('R' or 'T'), since reload will in fact fix
3177 them up. LRA behaves differently here; we never see such forms,
3178 but on the other hand, we need to strictly reject every invalid
3179 address form. Perform this check right up front. */
3180 if (lra_in_progress)
3181 {
3182 if (!decomposed && !s390_decompose_address (op, &addr))
3183 return 0;
3184 decomposed = true;
3185 }
3186
875862bf 3187 switch (c)
3188 {
a7b49046 3189 case 'Q': /* no index short displacement */
3190 if (!decomposed && !s390_decompose_address (op, &addr))
875862bf 3191 return 0;
3192 if (addr.indx)
3193 return 0;
a7b49046 3194 if (!s390_short_displacement (addr.disp))
875862bf 3195 return 0;
a7b49046 3196 break;
875862bf 3197
a7b49046 3198 case 'R': /* with index short displacement */
875862bf 3199 if (TARGET_LONG_DISPLACEMENT)
3200 {
a7b49046 3201 if (!decomposed && !s390_decompose_address (op, &addr))
875862bf 3202 return 0;
3203 if (!s390_short_displacement (addr.disp))
3204 return 0;
3205 }
a7b49046 3206 /* Any invalid address here will be fixed up by reload,
3207 so accept it for the most generic constraint. */
875862bf 3208 break;
3209
a7b49046 3210 case 'S': /* no index long displacement */
a7b49046 3211 if (!decomposed && !s390_decompose_address (op, &addr))
875862bf 3212 return 0;
3213 if (addr.indx)
3214 return 0;
875862bf 3215 break;
3216
a7b49046 3217 case 'T': /* with index long displacement */
a7b49046 3218 /* Any invalid address here will be fixed up by reload,
3219 so accept it for the most generic constraint. */
875862bf 3220 break;
7396c35d 3221
a7b49046 3222 default:
3223 return 0;
3224 }
3225 return 1;
3226}
875862bf 3227
875862bf 3228
a7b49046 3229/* Evaluates constraint strings described by the regular expression
7396c35d 3230 ((A|B|Z)(Q|R|S|T))|Y and returns 1 if OP is a valid operand for
a7b49046 3231 the constraint given in STR, and 0 otherwise. */
3232
3233int
3234s390_mem_constraint (const char *str, rtx op)
3235{
3236 char c = str[0];
3237
3238 switch (c)
3239 {
3240 case 'A':
3241 /* Check for offsettable variants of memory constraints. */
3242 if (!MEM_P (op) || MEM_VOLATILE_P (op))
875862bf 3243 return 0;
a7b49046 3244 if ((reload_completed || reload_in_progress)
3245 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
e68d6a13 3246 return 0;
a7b49046 3247 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3248 case 'B':
3249 /* Check for non-literal-pool variants of memory constraints. */
3250 if (!MEM_P (op))
875862bf 3251 return 0;
a7b49046 3252 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3253 case 'Q':
3254 case 'R':
3255 case 'S':
3256 case 'T':
3257 if (GET_CODE (op) != MEM)
3258 return 0;
3259 return s390_check_qrst_address (c, XEXP (op, 0), true);
875862bf 3260 case 'Y':
6d6be381 3261 /* Simply check for the basic form of a shift count. Reload will
3262 take care of making sure we have a proper base register. */
2be7449b 3263 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
6d6be381 3264 return 0;
3265 break;
a7b49046 3266 case 'Z':
3267 return s390_check_qrst_address (str[1], op, true);
875862bf 3268 default:
3269 return 0;
3270 }
875862bf 3271 return 1;
3272}
3273
59bc01b3 3274
59bc01b3 3275/* Evaluates constraint strings starting with letter O. Input
3276 parameter C is the second letter following the "O" in the constraint
3277 string. Returns 1 if VALUE meets the respective constraint and 0
3278 otherwise. */
875862bf 3279
e863b008 3280int
59bc01b3 3281s390_O_constraint_str (const char c, HOST_WIDE_INT value)
e863b008 3282{
59bc01b3 3283 if (!TARGET_EXTIMM)
3284 return 0;
e863b008 3285
59bc01b3 3286 switch (c)
e863b008 3287 {
59bc01b3 3288 case 's':
3289 return trunc_int_for_mode (value, SImode) == value;
3290
3291 case 'p':
3292 return value == 0
3293 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3294
3295 case 'n':
29847ec4 3296 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
59bc01b3 3297
e863b008 3298 default:
59bc01b3 3299 gcc_unreachable ();
e863b008 3300 }
3301}
3302
59bc01b3 3303
3304/* Evaluates constraint strings starting with letter N. Parameter STR
3305 contains the letters following letter "N" in the constraint string.
3306 Returns true if VALUE matches the constraint. */
e863b008 3307
875862bf 3308int
59bc01b3 3309s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
875862bf 3310{
3754d046 3311 machine_mode mode, part_mode;
875862bf 3312 int def;
3313 int part, part_goal;
3314
875862bf 3315
59bc01b3 3316 if (str[0] == 'x')
3317 part_goal = -1;
3318 else
3319 part_goal = str[0] - '0';
875862bf 3320
59bc01b3 3321 switch (str[1])
3322 {
3323 case 'Q':
3324 part_mode = QImode;
875862bf 3325 break;
59bc01b3 3326 case 'H':
3327 part_mode = HImode;
163277cf 3328 break;
59bc01b3 3329 case 'S':
3330 part_mode = SImode;
3331 break;
3332 default:
3333 return 0;
3334 }
163277cf 3335
59bc01b3 3336 switch (str[2])
3337 {
3338 case 'H':
3339 mode = HImode;
3340 break;
3341 case 'S':
3342 mode = SImode;
3343 break;
3344 case 'D':
3345 mode = DImode;
3346 break;
3347 default:
3348 return 0;
3349 }
53239c89 3350
59bc01b3 3351 switch (str[3])
3352 {
3353 case '0':
3354 def = 0;
3355 break;
3356 case 'F':
3357 def = -1;
3358 break;
875862bf 3359 default:
3360 return 0;
3361 }
3362
59bc01b3 3363 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3364 return 0;
3365
3366 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3367 if (part < 0)
3368 return 0;
3369 if (part_goal != -1 && part_goal != part)
3370 return 0;
3371
875862bf 3372 return 1;
3373}
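/* For illustration: in a constraint such as "N3HD0" the characters after the
   "N" select part number 3, HImode parts, a DImode value and 0 as the value
   of the remaining parts; an "x" instead of the digit accepts any part
   number.  */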
3374
59bc01b3 3375
3376/* Returns true if the input parameter VALUE is a float zero. */
3377
3378int
3379s390_float_const_zero_p (rtx value)
3380{
3381 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3382 && value == CONST0_RTX (GET_MODE (value)));
3383}
3384
fa7a995b 3385/* Implement TARGET_REGISTER_MOVE_COST. */
3386
3387static int
5fe5762e 3388s390_register_move_cost (machine_mode mode,
fa7a995b 3389 reg_class_t from, reg_class_t to)
3390{
5fe5762e 3391 /* On s390, copy between fprs and gprs is expensive. */
3392
3393 /* It becomes somewhat faster having ldgr/lgdr. */
3394 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3395 {
3396 /* ldgr is single cycle. */
3397 if (reg_classes_intersect_p (from, GENERAL_REGS)
3398 && reg_classes_intersect_p (to, FP_REGS))
3399 return 1;
3400 /* lgdr needs 3 cycles. */
3401 if (reg_classes_intersect_p (to, GENERAL_REGS)
3402 && reg_classes_intersect_p (from, FP_REGS))
3403 return 3;
3404 }
3405
3406 /* Otherwise copying is done via memory. */
3407 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3408 && reg_classes_intersect_p (to, FP_REGS))
3409 || (reg_classes_intersect_p (from, FP_REGS)
3410 && reg_classes_intersect_p (to, GENERAL_REGS)))
fa7a995b 3411 return 10;
3412
3413 return 1;
3414}
3415
3416/* Implement TARGET_MEMORY_MOVE_COST. */
3417
3418static int
3754d046 3419s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
fa7a995b 3420 reg_class_t rclass ATTRIBUTE_UNUSED,
3421 bool in ATTRIBUTE_UNUSED)
3422{
9a071c9f 3423 return 2;
fa7a995b 3424}
59bc01b3 3425
875862bf 3426/* Compute a (partial) cost for rtx X. Return true if the complete
3427 cost has been computed, and false if subexpressions should be
016d030e 3428 scanned. In either case, *TOTAL contains the cost result. The
3429 initial value of *TOTAL is the default value computed by
3430 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3431 code of the superexpression of x. */
875862bf 3432
3433static bool
5ae4887d 3434s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3435 int opno ATTRIBUTE_UNUSED,
20d892d1 3436 int *total, bool speed ATTRIBUTE_UNUSED)
fab7adbf 3437{
5ae4887d 3438 int code = GET_CODE (x);
fab7adbf 3439 switch (code)
3440 {
3441 case CONST:
fab7adbf 3442 case CONST_INT:
fab7adbf 3443 case LABEL_REF:
3444 case SYMBOL_REF:
3445 case CONST_DOUBLE:
ba0e61d6 3446 case CONST_WIDE_INT:
3f074425 3447 case MEM:
fab7adbf 3448 *total = 0;
3449 return true;
3450
c70e1aad 3451 case SET:
3452 {
3453 /* Without this a conditional move instruction would be
3454 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3455 comparison operator). That's a bit pessimistic. */
3456
3457 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3458 return false;
3459
3460 rtx cond = XEXP (SET_SRC (x), 0);
3461
3462 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3463 return false;
3464
3465 /* It is going to be a load/store on condition. Make it
3466 slightly more expensive than a normal load. */
3467 *total = COSTS_N_INSNS (1) + 1;
3468
3469 rtx dst = SET_DEST (x);
3470 rtx then = XEXP (SET_SRC (x), 1);
3471 rtx els = XEXP (SET_SRC (x), 2);
3472
3473 /* It is a real IF-THEN-ELSE. An additional move will be
3474 needed to implement that. */
3475 if (reload_completed
3476 && !rtx_equal_p (dst, then)
3477 && !rtx_equal_p (dst, els))
3478 *total += COSTS_N_INSNS (1) / 2;
3479
3480 /* A minor penalty for constants we cannot directly handle. */
3481 if ((CONST_INT_P (then) || CONST_INT_P (els))
3482 && (!TARGET_Z13 || MEM_P (dst)
3483 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3484 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3485 *total += COSTS_N_INSNS (1) / 2;
3486
3487 /* A store on condition can only handle register src operands. */
3488 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3489 *total += COSTS_N_INSNS (1) / 2;
3490
3491 return true;
3492 }
02a8efd2 3493 case IOR:
3494 /* risbg */
3495 if (GET_CODE (XEXP (x, 0)) == AND
3496 && GET_CODE (XEXP (x, 1)) == ASHIFT
3497 && REG_P (XEXP (XEXP (x, 0), 0))
3498 && REG_P (XEXP (XEXP (x, 1), 0))
3499 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3500 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3501 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
b422d8c0 3502 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
02a8efd2 3503 {
3504 *total = COSTS_N_INSNS (2);
3505 return true;
3506 }
0f57593c 3507
3508 /* ~AND on a 128 bit mode. This can be done using a vector
3509 instruction. */
3510 if (TARGET_VXE
3511 && GET_CODE (XEXP (x, 0)) == NOT
3512 && GET_CODE (XEXP (x, 1)) == NOT
3513 && REG_P (XEXP (XEXP (x, 0), 0))
3514 && REG_P (XEXP (XEXP (x, 1), 0))
3515 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3516 && s390_hard_regno_mode_ok (VR0_REGNUM,
3517 GET_MODE (XEXP (XEXP (x, 0), 0))))
3518 {
3519 *total = COSTS_N_INSNS (1);
3520 return true;
3521 }
0903985d 3522 /* fallthrough */
fab7adbf 3523 case ASHIFT:
3524 case ASHIFTRT:
3525 case LSHIFTRT:
18925d38 3526 case ROTATE:
3527 case ROTATERT:
fab7adbf 3528 case AND:
fab7adbf 3529 case XOR:
fab7adbf 3530 case NEG:
3531 case NOT:
3532 *total = COSTS_N_INSNS (1);
18925d38 3533 return false;
fab7adbf 3534
9cd3f3e6 3535 case PLUS:
3536 case MINUS:
9cd3f3e6 3537 *total = COSTS_N_INSNS (1);
3538 return false;
3539
ffead1ca 3540 case MULT:
5ae4887d 3541 switch (mode)
18925d38 3542 {
916ace94 3543 case E_SImode:
9cd3f3e6 3544 {
18925d38 3545 rtx left = XEXP (x, 0);
3546 rtx right = XEXP (x, 1);
3547 if (GET_CODE (right) == CONST_INT
cb888f33 3548 && CONST_OK_FOR_K (INTVAL (right)))
18925d38 3549 *total = s390_cost->mhi;
3550 else if (GET_CODE (left) == SIGN_EXTEND)
3551 *total = s390_cost->mh;
3552 else
3553 *total = s390_cost->ms; /* msr, ms, msy */
3554 break;
3555 }
916ace94 3556 case E_DImode:
18925d38 3557 {
3558 rtx left = XEXP (x, 0);
3559 rtx right = XEXP (x, 1);
b5fdc416 3560 if (TARGET_ZARCH)
18925d38 3561 {
3562 if (GET_CODE (right) == CONST_INT
cb888f33 3563 && CONST_OK_FOR_K (INTVAL (right)))
18925d38 3564 *total = s390_cost->mghi;
3565 else if (GET_CODE (left) == SIGN_EXTEND)
3566 *total = s390_cost->msgf;
3567 else
3568 *total = s390_cost->msg; /* msgr, msg */
3569 }
3570 else /* TARGET_31BIT */
3571 {
3572 if (GET_CODE (left) == SIGN_EXTEND
3573 && GET_CODE (right) == SIGN_EXTEND)
3574 /* mulsidi case: mr, m */
3575 *total = s390_cost->m;
9cd3f3e6 3576 else if (GET_CODE (left) == ZERO_EXTEND
3577 && GET_CODE (right) == ZERO_EXTEND
3578 && TARGET_CPU_ZARCH)
3579 /* umulsidi case: ml, mlr */
3580 *total = s390_cost->ml;
18925d38 3581 else
3582 /* Complex calculation is required. */
3583 *total = COSTS_N_INSNS (40);
3584 }
3585 break;
3586 }
916ace94 3587 case E_SFmode:
3588 case E_DFmode:
18925d38 3589 *total = s390_cost->mult_df;
3590 break;
916ace94 3591 case E_TFmode:
429f9fdb 3592 *total = s390_cost->mxbr;
3593 break;
18925d38 3594 default:
3595 return false;
3596 }
3597 return false;
fab7adbf 3598
81470015 3599 case FMA:
5ae4887d 3600 switch (mode)
81470015 3601 {
916ace94 3602 case E_DFmode:
81470015 3603 *total = s390_cost->madbr;
3604 break;
916ace94 3605 case E_SFmode:
81470015 3606 *total = s390_cost->maebr;
3607 break;
3608 default:
3609 return false;
3610 }
 3611	    /* Negating the third argument is free: FMSUB.  */
3612 if (GET_CODE (XEXP (x, 2)) == NEG)
3613 {
5ae4887d 3614 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3615 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3616 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
81470015 3617 return true;
3618 }
3619 return false;
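      /* Illustrative example (added): with a negated addend such as
	   (fma (reg a) (reg b) (neg (reg c)))
	 the NEG is folded into the multiply-and-subtract instruction
	 (msdbr/msebr), so only the costs of the three operands are
	 added above and no extra insn is charged for the negation.  */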
3620
3f074425 3621 case UDIV:
3622 case UMOD:
5ae4887d 3623 if (mode == TImode) /* 128 bit division */
3f074425 3624 *total = s390_cost->dlgr;
5ae4887d 3625 else if (mode == DImode)
3f074425 3626 {
3627 rtx right = XEXP (x, 1);
3628 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3629 *total = s390_cost->dlr;
3630 else /* 64 by 64 bit division */
3631 *total = s390_cost->dlgr;
3632 }
5ae4887d 3633 else if (mode == SImode) /* 32 bit division */
3f074425 3634 *total = s390_cost->dlr;
3635 return false;
3636
fab7adbf 3637 case DIV:
3f074425 3638 case MOD:
5ae4887d 3639 if (mode == DImode)
3f074425 3640 {
3641 rtx right = XEXP (x, 1);
3642 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
b5fdc416 3643 if (TARGET_ZARCH)
3f074425 3644 *total = s390_cost->dsgfr;
3645 else
3646 *total = s390_cost->dr;
3647 else /* 64 by 64 bit division */
3648 *total = s390_cost->dsgr;
3649 }
5ae4887d 3650 else if (mode == SImode) /* 32 bit division */
3f074425 3651 *total = s390_cost->dlr;
5ae4887d 3652 else if (mode == SFmode)
260075cc 3653 {
095798e3 3654 *total = s390_cost->debr;
260075cc 3655 }
5ae4887d 3656 else if (mode == DFmode)
260075cc 3657 {
095798e3 3658 *total = s390_cost->ddbr;
260075cc 3659 }
5ae4887d 3660 else if (mode == TFmode)
429f9fdb 3661 {
095798e3 3662 *total = s390_cost->dxbr;
429f9fdb 3663 }
18925d38 3664 return false;
3665
9cd3f3e6 3666 case SQRT:
5ae4887d 3667 if (mode == SFmode)
9cd3f3e6 3668 *total = s390_cost->sqebr;
5ae4887d 3669 else if (mode == DFmode)
9cd3f3e6 3670 *total = s390_cost->sqdbr;
429f9fdb 3671 else /* TFmode */
3672 *total = s390_cost->sqxbr;
9cd3f3e6 3673 return false;
3674
18925d38 3675 case SIGN_EXTEND:
9cd3f3e6 3676 case ZERO_EXTEND:
3f074425 3677 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3678 || outer_code == PLUS || outer_code == MINUS
3679 || outer_code == COMPARE)
18925d38 3680 *total = 0;
3681 return false;
fab7adbf 3682
3f074425 3683 case COMPARE:
3684 *total = COSTS_N_INSNS (1);
3685 if (GET_CODE (XEXP (x, 0)) == AND
3686 && GET_CODE (XEXP (x, 1)) == CONST_INT
3687 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3688 {
3689 rtx op0 = XEXP (XEXP (x, 0), 0);
3690 rtx op1 = XEXP (XEXP (x, 0), 1);
3691 rtx op2 = XEXP (x, 1);
3692
3693 if (memory_operand (op0, GET_MODE (op0))
3694 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3695 return true;
3696 if (register_operand (op0, GET_MODE (op0))
3697 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3698 return true;
3699 }
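      /* Example (added for illustration): a comparison like
	   (compare (and (reg X) (const_int 0xff00)) (const_int 0))
	 can be carried out by a single TEST UNDER MASK instruction
	 (tm/tmll/tmlh/...), so the code above returns true with a
	 total of one insn instead of costing the embedded AND
	 separately.  */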
3700 return false;
3701
fab7adbf 3702 default:
3703 return false;
3704 }
3705}
3706
ee9c19ee 3707/* Return the cost of an address rtx ADDR. */
3708
ec0457a8 3709static int
3754d046 3710s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
d9c5e5f4 3711 addr_space_t as ATTRIBUTE_UNUSED,
3712 bool speed ATTRIBUTE_UNUSED)
ee9c19ee 3713{
3714 struct s390_address ad;
3715 if (!s390_decompose_address (addr, &ad))
3716 return 1000;
3717
3718 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3719}
3720
292e369f 3721/* Implement targetm.vectorize.builtin_vectorization_cost. */
3722static int
3723s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3724 tree vectype,
3725 int misalign ATTRIBUTE_UNUSED)
3726{
3727 switch (type_of_cost)
3728 {
3729 case scalar_stmt:
3730 case scalar_load:
3731 case scalar_store:
3732 case vector_stmt:
3733 case vector_load:
3734 case vector_store:
72e995da 3735 case vector_gather_load:
3736 case vector_scatter_store:
292e369f 3737 case vec_to_scalar:
3738 case scalar_to_vec:
3739 case cond_branch_not_taken:
3740 case vec_perm:
3741 case vec_promote_demote:
3742 case unaligned_load:
3743 case unaligned_store:
3744 return 1;
3745
3746 case cond_branch_taken:
3747 return 3;
3748
3749 case vec_construct:
3750 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3751
3752 default:
3753 gcc_unreachable ();
3754 }
3755}
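/* Note (added): the vec_construct cost above is a simple heuristic;
   e.g. building a V4SI vector from four scalar values is costed as
   TYPE_VECTOR_SUBPARTS - 1 == 3, roughly one insn per element that
   has to be inserted after the first one.  */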
3756
be00aaa8 3757/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3758 otherwise return 0. */
3759
3760int
edd89d66 3761tls_symbolic_operand (rtx op)
be00aaa8 3762{
be00aaa8 3763 if (GET_CODE (op) != SYMBOL_REF)
3764 return 0;
a3e33162 3765 return SYMBOL_REF_TLS_MODEL (op);
be00aaa8 3766}
4673c1a0 3767\f
923cf36d 3768/* Split DImode access register reference REG (on 64-bit) into its constituent
3769 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3770 gen_highpart cannot be used as they assume all registers are word-sized,
3771 while our access registers have only half that size. */
3772
3773void
3774s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3775{
3776 gcc_assert (TARGET_64BIT);
3777 gcc_assert (ACCESS_REG_P (reg));
3778 gcc_assert (GET_MODE (reg) == DImode);
3779 gcc_assert (!(REGNO (reg) & 1));
3780
3781 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3782 *hi = gen_rtx_REG (SImode, REGNO (reg));
3783}
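/* Example (added): for REG == (reg:DI %a0) the function above yields
   *HI == (reg:SI %a0) and *LO == (reg:SI %a1), i.e. the high part
   lives in the even and the low part in the odd access register of
   the pair.  */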
4673c1a0 3784
56769981 3785/* Return true if OP contains a symbol reference.  */
4673c1a0 3786
e5537457 3787bool
b40da9a7 3788symbolic_reference_mentioned_p (rtx op)
4673c1a0 3789{
edd89d66 3790 const char *fmt;
3791 int i;
4673c1a0 3792
3793 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3794 return 1;
3795
3796 fmt = GET_RTX_FORMAT (GET_CODE (op));
3797 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3798 {
3799 if (fmt[i] == 'E')
3800 {
edd89d66 3801 int j;
4673c1a0 3802
3803 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3804 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3805 return 1;
3806 }
3807
3808 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3809 return 1;
3810 }
3811
3812 return 0;
3813}
3814
be00aaa8 3815/* Return true if OP contains a reference to a thread-local symbol. */
3816
e5537457 3817bool
b40da9a7 3818tls_symbolic_reference_mentioned_p (rtx op)
be00aaa8 3819{
edd89d66 3820 const char *fmt;
3821 int i;
be00aaa8 3822
3823 if (GET_CODE (op) == SYMBOL_REF)
3824 return tls_symbolic_operand (op);
3825
3826 fmt = GET_RTX_FORMAT (GET_CODE (op));
3827 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3828 {
3829 if (fmt[i] == 'E')
3830 {
edd89d66 3831 int j;
be00aaa8 3832
3833 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3834 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
e5537457 3835 return true;
be00aaa8 3836 }
3837
3838 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
e5537457 3839 return true;
be00aaa8 3840 }
3841
e5537457 3842 return false;
be00aaa8 3843}
3844
4673c1a0 3845
f81e845f 3846/* Return true if OP is a legitimate general operand when
3847 generating PIC code. It is given that flag_pic is on
ba0e61d6 3848 and that OP satisfies CONSTANT_P. */
56769981 3849
4673c1a0 3850int
edd89d66 3851legitimate_pic_operand_p (rtx op)
4673c1a0 3852{
8b4a4127 3853 /* Accept all non-symbolic constants. */
4673c1a0 3854 if (!SYMBOLIC_CONST (op))
3855 return 1;
3856
f81e845f 3857 /* Reject everything else; must be handled
be00aaa8 3858 via emit_symbolic_move. */
4673c1a0 3859 return 0;
3860}
3861
56769981 3862/* Returns true if the constant value OP is a legitimate general operand.
ba0e61d6 3863 It is given that OP satisfies CONSTANT_P. */
56769981 3864
ca316360 3865static bool
3754d046 3866s390_legitimate_constant_p (machine_mode mode, rtx op)
4673c1a0 3867{
abf3beed 3868 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
76a4c804 3869 {
3870 if (GET_MODE_SIZE (mode) != 16)
3871 return 0;
3872
80fc7f56 3873 if (!satisfies_constraint_j00 (op)
3874 && !satisfies_constraint_jm1 (op)
3875 && !satisfies_constraint_jKK (op)
3876 && !satisfies_constraint_jxx (op)
3877 && !satisfies_constraint_jyy (op))
76a4c804 3878 return 0;
3879 }
3880
8b4a4127 3881 /* Accept all non-symbolic constants. */
4673c1a0 3882 if (!SYMBOLIC_CONST (op))
3883 return 1;
3884
be00aaa8 3885 /* Accept immediate LARL operands. */
ca316360 3886 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
be00aaa8 3887 return 1;
3888
3889 /* Thread-local symbols are never legal constants. This is
3890 so that emit_call knows that computing such addresses
3891 might require a function call. */
3892 if (TLS_SYMBOLIC_CONST (op))
3893 return 0;
3894
4673c1a0 3895 /* In the PIC case, symbolic constants must *not* be
3896 forced into the literal pool. We accept them here,
be00aaa8 3897 so that they will be handled by emit_symbolic_move. */
4673c1a0 3898 if (flag_pic)
3899 return 1;
3900
4673c1a0 3901 /* All remaining non-PIC symbolic constants are
3902 forced into the literal pool. */
3903 return 0;
3904}
3905
be00aaa8 3906/* Determine if it's legal to put X into the constant pool. This
3907 is not possible if X contains the address of a symbol that is
3908 not constant (TLS) or not known at final link time (PIC). */
3909
3910static bool
3754d046 3911s390_cannot_force_const_mem (machine_mode mode, rtx x)
be00aaa8 3912{
3913 switch (GET_CODE (x))
3914 {
3915 case CONST_INT:
3916 case CONST_DOUBLE:
ba0e61d6 3917 case CONST_WIDE_INT:
76a4c804 3918 case CONST_VECTOR:
be00aaa8 3919 /* Accept all non-symbolic constants. */
3920 return false;
3921
3922 case LABEL_REF:
3923 /* Labels are OK iff we are non-PIC. */
3924 return flag_pic != 0;
3925
3926 case SYMBOL_REF:
3927 /* 'Naked' TLS symbol references are never OK,
3928 non-TLS symbols are OK iff we are non-PIC. */
3929 if (tls_symbolic_operand (x))
3930 return true;
3931 else
3932 return flag_pic != 0;
3933
3934 case CONST:
7d7d7bd2 3935 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
be00aaa8 3936 case PLUS:
3937 case MINUS:
7d7d7bd2 3938 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3939 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
be00aaa8 3940
3941 case UNSPEC:
3942 switch (XINT (x, 1))
3943 {
3944 /* Only lt-relative or GOT-relative UNSPECs are OK. */
12ef3745 3945 case UNSPEC_LTREL_OFFSET:
3946 case UNSPEC_GOT:
3947 case UNSPEC_GOTOFF:
3948 case UNSPEC_PLTOFF:
be00aaa8 3949 case UNSPEC_TLSGD:
3950 case UNSPEC_TLSLDM:
3951 case UNSPEC_NTPOFF:
3952 case UNSPEC_DTPOFF:
3953 case UNSPEC_GOTNTPOFF:
3954 case UNSPEC_INDNTPOFF:
3955 return false;
3956
d345b493 3957	 /* If the literal pool shares the code section, we put
 3958	    execute template placeholders into the pool as well.  */
3959 case UNSPEC_INSN:
3960 return TARGET_CPU_ZARCH;
3961
be00aaa8 3962 default:
3963 return true;
3964 }
3965 break;
3966
3967 default:
32eda510 3968 gcc_unreachable ();
be00aaa8 3969 }
3970}
3971
8b4a4127 3972/* Returns true if the constant value OP is a legitimate general
f81e845f 3973 operand during and after reload. The difference to
8b4a4127 3974 legitimate_constant_p is that this function will not accept
3975 a constant that would need to be forced to the literal pool
33d033da 3976 before it can be used as operand.
3977 This function accepts all constants which can be loaded directly
3978 into a GPR. */
8b4a4127 3979
e5537457 3980bool
edd89d66 3981legitimate_reload_constant_p (rtx op)
8b4a4127 3982{
51aa1e9c 3983 /* Accept la(y) operands. */
f81e845f 3984 if (GET_CODE (op) == CONST_INT
51aa1e9c 3985 && DISP_IN_RANGE (INTVAL (op)))
e5537457 3986 return true;
51aa1e9c 3987
163277cf 3988 /* Accept l(g)hi/l(g)fi operands. */
8b4a4127 3989 if (GET_CODE (op) == CONST_INT
163277cf 3990 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
e5537457 3991 return true;
8b4a4127 3992
3993 /* Accept lliXX operands. */
dafc8d45 3994 if (TARGET_ZARCH
53239c89 3995 && GET_CODE (op) == CONST_INT
3996 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3997 && s390_single_part (op, word_mode, HImode, 0) >= 0)
e5537457 3998 return true;
8b4a4127 3999
163277cf 4000 if (TARGET_EXTIMM
4001 && GET_CODE (op) == CONST_INT
4002 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4003 && s390_single_part (op, word_mode, SImode, 0) >= 0)
4004 return true;
4005
8b4a4127 4006 /* Accept larl operands. */
dafc8d45 4007 if (TARGET_CPU_ZARCH
8b4a4127 4008 && larl_operand (op, VOIDmode))
e5537457 4009 return true;
8b4a4127 4010
70037005 4011 /* Accept floating-point zero operands that fit into a single GPR. */
4012 if (GET_CODE (op) == CONST_DOUBLE
4013 && s390_float_const_zero_p (op)
4014 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
4015 return true;
4016
53239c89 4017 /* Accept double-word operands that can be split. */
ba0e61d6 4018 if (GET_CODE (op) == CONST_WIDE_INT
4019 || (GET_CODE (op) == CONST_INT
4020 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
53239c89 4021 {
3754d046 4022 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
53239c89 4023 rtx hi = operand_subword (op, 0, 0, dword_mode);
4024 rtx lo = operand_subword (op, 1, 0, dword_mode);
4025 return legitimate_reload_constant_p (hi)
4026 && legitimate_reload_constant_p (lo);
4027 }
4028
8b4a4127 4029 /* Everything else cannot be handled without reload. */
e5537457 4030 return false;
8b4a4127 4031}
4032
33d033da 4033/* Returns true if the constant value OP is a legitimate fp operand
4034 during and after reload.
4035 This function accepts all constants which can be loaded directly
4036 into an FPR. */
4037
4038static bool
4039legitimate_reload_fp_constant_p (rtx op)
4040{
4041 /* Accept floating-point zero operands if the load zero instruction
81769881 4042 can be used. Prior to z196 the load fp zero instruction caused a
 4043     performance penalty if the result is used as a BFP number. */
33d033da 4044 if (TARGET_Z196
4045 && GET_CODE (op) == CONST_DOUBLE
4046 && s390_float_const_zero_p (op))
4047 return true;
4048
4049 return false;
4050}
4051
76a4c804 4052/* Returns true if the constant value OP is a legitimate vector operand
4053 during and after reload.
4054 This function accepts all constants which can be loaded directly
 4055   into a VR.  */
4056
4057static bool
4058legitimate_reload_vector_constant_p (rtx op)
4059{
76a4c804 4060 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
80fc7f56 4061 && (satisfies_constraint_j00 (op)
4062 || satisfies_constraint_jm1 (op)
4063 || satisfies_constraint_jKK (op)
4064 || satisfies_constraint_jxx (op)
4065 || satisfies_constraint_jyy (op)))
76a4c804 4066 return true;
4067
4068 return false;
4069}
4070
8deb3959 4071/* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
8b4a4127 4072 return the class of reg to actually use. */
4073
3359ccfd 4074static reg_class_t
4075s390_preferred_reload_class (rtx op, reg_class_t rclass)
8b4a4127 4076{
8b4a4127 4077 switch (GET_CODE (op))
4078 {
70037005 4079 /* Constants we cannot reload into general registers
4080 must be forced into the literal pool. */
76a4c804 4081 case CONST_VECTOR:
8b4a4127 4082 case CONST_DOUBLE:
4083 case CONST_INT:
ba0e61d6 4084 case CONST_WIDE_INT:
70037005 4085 if (reg_class_subset_p (GENERAL_REGS, rclass)
4086 && legitimate_reload_constant_p (op))
4087 return GENERAL_REGS;
4088 else if (reg_class_subset_p (ADDR_REGS, rclass)
4089 && legitimate_reload_constant_p (op))
4090 return ADDR_REGS;
33d033da 4091 else if (reg_class_subset_p (FP_REGS, rclass)
4092 && legitimate_reload_fp_constant_p (op))
4093 return FP_REGS;
76a4c804 4094 else if (reg_class_subset_p (VEC_REGS, rclass)
4095 && legitimate_reload_vector_constant_p (op))
4096 return VEC_REGS;
4097
33d033da 4098 return NO_REGS;
8b4a4127 4099
4100 /* If a symbolic constant or a PLUS is reloaded,
0b300c86 4101 it is most likely being used as an address, so
4102 prefer ADDR_REGS. If 'class' is not a superset
4103 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
de47476b 4104 case CONST:
37c55f71 4105 /* Symrefs cannot be pushed into the literal pool with -fPIC
4106 so we *MUST NOT* return NO_REGS for these cases
4107 (s390_cannot_force_const_mem will return true).
4108
4109 On the other hand we MUST return NO_REGS for symrefs with
4110 invalid addend which might have been pushed to the literal
4111 pool (no -fPIC). Usually we would expect them to be
4112 handled via secondary reload but this does not happen if
4113 they are used as literal pool slot replacement in reload
4114 inheritance (see emit_input_reload_insns). */
de47476b 4115 if (TARGET_CPU_ZARCH
4116 && GET_CODE (XEXP (op, 0)) == PLUS
4117 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4118 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4119 {
37c55f71 4120 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
de47476b 4121 return ADDR_REGS;
4122 else
4123 return NO_REGS;
4124 }
4125 /* fallthrough */
8b4a4127 4126 case LABEL_REF:
4127 case SYMBOL_REF:
08d88e72 4128 if (!legitimate_reload_constant_p (op))
4129 return NO_REGS;
4130 /* fallthrough */
4131 case PLUS:
4132 /* load address will be used. */
8deb3959 4133 if (reg_class_subset_p (ADDR_REGS, rclass))
08d88e72 4134 return ADDR_REGS;
0b300c86 4135 else
4136 return NO_REGS;
8b4a4127 4137
4138 default:
4139 break;
4140 }
4141
8deb3959 4142 return rclass;
8b4a4127 4143}
4673c1a0 4144
e68d6a13 4145/* Return true if ADDR is SYMBOL_REF + addend with addend being a
4146 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4147 aligned. */
4148
4149bool
4150s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4151{
4152 HOST_WIDE_INT addend;
4153 rtx symref;
4154
78affa36 4155 /* The "required alignment" might be 0 (e.g. for certain structs
4156 accessed via BLKmode). Early abort in this case, as well as when
4157 an alignment > 8 is required. */
4158 if (alignment < 2 || alignment > 8)
4159 return false;
4160
2a672556 4161 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4162 return false;
62cb5855 4163
2a672556 4164 if (addend & (alignment - 1))
e68d6a13 4165 return false;
4166
78affa36 4167 if (GET_CODE (symref) == SYMBOL_REF)
4168 {
4169 /* We have load-relative instructions for 2-byte, 4-byte, and
4170 8-byte alignment so allow only these. */
4171 switch (alignment)
4172 {
4173 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4174 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4175 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4176 default: return false;
4177 }
4178 }
2a672556 4179
4180 if (GET_CODE (symref) == UNSPEC
4181 && alignment <= UNITS_PER_LONG)
4182 return true;
4183
4184 return false;
e68d6a13 4185}
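/* Example (added): with ALIGNMENT == 4, an address like sym + 8 is
   accepted as long as sym is not marked SYMBOL_FLAG_NOTALIGN4, while
   sym + 6 is rejected by the (addend & (alignment - 1)) check
   above.  */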
4186
4187/* ADDR is moved into REG using larl. If ADDR isn't a valid larl
 4188   operand, SCRATCH is used to load the even part of the address, and
 4189   one is added afterwards.  */
4190
4191void
4192s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4193{
4194 HOST_WIDE_INT addend;
4195 rtx symref;
4196
2a672556 4197 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
e68d6a13 4198 gcc_unreachable ();
4199
4200 if (!(addend & 1))
4201 /* Easy case. The addend is even so larl will do fine. */
4202 emit_move_insn (reg, addr);
4203 else
4204 {
4205 /* We can leave the scratch register untouched if the target
4206 register is a valid base register. */
4207 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4208 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4209 scratch = reg;
4210
4211 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4212 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4213
4214 if (addend != 1)
4215 emit_move_insn (scratch,
4216 gen_rtx_CONST (Pmode,
4217 gen_rtx_PLUS (Pmode, symref,
4218 GEN_INT (addend - 1))));
4219 else
4220 emit_move_insn (scratch, symref);
4221
4222 /* Increment the address using la in order to avoid clobbering cc. */
de47476b 4223 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
e68d6a13 4224 }
4225}
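/* Illustration (added): for ADDR == sym + 5 the code above emits
   roughly

     larl  %scratch, sym+4
     la    %reg, 1(%scratch)

   so that larl only ever sees an even addend.  */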
4226
4227/* Generate what is necessary to move between REG and MEM using
4228 SCRATCH. The direction is given by TOMEM. */
4229
4230void
4231s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4232{
4233 /* Reload might have pulled a constant out of the literal pool.
4234 Force it back in. */
4235 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
ba0e61d6 4236 || GET_CODE (mem) == CONST_WIDE_INT
76a4c804 4237 || GET_CODE (mem) == CONST_VECTOR
e68d6a13 4238 || GET_CODE (mem) == CONST)
4239 mem = force_const_mem (GET_MODE (reg), mem);
4240
4241 gcc_assert (MEM_P (mem));
4242
4243 /* For a load from memory we can leave the scratch register
4244 untouched if the target register is a valid base register. */
4245 if (!tomem
4246 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4247 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4248 && GET_MODE (reg) == GET_MODE (scratch))
4249 scratch = reg;
4250
4251 /* Load address into scratch register. Since we can't have a
4252 secondary reload for a secondary reload we have to cover the case
4253 where larl would need a secondary reload here as well. */
4254 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4255
4256 /* Now we can use a standard load/store to do the move. */
4257 if (tomem)
4258 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4259 else
4260 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4261}
4262
328d5423 4263/* Inform reload about cases where moving X with a mode MODE to a register in
8deb3959 4264 RCLASS requires an extra scratch or immediate register. Return the class
328d5423 4265 needed for the immediate register. */
429f9fdb 4266
964229b7 4267static reg_class_t
4268s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3754d046 4269 machine_mode mode, secondary_reload_info *sri)
328d5423 4270{
964229b7 4271 enum reg_class rclass = (enum reg_class) rclass_i;
4272
328d5423 4273 /* Intermediate register needed. */
8deb3959 4274 if (reg_classes_intersect_p (CC_REGS, rclass))
bcbf02a5 4275 return GENERAL_REGS;
4276
76a4c804 4277 if (TARGET_VX)
4278 {
4279 /* The vst/vl vector move instructions allow only for short
4280 displacements. */
4281 if (MEM_P (x)
4282 && GET_CODE (XEXP (x, 0)) == PLUS
4283 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4284 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4285 && reg_class_subset_p (rclass, VEC_REGS)
4286 && (!reg_class_subset_p (rclass, FP_REGS)
4287 || (GET_MODE_SIZE (mode) > 8
4288 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4289 {
4290 if (in_p)
4291 sri->icode = (TARGET_64BIT ?
4292 CODE_FOR_reloaddi_la_in :
4293 CODE_FOR_reloadsi_la_in);
4294 else
4295 sri->icode = (TARGET_64BIT ?
4296 CODE_FOR_reloaddi_la_out :
4297 CODE_FOR_reloadsi_la_out);
4298 }
4299 }
4300
e68d6a13 4301 if (TARGET_Z10)
4302 {
08d88e72 4303 HOST_WIDE_INT offset;
4304 rtx symref;
4305
e68d6a13 4306 /* On z10 several optimizer steps may generate larl operands with
4307 an odd addend. */
4308 if (in_p
2a672556 4309 && s390_loadrelative_operand_p (x, &symref, &offset)
e68d6a13 4310 && mode == Pmode
78affa36 4311 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
08d88e72 4312 && (offset & 1) == 1)
e68d6a13 4313 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4314 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4315
ddb92daa 4316 /* Handle all the (mem (symref)) accesses we cannot use the z10
4317 instructions for. */
e68d6a13 4318 if (MEM_P (x)
2a672556 4319 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
ddb92daa 4320 && (mode == QImode
a1142483 4321 || !reg_class_subset_p (rclass, GENERAL_REGS)
ddb92daa 4322 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4323 || !s390_check_symref_alignment (XEXP (x, 0),
4324 GET_MODE_SIZE (mode))))
e68d6a13 4325 {
4326#define __SECONDARY_RELOAD_CASE(M,m) \
916ace94 4327 case E_##M##mode: \
e68d6a13 4328 if (TARGET_64BIT) \
4329 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4330 CODE_FOR_reload##m##di_tomem_z10; \
4331 else \
4332 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4333 CODE_FOR_reload##m##si_tomem_z10; \
4334 break;
4335
4336 switch (GET_MODE (x))
4337 {
4338 __SECONDARY_RELOAD_CASE (QI, qi);
4339 __SECONDARY_RELOAD_CASE (HI, hi);
4340 __SECONDARY_RELOAD_CASE (SI, si);
4341 __SECONDARY_RELOAD_CASE (DI, di);
4342 __SECONDARY_RELOAD_CASE (TI, ti);
4343 __SECONDARY_RELOAD_CASE (SF, sf);
4344 __SECONDARY_RELOAD_CASE (DF, df);
4345 __SECONDARY_RELOAD_CASE (TF, tf);
4346 __SECONDARY_RELOAD_CASE (SD, sd);
4347 __SECONDARY_RELOAD_CASE (DD, dd);
4348 __SECONDARY_RELOAD_CASE (TD, td);
76a4c804 4349 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4350 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4351 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4352 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4353 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4354 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4355 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4356 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4357 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4358 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4359 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4360 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4361 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4362 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4363 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4364 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4365 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4366 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4367 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4368 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4369 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
e68d6a13 4370 default:
4371 gcc_unreachable ();
4372 }
4373#undef __SECONDARY_RELOAD_CASE
4374 }
4375 }
4376
328d5423 4377 /* We need a scratch register when loading a PLUS expression which
4378 is not a legitimate operand of the LOAD ADDRESS instruction. */
7b1bda1c 4379 /* LRA can deal with transformation of plus op very well -- so we
4380 don't need to prompt LRA in this case. */
4381 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
328d5423 4382 sri->icode = (TARGET_64BIT ?
4383 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4384
efec32e0 4385 /* Performing a multiword move from or to memory we have to make sure the
328d5423 4386 second chunk in memory is addressable without causing a displacement
4387 overflow. If that would be the case we calculate the address in
4388 a scratch register. */
4389 if (MEM_P (x)
4390 && GET_CODE (XEXP (x, 0)) == PLUS
4391 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4392 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
6938bdf8 4393 + GET_MODE_SIZE (mode) - 1))
328d5423 4394 {
efec32e0 4395 /* For GENERAL_REGS a displacement overflow is no problem if occurring
328d5423 4396	 in an s_operand address since we may fall back to lm/stm. So we only
4397 have to care about overflows in the b+i+d case. */
8deb3959 4398 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
328d5423 4399 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4400 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4401 /* For FP_REGS no lm/stm is available so this check is triggered
4402 for displacement overflows in b+i+d and b+d like addresses. */
8deb3959 4403 || (reg_classes_intersect_p (FP_REGS, rclass)
328d5423 4404 && s390_class_max_nregs (FP_REGS, mode) > 1))
4405 {
4406 if (in_p)
4407 sri->icode = (TARGET_64BIT ?
76a4c804 4408 CODE_FOR_reloaddi_la_in :
4409 CODE_FOR_reloadsi_la_in);
328d5423 4410 else
4411 sri->icode = (TARGET_64BIT ?
76a4c804 4412 CODE_FOR_reloaddi_la_out :
4413 CODE_FOR_reloadsi_la_out);
328d5423 4414 }
4415 }
bcbf02a5 4416
08b5e262 4417 /* A scratch address register is needed when a symbolic constant is
 4418     copied to r0 when compiling with -fPIC. In other cases the target
4419 register might be used as temporary (see legitimize_pic_address). */
8deb3959 4420 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
08b5e262 4421 sri->icode = (TARGET_64BIT ?
4422 CODE_FOR_reloaddi_PIC_addr :
4423 CODE_FOR_reloadsi_PIC_addr);
4424
328d5423 4425 /* Either scratch or no register needed. */
66795431 4426 return NO_REGS;
4427}
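/* Example (added): a vector access such as
     (mem:V16QI (plus (reg) (const_int 5000)))
   cannot be handled by vl/vst directly because those instructions
   only take a short (12-bit unsigned) displacement; the reload*_la_*
   patterns selected above compute the address in an address register
   first.  */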
4428
c836e75b 4429/* Implement TARGET_SECONDARY_MEMORY_NEEDED.
4430
4431 We need secondary memory to move data between GPRs and FPRs.
4432
4433 - With DFP the ldgr lgdr instructions are available. Due to the
4434 different alignment we cannot use them for SFmode. For 31 bit a
4435 64 bit value in GPR would be a register pair so here we still
4436 need to go via memory.
4437
4438 - With z13 we can do the SF/SImode moves with vlgvf. Due to the
4439 overlapping of FPRs and VRs we still disallow TF/TD modes to be
4440 in full VRs so as before also on z13 we do these moves via
4441 memory.
4442
4443 FIXME: Should we try splitting it into two vlgvg's/vlvg's instead? */
4444
4445static bool
4446s390_secondary_memory_needed (machine_mode mode,
4447 reg_class_t class1, reg_class_t class2)
4448{
4449 return (((reg_classes_intersect_p (class1, VEC_REGS)
4450 && reg_classes_intersect_p (class2, GENERAL_REGS))
4451 || (reg_classes_intersect_p (class1, GENERAL_REGS)
4452 && reg_classes_intersect_p (class2, VEC_REGS)))
4453 && (!TARGET_DFP || !TARGET_64BIT || GET_MODE_SIZE (mode) != 8)
4454 && (!TARGET_VX || (SCALAR_FLOAT_MODE_P (mode)
4455 && GET_MODE_SIZE (mode) > 8)));
4456}
4457
1041f930 4458/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
4459
4460 get_secondary_mem widens its argument to BITS_PER_WORD which loses on 64bit
4461 because the movsi and movsf patterns don't handle r/f moves. */
4462
4463static machine_mode
4464s390_secondary_memory_needed_mode (machine_mode mode)
4465{
4466 if (GET_MODE_BITSIZE (mode) < 32)
4467 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
4468 return mode;
4469}
4470
64f977d6 4471/* Generate code to load SRC, which is PLUS that is not a
4472 legitimate operand for the LA instruction, into TARGET.
4473 SCRATCH may be used as scratch register. */
4474
4475void
edd89d66 4476s390_expand_plus_operand (rtx target, rtx src,
4477 rtx scratch)
64f977d6 4478{
e7f0624a 4479 rtx sum1, sum2;
8ba34dcd 4480 struct s390_address ad;
dc4951d9 4481
dc4951d9 4482 /* src must be a PLUS; get its two operands. */
32eda510 4483 gcc_assert (GET_CODE (src) == PLUS);
4484 gcc_assert (GET_MODE (src) == Pmode);
64f977d6 4485
c10847b9 4486 /* Check if any of the two operands is already scheduled
4487 for replacement by reload. This can happen e.g. when
4488 float registers occur in an address. */
4489 sum1 = find_replacement (&XEXP (src, 0));
4490 sum2 = find_replacement (&XEXP (src, 1));
a5004c3d 4491 src = gen_rtx_PLUS (Pmode, sum1, sum2);
a5004c3d 4492
e7f0624a 4493 /* If the address is already strictly valid, there's nothing to do. */
4494 if (!s390_decompose_address (src, &ad)
1e280623 4495 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4496 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
64f977d6 4497 {
e7f0624a 4498 /* Otherwise, one of the operands cannot be an address register;
4499 we reload its value into the scratch register. */
4500 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4501 {
4502 emit_move_insn (scratch, sum1);
4503 sum1 = scratch;
4504 }
4505 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4506 {
4507 emit_move_insn (scratch, sum2);
4508 sum2 = scratch;
4509 }
64f977d6 4510
e7f0624a 4511 /* According to the way these invalid addresses are generated
4512 in reload.c, it should never happen (at least on s390) that
4513 *neither* of the PLUS components, after find_replacements
4514 was applied, is an address register. */
4515 if (sum1 == scratch && sum2 == scratch)
4516 {
4517 debug_rtx (src);
32eda510 4518 gcc_unreachable ();
e7f0624a 4519 }
64f977d6 4520
e7f0624a 4521 src = gen_rtx_PLUS (Pmode, sum1, sum2);
64f977d6 4522 }
4523
4524 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4525 is only ever performed on addresses, so we can mark the
4526 sum as legitimate for LA in any case. */
4fbc4db5 4527 s390_load_address (target, src);
64f977d6 4528}
4529
4530
e5537457 4531/* Return true if ADDR is a valid memory address.
875862bf 4532 STRICT specifies whether strict register checking applies. */
4673c1a0 4533
fd50b071 4534static bool
3754d046 4535s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4673c1a0 4536{
875862bf 4537 struct s390_address ad;
e68d6a13 4538
4539 if (TARGET_Z10
4540 && larl_operand (addr, VOIDmode)
4541 && (mode == VOIDmode
4542 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4543 return true;
4544
875862bf 4545 if (!s390_decompose_address (addr, &ad))
e5537457 4546 return false;
8ba34dcd 4547
4548 if (strict)
4549 {
1e280623 4550 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
e5537457 4551 return false;
1e280623 4552
4553 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
e5537457 4554 return false;
8ba34dcd 4555 }
4556 else
4557 {
ffead1ca 4558 if (ad.base
1e280623 4559 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4560 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
e5537457 4561 return false;
ffead1ca 4562
1e280623 4563 if (ad.indx
4564 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4565 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4566 return false;
8ba34dcd 4567 }
e5537457 4568 return true;
4673c1a0 4569}
4570
e5537457 4571/* Return true if OP is a valid operand for the LA instruction.
2eb8fe23 4572 In 31-bit, we need to prove that the result is used as an
4573 address, as LA performs only a 31-bit addition. */
4574
e5537457 4575bool
edd89d66 4576legitimate_la_operand_p (rtx op)
2eb8fe23 4577{
4578 struct s390_address addr;
8ba34dcd 4579 if (!s390_decompose_address (op, &addr))
e5537457 4580 return false;
2eb8fe23 4581
e5537457 4582 return (TARGET_64BIT || addr.pointer);
64f977d6 4583}
2eb8fe23 4584
e5537457 4585/* Return true if it is valid *and* preferable to use LA to
c6061690 4586 compute the sum of OP1 and OP2. */
f81e845f 4587
e5537457 4588bool
c6061690 4589preferred_la_operand_p (rtx op1, rtx op2)
a40b2054 4590{
4591 struct s390_address addr;
c6061690 4592
4593 if (op2 != const0_rtx)
4594 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4595
4596 if (!s390_decompose_address (op1, &addr))
e5537457 4597 return false;
1e280623 4598 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
e5537457 4599 return false;
1e280623 4600 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
e5537457 4601 return false;
a40b2054 4602
33d033da 4603 /* Avoid LA instructions with index register on z196; it is
81769881 4604 preferable to use regular add instructions when possible.
4605 Starting with zEC12 the la with index register is "uncracked"
4606 again. */
33d033da 4607 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4608 return false;
4609
a40b2054 4610 if (!TARGET_64BIT && !addr.pointer)
e5537457 4611 return false;
a40b2054 4612
4613 if (addr.pointer)
e5537457 4614 return true;
a40b2054 4615
ec3b9583 4616 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4617 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
e5537457 4618 return true;
a40b2054 4619
e5537457 4620 return false;
a40b2054 4621}
4622
4fbc4db5 4623/* Emit a forced load-address operation to load SRC into DST.
4624 This will use the LOAD ADDRESS instruction even in situations
4625 where legitimate_la_operand_p (SRC) returns false. */
2eb8fe23 4626
4fbc4db5 4627void
b40da9a7 4628s390_load_address (rtx dst, rtx src)
64f977d6 4629{
4fbc4db5 4630 if (TARGET_64BIT)
4631 emit_move_insn (dst, src);
4632 else
4633 emit_insn (gen_force_la_31 (dst, src));
2eb8fe23 4634}
4635
9852c8ae 4636/* Return true if it is OK to use SYMBOL_REF in a relative address.  */
4637
4638bool
4639s390_rel_address_ok_p (rtx symbol_ref)
4640{
4641 tree decl;
4642
4643 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4644 return true;
4645
4646 decl = SYMBOL_REF_DECL (symbol_ref);
4647
4648 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4649 return (s390_pic_data_is_text_relative
4650 || (decl
4651 && TREE_CODE (decl) == FUNCTION_DECL));
4652
4653 return false;
4654}
4655
4673c1a0 4656/* Return a legitimate reference for ORIG (an address) using the
4657 register REG. If REG is 0, a new pseudo is generated.
4658
4659 There are two types of references that must be handled:
4660
4661 1. Global data references must load the address from the GOT, via
4662 the PIC reg. An insn is emitted to do this load, and the reg is
4663 returned.
4664
4665 2. Static data references, constant pool addresses, and code labels
4666 compute the address as an offset from the GOT, whose base is in
a3e33162 4667 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4673c1a0 4668 differentiate them from global data objects. The returned
4669 address is the PIC reg + an unspec constant.
4670
bc409cb4 4671 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4673c1a0 4672 reg also appears in the address. */
4673
4674rtx
b40da9a7 4675legitimize_pic_address (rtx orig, rtx reg)
4673c1a0 4676{
4677 rtx addr = orig;
2a672556 4678 rtx addend = const0_rtx;
8deb3959 4679 rtx new_rtx = orig;
4673c1a0 4680
1ed004b7 4681 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4682
2a672556 4683 if (GET_CODE (addr) == CONST)
4684 addr = XEXP (addr, 0);
4685
4686 if (GET_CODE (addr) == PLUS)
4673c1a0 4687 {
2a672556 4688 addend = XEXP (addr, 1);
4689 addr = XEXP (addr, 0);
4690 }
4691
4692 if ((GET_CODE (addr) == LABEL_REF
9852c8ae 4693 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
2a672556 4694 || (GET_CODE (addr) == UNSPEC &&
4695 (XINT (addr, 1) == UNSPEC_GOTENT
4696 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4697 && GET_CODE (addend) == CONST_INT)
4698 {
4699 /* This can be locally addressed. */
4700
4701 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4702 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4703 gen_rtx_CONST (Pmode, addr) : addr);
4704
4705 if (TARGET_CPU_ZARCH
4706 && larl_operand (const_addr, VOIDmode)
b422d8c0 4707 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4708 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
2a672556 4709 {
4710 if (INTVAL (addend) & 1)
4711 {
4712 /* LARL can't handle odd offsets, so emit a pair of LARL
4713 and LA. */
4714 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4715
4716 if (!DISP_IN_RANGE (INTVAL (addend)))
4717 {
4718 HOST_WIDE_INT even = INTVAL (addend) - 1;
4719 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4720 addr = gen_rtx_CONST (Pmode, addr);
4721 addend = const1_rtx;
4722 }
4723
4724 emit_move_insn (temp, addr);
4725 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4726
4727 if (reg != 0)
4728 {
4729 s390_load_address (reg, new_rtx);
4730 new_rtx = reg;
4731 }
4732 }
4733 else
4734 {
4735 /* If the offset is even, we can just use LARL. This
4736 will happen automatically. */
4737 }
4738 }
4673c1a0 4739 else
2a672556 4740 {
4741 /* No larl - Access local symbols relative to the GOT. */
4673c1a0 4742
2a672556 4743 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4673c1a0 4744
12ef3745 4745 if (reload_in_progress || reload_completed)
3072d30e 4746 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
12ef3745 4747
2a672556 4748 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4749 if (addend != const0_rtx)
4750 addr = gen_rtx_PLUS (Pmode, addr, addend);
4751 addr = gen_rtx_CONST (Pmode, addr);
4752 addr = force_const_mem (Pmode, addr);
4673c1a0 4753 emit_move_insn (temp, addr);
4754
2a672556 4755 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4756 if (reg != 0)
4757 {
4758 s390_load_address (reg, new_rtx);
4759 new_rtx = reg;
4760 }
4761 }
4673c1a0 4762 }
2a672556 4763 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4673c1a0 4764 {
2a672556 4765 /* A non-local symbol reference without addend.
4766
4767 The symbol ref is wrapped into an UNSPEC to make sure the
4768 proper operand modifier (@GOT or @GOTENT) will be emitted.
4769 This will tell the linker to put the symbol into the GOT.
4770
4771 Additionally the code dereferencing the GOT slot is emitted here.
4772
4773 An addend to the symref needs to be added afterwards.
4774 legitimize_pic_address calls itself recursively to handle
4775 that case. So no need to do it here. */
4776
4673c1a0 4777 if (reg == 0)
4778 reg = gen_reg_rtx (Pmode);
4779
2a672556 4780 if (TARGET_Z10)
4781 {
4782 /* Use load relative if possible.
4783 lgrl <target>, sym@GOTENT */
4784 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4785 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4786 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4787
4788 emit_move_insn (reg, new_rtx);
4789 new_rtx = reg;
4790 }
4791 else if (flag_pic == 1)
4673c1a0 4792 {
2a672556 4793 /* Assume GOT offset is a valid displacement operand (< 4k
4794 or < 512k with z990). This is handled the same way in
4795 both 31- and 64-bit code (@GOT).
4796 lg <target>, sym@GOT(r12) */
4673c1a0 4797
9a2a66ae 4798 if (reload_in_progress || reload_completed)
3072d30e 4799 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4673c1a0 4800
8deb3959 4801 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4802 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4803 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4804 new_rtx = gen_const_mem (Pmode, new_rtx);
4805 emit_move_insn (reg, new_rtx);
4806 new_rtx = reg;
4673c1a0 4807 }
dafc8d45 4808 else if (TARGET_CPU_ZARCH)
4673c1a0 4809 {
4810 /* If the GOT offset might be >= 4k, we determine the position
2a672556 4811 of the GOT entry via a PC-relative LARL (@GOTENT).
4812 larl temp, sym@GOTENT
4813 lg <target>, 0(temp) */
4673c1a0 4814
08b5e262 4815 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4816
4817 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4818 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4673c1a0 4819
8deb3959 4820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
2a672556 4822 emit_move_insn (temp, new_rtx);
4673c1a0 4823
2a672556 4824 new_rtx = gen_const_mem (Pmode, temp);
8deb3959 4825 emit_move_insn (reg, new_rtx);
2a672556 4826
8deb3959 4827 new_rtx = reg;
4673c1a0 4828 }
4829 else
4830 {
f81e845f 4831 /* If the GOT offset might be >= 4k, we have to load it
2a672556 4832 from the literal pool (@GOT).
4833
4834 lg temp, lit-litbase(r13)
4835 lg <target>, 0(temp)
4836 lit: .long sym@GOT */
4673c1a0 4837
08b5e262 4838 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4839
4840 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4841 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4673c1a0 4842
9a2a66ae 4843 if (reload_in_progress || reload_completed)
3072d30e 4844 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4673c1a0 4845
12ef3745 4846 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
525d1294 4847 addr = gen_rtx_CONST (Pmode, addr);
4848 addr = force_const_mem (Pmode, addr);
4673c1a0 4849 emit_move_insn (temp, addr);
4850
8deb3959 4851 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4852 new_rtx = gen_const_mem (Pmode, new_rtx);
4853 emit_move_insn (reg, new_rtx);
4854 new_rtx = reg;
4673c1a0 4855 }
f81e845f 4856 }
2a672556 4857 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4673c1a0 4858 {
2a672556 4859 gcc_assert (XVECLEN (addr, 0) == 1);
4860 switch (XINT (addr, 1))
4673c1a0 4861 {
2a672556 4862 /* These address symbols (or PLT slots) relative to the GOT
4863 (not GOT slots!). In general this will exceed the
 4864	   displacement range, so these values belong in the literal
 4865	   pool.  */
4866 case UNSPEC_GOTOFF:
4867 case UNSPEC_PLTOFF:
4868 new_rtx = force_const_mem (Pmode, orig);
4869 break;
4673c1a0 4870
2a672556 4871 /* For -fPIC the GOT size might exceed the displacement
4872 range so make sure the value is in the literal pool. */
4873 case UNSPEC_GOT:
4874 if (flag_pic == 2)
4875 new_rtx = force_const_mem (Pmode, orig);
4876 break;
4673c1a0 4877
2a672556 4878 /* For @GOTENT larl is used. This is handled like local
4879 symbol refs. */
4880 case UNSPEC_GOTENT:
4881 gcc_unreachable ();
4882 break;
4673c1a0 4883
2a672556 4884 /* @PLT is OK as is on 64-bit, must be converted to
4885 GOT-relative @PLTOFF on 31-bit. */
4886 case UNSPEC_PLT:
4887 if (!TARGET_CPU_ZARCH)
4673c1a0 4888 {
2a672556 4889 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4890
4891 if (reload_in_progress || reload_completed)
4892 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4893
4894 addr = XVECEXP (addr, 0, 0);
4895 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4896 UNSPEC_PLTOFF);
4897 if (addend != const0_rtx)
4898 addr = gen_rtx_PLUS (Pmode, addr, addend);
4899 addr = gen_rtx_CONST (Pmode, addr);
4900 addr = force_const_mem (Pmode, addr);
4901 emit_move_insn (temp, addr);
4902
4903 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4904 if (reg != 0)
4673c1a0 4905 {
2a672556 4906 s390_load_address (reg, new_rtx);
4907 new_rtx = reg;
4673c1a0 4908 }
2a672556 4909 }
4910 else
4911 /* On 64 bit larl can be used. This case is handled like
4912 local symbol refs. */
4913 gcc_unreachable ();
4914 break;
4915
4916 /* Everything else cannot happen. */
4917 default:
4918 gcc_unreachable ();
4919 }
4920 }
4921 else if (addend != const0_rtx)
4922 {
4923 /* Otherwise, compute the sum. */
4673c1a0 4924
2a672556 4925 rtx base = legitimize_pic_address (addr, reg);
4926 new_rtx = legitimize_pic_address (addend,
4927 base == reg ? NULL_RTX : reg);
4928 if (GET_CODE (new_rtx) == CONST_INT)
4929 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4930 else
4931 {
4932 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4933 {
4934 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4935 new_rtx = XEXP (new_rtx, 1);
4673c1a0 4936 }
2a672556 4937 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4673c1a0 4938 }
2a672556 4939
4940 if (GET_CODE (new_rtx) == CONST)
4941 new_rtx = XEXP (new_rtx, 0);
4942 new_rtx = force_operand (new_rtx, 0);
4673c1a0 4943 }
2a672556 4944
8deb3959 4945 return new_rtx;
4673c1a0 4946}
4947
be00aaa8 4948/* Load the thread pointer into a register. */
4949
cc87d0c5 4950rtx
4951s390_get_thread_pointer (void)
be00aaa8 4952{
923cf36d 4953 rtx tp = gen_reg_rtx (Pmode);
be00aaa8 4954
923cf36d 4955 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
be00aaa8 4956 mark_reg_pointer (tp, BITS_PER_WORD);
4957
4958 return tp;
4959}
4960
7346ca58 4961/* Emit a tls call insn. The call target is the SYMBOL_REF stored
4962 in s390_tls_symbol which always refers to __tls_get_offset.
 4963   The returned offset is written to RESULT_REG and a USE rtx is
4964 generated for TLS_CALL. */
be00aaa8 4965
4966static GTY(()) rtx s390_tls_symbol;
7346ca58 4967
4968static void
4969s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
be00aaa8 4970{
7346ca58 4971 rtx insn;
f588eb9f 4972
c60a7572 4973 if (!flag_pic)
4974 emit_insn (s390_load_got ());
7346ca58 4975
be00aaa8 4976 if (!s390_tls_symbol)
4977 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4978
f588eb9f 4979 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4980 gen_rtx_REG (Pmode, RETURN_REGNUM));
7346ca58 4981
4982 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
9c2a0c05 4983 RTL_CONST_CALL_P (insn) = 1;
be00aaa8 4984}
4985
4986/* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4987 this (thread-local) address. REG may be used as temporary. */
4988
4989static rtx
b40da9a7 4990legitimize_tls_address (rtx addr, rtx reg)
be00aaa8 4991{
db7dd023 4992 rtx new_rtx, tls_call, temp, base, r2;
4993 rtx_insn *insn;
be00aaa8 4994
4995 if (GET_CODE (addr) == SYMBOL_REF)
4996 switch (tls_symbolic_operand (addr))
4997 {
4998 case TLS_MODEL_GLOBAL_DYNAMIC:
4999 start_sequence ();
5000 r2 = gen_rtx_REG (Pmode, 2);
5001 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
8deb3959 5002 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5003 new_rtx = force_const_mem (Pmode, new_rtx);
5004 emit_move_insn (r2, new_rtx);
7346ca58 5005 s390_emit_tls_call_insn (r2, tls_call);
be00aaa8 5006 insn = get_insns ();
5007 end_sequence ();
5008
8deb3959 5009 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
be00aaa8 5010 temp = gen_reg_rtx (Pmode);
8deb3959 5011 emit_libcall_block (insn, temp, r2, new_rtx);
be00aaa8 5012
8deb3959 5013 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
be00aaa8 5014 if (reg != 0)
5015 {
8deb3959 5016 s390_load_address (reg, new_rtx);
5017 new_rtx = reg;
be00aaa8 5018 }
5019 break;
5020
5021 case TLS_MODEL_LOCAL_DYNAMIC:
5022 start_sequence ();
5023 r2 = gen_rtx_REG (Pmode, 2);
5024 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
8deb3959 5025 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5026 new_rtx = force_const_mem (Pmode, new_rtx);
5027 emit_move_insn (r2, new_rtx);
7346ca58 5028 s390_emit_tls_call_insn (r2, tls_call);
be00aaa8 5029 insn = get_insns ();
5030 end_sequence ();
5031
8deb3959 5032 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
be00aaa8 5033 temp = gen_reg_rtx (Pmode);
8deb3959 5034 emit_libcall_block (insn, temp, r2, new_rtx);
be00aaa8 5035
8deb3959 5036 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
be00aaa8 5037 base = gen_reg_rtx (Pmode);
8deb3959 5038 s390_load_address (base, new_rtx);
be00aaa8 5039
8deb3959 5040 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
5041 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5042 new_rtx = force_const_mem (Pmode, new_rtx);
be00aaa8 5043 temp = gen_reg_rtx (Pmode);
8deb3959 5044 emit_move_insn (temp, new_rtx);
be00aaa8 5045
8deb3959 5046 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
be00aaa8 5047 if (reg != 0)
5048 {
8deb3959 5049 s390_load_address (reg, new_rtx);
5050 new_rtx = reg;
be00aaa8 5051 }
5052 break;
5053
5054 case TLS_MODEL_INITIAL_EXEC:
5055 if (flag_pic == 1)
5056 {
5057 /* Assume GOT offset < 4k. This is handled the same way
5058 in both 31- and 64-bit code. */
5059
5060 if (reload_in_progress || reload_completed)
3072d30e 5061 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
be00aaa8 5062
8deb3959 5063 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5064 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5065 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5066 new_rtx = gen_const_mem (Pmode, new_rtx);
be00aaa8 5067 temp = gen_reg_rtx (Pmode);
8deb3959 5068 emit_move_insn (temp, new_rtx);
be00aaa8 5069 }
dafc8d45 5070 else if (TARGET_CPU_ZARCH)
be00aaa8 5071 {
5072 /* If the GOT offset might be >= 4k, we determine the position
5073 of the GOT entry via a PC-relative LARL. */
5074
8deb3959 5075 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5076 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
be00aaa8 5077 temp = gen_reg_rtx (Pmode);
8deb3959 5078 emit_move_insn (temp, new_rtx);
be00aaa8 5079
8deb3959 5080 new_rtx = gen_const_mem (Pmode, temp);
be00aaa8 5081 temp = gen_reg_rtx (Pmode);
8deb3959 5082 emit_move_insn (temp, new_rtx);
be00aaa8 5083 }
5084 else if (flag_pic)
5085 {
f81e845f 5086 /* If the GOT offset might be >= 4k, we have to load it
be00aaa8 5087 from the literal pool. */
5088
5089 if (reload_in_progress || reload_completed)
3072d30e 5090 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
be00aaa8 5091
8deb3959 5092 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5093 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5094 new_rtx = force_const_mem (Pmode, new_rtx);
be00aaa8 5095 temp = gen_reg_rtx (Pmode);
8deb3959 5096 emit_move_insn (temp, new_rtx);
be00aaa8 5097
8deb3959 5098 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5099 new_rtx = gen_const_mem (Pmode, new_rtx);
be00aaa8 5100
8deb3959 5101 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
be00aaa8 5102 temp = gen_reg_rtx (Pmode);
d1f9b275 5103 emit_insn (gen_rtx_SET (temp, new_rtx));
be00aaa8 5104 }
5105 else
5106 {
5107 /* In position-dependent code, load the absolute address of
5108 the GOT entry from the literal pool. */
5109
8deb3959 5110 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5111 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5112 new_rtx = force_const_mem (Pmode, new_rtx);
be00aaa8 5113 temp = gen_reg_rtx (Pmode);
8deb3959 5114 emit_move_insn (temp, new_rtx);
be00aaa8 5115
8deb3959 5116 new_rtx = temp;
5117 new_rtx = gen_const_mem (Pmode, new_rtx);
5118 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
be00aaa8 5119 temp = gen_reg_rtx (Pmode);
d1f9b275 5120 emit_insn (gen_rtx_SET (temp, new_rtx));
be00aaa8 5121 }
5122
8deb3959 5123 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
be00aaa8 5124 if (reg != 0)
5125 {
8deb3959 5126 s390_load_address (reg, new_rtx);
5127 new_rtx = reg;
be00aaa8 5128 }
5129 break;
5130
5131 case TLS_MODEL_LOCAL_EXEC:
8deb3959 5132 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5133 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5134 new_rtx = force_const_mem (Pmode, new_rtx);
be00aaa8 5135 temp = gen_reg_rtx (Pmode);
8deb3959 5136 emit_move_insn (temp, new_rtx);
be00aaa8 5137
8deb3959 5138 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
be00aaa8 5139 if (reg != 0)
5140 {
8deb3959 5141 s390_load_address (reg, new_rtx);
5142 new_rtx = reg;
be00aaa8 5143 }
5144 break;
5145
5146 default:
32eda510 5147 gcc_unreachable ();
be00aaa8 5148 }
5149
5150 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5151 {
5152 switch (XINT (XEXP (addr, 0), 1))
5153 {
5154 case UNSPEC_INDNTPOFF:
32eda510 5155 gcc_assert (TARGET_CPU_ZARCH);
8deb3959 5156 new_rtx = addr;
be00aaa8 5157 break;
5158
5159 default:
32eda510 5160 gcc_unreachable ();
be00aaa8 5161 }
5162 }
5163
b7ace65c 5164 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5165 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5166 {
8deb3959 5167 new_rtx = XEXP (XEXP (addr, 0), 0);
5168 if (GET_CODE (new_rtx) != SYMBOL_REF)
5169 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
b7ace65c 5170
8deb3959 5171 new_rtx = legitimize_tls_address (new_rtx, reg);
29c05e22 5172 new_rtx = plus_constant (Pmode, new_rtx,
5173 INTVAL (XEXP (XEXP (addr, 0), 1)));
8deb3959 5174 new_rtx = force_operand (new_rtx, 0);
b7ace65c 5175 }
5176
be00aaa8 5177 else
32eda510 5178 gcc_unreachable (); /* for now ... */
be00aaa8 5179
8deb3959 5180 return new_rtx;
be00aaa8 5181}
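/* Note (added): for the simplest case, the local-exec model, the
   function above produces

     (plus (thread pointer) (reg tmp))

   where tmp has been loaded from a literal pool entry holding
   sym@NTPOFF, i.e. the TP-relative offset of the variable.  */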
5182
08b5e262 5183/* Emit insns making the address in operands[1] valid for a standard
5184 move to operands[0]. operands[1] is replaced by an address which
5185 should be used instead of the former RTX to emit the move
5186 pattern. */
4673c1a0 5187
5188void
b40da9a7 5189emit_symbolic_move (rtx *operands)
4673c1a0 5190{
e1ba4a27 5191 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4673c1a0 5192
be00aaa8 5193 if (GET_CODE (operands[0]) == MEM)
4673c1a0 5194 operands[1] = force_reg (Pmode, operands[1]);
be00aaa8 5195 else if (TLS_SYMBOLIC_CONST (operands[1]))
5196 operands[1] = legitimize_tls_address (operands[1], temp);
5197 else if (flag_pic)
4673c1a0 5198 operands[1] = legitimize_pic_address (operands[1], temp);
5199}
5200
56769981 5201/* Try machine-dependent ways of modifying an illegitimate address X
4673c1a0 5202 to be legitimate. If we find one, return the new, valid address.
4673c1a0 5203
5204 OLDX is the address as it was before break_out_memory_refs was called.
5205 In some cases it is useful to look at this to decide what needs to be done.
5206
56769981 5207 MODE is the mode of the operand pointed to by X.
4673c1a0 5208
5209 When -fpic is used, special handling is needed for symbolic references.
5210 See comments by legitimize_pic_address for details. */
5211
41e3a0c7 5212static rtx
5213s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3754d046 5214 machine_mode mode ATTRIBUTE_UNUSED)
4673c1a0 5215{
2eb8fe23 5216 rtx constant_term = const0_rtx;
4673c1a0 5217
be00aaa8 5218 if (TLS_SYMBOLIC_CONST (x))
5219 {
5220 x = legitimize_tls_address (x, 0);
5221
fd50b071 5222 if (s390_legitimate_address_p (mode, x, FALSE))
be00aaa8 5223 return x;
5224 }
1ed004b7 5225 else if (GET_CODE (x) == PLUS
ffead1ca 5226 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
1ed004b7 5227 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5228 {
5229 return x;
5230 }
be00aaa8 5231 else if (flag_pic)
4673c1a0 5232 {
2eb8fe23 5233 if (SYMBOLIC_CONST (x)
f81e845f 5234 || (GET_CODE (x) == PLUS
5235 && (SYMBOLIC_CONST (XEXP (x, 0))
2eb8fe23 5236 || SYMBOLIC_CONST (XEXP (x, 1)))))
5237 x = legitimize_pic_address (x, 0);
5238
fd50b071 5239 if (s390_legitimate_address_p (mode, x, FALSE))
2eb8fe23 5240 return x;
4673c1a0 5241 }
4673c1a0 5242
2eb8fe23 5243 x = eliminate_constant_term (x, &constant_term);
56769981 5244
de84f805 5245 /* Optimize loading of large displacements by splitting them
5246 into the multiple of 4K and the rest; this allows the
f81e845f 5247 former to be CSE'd if possible.
de84f805 5248
5249 Don't do this if the displacement is added to a register
5250 pointing into the stack frame, as the offsets will
5251 change later anyway. */
5252
5253 if (GET_CODE (constant_term) == CONST_INT
51aa1e9c 5254 && !TARGET_LONG_DISPLACEMENT
5255 && !DISP_IN_RANGE (INTVAL (constant_term))
de84f805 5256 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5257 {
5258 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5259 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5260
5261 rtx temp = gen_reg_rtx (Pmode);
5262 rtx val = force_operand (GEN_INT (upper), temp);
5263 if (val != temp)
5264 emit_move_insn (temp, val);
5265
5266 x = gen_rtx_PLUS (Pmode, x, temp);
5267 constant_term = GEN_INT (lower);
5268 }
5269
2eb8fe23 5270 if (GET_CODE (x) == PLUS)
4673c1a0 5271 {
2eb8fe23 5272 if (GET_CODE (XEXP (x, 0)) == REG)
5273 {
edd89d66 5274 rtx temp = gen_reg_rtx (Pmode);
5275 rtx val = force_operand (XEXP (x, 1), temp);
2eb8fe23 5276 if (val != temp)
5277 emit_move_insn (temp, val);
5278
5279 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5280 }
5281
5282 else if (GET_CODE (XEXP (x, 1)) == REG)
5283 {
edd89d66 5284 rtx temp = gen_reg_rtx (Pmode);
5285 rtx val = force_operand (XEXP (x, 0), temp);
2eb8fe23 5286 if (val != temp)
5287 emit_move_insn (temp, val);
5288
5289 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5290 }
4673c1a0 5291 }
2eb8fe23 5292
5293 if (constant_term != const0_rtx)
5294 x = gen_rtx_PLUS (Pmode, x, constant_term);
5295
5296 return x;
4673c1a0 5297}
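
/* Illustrative sketch, not part of the original file: the 4K split
   performed above for displacements that do not fit the 12-bit
   unsigned displacement field on machines without long displacement.
   The helper name is invented for this example.  Because the two
   parts occupy disjoint bit positions, upper + lower reassembles the
   original displacement without carries, and UPPER is a multiple of
   4K that can be CSE'd across references.  */

static void
s390_sketch_split_displacement (long long disp, long long *upper,
				long long *lower)
{
  *lower = disp & 0xfff;	/* low 12 bits, always 0 .. 4095 */
  *upper = disp ^ *lower;	/* DISP with the low 12 bits cleared */
  /* Invariant: *upper + *lower == disp, also for negative DISP.  */
}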
5298
e4542435 5299/* Try a machine-dependent way of reloading an illegitimate address AD
851d9296 5300 operand. If we find one, push the reload and return the new address.
e4542435 5301
5302 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5303 and TYPE is the reload type of the current reload. */
5304
ffead1ca 5305rtx
3754d046 5306legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
e4542435 5307 int opnum, int type)
5308{
5309 if (!optimize || TARGET_LONG_DISPLACEMENT)
5310 return NULL_RTX;
5311
5312 if (GET_CODE (ad) == PLUS)
5313 {
5314 rtx tem = simplify_binary_operation (PLUS, Pmode,
5315 XEXP (ad, 0), XEXP (ad, 1));
5316 if (tem)
5317 ad = tem;
5318 }
5319
5320 if (GET_CODE (ad) == PLUS
5321 && GET_CODE (XEXP (ad, 0)) == REG
5322 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5323 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5324 {
5325 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5326 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
8deb3959 5327 rtx cst, tem, new_rtx;
e4542435 5328
5329 cst = GEN_INT (upper);
5330 if (!legitimate_reload_constant_p (cst))
5331 cst = force_const_mem (Pmode, cst);
5332
5333 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
8deb3959 5334 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
e4542435 5335
5336 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
ffead1ca 5337 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
e4542435 5338 opnum, (enum reload_type) type);
8deb3959 5339 return new_rtx;
e4542435 5340 }
5341
5342 return NULL_RTX;
5343}
5344
4fbc4db5 5345/* Emit code to move LEN bytes from SRC to DST. */
5346
daa87e5a 5347bool
008c057d 5348s390_expand_movmem (rtx dst, rtx src, rtx len)
4fbc4db5 5349{
daa87e5a 5350 /* When tuning for z10 or higher we rely on the Glibc functions to
 5351 do the right thing. Only for constant lengths below 64k do we
5352 generate inline code. */
5353 if (s390_tune >= PROCESSOR_2097_Z10
5354 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5355 return false;
5356
d044af2a 5357 /* Expand memcpy for constant length operands without a loop if it
5358 is shorter that way.
5359
5360 With a constant length argument a
5361 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
5362 if (GET_CODE (len) == CONST_INT
5363 && INTVAL (len) >= 0
5364 && INTVAL (len) <= 256 * 6
5365 && (!TARGET_MVCLE || INTVAL (len) <= 256))
4fbc4db5 5366 {
d044af2a 5367 HOST_WIDE_INT o, l;
5368
5369 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5370 {
5371 rtx newdst = adjust_address (dst, BLKmode, o);
5372 rtx newsrc = adjust_address (src, BLKmode, o);
5373 emit_insn (gen_movmem_short (newdst, newsrc,
5374 GEN_INT (l > 256 ? 255 : l - 1)));
5375 }
4fbc4db5 5376 }
5377
5378 else if (TARGET_MVCLE)
5379 {
008c057d 5380 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4fbc4db5 5381 }
5382
5383 else
5384 {
5385 rtx dst_addr, src_addr, count, blocks, temp;
79f6a8ed 5386 rtx_code_label *loop_start_label = gen_label_rtx ();
5387 rtx_code_label *loop_end_label = gen_label_rtx ();
5388 rtx_code_label *end_label = gen_label_rtx ();
3754d046 5389 machine_mode mode;
4fbc4db5 5390
5391 mode = GET_MODE (len);
5392 if (mode == VOIDmode)
31838f66 5393 mode = Pmode;
4fbc4db5 5394
4fbc4db5 5395 dst_addr = gen_reg_rtx (Pmode);
5396 src_addr = gen_reg_rtx (Pmode);
5397 count = gen_reg_rtx (mode);
5398 blocks = gen_reg_rtx (mode);
5399
5400 convert_move (count, len, 1);
f81e845f 5401 emit_cmp_and_jump_insns (count, const0_rtx,
4fbc4db5 5402 EQ, NULL_RTX, mode, 1, end_label);
5403
5404 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5405 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5406 dst = change_address (dst, VOIDmode, dst_addr);
5407 src = change_address (src, VOIDmode, src_addr);
f81e845f 5408
b9c74b4d 5409 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5410 OPTAB_DIRECT);
4fbc4db5 5411 if (temp != count)
5412 emit_move_insn (count, temp);
5413
b9c74b4d 5414 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5415 OPTAB_DIRECT);
4fbc4db5 5416 if (temp != blocks)
5417 emit_move_insn (blocks, temp);
5418
4ee9c684 5419 emit_cmp_and_jump_insns (blocks, const0_rtx,
5420 EQ, NULL_RTX, mode, 1, loop_end_label);
7746964e 5421
5422 emit_label (loop_start_label);
4fbc4db5 5423
d5de7805 5424 if (TARGET_Z10
5425 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5426 {
5427 rtx prefetch;
5428
5429 /* Issue a read prefetch for the +3 cache line. */
5430 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5431 const0_rtx, const0_rtx);
5432 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5433 emit_insn (prefetch);
5434
5435 /* Issue a write prefetch for the +3 cache line. */
5436 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5437 const1_rtx, const0_rtx);
5438 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5439 emit_insn (prefetch);
5440 }
5441
008c057d 5442 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
f81e845f 5443 s390_load_address (dst_addr,
4fbc4db5 5444 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
f81e845f 5445 s390_load_address (src_addr,
4fbc4db5 5446 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
f81e845f 5447
b9c74b4d 5448 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5449 OPTAB_DIRECT);
4fbc4db5 5450 if (temp != blocks)
5451 emit_move_insn (blocks, temp);
5452
4ee9c684 5453 emit_cmp_and_jump_insns (blocks, const0_rtx,
5454 EQ, NULL_RTX, mode, 1, loop_end_label);
7746964e 5455
5456 emit_jump (loop_start_label);
4ee9c684 5457 emit_label (loop_end_label);
4fbc4db5 5458
008c057d 5459 emit_insn (gen_movmem_short (dst, src,
31838f66 5460 convert_to_mode (Pmode, count, 1)));
4fbc4db5 5461 emit_label (end_label);
5462 }
daa87e5a 5463 return true;
4fbc4db5 5464}
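
/* Illustrative sketch, not part of the original file: the chunking
   that the loop path of s390_expand_movmem implements with MVC,
   written as plain C.  The function name is invented.  COUNT = LEN - 1
   is the MVC-style length code; BLOCKS full 256-byte copies are
   emitted, and the EXECUTE'd trailing MVC consumes the low 8 bits of
   COUNT, i.e. copies (COUNT & 255) + 1 bytes, so the total is exactly
   LEN.  */

static void
s390_sketch_copy_blockwise (unsigned char *dst, const unsigned char *src,
			    unsigned long len)
{
  unsigned long count, blocks, i, j;

  if (len == 0)
    return;

  count = len - 1;		/* length code: number of bytes - 1 */
  blocks = count >> 8;		/* number of full 256-byte MVCs */

  for (i = 0; i < blocks; i++, dst += 256, src += 256)
    for (j = 0; j < 256; j++)	/* stands in for one 256-byte MVC */
      dst[j] = src[j];

  /* Trailing MVC via EXECUTE: (count & 255) + 1 bytes.  */
  for (j = 0; j <= (count & 255); j++)
    dst[j] = src[j];
}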
5465
805a133b 5466/* Emit code to set LEN bytes at DST to VAL.
5467 Make use of clrmem if VAL is zero. */
4fbc4db5 5468
5469void
805a133b 5470s390_expand_setmem (rtx dst, rtx len, rtx val)
4fbc4db5 5471{
2b1d59f5 5472 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
1ed6fd08 5473 return;
5474
805a133b 5475 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
ffead1ca 5476
2b1d59f5 5477 /* Expand setmem/clrmem for a constant length operand without a
5478 loop if it will be shorter that way.
5479 With a constant length and without pfd argument a
5480 clrmem loop is 32 bytes -> 5.3 * xc
5481 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5482 if (GET_CODE (len) == CONST_INT
5483 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5484 || INTVAL (len) <= 257 * 3)
5485 && (!TARGET_MVCLE || INTVAL (len) <= 256))
4fbc4db5 5486 {
2b1d59f5 5487 HOST_WIDE_INT o, l;
ffead1ca 5488
2b1d59f5 5489 if (val == const0_rtx)
5490 /* clrmem: emit 256 byte blockwise XCs. */
5491 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5492 {
5493 rtx newdst = adjust_address (dst, BLKmode, o);
5494 emit_insn (gen_clrmem_short (newdst,
5495 GEN_INT (l > 256 ? 255 : l - 1)));
5496 }
5497 else
5498 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5499 setting first byte to val and using a 256 byte mvc with one
5500 byte overlap to propagate the byte. */
5501 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5502 {
5503 rtx newdst = adjust_address (dst, BLKmode, o);
5504 emit_move_insn (adjust_address (dst, QImode, o), val);
5505 if (l > 1)
5506 {
5507 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5508 emit_insn (gen_movmem_short (newdstp1, newdst,
5509 GEN_INT (l > 257 ? 255 : l - 2)));
5510 }
5511 }
4fbc4db5 5512 }
5513
5514 else if (TARGET_MVCLE)
5515 {
805a133b 5516 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
00091884 5517 if (TARGET_64BIT)
5518 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5519 val));
5520 else
5521 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5522 val));
4fbc4db5 5523 }
5524
5525 else
5526 {
b5fdc416 5527 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
79f6a8ed 5528 rtx_code_label *loop_start_label = gen_label_rtx ();
4b6f12fb 5529 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5530 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5531 rtx_code_label *restbyte_end_label = gen_label_rtx ();
3754d046 5532 machine_mode mode;
4fbc4db5 5533
5534 mode = GET_MODE (len);
5535 if (mode == VOIDmode)
4b6f12fb 5536 mode = Pmode;
4fbc4db5 5537
4fbc4db5 5538 dst_addr = gen_reg_rtx (Pmode);
4fbc4db5 5539 count = gen_reg_rtx (mode);
5540 blocks = gen_reg_rtx (mode);
5541
5542 convert_move (count, len, 1);
f81e845f 5543 emit_cmp_and_jump_insns (count, const0_rtx,
4b6f12fb 5544 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
6394f7ee 5545 profile_probability::very_unlikely ());
4fbc4db5 5546
4b6f12fb 5547 /* We need to make a copy of the target address since memset is
5548 supposed to return it unmodified. We have to make it here
5549 already since the new reg is used at onebyte_end_label. */
4fbc4db5 5550 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5551 dst = change_address (dst, VOIDmode, dst_addr);
f81e845f 5552
4b6f12fb 5553 if (val != const0_rtx)
805a133b 5554 {
4b6f12fb 5555 /* When using the overlapping mvc the original target
5556 address is only accessed as single byte entity (even by
5557 the mvc reading this value). */
5b2a69fa 5558 set_mem_size (dst, 1);
4b6f12fb 5559 dstp1 = adjust_address (dst, VOIDmode, 1);
5560 emit_cmp_and_jump_insns (count,
5561 const1_rtx, EQ, NULL_RTX, mode, 1,
6394f7ee 5562 onebyte_end_label,
5563 profile_probability::very_unlikely ());
805a133b 5564 }
4b6f12fb 5565
 5566 /* There is one unconditional (mvi+mvc)/xc after the loop
 5567 dealing with the rest of the bytes.  Subtracting two (mvi+mvc)
 5568 or one (xc) here leaves exactly that many bytes to be handled
 5569 by it. */
5570 temp = expand_binop (mode, add_optab, count,
5571 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5572 count, 1, OPTAB_DIRECT);
4fbc4db5 5573 if (temp != count)
4b6f12fb 5574 emit_move_insn (count, temp);
4fbc4db5 5575
b9c74b4d 5576 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5577 OPTAB_DIRECT);
4fbc4db5 5578 if (temp != blocks)
4b6f12fb 5579 emit_move_insn (blocks, temp);
4fbc4db5 5580
4ee9c684 5581 emit_cmp_and_jump_insns (blocks, const0_rtx,
4b6f12fb 5582 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5583
5584 emit_jump (loop_start_label);
5585
5586 if (val != const0_rtx)
5587 {
5588 /* The 1 byte != 0 special case. Not handled efficiently
5589 since we require two jumps for that. However, this
5590 should be very rare. */
5591 emit_label (onebyte_end_label);
5592 emit_move_insn (adjust_address (dst, QImode, 0), val);
5593 emit_jump (zerobyte_end_label);
5594 }
7746964e 5595
5596 emit_label (loop_start_label);
4fbc4db5 5597
d5de7805 5598 if (TARGET_Z10
5599 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5600 {
5601 /* Issue a write prefetch for the +4 cache line. */
5602 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5603 GEN_INT (1024)),
5604 const1_rtx, const0_rtx);
5605 emit_insn (prefetch);
5606 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5607 }
5608
805a133b 5609 if (val == const0_rtx)
5610 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5611 else
4b6f12fb 5612 {
5613 /* Set the first byte in the block to the value and use an
5614 overlapping mvc for the block. */
5615 emit_move_insn (adjust_address (dst, QImode, 0), val);
5616 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5617 }
f81e845f 5618 s390_load_address (dst_addr,
4fbc4db5 5619 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
f81e845f 5620
b9c74b4d 5621 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5622 OPTAB_DIRECT);
4fbc4db5 5623 if (temp != blocks)
4b6f12fb 5624 emit_move_insn (blocks, temp);
4fbc4db5 5625
4ee9c684 5626 emit_cmp_and_jump_insns (blocks, const0_rtx,
4b6f12fb 5627 NE, NULL_RTX, mode, 1, loop_start_label);
7746964e 5628
4b6f12fb 5629 emit_label (restbyte_end_label);
4fbc4db5 5630
805a133b 5631 if (val == const0_rtx)
4b6f12fb 5632 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
805a133b 5633 else
4b6f12fb 5634 {
5635 /* Set the first byte in the block to the value and use an
5636 overlapping mvc for the block. */
5637 emit_move_insn (adjust_address (dst, QImode, 0), val);
 5638 /* EXECUTE only uses the lowest 8 bits of count, which is
 5639 exactly what we need here. */
5640 emit_insn (gen_movmem_short (dstp1, dst,
5641 convert_to_mode (Pmode, count, 1)));
5642 }
5643
5644 emit_label (zerobyte_end_label);
4fbc4db5 5645 }
5646}
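
/* Illustrative sketch, not part of the original file: the mvi +
   overlapping mvc trick used above, written as plain C.  The function
   name is invented.  MVC copies strictly left to right one byte at a
   time, so a copy from DST to DST + 1 reads bytes that the same
   instruction has already written, smearing the first byte over the
   whole destination.  */

static void
s390_sketch_propagate_byte (unsigned char *dst, unsigned char val,
			    unsigned long len)
{
  unsigned long i;

  if (len == 0)
    return;

  dst[0] = val;			/* MVI: store the fill byte once */

  /* Overlapping MVC: every byte written here has already been set to
     VAL by the previous iteration.  */
  for (i = 1; i < len; i++)
    dst[i] = dst[i - 1];
}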
5647
5648/* Emit code to compare LEN bytes at OP0 with those at OP1,
5649 and return the result in TARGET. */
5650
daa87e5a 5651bool
b40da9a7 5652s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4fbc4db5 5653{
80b53886 5654 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
dd16a4bd 5655 rtx tmp;
5656
daa87e5a 5657 /* When tuning for z10 or higher we rely on the Glibc functions to
 5658 do the right thing. Only for constant lengths below 64k do we
5659 generate inline code. */
5660 if (s390_tune >= PROCESSOR_2097_Z10
5661 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5662 return false;
5663
dd16a4bd 5664 /* As the result of CMPINT is inverted compared to what we need,
5665 we have to swap the operands. */
5666 tmp = op0; op0 = op1; op1 = tmp;
4fbc4db5 5667
4fbc4db5 5668 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5669 {
5670 if (INTVAL (len) > 0)
5671 {
31838f66 5672 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
dd16a4bd 5673 emit_insn (gen_cmpint (target, ccreg));
4fbc4db5 5674 }
5675 else
5676 emit_move_insn (target, const0_rtx);
5677 }
bcbf02a5 5678 else if (TARGET_MVCLE)
4fbc4db5 5679 {
31838f66 5680 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
dd16a4bd 5681 emit_insn (gen_cmpint (target, ccreg));
4fbc4db5 5682 }
4fbc4db5 5683 else
5684 {
5685 rtx addr0, addr1, count, blocks, temp;
79f6a8ed 5686 rtx_code_label *loop_start_label = gen_label_rtx ();
5687 rtx_code_label *loop_end_label = gen_label_rtx ();
5688 rtx_code_label *end_label = gen_label_rtx ();
3754d046 5689 machine_mode mode;
4fbc4db5 5690
5691 mode = GET_MODE (len);
5692 if (mode == VOIDmode)
31838f66 5693 mode = Pmode;
4fbc4db5 5694
4fbc4db5 5695 addr0 = gen_reg_rtx (Pmode);
5696 addr1 = gen_reg_rtx (Pmode);
5697 count = gen_reg_rtx (mode);
5698 blocks = gen_reg_rtx (mode);
5699
5700 convert_move (count, len, 1);
f81e845f 5701 emit_cmp_and_jump_insns (count, const0_rtx,
4fbc4db5 5702 EQ, NULL_RTX, mode, 1, end_label);
5703
5704 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5705 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5706 op0 = change_address (op0, VOIDmode, addr0);
5707 op1 = change_address (op1, VOIDmode, addr1);
f81e845f 5708
b9c74b4d 5709 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5710 OPTAB_DIRECT);
4fbc4db5 5711 if (temp != count)
5712 emit_move_insn (count, temp);
5713
b9c74b4d 5714 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5715 OPTAB_DIRECT);
4fbc4db5 5716 if (temp != blocks)
5717 emit_move_insn (blocks, temp);
5718
4ee9c684 5719 emit_cmp_and_jump_insns (blocks, const0_rtx,
5720 EQ, NULL_RTX, mode, 1, loop_end_label);
7746964e 5721
5722 emit_label (loop_start_label);
4fbc4db5 5723
d5de7805 5724 if (TARGET_Z10
5725 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5726 {
5727 rtx prefetch;
5728
5729 /* Issue a read prefetch for the +2 cache line of operand 1. */
5730 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5731 const0_rtx, const0_rtx);
5732 emit_insn (prefetch);
5733 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5734
5735 /* Issue a read prefetch for the +2 cache line of operand 2. */
5736 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5737 const0_rtx, const0_rtx);
5738 emit_insn (prefetch);
5739 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5740 }
5741
31838f66 5742 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
80b53886 5743 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
f81e845f 5744 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4fbc4db5 5745 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
d1f9b275 5746 temp = gen_rtx_SET (pc_rtx, temp);
4fbc4db5 5747 emit_jump_insn (temp);
5748
f81e845f 5749 s390_load_address (addr0,
4fbc4db5 5750 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
f81e845f 5751 s390_load_address (addr1,
4fbc4db5 5752 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
f81e845f 5753
b9c74b4d 5754 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5755 OPTAB_DIRECT);
4fbc4db5 5756 if (temp != blocks)
5757 emit_move_insn (blocks, temp);
5758
4ee9c684 5759 emit_cmp_and_jump_insns (blocks, const0_rtx,
5760 EQ, NULL_RTX, mode, 1, loop_end_label);
7746964e 5761
5762 emit_jump (loop_start_label);
4ee9c684 5763 emit_label (loop_end_label);
4fbc4db5 5764
f588eb9f 5765 emit_insn (gen_cmpmem_short (op0, op1,
31838f66 5766 convert_to_mode (Pmode, count, 1)));
4fbc4db5 5767 emit_label (end_label);
5768
dd16a4bd 5769 emit_insn (gen_cmpint (target, ccreg));
4fbc4db5 5770 }
daa87e5a 5771 return true;
4fbc4db5 5772}
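
/* Illustrative sketch, not part of the original file: the blockwise
   comparison that the loop path of s390_expand_cmpmem implements with
   CLC, written as plain C with an invented function name.  Full
   256-byte blocks are compared and the loop exits at the first
   mismatching block; a trailing comparison handles the remaining
   (COUNT & 255) + 1 bytes.  The RTL above additionally swaps the
   operands because CMPINT delivers the result with inverted sign.  */

static int
s390_sketch_cmp_blockwise (const unsigned char *op0, const unsigned char *op1,
			   unsigned long len)
{
  unsigned long count, blocks, i, j;
  int diff = 0;

  if (len == 0)
    return 0;

  count = len - 1;		/* length code: number of bytes - 1 */
  blocks = count >> 8;		/* number of full 256-byte CLCs */

  for (i = 0; i < blocks && diff == 0; i++, op0 += 256, op1 += 256)
    for (j = 0; j < 256; j++)	/* one 256-byte CLC */
      if (op0[j] != op1[j])
	{
	  diff = op0[j] < op1[j] ? -1 : 1;
	  break;
	}

  /* Trailing CLC of the remaining bytes, only if no mismatch yet.  */
  for (j = 0; diff == 0 && j <= (count & 255); j++)
    if (op0[j] != op1[j])
      diff = op0[j] < op1[j] ? -1 : 1;

  return diff;
}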
5773
76a4c804 5774/* Emit a conditional jump to LABEL for condition code mask MASK using
5775 comparsion operator COMPARISON. Return the emitted jump insn. */
5776
26cd1198 5777static rtx_insn *
76a4c804 5778s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5779{
5780 rtx temp;
5781
5782 gcc_assert (comparison == EQ || comparison == NE);
5783 gcc_assert (mask > 0 && mask < 15);
5784
5785 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5786 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5787 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5788 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5789 temp = gen_rtx_SET (pc_rtx, temp);
5790 return emit_jump_insn (temp);
5791}
5792
5793/* Emit the instructions to implement strlen of STRING and store the
5794 result in TARGET. The string has the known ALIGNMENT. This
5795 version uses vector instructions and is therefore not appropriate
5796 for targets prior to z13. */
5797
5798void
5799s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5800{
76a4c804 5801 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5802 rtx str_reg = gen_reg_rtx (V16QImode);
5803 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5804 rtx str_idx_reg = gen_reg_rtx (Pmode);
5805 rtx result_reg = gen_reg_rtx (V16QImode);
5806 rtx is_aligned_label = gen_label_rtx ();
5807 rtx into_loop_label = NULL_RTX;
5808 rtx loop_start_label = gen_label_rtx ();
5809 rtx temp;
5810 rtx len = gen_reg_rtx (QImode);
5811 rtx cond;
5812
5813 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5814 emit_move_insn (str_idx_reg, const0_rtx);
5815
5816 if (INTVAL (alignment) < 16)
5817 {
5818 /* Check whether the address happens to be aligned properly so
5819 jump directly to the aligned loop. */
5820 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5821 str_addr_base_reg, GEN_INT (15)),
5822 const0_rtx, EQ, NULL_RTX,
5823 Pmode, 1, is_aligned_label);
5824
5825 temp = gen_reg_rtx (Pmode);
5826 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5827 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5828 gcc_assert (REG_P (temp));
5829 highest_index_to_load_reg =
5830 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5831 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5832 gcc_assert (REG_P (highest_index_to_load_reg));
5833 emit_insn (gen_vllv16qi (str_reg,
5834 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5835 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5836
5837 into_loop_label = gen_label_rtx ();
5838 s390_emit_jump (into_loop_label, NULL_RTX);
5839 emit_barrier ();
5840 }
5841
5842 emit_label (is_aligned_label);
5843 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5844
 5845 /* Reaching this point we are only performing 16-byte aligned
5846 loads. */
5847 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5848
5849 emit_label (loop_start_label);
5850 LABEL_NUSES (loop_start_label) = 1;
5851
5852 /* Load 16 bytes of the string into VR. */
5853 emit_move_insn (str_reg,
5854 gen_rtx_MEM (V16QImode,
5855 gen_rtx_PLUS (Pmode, str_idx_reg,
5856 str_addr_base_reg)));
5857 if (into_loop_label != NULL_RTX)
5858 {
5859 emit_label (into_loop_label);
5860 LABEL_NUSES (into_loop_label) = 1;
5861 }
5862
5863 /* Increment string index by 16 bytes. */
5864 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5865 str_idx_reg, 1, OPTAB_DIRECT);
5866
5867 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5868 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5869
5870 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
61cb1816 5871 REG_BR_PROB,
5872 profile_probability::very_likely ().to_reg_br_prob_note ());
447443f5 5873 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
76a4c804 5874
5875 /* If the string pointer wasn't aligned we have loaded less then 16
5876 bytes and the remaining bytes got filled with zeros (by vll).
5877 Now we have to check whether the resulting index lies within the
5878 bytes actually part of the string. */
5879
5880 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5881 highest_index_to_load_reg);
5882 s390_load_address (highest_index_to_load_reg,
5883 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5884 const1_rtx));
5885 if (TARGET_64BIT)
5886 emit_insn (gen_movdicc (str_idx_reg, cond,
5887 highest_index_to_load_reg, str_idx_reg));
5888 else
5889 emit_insn (gen_movsicc (str_idx_reg, cond,
5890 highest_index_to_load_reg, str_idx_reg));
5891
61cb1816 5892 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5893 profile_probability::very_unlikely ());
76a4c804 5894
5895 expand_binop (Pmode, add_optab, str_idx_reg,
5896 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5897 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5898 here. */
5899 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5900 convert_to_mode (Pmode, len, 1),
5901 target, 1, OPTAB_DIRECT);
5902 if (temp != target)
5903 emit_move_insn (target, temp);
5904}
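
/* Illustrative sketch, not part of the original file: the block
   structure of the vector strlen above, written as a scalar loop with
   an invented function name.  The first pass only looks at the rest of
   the current 16-byte block (this is what the length-limited VLL load
   guarantees), so no access ever touches the following block; after
   that every iteration scans one aligned 16-byte block (VL + VFENE).  */

static unsigned long
s390_sketch_strlen_blockwise (const char *s)
{
  unsigned long idx = 0;
  unsigned long i, n;

  /* Bytes left in the current 16-byte block.  */
  n = 16 - ((unsigned long) s & 15);
  for (i = 0; i < n; i++)
    if (s[idx + i] == 0)
      return idx + i;
  idx += n;

  /* From here on all loads are 16-byte aligned.  */
  for (;;)
    {
      for (i = 0; i < 16; i++)
	if (s[idx + i] == 0)
	  return idx + i;
      idx += 16;
    }
}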
3b699fc7 5905
664ff6a0 5906void
5907s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5908{
664ff6a0 5909 rtx temp = gen_reg_rtx (Pmode);
5910 rtx src_addr = XEXP (src, 0);
5911 rtx dst_addr = XEXP (dst, 0);
5912 rtx src_addr_reg = gen_reg_rtx (Pmode);
5913 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5914 rtx offset = gen_reg_rtx (Pmode);
5915 rtx vsrc = gen_reg_rtx (V16QImode);
5916 rtx vpos = gen_reg_rtx (V16QImode);
5917 rtx loadlen = gen_reg_rtx (SImode);
 5918 rtx gpos_qi = gen_reg_rtx (QImode);
5919 rtx gpos = gen_reg_rtx (SImode);
5920 rtx done_label = gen_label_rtx ();
5921 rtx loop_label = gen_label_rtx ();
5922 rtx exit_label = gen_label_rtx ();
5923 rtx full_label = gen_label_rtx ();
5924
 5925 /* Perform a quick check for a string ending within the first
 5926 (up to) 16 bytes and exit early if successful. */
5927
5928 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5929 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5930 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
447443f5 5931 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
664ff6a0 5932 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5933 /* gpos is the byte index if a zero was found and 16 otherwise.
5934 So if it is lower than the loaded bytes we have a hit. */
5935 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5936 full_label);
5937 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5938
5939 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5940 1, OPTAB_DIRECT);
5941 emit_jump (exit_label);
5942 emit_barrier ();
5943
5944 emit_label (full_label);
5945 LABEL_NUSES (full_label) = 1;
5946
5947 /* Calculate `offset' so that src + offset points to the last byte
5948 before 16 byte alignment. */
5949
5950 /* temp = src_addr & 0xf */
5951 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5952 1, OPTAB_DIRECT);
5953
5954 /* offset = 0xf - temp */
5955 emit_move_insn (offset, GEN_INT (15));
5956 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5957 1, OPTAB_DIRECT);
5958
 5959 /* Store `offset' bytes in the destination string. The quick check
5960 has loaded at least `offset' bytes into vsrc. */
5961
5962 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5963
5964 /* Advance to the next byte to be loaded. */
5965 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5966 1, OPTAB_DIRECT);
5967
5968 /* Make sure the addresses are single regs which can be used as a
5969 base. */
5970 emit_move_insn (src_addr_reg, src_addr);
5971 emit_move_insn (dst_addr_reg, dst_addr);
5972
5973 /* MAIN LOOP */
5974
5975 emit_label (loop_label);
5976 LABEL_NUSES (loop_label) = 1;
5977
5978 emit_move_insn (vsrc,
5979 gen_rtx_MEM (V16QImode,
5980 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5981
5982 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5983 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5984 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
61cb1816 5985 REG_BR_PROB, profile_probability::very_unlikely ()
5986 .to_reg_br_prob_note ());
664ff6a0 5987
5988 emit_move_insn (gen_rtx_MEM (V16QImode,
5989 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5990 vsrc);
5991 /* offset += 16 */
5992 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5993 offset, 1, OPTAB_DIRECT);
5994
5995 emit_jump (loop_label);
5996 emit_barrier ();
5997
5998 /* REGULAR EXIT */
5999
6000 /* We are done. Add the offset of the zero character to the dst_addr
6001 pointer to get the result. */
6002
6003 emit_label (done_label);
6004 LABEL_NUSES (done_label) = 1;
6005
6006 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
6007 1, OPTAB_DIRECT);
6008
447443f5 6009 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
664ff6a0 6010 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
6011
6012 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
6013
6014 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
6015 1, OPTAB_DIRECT);
6016
6017 /* EARLY EXIT */
6018
6019 emit_label (exit_label);
6020 LABEL_NUSES (exit_label) = 1;
6021}
6022
6023
3b699fc7 6024/* Expand conditional increment or decrement using alc/slb instructions.
6025 Should generate code setting DST to either SRC or SRC + INCREMENT,
6026 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
eeba5f25 6027 Returns true if successful, false otherwise.
6028
6029 That makes it possible to implement some if-constructs without jumps e.g.:
6030 (borrow = CC0 | CC1 and carry = CC2 | CC3)
6031 unsigned int a, b, c;
6032 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
6033 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
6034 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
6035 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
6036
6037 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
6038 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
6039 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
6040 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
6041 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
3b699fc7 6042
6043bool
6044s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
6045 rtx dst, rtx src, rtx increment)
6046{
3754d046 6047 machine_mode cmp_mode;
6048 machine_mode cc_mode;
3b699fc7 6049 rtx op_res;
6050 rtx insn;
6051 rtvec p;
32eda510 6052 int ret;
3b699fc7 6053
6054 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
6055 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
6056 cmp_mode = SImode;
6057 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
6058 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
6059 cmp_mode = DImode;
6060 else
6061 return false;
6062
6063 /* Try ADD LOGICAL WITH CARRY. */
6064 if (increment == const1_rtx)
6065 {
6066 /* Determine CC mode to use. */
6067 if (cmp_code == EQ || cmp_code == NE)
6068 {
6069 if (cmp_op1 != const0_rtx)
6070 {
6071 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6072 NULL_RTX, 0, OPTAB_WIDEN);
6073 cmp_op1 = const0_rtx;
6074 }
6075
6076 cmp_code = cmp_code == EQ ? LEU : GTU;
6077 }
6078
6079 if (cmp_code == LTU || cmp_code == LEU)
6080 {
6081 rtx tem = cmp_op0;
6082 cmp_op0 = cmp_op1;
6083 cmp_op1 = tem;
6084 cmp_code = swap_condition (cmp_code);
6085 }
6086
6087 switch (cmp_code)
6088 {
6089 case GTU:
6090 cc_mode = CCUmode;
6091 break;
6092
6093 case GEU:
6094 cc_mode = CCL3mode;
6095 break;
6096
6097 default:
6098 return false;
6099 }
6100
6101 /* Emit comparison instruction pattern. */
6102 if (!register_operand (cmp_op0, cmp_mode))
6103 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6104
d1f9b275 6105 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
3b699fc7 6106 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6107 /* We use insn_invalid_p here to add clobbers if required. */
dae9d0e7 6108 ret = insn_invalid_p (emit_insn (insn), false);
32eda510 6109 gcc_assert (!ret);
3b699fc7 6110
6111 /* Emit ALC instruction pattern. */
6112 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6113 gen_rtx_REG (cc_mode, CC_REGNUM),
6114 const0_rtx);
6115
6116 if (src != const0_rtx)
6117 {
6118 if (!register_operand (src, GET_MODE (dst)))
6119 src = force_reg (GET_MODE (dst), src);
6120
6f4afa7e 6121 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6122 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
3b699fc7 6123 }
6124
6125 p = rtvec_alloc (2);
ffead1ca 6126 RTVEC_ELT (p, 0) =
d1f9b275 6127 gen_rtx_SET (dst, op_res);
ffead1ca 6128 RTVEC_ELT (p, 1) =
3b699fc7 6129 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6130 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6131
6132 return true;
6133 }
6134
6135 /* Try SUBTRACT LOGICAL WITH BORROW. */
6136 if (increment == constm1_rtx)
6137 {
6138 /* Determine CC mode to use. */
6139 if (cmp_code == EQ || cmp_code == NE)
6140 {
6141 if (cmp_op1 != const0_rtx)
6142 {
6143 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6144 NULL_RTX, 0, OPTAB_WIDEN);
6145 cmp_op1 = const0_rtx;
6146 }
6147
6148 cmp_code = cmp_code == EQ ? LEU : GTU;
6149 }
6150
6151 if (cmp_code == GTU || cmp_code == GEU)
6152 {
6153 rtx tem = cmp_op0;
6154 cmp_op0 = cmp_op1;
6155 cmp_op1 = tem;
6156 cmp_code = swap_condition (cmp_code);
6157 }
6158
6159 switch (cmp_code)
6160 {
6161 case LEU:
6162 cc_mode = CCUmode;
6163 break;
6164
6165 case LTU:
6166 cc_mode = CCL3mode;
6167 break;
6168
6169 default:
6170 return false;
6171 }
6172
6173 /* Emit comparison instruction pattern. */
6174 if (!register_operand (cmp_op0, cmp_mode))
6175 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6176
d1f9b275 6177 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
3b699fc7 6178 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6179 /* We use insn_invalid_p here to add clobbers if required. */
dae9d0e7 6180 ret = insn_invalid_p (emit_insn (insn), false);
32eda510 6181 gcc_assert (!ret);
3b699fc7 6182
6183 /* Emit SLB instruction pattern. */
6184 if (!register_operand (src, GET_MODE (dst)))
6185 src = force_reg (GET_MODE (dst), src);
6186
ffead1ca 6187 op_res = gen_rtx_MINUS (GET_MODE (dst),
6188 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6189 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6190 gen_rtx_REG (cc_mode, CC_REGNUM),
3b699fc7 6191 const0_rtx));
6192 p = rtvec_alloc (2);
ffead1ca 6193 RTVEC_ELT (p, 0) =
d1f9b275 6194 gen_rtx_SET (dst, op_res);
ffead1ca 6195 RTVEC_ELT (p, 1) =
3b699fc7 6196 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6197 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6198
6199 return true;
6200 }
6201
6202 return false;
6203}
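
/* Illustrative sketch, not part of the original file: the branch-free
   forms that the ALC/SLB sequences above implement, written as plain C
   with invented function names.  The unsigned comparison result plays
   the role of the carry/borrow that ALC adds or SLB subtracts.  */

static unsigned int
s390_sketch_addcc_lt (unsigned int a, unsigned int b, unsigned int c)
{
  /* "if (a < b) c++;" without a branch.  */
  return c + (a < b);
}

static unsigned int
s390_sketch_addcc_eq (unsigned int a, unsigned int b, unsigned int c)
{
  /* "if (a == b) c++;" needs the extra XOR mentioned in the comment
     above; afterwards only a comparison against zero is left.  */
  a ^= b;
  return c + (a == 0);
}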
6204
e68d6a13 6205/* Expand code for the insv template. Return true if successful. */
0349cc73 6206
e68d6a13 6207bool
0349cc73 6208s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6209{
6210 int bitsize = INTVAL (op1);
6211 int bitpos = INTVAL (op2);
3754d046 6212 machine_mode mode = GET_MODE (dest);
6213 machine_mode smode;
8c753480 6214 int smode_bsize, mode_bsize;
6215 rtx op, clobber;
0349cc73 6216
0bc377b9 6217 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
31efd1ec 6218 return false;
6219
8c753480 6220 /* Generate INSERT IMMEDIATE (IILL et al). */
6221 /* (set (ze (reg)) (const_int)). */
6222 if (TARGET_ZARCH
6223 && register_operand (dest, word_mode)
6224 && (bitpos % 16) == 0
6225 && (bitsize % 16) == 0
6226 && const_int_operand (src, VOIDmode))
e68d6a13 6227 {
8c753480 6228 HOST_WIDE_INT val = INTVAL (src);
6229 int regpos = bitpos + bitsize;
e68d6a13 6230
8c753480 6231 while (regpos > bitpos)
6232 {
3754d046 6233 machine_mode putmode;
8c753480 6234 int putsize;
6235
6236 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6237 putmode = SImode;
6238 else
6239 putmode = HImode;
e68d6a13 6240
8c753480 6241 putsize = GET_MODE_BITSIZE (putmode);
6242 regpos -= putsize;
6243 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6244 GEN_INT (putsize),
6245 GEN_INT (regpos)),
6246 gen_int_mode (val, putmode));
6247 val >>= putsize;
6248 }
6249 gcc_assert (regpos == bitpos);
e68d6a13 6250 return true;
6251 }
6252
1a5d4b27 6253 smode = smallest_int_mode_for_size (bitsize);
8c753480 6254 smode_bsize = GET_MODE_BITSIZE (smode);
6255 mode_bsize = GET_MODE_BITSIZE (mode);
0349cc73 6256
8c753480 6257 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
0349cc73 6258 if (bitpos == 0
8c753480 6259 && (bitsize % BITS_PER_UNIT) == 0
6260 && MEM_P (dest)
0349cc73 6261 && (register_operand (src, word_mode)
6262 || const_int_operand (src, VOIDmode)))
6263 {
6264 /* Emit standard pattern if possible. */
8c753480 6265 if (smode_bsize == bitsize)
6266 {
6267 emit_move_insn (adjust_address (dest, smode, 0),
6268 gen_lowpart (smode, src));
6269 return true;
6270 }
0349cc73 6271
6272 /* (set (ze (mem)) (const_int)). */
6273 else if (const_int_operand (src, VOIDmode))
6274 {
6275 int size = bitsize / BITS_PER_UNIT;
8c753480 6276 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6277 BLKmode,
6278 UNITS_PER_WORD - size);
0349cc73 6279
6280 dest = adjust_address (dest, BLKmode, 0);
5b2a69fa 6281 set_mem_size (dest, size);
0349cc73 6282 s390_expand_movmem (dest, src_mem, GEN_INT (size));
8c753480 6283 return true;
0349cc73 6284 }
ffead1ca 6285
0349cc73 6286 /* (set (ze (mem)) (reg)). */
6287 else if (register_operand (src, word_mode))
6288 {
8c753480 6289 if (bitsize <= 32)
0349cc73 6290 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6291 const0_rtx), src);
6292 else
6293 {
6294 /* Emit st,stcmh sequence. */
8c753480 6295 int stcmh_width = bitsize - 32;
0349cc73 6296 int size = stcmh_width / BITS_PER_UNIT;
6297
ffead1ca 6298 emit_move_insn (adjust_address (dest, SImode, size),
0349cc73 6299 gen_lowpart (SImode, src));
5b2a69fa 6300 set_mem_size (dest, size);
8c753480 6301 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6302 GEN_INT (stcmh_width),
6303 const0_rtx),
6304 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
0349cc73 6305 }
8c753480 6306 return true;
0349cc73 6307 }
8c753480 6308 }
0349cc73 6309
8c753480 6310 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6311 if ((bitpos % BITS_PER_UNIT) == 0
6312 && (bitsize % BITS_PER_UNIT) == 0
6313 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6314 && MEM_P (src)
6315 && (mode == DImode || mode == SImode)
6316 && register_operand (dest, mode))
6317 {
6318 /* Emit a strict_low_part pattern if possible. */
6319 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6320 {
6321 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
d1f9b275 6322 op = gen_rtx_SET (op, gen_lowpart (smode, src));
8c753480 6323 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6324 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6325 return true;
6326 }
6327
6328 /* ??? There are more powerful versions of ICM that are not
6329 completely represented in the md file. */
0349cc73 6330 }
6331
8c753480 6332 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6333 if (TARGET_Z10 && (mode == DImode || mode == SImode))
0349cc73 6334 {
3754d046 6335 machine_mode mode_s = GET_MODE (src);
0349cc73 6336
678c417b 6337 if (CONSTANT_P (src))
0349cc73 6338 {
02a8efd2 6339 /* For constant zero values the representation with AND
6340 appears to be folded in more situations than the (set
6341 (zero_extract) ...).
6342 We only do this when the start and end of the bitfield
6343 remain in the same SImode chunk. That way nihf or nilf
6344 can be used.
6345 The AND patterns might still generate a risbg for this. */
6346 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6347 return false;
6348 else
6349 src = force_reg (mode, src);
8c753480 6350 }
6351 else if (mode_s != mode)
6352 {
6353 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6354 src = force_reg (mode_s, src);
6355 src = gen_lowpart (mode, src);
6356 }
0349cc73 6357
99274008 6358 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
d1f9b275 6359 op = gen_rtx_SET (op, src);
81769881 6360
6361 if (!TARGET_ZEC12)
6362 {
6363 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6364 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6365 }
6366 emit_insn (op);
0349cc73 6367
0349cc73 6368 return true;
6369 }
6370
6371 return false;
6372}
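
/* Illustrative sketch, not part of the original file: the INSERT
   IMMEDIATE splitting done at the top of s390_expand_insv, restricted
   to 16-bit pieces for simplicity (the code above also uses 32-bit
   pieces when TARGET_EXTIMM allows it).  The function name is
   invented.  BITPOS and BITSIZE follow the same convention as the
   zero_extract positions above: bit 0 is the most significant bit of
   the 64-bit register, and both are assumed to be multiples of 16.  */

static void
s390_sketch_insert_immediate (unsigned long long *reg,
			      unsigned long long val,
			      int bitpos, int bitsize)
{
  int regpos = bitpos + bitsize;

  while (regpos > bitpos)
    {
      unsigned long long piece = val & 0xffffULL;
      int lsb_shift;

      regpos -= 16;
      /* Convert the MSB-based position to a shift from the LSB.  */
      lsb_shift = 64 - regpos - 16;

      *reg &= ~(0xffffULL << lsb_shift);	/* clear the halfword (IILL et al.) */
      *reg |= piece << lsb_shift;		/* insert the next 16 bits */
      val >>= 16;
    }
}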
3b699fc7 6373
7cc66daf 6374/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6375 register that holds VAL of mode MODE shifted by COUNT bits. */
182f815e 6376
6377static inline rtx
3754d046 6378s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
182f815e 6379{
6380 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6381 NULL_RTX, 1, OPTAB_DIRECT);
ffead1ca 6382 return expand_simple_binop (SImode, ASHIFT, val, count,
182f815e 6383 NULL_RTX, 1, OPTAB_DIRECT);
6384}
6385
76a4c804 6386/* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6387 the result in TARGET. */
6388
6389void
6390s390_expand_vec_compare (rtx target, enum rtx_code cond,
6391 rtx cmp_op1, rtx cmp_op2)
6392{
6393 machine_mode mode = GET_MODE (target);
6394 bool neg_p = false, swap_p = false;
6395 rtx tmp;
6396
80912819 6397 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
76a4c804 6398 {
6399 switch (cond)
6400 {
6401 /* NE a != b -> !(a == b) */
6402 case NE: cond = EQ; neg_p = true; break;
6403 /* UNGT a u> b -> !(b >= a) */
6404 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6405 /* UNGE a u>= b -> !(b > a) */
6406 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6407 /* LE: a <= b -> b >= a */
6408 case LE: cond = GE; swap_p = true; break;
6409 /* UNLE: a u<= b -> !(a > b) */
6410 case UNLE: cond = GT; neg_p = true; break;
6411 /* LT: a < b -> b > a */
6412 case LT: cond = GT; swap_p = true; break;
6413 /* UNLT: a u< b -> !(a >= b) */
6414 case UNLT: cond = GE; neg_p = true; break;
6415 case UNEQ:
4772a699 6416 emit_insn (gen_vec_cmpuneq (target, cmp_op1, cmp_op2));
76a4c804 6417 return;
6418 case LTGT:
4772a699 6419 emit_insn (gen_vec_cmpltgt (target, cmp_op1, cmp_op2));
76a4c804 6420 return;
6421 case ORDERED:
4772a699 6422 emit_insn (gen_vec_ordered (target, cmp_op1, cmp_op2));
76a4c804 6423 return;
6424 case UNORDERED:
4772a699 6425 emit_insn (gen_vec_unordered (target, cmp_op1, cmp_op2));
76a4c804 6426 return;
6427 default: break;
6428 }
6429 }
6430 else
6431 {
6432 switch (cond)
6433 {
6434 /* NE: a != b -> !(a == b) */
6435 case NE: cond = EQ; neg_p = true; break;
6436 /* GE: a >= b -> !(b > a) */
6437 case GE: cond = GT; neg_p = true; swap_p = true; break;
6438 /* GEU: a >= b -> !(b > a) */
6439 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6440 /* LE: a <= b -> !(a > b) */
6441 case LE: cond = GT; neg_p = true; break;
6442 /* LEU: a <= b -> !(a > b) */
6443 case LEU: cond = GTU; neg_p = true; break;
6444 /* LT: a < b -> b > a */
6445 case LT: cond = GT; swap_p = true; break;
6446 /* LTU: a < b -> b > a */
6447 case LTU: cond = GTU; swap_p = true; break;
6448 default: break;
6449 }
6450 }
6451
6452 if (swap_p)
6453 {
6454 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6455 }
6456
6457 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6458 mode,
6459 cmp_op1, cmp_op2)));
6460 if (neg_p)
6461 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6462}
6463
07f32359 6464/* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6465 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
abc57c35 6466 elements in CMP1 and CMP2 fulfill the comparison.
6467 This function is only used to emit patterns for the vx builtins and
6468 therefore only handles comparison codes required by the
6469 builtins. */
07f32359 6470void
6471s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6472 rtx cmp1, rtx cmp2, bool all_p)
6473{
abc57c35 6474 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
07f32359 6475 rtx tmp_reg = gen_reg_rtx (SImode);
6476 bool swap_p = false;
6477
6478 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6479 {
6480 switch (code)
6481 {
abc57c35 6482 case EQ:
6483 case NE:
6484 cc_producer_mode = CCVEQmode;
6485 break;
6486 case GE:
6487 case LT:
6488 code = swap_condition (code);
6489 swap_p = true;
6490 /* fallthrough */
6491 case GT:
6492 case LE:
6493 cc_producer_mode = CCVIHmode;
6494 break;
6495 case GEU:
6496 case LTU:
6497 code = swap_condition (code);
6498 swap_p = true;
6499 /* fallthrough */
6500 case GTU:
6501 case LEU:
6502 cc_producer_mode = CCVIHUmode;
6503 break;
6504 default:
6505 gcc_unreachable ();
07f32359 6506 }
abc57c35 6507
07f32359 6508 scratch_mode = GET_MODE (cmp1);
abc57c35 6509 /* These codes represent inverted CC interpretations. Inverting
6510 an ALL CC mode results in an ANY CC mode and the other way
6511 around. Invert the all_p flag here to compensate for
6512 that. */
6513 if (code == NE || code == LE || code == LEU)
6514 all_p = !all_p;
6515
6516 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
07f32359 6517 }
abc57c35 6518 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
07f32359 6519 {
abc57c35 6520 bool inv_p = false;
6521
07f32359 6522 switch (code)
6523 {
abc57c35 6524 case EQ: cc_producer_mode = CCVEQmode; break;
6525 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6526 case GT: cc_producer_mode = CCVFHmode; break;
6527 case GE: cc_producer_mode = CCVFHEmode; break;
6528 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6529 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6530 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6531 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
07f32359 6532 default: gcc_unreachable ();
6533 }
12bdf7c0 6534 scratch_mode = mode_for_int_vector (GET_MODE (cmp1)).require ();
abc57c35 6535
6536 if (inv_p)
6537 all_p = !all_p;
6538
6539 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
07f32359 6540 }
6541 else
6542 gcc_unreachable ();
6543
07f32359 6544 if (swap_p)
6545 {
6546 rtx tmp = cmp2;
6547 cmp2 = cmp1;
6548 cmp1 = tmp;
6549 }
6550
6551 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6552 gen_rtvec (2, gen_rtx_SET (
abc57c35 6553 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6554 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
07f32359 6555 gen_rtx_CLOBBER (VOIDmode,
6556 gen_rtx_SCRATCH (scratch_mode)))));
6557 emit_move_insn (target, const0_rtx);
6558 emit_move_insn (tmp_reg, const1_rtx);
6559
6560 emit_move_insn (target,
6561 gen_rtx_IF_THEN_ELSE (SImode,
abc57c35 6562 gen_rtx_fmt_ee (code, VOIDmode,
6563 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
07f32359 6564 const0_rtx),
abc57c35 6565 tmp_reg, target));
07f32359 6566}
6567
e17ed6ec 6568/* Invert the comparison CODE applied to a CC mode. This is only safe
 6569 if we know whether the result was created by a floating point
6570 compare or not. For the CCV modes this is encoded as part of the
6571 mode. */
6572enum rtx_code
6573s390_reverse_condition (machine_mode mode, enum rtx_code code)
6574{
6575 /* Reversal of FP compares takes care -- an ordered compare
6576 becomes an unordered compare and vice versa. */
6577 if (mode == CCVFALLmode || mode == CCVFANYmode)
6578 return reverse_condition_maybe_unordered (code);
6579 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6580 return reverse_condition (code);
6581 else
6582 gcc_unreachable ();
6583}
6584
76a4c804 6585/* Generate a vector comparison expression loading either elements of
6586 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6587 and CMP_OP2. */
6588
6589void
6590s390_expand_vcond (rtx target, rtx then, rtx els,
6591 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6592{
6593 rtx tmp;
6594 machine_mode result_mode;
6595 rtx result_target;
6596
651e0407 6597 machine_mode target_mode = GET_MODE (target);
6598 machine_mode cmp_mode = GET_MODE (cmp_op1);
6599 rtx op = (cond == LT) ? els : then;
6600
6601 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6602 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6603 for short and byte (x >> 15 and x >> 7 respectively). */
6604 if ((cond == LT || cond == GE)
6605 && target_mode == cmp_mode
6606 && cmp_op2 == CONST0_RTX (cmp_mode)
6607 && op == CONST0_RTX (target_mode)
6608 && s390_vector_mode_supported_p (target_mode)
6609 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6610 {
6611 rtx negop = (cond == LT) ? then : els;
6612
6613 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6614
6615 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6616 if (negop == CONST1_RTX (target_mode))
6617 {
6618 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6619 GEN_INT (shift), target,
6620 1, OPTAB_DIRECT);
6621 if (res != target)
6622 emit_move_insn (target, res);
6623 return;
6624 }
6625
6626 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
a991c8aa 6627 else if (all_ones_operand (negop, target_mode))
651e0407 6628 {
6629 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6630 GEN_INT (shift), target,
6631 0, OPTAB_DIRECT);
6632 if (res != target)
6633 emit_move_insn (target, res);
6634 return;
6635 }
6636 }
6637
76a4c804 6638 /* We always use an integral type vector to hold the comparison
6639 result. */
12bdf7c0 6640 result_mode = mode_for_int_vector (cmp_mode).require ();
76a4c804 6641 result_target = gen_reg_rtx (result_mode);
6642
651e0407 6643 /* We allow vector immediates as comparison operands that
6644 can be handled by the optimization above but not by the
6645 following code. Hence, force them into registers here. */
76a4c804 6646 if (!REG_P (cmp_op1))
b088ff4b 6647 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
76a4c804 6648
6649 if (!REG_P (cmp_op2))
b088ff4b 6650 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
76a4c804 6651
6652 s390_expand_vec_compare (result_target, cond,
6653 cmp_op1, cmp_op2);
6654
6655 /* If the results are supposed to be either -1 or 0 we are done
6656 since this is what our compare instructions generate anyway. */
a991c8aa 6657 if (all_ones_operand (then, GET_MODE (then))
76a4c804 6658 && const0_operand (els, GET_MODE (els)))
6659 {
651e0407 6660 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
76a4c804 6661 result_target, 0));
6662 return;
6663 }
6664
6665 /* Otherwise we will do a vsel afterwards. */
6666 /* This gets triggered e.g.
6667 with gcc.c-torture/compile/pr53410-1.c */
6668 if (!REG_P (then))
651e0407 6669 then = force_reg (target_mode, then);
76a4c804 6670
6671 if (!REG_P (els))
651e0407 6672 els = force_reg (target_mode, els);
76a4c804 6673
6674 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6675 result_target,
6676 CONST0_RTX (result_mode));
6677
6678 /* We compared the result against zero above so we have to swap then
6679 and els here. */
651e0407 6680 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
76a4c804 6681
651e0407 6682 gcc_assert (target_mode == GET_MODE (then));
76a4c804 6683 emit_insn (gen_rtx_SET (target, tmp));
6684}
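
/* Illustrative sketch, not part of the original file: the scalar
   counterpart of the "x < 0 ? -1 : 0" shortcut used above, with
   invented function names.  The vector code performs the same shift
   per element, by element width - 1 bits; here a 32-bit int and an
   arithmetic right shift of negative values are assumed (which the
   relevant targets provide, although ISO C leaves it
   implementation-defined).  */

static int
s390_sketch_sign_mask (int x)
{
  return x >> 31;			/* x < 0 ? -1 : 0 */
}

static unsigned int
s390_sketch_sign_bit (int x)
{
  return (unsigned int) x >> 31;	/* x < 0 ? 1 : 0 */
}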
6685
6686/* Emit the RTX necessary to initialize the vector TARGET with values
6687 in VALS. */
6688void
6689s390_expand_vec_init (rtx target, rtx vals)
6690{
6691 machine_mode mode = GET_MODE (target);
6692 machine_mode inner_mode = GET_MODE_INNER (mode);
6693 int n_elts = GET_MODE_NUNITS (mode);
6694 bool all_same = true, all_regs = true, all_const_int = true;
6695 rtx x;
6696 int i;
6697
6698 for (i = 0; i < n_elts; ++i)
6699 {
6700 x = XVECEXP (vals, 0, i);
6701
6702 if (!CONST_INT_P (x))
6703 all_const_int = false;
6704
6705 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6706 all_same = false;
6707
6708 if (!REG_P (x))
6709 all_regs = false;
6710 }
6711
6712 /* Use vector gen mask or vector gen byte mask if possible. */
6713 if (all_same && all_const_int
6714 && (XVECEXP (vals, 0, 0) == const0_rtx
6715 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6716 NULL, NULL)
6717 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6718 {
6719 emit_insn (gen_rtx_SET (target,
6720 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6721 return;
6722 }
6723
6724 if (all_same)
6725 {
6726 emit_insn (gen_rtx_SET (target,
6727 gen_rtx_VEC_DUPLICATE (mode,
6728 XVECEXP (vals, 0, 0))));
6729 return;
6730 }
6731
f413810a 6732 if (all_regs
6733 && REG_P (target)
6734 && n_elts == 2
6735 && GET_MODE_SIZE (inner_mode) == 8)
76a4c804 6736 {
6737 /* Use vector load pair. */
6738 emit_insn (gen_rtx_SET (target,
6739 gen_rtx_VEC_CONCAT (mode,
6740 XVECEXP (vals, 0, 0),
6741 XVECEXP (vals, 0, 1))));
6742 return;
6743 }
bd97b7d0 6744
6745 /* Use vector load logical element and zero. */
6746 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6747 {
6748 bool found = true;
6749
6750 x = XVECEXP (vals, 0, 0);
6751 if (memory_operand (x, inner_mode))
6752 {
6753 for (i = 1; i < n_elts; ++i)
6754 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6755
6756 if (found)
6757 {
6758 machine_mode half_mode = (inner_mode == SFmode
6759 ? V2SFmode : V2SImode);
6760 emit_insn (gen_rtx_SET (target,
6761 gen_rtx_VEC_CONCAT (mode,
6762 gen_rtx_VEC_CONCAT (half_mode,
6763 x,
6764 const0_rtx),
6765 gen_rtx_VEC_CONCAT (half_mode,
6766 const0_rtx,
6767 const0_rtx))));
6768 return;
6769 }
6770 }
6771 }
76a4c804 6772
6773 /* We are about to set the vector elements one by one. Zero out the
6774 full register first in order to help the data flow framework to
 6775 detect it as a full VR set. */
6776 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6777
6778 /* Unfortunately the vec_init expander is not allowed to fail. So
6779 we have to implement the fallback ourselves. */
6780 for (i = 0; i < n_elts; i++)
4ba2579e 6781 {
6782 rtx elem = XVECEXP (vals, 0, i);
6783 if (!general_operand (elem, GET_MODE (elem)))
6784 elem = force_reg (inner_mode, elem);
6785
6786 emit_insn (gen_rtx_SET (target,
6787 gen_rtx_UNSPEC (mode,
6788 gen_rtvec (3, elem,
6789 GEN_INT (i), target),
6790 UNSPEC_VEC_SET)));
6791 }
76a4c804 6792}
6793
182f815e 6794/* Structure to hold the initial parameters for a compare_and_swap operation
ffead1ca 6795 in HImode and QImode. */
182f815e 6796
6797struct alignment_context
6798{
ffead1ca 6799 rtx memsi; /* SI aligned memory location. */
182f815e 6800 rtx shift; /* Bit offset with regard to lsb. */
6801 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6802 rtx modemaski; /* ~modemask */
191ec5a2 6803 bool aligned; /* True if memory is aligned, false otherwise. */
182f815e 6804};
6805
7cc66daf 6806/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6807 structure AC for transparent simplifying, if the memory alignment is known
6808 to be at least 32bit. MEM is the memory location for the actual operation
6809 and MODE its mode. */
182f815e 6810
6811static void
6812init_alignment_context (struct alignment_context *ac, rtx mem,
3754d046 6813 machine_mode mode)
182f815e 6814{
6815 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6816 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6817
6818 if (ac->aligned)
6819 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6820 else
6821 {
6822 /* Alignment is unknown. */
6823 rtx byteoffset, addr, align;
6824
6825 /* Force the address into a register. */
6826 addr = force_reg (Pmode, XEXP (mem, 0));
6827
6828 /* Align it to SImode. */
6829 align = expand_simple_binop (Pmode, AND, addr,
6830 GEN_INT (-GET_MODE_SIZE (SImode)),
6831 NULL_RTX, 1, OPTAB_DIRECT);
6832 /* Generate MEM. */
6833 ac->memsi = gen_rtx_MEM (SImode, align);
6834 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
bd1da572 6835 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
182f815e 6836 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6837
6838 /* Calculate shiftcount. */
6839 byteoffset = expand_simple_binop (Pmode, AND, addr,
6840 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6841 NULL_RTX, 1, OPTAB_DIRECT);
6842 /* As we already have some offset, evaluate the remaining distance. */
6843 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6844 NULL_RTX, 1, OPTAB_DIRECT);
182f815e 6845 }
8c753480 6846
182f815e 6847 /* Shift is the byte count, but we need the bitcount. */
8c753480 6848 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6849 NULL_RTX, 1, OPTAB_DIRECT);
6850
182f815e 6851 /* Calculate masks. */
ffead1ca 6852 ac->modemask = expand_simple_binop (SImode, ASHIFT,
8c753480 6853 GEN_INT (GET_MODE_MASK (mode)),
6854 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6855 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6856 NULL_RTX, 1);
6857}
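
/* Illustrative sketch, not part of the original file: the arithmetic
   performed by init_alignment_context for the unaligned case, written
   as plain C with invented names.  On this big-endian target a 1- or
   2-byte field at byte offset B within its containing aligned 4-byte
   word sits (4 - size - B) bytes above the least significant byte, so
   that is the left shift used for the field value and its mask.  The
   field is assumed not to cross the word boundary.  */

static void
s390_sketch_subword_context (unsigned long addr, unsigned int field_bytes,
			     unsigned long *aligned_addr,
			     unsigned int *shift_bits, unsigned int *mask)
{
  unsigned int byteoffset = addr & 3;

  *aligned_addr = addr & ~3UL;				/* containing SImode word */
  *shift_bits = ((4 - field_bytes) - byteoffset) * 8;	/* bit position of the field */
  *mask = (field_bytes == 1 ? 0xffU : 0xffffU) << *shift_bits;
}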
6858
6859/* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
 6860 emit a single insv insn into SEQ2. Otherwise, put the prep insns in SEQ1 and
6861 perform the merge in SEQ2. */
6862
6863static rtx
6864s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
3754d046 6865 machine_mode mode, rtx val, rtx ins)
8c753480 6866{
6867 rtx tmp;
6868
6869 if (ac->aligned)
6870 {
6871 start_sequence ();
6872 tmp = copy_to_mode_reg (SImode, val);
6873 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6874 const0_rtx, ins))
6875 {
6876 *seq1 = NULL;
6877 *seq2 = get_insns ();
6878 end_sequence ();
6879 return tmp;
6880 }
6881 end_sequence ();
6882 }
6883
6884 /* Failed to use insv. Generate a two part shift and mask. */
6885 start_sequence ();
6886 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6887 *seq1 = get_insns ();
6888 end_sequence ();
6889
6890 start_sequence ();
6891 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6892 *seq2 = get_insns ();
6893 end_sequence ();
6894
6895 return tmp;
182f815e 6896}
6897
6898/* Expand an atomic compare and swap operation for HImode and QImode. MEM is
8c753480 6899 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6900 value to set if CMP == MEM. */
182f815e 6901
d90d26d8 6902static void
3754d046 6903s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
8c753480 6904 rtx cmp, rtx new_rtx, bool is_weak)
182f815e 6905{
6906 struct alignment_context ac;
77e58889 6907 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
182f815e 6908 rtx res = gen_reg_rtx (SImode);
79f6a8ed 6909 rtx_code_label *csloop = NULL, *csend = NULL;
182f815e 6910
182f815e 6911 gcc_assert (MEM_P (mem));
6912
6913 init_alignment_context (&ac, mem, mode);
6914
182f815e 6915 /* Load full word. Subsequent loads are performed by CS. */
6916 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6917 NULL_RTX, 1, OPTAB_DIRECT);
6918
8c753480 6919 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6920 possible, we try to use insv to make this happen efficiently. If
6921 that fails we'll generate code both inside and outside the loop. */
6922 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6923 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6924
6925 if (seq0)
6926 emit_insn (seq0);
6927 if (seq1)
6928 emit_insn (seq1);
6929
182f815e 6930 /* Start CS loop. */
8c753480 6931 if (!is_weak)
6932 {
6933 /* Begin assuming success. */
6934 emit_move_insn (btarget, const1_rtx);
6935
6936 csloop = gen_label_rtx ();
6937 csend = gen_label_rtx ();
6938 emit_label (csloop);
6939 }
6940
ffead1ca 6941 /* val = "<mem>00..0<mem>"
182f815e 6942 * cmp = "00..0<cmp>00..0"
ffead1ca 6943 * new = "00..0<new>00..0"
182f815e 6944 */
6945
8c753480 6946 emit_insn (seq2);
6947 emit_insn (seq3);
6948
d90d26d8 6949 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
8c753480 6950 if (is_weak)
6951 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
182f815e 6952 else
182f815e 6953 {
77e58889 6954 rtx tmp;
6955
8c753480 6956 /* Jump to end if we're done (likely?). */
6957 s390_emit_jump (csend, cc);
6958
77e58889 6959	  /* Check for changes outside mode, and loop internally if so.
6960 Arrange the moves so that the compare is adjacent to the
6961 branch so that we can generate CRJ. */
6962 tmp = copy_to_reg (val);
6963 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6964 1, OPTAB_DIRECT);
6965 cc = s390_emit_compare (NE, val, tmp);
8c753480 6966 s390_emit_jump (csloop, cc);
6967
6968 /* Failed. */
6969 emit_move_insn (btarget, const0_rtx);
6970 emit_label (csend);
182f815e 6971 }
ffead1ca 6972
182f815e 6973 /* Return the correct part of the bitfield. */
8c753480 6974 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6975 NULL_RTX, 1, OPTAB_DIRECT), 1);
182f815e 6976}
6977
d90d26d8 6978/* Variant of s390_expand_cs for SI, DI and TI modes. */
6979static void
6980s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6981 rtx cmp, rtx new_rtx, bool is_weak)
6982{
6983 rtx output = vtarget;
6984 rtx_code_label *skip_cs_label = NULL;
6985 bool do_const_opt = false;
6986
6987 if (!register_operand (output, mode))
6988 output = gen_reg_rtx (mode);
6989
 6990	  /* If IS_WEAK is true and CMP is a constant, compare the memory
 6991	     with the constant first and skip the compare_and_swap because it is
 6992	     very expensive and likely to fail anyway.
 6993	     Note 1: This is done only for IS_WEAK.  C11 allows optimizations that may
 6994	     cause spurious failures in that case.
 6995	     Note 2: It may be useful to do this also for a non-constant CMP.
6996 Note 3: Currently only targets with "load on condition" are supported
6997 (z196 and newer). */
6998
6999 if (TARGET_Z196
7000 && (mode == SImode || mode == DImode))
7001 do_const_opt = (is_weak && CONST_INT_P (cmp));
7002
7003 if (do_const_opt)
7004 {
d90d26d8 7005 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7006
7007 skip_cs_label = gen_label_rtx ();
7008 emit_move_insn (btarget, const0_rtx);
7009 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
7010 {
7011 rtvec lt = rtvec_alloc (2);
7012
7013 /* Load-and-test + conditional jump. */
7014 RTVEC_ELT (lt, 0)
7015 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
7016 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
7017 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
7018 }
7019 else
7020 {
7021 emit_move_insn (output, mem);
7022 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
7023 }
7024 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
61cb1816 7025 add_reg_br_prob_note (get_last_insn (),
7026 profile_probability::very_unlikely ());
d90d26d8 7027 /* If the jump is not taken, OUTPUT is the expected value. */
7028 cmp = output;
7029 /* Reload newval to a register manually, *after* the compare and jump
7030 above. Otherwise Reload might place it before the jump. */
7031 }
7032 else
7033 cmp = force_reg (mode, cmp);
7034 new_rtx = force_reg (mode, new_rtx);
7035 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
7036 (do_const_opt) ? CCZmode : CCZ1mode);
7037 if (skip_cs_label != NULL)
7038 emit_label (skip_cs_label);
7039
7040 /* We deliberately accept non-register operands in the predicate
7041 to ensure the write back to the output operand happens *before*
7042 the store-flags code below. This makes it easier for combine
7043 to merge the store-flags code with a potential test-and-branch
7044 pattern following (immediately!) afterwards. */
7045 if (output != vtarget)
7046 emit_move_insn (vtarget, output);
7047
7048 if (do_const_opt)
7049 {
7050 rtx cc, cond, ite;
7051
7052 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
7053 btarget has already been initialized with 0 above. */
7054 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7055 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
7056 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
7057 emit_insn (gen_rtx_SET (btarget, ite));
7058 }
7059 else
7060 {
7061 rtx cc, cond;
7062
7063 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7064 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7065 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
7066 }
7067}
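
/* Rough sketch of the DO_CONST_OPT path above for a weak SImode compare and
   swap against the constant 0 (illustrative only; the generated code depends
   on register allocation and the architecture level):

     lt    %r1,<mem>       # load and test, sets the condition code
     jne   .Lskip          # expected value does not match -> give up
     cs    %r1,%r2,<mem>   # the actual compare and swap
   .Lskip:
     ...                   # set BTARGET to 1 via load-on-condition

   so the expensive CS is skipped whenever the cheap compare already fails.  */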
7068
7069/* Expand an atomic compare and swap operation. MEM is the memory location,
7070 CMP the old value to compare MEM with and NEW_RTX the value to set if
7071 CMP == MEM. */
7072
7073void
7074s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7075 rtx cmp, rtx new_rtx, bool is_weak)
7076{
7077 switch (mode)
7078 {
916ace94 7079 case E_TImode:
7080 case E_DImode:
7081 case E_SImode:
d90d26d8 7082 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7083 break;
916ace94 7084 case E_HImode:
7085 case E_QImode:
d90d26d8 7086 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7087 break;
7088 default:
7089 gcc_unreachable ();
7090 }
7091}
7092
7093/* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7094 The memory location MEM is set to INPUT. OUTPUT is set to the previous value
7095 of MEM. */
7096
7097void
7098s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7099{
7100 machine_mode mode = GET_MODE (mem);
7101 rtx_code_label *csloop;
7102
7103 if (TARGET_Z196
7104 && (mode == DImode || mode == SImode)
7105 && CONST_INT_P (input) && INTVAL (input) == 0)
7106 {
7107 emit_move_insn (output, const0_rtx);
7108 if (mode == DImode)
7109 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7110 else
7111 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7112 return;
7113 }
7114
7115 input = force_reg (mode, input);
7116 emit_move_insn (output, mem);
7117 csloop = gen_label_rtx ();
7118 emit_label (csloop);
7119 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
7120 input, CCZ1mode));
7121}
7122
7cc66daf 7123/* Expand an atomic operation CODE of mode MODE. MEM is the memory location
85694bac 7124 and VAL the value to play with. If AFTER is true then store the value
7cc66daf 7125 MEM holds after the operation, if AFTER is false then store the value MEM
7126 holds before the operation. If TARGET is zero then discard that value, else
7127 store it to TARGET. */
7128
7129void
3754d046 7130s390_expand_atomic (machine_mode mode, enum rtx_code code,
7cc66daf 7131 rtx target, rtx mem, rtx val, bool after)
7132{
7133 struct alignment_context ac;
7134 rtx cmp;
8deb3959 7135 rtx new_rtx = gen_reg_rtx (SImode);
7cc66daf 7136 rtx orig = gen_reg_rtx (SImode);
79f6a8ed 7137 rtx_code_label *csloop = gen_label_rtx ();
7cc66daf 7138
7139 gcc_assert (!target || register_operand (target, VOIDmode));
7140 gcc_assert (MEM_P (mem));
7141
7142 init_alignment_context (&ac, mem, mode);
7143
7144 /* Shift val to the correct bit positions.
7145 Preserve "icm", but prevent "ex icm". */
7146 if (!(ac.aligned && code == SET && MEM_P (val)))
7147 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7148
7149 /* Further preparation insns. */
7150 if (code == PLUS || code == MINUS)
7151 emit_move_insn (orig, val);
7152 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7153 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7154 NULL_RTX, 1, OPTAB_DIRECT);
7155
7156 /* Load full word. Subsequent loads are performed by CS. */
7157 cmp = force_reg (SImode, ac.memsi);
7158
7159 /* Start CS loop. */
7160 emit_label (csloop);
8deb3959 7161 emit_move_insn (new_rtx, cmp);
7cc66daf 7162
7163 /* Patch new with val at correct position. */
7164 switch (code)
7165 {
7166 case PLUS:
7167 case MINUS:
8deb3959 7168 val = expand_simple_binop (SImode, code, new_rtx, orig,
7cc66daf 7169 NULL_RTX, 1, OPTAB_DIRECT);
7170 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7171 NULL_RTX, 1, OPTAB_DIRECT);
7172 /* FALLTHRU */
ffead1ca 7173 case SET:
7cc66daf 7174 if (ac.aligned && MEM_P (val))
b634c730 7175 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
292237f3 7176 0, 0, SImode, val, false);
7cc66daf 7177 else
7178 {
8deb3959 7179 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7cc66daf 7180 NULL_RTX, 1, OPTAB_DIRECT);
8deb3959 7181 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7cc66daf 7182 NULL_RTX, 1, OPTAB_DIRECT);
7183 }
7184 break;
7185 case AND:
7186 case IOR:
7187 case XOR:
8deb3959 7188 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7cc66daf 7189 NULL_RTX, 1, OPTAB_DIRECT);
7190 break;
7191 case MULT: /* NAND */
8deb3959 7192 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7cc66daf 7193 NULL_RTX, 1, OPTAB_DIRECT);
636c17b8 7194 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7195 NULL_RTX, 1, OPTAB_DIRECT);
7cc66daf 7196 break;
7197 default:
7198 gcc_unreachable ();
7199 }
7cc66daf 7200
db1f11e3 7201 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
d90d26d8 7202 ac.memsi, cmp, new_rtx,
7203 CCZ1mode));
7cc66daf 7204
7205 /* Return the correct part of the bitfield. */
7206 if (target)
7207 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
8deb3959 7208 after ? new_rtx : cmp, ac.shift,
7cc66daf 7209 NULL_RTX, 1, OPTAB_DIRECT), 1);
7210}
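
/* Illustration (not part of the generated code): for a QImode NAND
   (code == MULT) with modemask == 0x00ff0000, VAL has already been widened
   above to "11..1<val>11..1", so

     new = new & val;
     new = new ^ modemask;

   leaves every bit outside the field unchanged while the bits inside the
   field become ~(<old> & <val>), which is what the surrounding
   compare-and-swap loop requires.  */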
7211
40af64cc 7212/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
03c118d5 7213 We need to emit DTP-relative relocations. */
7214
40af64cc 7215static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7216
7217static void
b40da9a7 7218s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
03c118d5 7219{
7220 switch (size)
7221 {
7222 case 4:
7223 fputs ("\t.long\t", file);
7224 break;
7225 case 8:
7226 fputs ("\t.quad\t", file);
7227 break;
7228 default:
32eda510 7229 gcc_unreachable ();
03c118d5 7230 }
7231 output_addr_const (file, x);
7232 fputs ("@DTPOFF", file);
7233}
7234
76a4c804 7235/* Return the proper mode for REGNO being represented in the dwarf
7236 unwind table. */
7237machine_mode
7238s390_dwarf_frame_reg_mode (int regno)
7239{
7240 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7241
52de7525 7242 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7243 if (GENERAL_REGNO_P (regno))
7244 save_mode = Pmode;
7245
76a4c804 7246 /* The rightmost 64 bits of vector registers are call-clobbered. */
7247 if (GET_MODE_SIZE (save_mode) > 8)
7248 save_mode = DImode;
7249
7250 return save_mode;
7251}
7252
4257b08a 7253#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 7254/* Implement TARGET_MANGLE_TYPE. */
4257b08a 7255
7256static const char *
a9f1838b 7257s390_mangle_type (const_tree type)
4257b08a 7258{
07f32359 7259 type = TYPE_MAIN_VARIANT (type);
7260
7261 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7262 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7263 return NULL;
7264
7265 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7266 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7267 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7268 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7269
4257b08a 7270 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7271 && TARGET_LONG_DOUBLE_128)
7272 return "g";
7273
7274 /* For all other types, use normal C++ mangling. */
7275 return NULL;
7276}
7277#endif
7278
e93986bb 7279/* In the name of slightly smaller debug output, and to cater to
06b27565 7280 general assembler lossage, recognize various UNSPEC sequences
e93986bb 7281 and turn them back into a direct symbol reference. */
7282
07576557 7283static rtx
b40da9a7 7284s390_delegitimize_address (rtx orig_x)
e93986bb 7285{
3b6b647c 7286 rtx x, y;
e93986bb 7287
3b6b647c 7288 orig_x = delegitimize_mem_from_attrs (orig_x);
7289 x = orig_x;
4796d433 7290
7291 /* Extract the symbol ref from:
7292 (plus:SI (reg:SI 12 %r12)
7293 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
2b2b857a 7294 UNSPEC_GOTOFF/PLTOFF)))
7295 and
7296 (plus:SI (reg:SI 12 %r12)
7297 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7298 UNSPEC_GOTOFF/PLTOFF)
7299 (const_int 4 [0x4])))) */
4796d433 7300 if (GET_CODE (x) == PLUS
7301 && REG_P (XEXP (x, 0))
7302 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7303 && GET_CODE (XEXP (x, 1)) == CONST)
7304 {
b6382e93 7305 HOST_WIDE_INT offset = 0;
7306
4796d433 7307 /* The const operand. */
7308 y = XEXP (XEXP (x, 1), 0);
2b2b857a 7309
7310 if (GET_CODE (y) == PLUS
7311 && GET_CODE (XEXP (y, 1)) == CONST_INT)
b6382e93 7312 {
7313 offset = INTVAL (XEXP (y, 1));
7314 y = XEXP (y, 0);
7315 }
2b2b857a 7316
4796d433 7317 if (GET_CODE (y) == UNSPEC
2b2b857a 7318 && (XINT (y, 1) == UNSPEC_GOTOFF
7319 || XINT (y, 1) == UNSPEC_PLTOFF))
29c05e22 7320 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
4796d433 7321 }
7322
e93986bb 7323 if (GET_CODE (x) != MEM)
7324 return orig_x;
7325
7326 x = XEXP (x, 0);
7327 if (GET_CODE (x) == PLUS
7328 && GET_CODE (XEXP (x, 1)) == CONST
7329 && GET_CODE (XEXP (x, 0)) == REG
7330 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7331 {
7332 y = XEXP (XEXP (x, 1), 0);
7333 if (GET_CODE (y) == UNSPEC
12ef3745 7334 && XINT (y, 1) == UNSPEC_GOT)
54cb44a3 7335 y = XVECEXP (y, 0, 0);
7336 else
7337 return orig_x;
e93986bb 7338 }
54cb44a3 7339 else if (GET_CODE (x) == CONST)
e93986bb 7340 {
2b2b857a 7341 /* Extract the symbol ref from:
7342 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7343 UNSPEC_PLT/GOTENT))) */
7344
e93986bb 7345 y = XEXP (x, 0);
7346 if (GET_CODE (y) == UNSPEC
2b2b857a 7347 && (XINT (y, 1) == UNSPEC_GOTENT
7348 || XINT (y, 1) == UNSPEC_PLT))
54cb44a3 7349 y = XVECEXP (y, 0, 0);
7350 else
7351 return orig_x;
e93986bb 7352 }
54cb44a3 7353 else
7354 return orig_x;
e93986bb 7355
54cb44a3 7356 if (GET_MODE (orig_x) != Pmode)
7357 {
2b03de53 7358 if (GET_MODE (orig_x) == BLKmode)
7359 return orig_x;
54cb44a3 7360 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7361 if (y == NULL_RTX)
7362 return orig_x;
7363 }
7364 return y;
e93986bb 7365}
2eb8fe23 7366
805a133b 7367/* Output operand OP to stdio stream FILE.
7368 OP is an address (register + offset) which is not used to address data;
7369 instead the rightmost bits are interpreted as the value. */
63ebd742 7370
7371static void
2be7449b 7372print_addrstyle_operand (FILE *file, rtx op)
63ebd742 7373{
6d6be381 7374 HOST_WIDE_INT offset;
7375 rtx base;
9a09ba70 7376
6d6be381 7377 /* Extract base register and offset. */
2be7449b 7378 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
6d6be381 7379 gcc_unreachable ();
63ebd742 7380
7381 /* Sanity check. */
6d6be381 7382 if (base)
32eda510 7383 {
6d6be381 7384 gcc_assert (GET_CODE (base) == REG);
7385 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7386 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
32eda510 7387 }
63ebd742 7388
805a133b 7389	  /* Offsets are restricted to twelve bits.  */
7390 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6d6be381 7391 if (base)
7392 fprintf (file, "(%s)", reg_names[REGNO (base)]);
63ebd742 7393}
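
/* For instance (schematic values only), an operand of the form
   (plus (reg %r3) (const_int 46)) is printed as "46(%r3)", while a plain
   (const_int 10) is printed as "10"; only the low twelve bits of the offset
   are emitted.  */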
7394
06877232 7395/* Assigns the number of NOP halfwords to be emitted before and after the
7396 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
7397 If hotpatching is disabled for the function, the values are set to zero.
7398*/
77bc9912 7399
06877232 7400static void
11762b83 7401s390_function_num_hotpatch_hw (tree decl,
7402 int *hw_before,
7403 int *hw_after)
77bc9912 7404{
7405 tree attr;
7406
11762b83 7407 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7408
7409 /* Handle the arguments of the hotpatch attribute. The values
7410 specified via attribute might override the cmdline argument
7411 values. */
7412 if (attr)
77bc9912 7413 {
11762b83 7414 tree args = TREE_VALUE (attr);
7415
7416 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7417 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
77bc9912 7418 }
11762b83 7419 else
77bc9912 7420 {
11762b83 7421 /* Use the values specified by the cmdline arguments. */
7422 *hw_before = s390_hotpatch_hw_before_label;
7423 *hw_after = s390_hotpatch_hw_after_label;
77bc9912 7424 }
77bc9912 7425}
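
/* For example (illustrative declaration only), a function defined with
     __attribute__ ((hotpatch (1, 2)))
   gets *HW_BEFORE = 1 and *HW_AFTER = 2, overriding any -mhotpatch=
   command-line setting; without the attribute the command-line values are
   used.  */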
7426
7a0cee35 7427/* Write the current .machine and .machinemode specification to the assembler
7428 file. */
7429
14d7e7e6 7430#ifdef HAVE_AS_MACHINE_MACHINEMODE
7a0cee35 7431static void
7432s390_asm_output_machine_for_arch (FILE *asm_out_file)
7433{
7434 fprintf (asm_out_file, "\t.machinemode %s\n",
7435 (TARGET_ZARCH) ? "zarch" : "esa");
a168a775 7436 fprintf (asm_out_file, "\t.machine \"%s",
7437 processor_table[s390_arch].binutils_name);
7a0cee35 7438 if (S390_USE_ARCHITECTURE_MODIFIERS)
7439 {
7440 int cpu_flags;
7441
7442 cpu_flags = processor_flags_table[(int) s390_arch];
7443 if (TARGET_HTM && !(cpu_flags & PF_TX))
7444 fprintf (asm_out_file, "+htm");
7445 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7446 fprintf (asm_out_file, "+nohtm");
7447 if (TARGET_VX && !(cpu_flags & PF_VX))
7448 fprintf (asm_out_file, "+vx");
7449 else if (!TARGET_VX && (cpu_flags & PF_VX))
7450 fprintf (asm_out_file, "+novx");
7451 }
7452 fprintf (asm_out_file, "\"\n");
 7453}
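
/* As an illustration, compiling with -march=z13 -mno-htm would make this
   function emit roughly

	.machinemode zarch
	.machine "z13+nohtm"

   (the exact string depends on the selected architecture and on the
   htm/vx settings).  */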
7454
7455/* Write an extra function header before the very start of the function. */
7456
7457void
7458s390_asm_output_function_prefix (FILE *asm_out_file,
7459 const char *fnname ATTRIBUTE_UNUSED)
7460{
7461 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7462 return;
 7463	  /* Since only the function-specific options are saved, and not which
 7464	     options were explicitly set, it is too much work here to figure out which
 7465	     options have actually changed.  Thus, generate .machine and .machinemode
 7466	     whenever a function has the target attribute or pragma.  */
7467 fprintf (asm_out_file, "\t.machinemode push\n");
7468 fprintf (asm_out_file, "\t.machine push\n");
7469 s390_asm_output_machine_for_arch (asm_out_file);
7470}
7471
7472/* Write an extra function footer after the very end of the function. */
7473
7474void
7475s390_asm_declare_function_size (FILE *asm_out_file,
0491d54f 7476 const char *fnname, tree decl)
7a0cee35 7477{
0491d54f 7478 if (!flag_inhibit_size_directive)
7479 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7a0cee35 7480 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7481 return;
7482 fprintf (asm_out_file, "\t.machine pop\n");
7483 fprintf (asm_out_file, "\t.machinemode pop\n");
7484}
7485#endif
7486
77bc9912 7487/* Write the extra assembler code needed to declare a function properly. */
7488
7489void
7490s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7491 tree decl)
7492{
11762b83 7493 int hw_before, hw_after;
77bc9912 7494
06877232 7495 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7496 if (hw_before > 0)
77bc9912 7497 {
f4252e72 7498 unsigned int function_alignment;
77bc9912 7499 int i;
7500
7501 /* Add a trampoline code area before the function label and initialize it
7502 with two-byte nop instructions. This area can be overwritten with code
7503 that jumps to a patched version of the function. */
2a4536cc 7504 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
06877232 7505 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7506 hw_before);
7507 for (i = 1; i < hw_before; i++)
2a4536cc 7508 fputs ("\tnopr\t%r0\n", asm_out_file);
06877232 7509
77bc9912 7510 /* Note: The function label must be aligned so that (a) the bytes of the
7511 following nop do not cross a cacheline boundary, and (b) a jump address
7512 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
7513 stored directly before the label without crossing a cacheline
7514 boundary. All this is necessary to make sure the trampoline code can
06877232 7515 be changed atomically.
 7516	     This alignment is done automatically using the FUNCTION_BOUNDARY, but
7517 if there are NOPs before the function label, the alignment is placed
7518 before them. So it is necessary to duplicate the alignment after the
7519 NOPs. */
f4252e72 7520 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7521 if (! DECL_USER_ALIGN (decl))
7522 function_alignment = MAX (function_alignment,
7523 (unsigned int) align_functions);
06877232 7524 fputs ("\t# alignment for hotpatch\n", asm_out_file);
f4252e72 7525 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
77bc9912 7526 }
7527
7a0cee35 7528 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7529 {
7530 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7531 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7532 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7533 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7534 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7535 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7536 s390_warn_framesize);
7537 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7538 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7539 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7540 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7541 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7542 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7543 TARGET_PACKED_STACK);
7544 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7545 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7546 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7547 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7548 s390_warn_dynamicstack_p);
7549 }
77bc9912 7550 ASM_OUTPUT_LABEL (asm_out_file, fname);
06877232 7551 if (hw_after > 0)
7552 asm_fprintf (asm_out_file,
7553 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7554 hw_after);
77bc9912 7555}
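
/* With, say, two pre-label halfwords requested, the code above prints
   roughly (illustrative output only)

	nopr	%r0	# pre-label NOPs for hotpatch (2 halfwords)
	nopr	%r0
	# alignment for hotpatch
	<alignment directive>
   foo:
	# post-label NOPs for hotpatch (2 halfwords)

   Note that only the explanatory comment is printed here for the post-label
   area; the post-label NOPs themselves are not emitted by this function.  */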
7556
f588eb9f 7557/* Output machine-dependent UNSPECs occurring in address constant X
74d2529d 7558 in assembler syntax to stdio stream FILE. Returns true if the
7559 constant X could be recognized, false otherwise. */
4673c1a0 7560
1a561788 7561static bool
74d2529d 7562s390_output_addr_const_extra (FILE *file, rtx x)
4673c1a0 7563{
74d2529d 7564 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7565 switch (XINT (x, 1))
7566 {
7567 case UNSPEC_GOTENT:
7568 output_addr_const (file, XVECEXP (x, 0, 0));
7569 fprintf (file, "@GOTENT");
7570 return true;
7571 case UNSPEC_GOT:
7572 output_addr_const (file, XVECEXP (x, 0, 0));
7573 fprintf (file, "@GOT");
7574 return true;
7575 case UNSPEC_GOTOFF:
7576 output_addr_const (file, XVECEXP (x, 0, 0));
7577 fprintf (file, "@GOTOFF");
7578 return true;
7579 case UNSPEC_PLT:
7580 output_addr_const (file, XVECEXP (x, 0, 0));
7581 fprintf (file, "@PLT");
7582 return true;
7583 case UNSPEC_PLTOFF:
7584 output_addr_const (file, XVECEXP (x, 0, 0));
7585 fprintf (file, "@PLTOFF");
7586 return true;
7587 case UNSPEC_TLSGD:
7588 output_addr_const (file, XVECEXP (x, 0, 0));
7589 fprintf (file, "@TLSGD");
7590 return true;
7591 case UNSPEC_TLSLDM:
7592 assemble_name (file, get_some_local_dynamic_name ());
7593 fprintf (file, "@TLSLDM");
7594 return true;
7595 case UNSPEC_DTPOFF:
7596 output_addr_const (file, XVECEXP (x, 0, 0));
7597 fprintf (file, "@DTPOFF");
7598 return true;
7599 case UNSPEC_NTPOFF:
7600 output_addr_const (file, XVECEXP (x, 0, 0));
7601 fprintf (file, "@NTPOFF");
7602 return true;
7603 case UNSPEC_GOTNTPOFF:
7604 output_addr_const (file, XVECEXP (x, 0, 0));
7605 fprintf (file, "@GOTNTPOFF");
7606 return true;
7607 case UNSPEC_INDNTPOFF:
7608 output_addr_const (file, XVECEXP (x, 0, 0));
7609 fprintf (file, "@INDNTPOFF");
7610 return true;
7611 }
4673c1a0 7612
1ed7a160 7613 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7614 switch (XINT (x, 1))
7615 {
7616 case UNSPEC_POOL_OFFSET:
7617 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7618 output_addr_const (file, x);
7619 return true;
7620 }
74d2529d 7621 return false;
4673c1a0 7622}
7623
f81e845f 7624/* Output address operand ADDR in assembler syntax to
56769981 7625 stdio stream FILE. */
4673c1a0 7626
7627void
b40da9a7 7628print_operand_address (FILE *file, rtx addr)
4673c1a0 7629{
7630 struct s390_address ad;
883b2519 7631 memset (&ad, 0, sizeof (s390_address));
4673c1a0 7632
2a672556 7633 if (s390_loadrelative_operand_p (addr, NULL, NULL))
e68d6a13 7634 {
53b9033c 7635 if (!TARGET_Z10)
7636 {
902602ef 7637 output_operand_lossage ("symbolic memory references are "
7638 "only supported on z10 or later");
53b9033c 7639 return;
7640 }
e68d6a13 7641 output_addr_const (file, addr);
7642 return;
7643 }
7644
8ba34dcd 7645 if (!s390_decompose_address (addr, &ad)
1e280623 7646 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7647 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3284a242 7648 output_operand_lossage ("cannot decompose address");
f81e845f 7649
4673c1a0 7650 if (ad.disp)
74d2529d 7651 output_addr_const (file, ad.disp);
4673c1a0 7652 else
7653 fprintf (file, "0");
7654
7655 if (ad.base && ad.indx)
7656 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7657 reg_names[REGNO (ad.base)]);
7658 else if (ad.base)
7659 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7660}
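
/* Schematic examples: the address (plus (reg %r2) (const_int 8)) is printed
   as "8(%r2)"; with base %r2, index %r3 and displacement 8 the output is
   "8(%r3,%r2)".  */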
7661
f81e845f 7662/* Output operand X in assembler syntax to stdio stream FILE.
7663 CODE specified the format flag. The following format flags
56769981 7664 are recognized:
7665
7666 'C': print opcode suffix for branch condition.
7667 'D': print opcode suffix for inverse branch condition.
f1443d23 7668 'E': print opcode suffix for branch on index instruction.
cc87d0c5 7669 'G': print the size of the operand in bytes.
0d46035f 7670 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7671 'M': print the second word of a TImode operand.
7672 'N': print the second word of a DImode operand.
76a4c804 7673 'O': print only the displacement of a memory reference or address.
7674 'R': print only the base register of a memory reference or address.
0574acbe 7675 'S': print S-type memory reference (base+displacement).
2be7449b 7676 'Y': print address style operand without index (e.g. shift count or setmem
7677 operand).
56769981 7678
45981c0a 7679 'b': print integer X as if it's an unsigned byte.
e68d6a13 7680	 'c': print integer X as if it's a signed byte.
76a4c804 7681 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7682 'f': "end" contiguous bitmask X in SImode.
b9059d39 7683 'h': print integer X as if it's a signed halfword.
64a1078f 7684 'i': print the first nonzero HImode part of X.
b9059d39 7685 'j': print the first HImode part unequal to -1 of X.
7686 'k': print the first nonzero SImode part of X.
7687 'm': print the first SImode part unequal to -1 of X.
0d46035f 7688	 'o': print integer X as if it's an unsigned 32-bit word.
76a4c804 7689 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7690 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7691 CONST_VECTOR: Generate a bitmask for vgbm instruction.
0d46035f 7692 'x': print integer X as if it's an unsigned halfword.
76a4c804 7693 'v': print register number as vector register (v1 instead of f1).
0d46035f 7694*/
4673c1a0 7695
7696void
b40da9a7 7697print_operand (FILE *file, rtx x, int code)
4673c1a0 7698{
0d46035f 7699 HOST_WIDE_INT ival;
7700
4673c1a0 7701 switch (code)
7702 {
7703 case 'C':
2eb8fe23 7704 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
4673c1a0 7705 return;
7706
7707 case 'D':
2eb8fe23 7708 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
4673c1a0 7709 return;
7710
f1443d23 7711 case 'E':
7712 if (GET_CODE (x) == LE)
7713 fprintf (file, "l");
7714 else if (GET_CODE (x) == GT)
7715 fprintf (file, "h");
7716 else
902602ef 7717 output_operand_lossage ("invalid comparison operator "
7718 "for 'E' output modifier");
f1443d23 7719 return;
7720
be00aaa8 7721 case 'J':
7722 if (GET_CODE (x) == SYMBOL_REF)
7723 {
7724 fprintf (file, "%s", ":tls_load:");
7725 output_addr_const (file, x);
7726 }
7727 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7728 {
7729 fprintf (file, "%s", ":tls_gdcall:");
7730 output_addr_const (file, XVECEXP (x, 0, 0));
7731 }
7732 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7733 {
7734 fprintf (file, "%s", ":tls_ldcall:");
3677652f 7735 const char *name = get_some_local_dynamic_name ();
7736 gcc_assert (name);
7737 assemble_name (file, name);
be00aaa8 7738 }
7739 else
902602ef 7740 output_operand_lossage ("invalid reference for 'J' output modifier");
be00aaa8 7741 return;
7742
cc87d0c5 7743 case 'G':
7744 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7745 return;
7746
4673c1a0 7747 case 'O':
7748 {
7749 struct s390_address ad;
32eda510 7750 int ret;
4673c1a0 7751
76a4c804 7752 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
53b9033c 7753
7754 if (!ret
7755 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7756 || ad.indx)
7757 {
902602ef 7758 output_operand_lossage ("invalid address for 'O' output modifier");
53b9033c 7759 return;
7760 }
4673c1a0 7761
7762 if (ad.disp)
74d2529d 7763 output_addr_const (file, ad.disp);
4673c1a0 7764 else
7765 fprintf (file, "0");
7766 }
7767 return;
7768
7769 case 'R':
7770 {
7771 struct s390_address ad;
32eda510 7772 int ret;
4673c1a0 7773
76a4c804 7774 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
53b9033c 7775
7776 if (!ret
7777 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7778 || ad.indx)
7779 {
902602ef 7780 output_operand_lossage ("invalid address for 'R' output modifier");
53b9033c 7781 return;
7782 }
4673c1a0 7783
7784 if (ad.base)
7785 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7786 else
7787 fprintf (file, "0");
7788 }
7789 return;
7790
0574acbe 7791 case 'S':
7792 {
7793 struct s390_address ad;
32eda510 7794 int ret;
0574acbe 7795
53b9033c 7796 if (!MEM_P (x))
7797 {
902602ef 7798 output_operand_lossage ("memory reference expected for "
7799 "'S' output modifier");
53b9033c 7800 return;
7801 }
32eda510 7802 ret = s390_decompose_address (XEXP (x, 0), &ad);
53b9033c 7803
7804 if (!ret
7805 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7806 || ad.indx)
7807 {
902602ef 7808 output_operand_lossage ("invalid address for 'S' output modifier");
53b9033c 7809 return;
7810 }
0574acbe 7811
7812 if (ad.disp)
7813 output_addr_const (file, ad.disp);
7814 else
7815 fprintf (file, "0");
7816
7817 if (ad.base)
7818 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7819 }
7820 return;
7821
4673c1a0 7822 case 'N':
7823 if (GET_CODE (x) == REG)
7824 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7825 else if (GET_CODE (x) == MEM)
29c05e22 7826 x = change_address (x, VOIDmode,
7827 plus_constant (Pmode, XEXP (x, 0), 4));
4673c1a0 7828 else
902602ef 7829 output_operand_lossage ("register or memory expression expected "
7830 "for 'N' output modifier");
4673c1a0 7831 break;
7832
7833 case 'M':
7834 if (GET_CODE (x) == REG)
7835 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7836 else if (GET_CODE (x) == MEM)
29c05e22 7837 x = change_address (x, VOIDmode,
7838 plus_constant (Pmode, XEXP (x, 0), 8));
4673c1a0 7839 else
902602ef 7840 output_operand_lossage ("register or memory expression expected "
7841 "for 'M' output modifier");
4673c1a0 7842 break;
63ebd742 7843
7844 case 'Y':
2be7449b 7845 print_addrstyle_operand (file, x);
63ebd742 7846 return;
4673c1a0 7847 }
7848
7849 switch (GET_CODE (x))
7850 {
7851 case REG:
76a4c804 7852 /* Print FP regs as fx instead of vx when they are accessed
7853 through non-vector mode. */
7854 if (code == 'v'
7855 || VECTOR_NOFP_REG_P (x)
7856 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7857 || (VECTOR_REG_P (x)
7858 && (GET_MODE_SIZE (GET_MODE (x)) /
7859 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7860 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7861 else
7862 fprintf (file, "%s", reg_names[REGNO (x)]);
4673c1a0 7863 break;
7864
7865 case MEM:
3c047fe9 7866 output_address (GET_MODE (x), XEXP (x, 0));
4673c1a0 7867 break;
7868
7869 case CONST:
7870 case CODE_LABEL:
7871 case LABEL_REF:
7872 case SYMBOL_REF:
74d2529d 7873 output_addr_const (file, x);
4673c1a0 7874 break;
7875
7876 case CONST_INT:
0d46035f 7877 ival = INTVAL (x);
7878 switch (code)
7879 {
7880 case 0:
7881 break;
7882 case 'b':
7883 ival &= 0xff;
7884 break;
7885 case 'c':
7886 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7887 break;
7888 case 'x':
7889 ival &= 0xffff;
7890 break;
7891 case 'h':
7892 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7893 break;
7894 case 'i':
7895 ival = s390_extract_part (x, HImode, 0);
7896 break;
7897 case 'j':
7898 ival = s390_extract_part (x, HImode, -1);
7899 break;
7900 case 'k':
7901 ival = s390_extract_part (x, SImode, 0);
7902 break;
7903 case 'm':
7904 ival = s390_extract_part (x, SImode, -1);
7905 break;
7906 case 'o':
7907 ival &= 0xffffffff;
7908 break;
7909 case 'e': case 'f':
7910 case 's': case 't':
7911 {
e64f5133 7912 int start, end;
7913 int len;
0d46035f 7914 bool ok;
7915
7916 len = (code == 's' || code == 'e' ? 64 : 32);
e64f5133 7917 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
0d46035f 7918 gcc_assert (ok);
7919 if (code == 's' || code == 't')
e64f5133 7920 ival = start;
0d46035f 7921 else
e64f5133 7922 ival = end;
0d46035f 7923 }
7924 break;
7925 default:
7926 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7927 }
7928 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
8b4a4127 7929 break;
7930
ba0e61d6 7931 case CONST_WIDE_INT:
8b4a4127 7932 if (code == 'b')
ba0e61d6 7933 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7934 CONST_WIDE_INT_ELT (x, 0) & 0xff);
4673c1a0 7935 else if (code == 'x')
ba0e61d6 7936 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7937 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
4673c1a0 7938 else if (code == 'h')
902602ef 7939 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
ba0e61d6 7940 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
4673c1a0 7941 else
53b9033c 7942 {
7943 if (code == 0)
902602ef 7944 output_operand_lossage ("invalid constant - try using "
7945 "an output modifier");
53b9033c 7946 else
902602ef 7947 output_operand_lossage ("invalid constant for output modifier '%c'",
7948 code);
53b9033c 7949 }
4673c1a0 7950 break;
76a4c804 7951 case CONST_VECTOR:
7952 switch (code)
7953 {
80fc7f56 7954 case 'h':
7955 gcc_assert (const_vec_duplicate_p (x));
7956 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7957 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7958 break;
76a4c804 7959 case 'e':
7960 case 's':
7961 {
e64f5133 7962 int start, end;
76a4c804 7963 bool ok;
7964
e64f5133 7965 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
76a4c804 7966 gcc_assert (ok);
e64f5133 7967 ival = (code == 's') ? start : end;
76a4c804 7968 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7969 }
7970 break;
7971 case 't':
7972 {
7973 unsigned mask;
7974 bool ok = s390_bytemask_vector_p (x, &mask);
7975 gcc_assert (ok);
7976 fprintf (file, "%u", mask);
7977 }
7978 break;
7979
7980 default:
7981 output_operand_lossage ("invalid constant vector for output "
7982 "modifier '%c'", code);
7983 }
7984 break;
4673c1a0 7985
7986 default:
53b9033c 7987 if (code == 0)
902602ef 7988 output_operand_lossage ("invalid expression - try using "
7989 "an output modifier");
53b9033c 7990 else
902602ef 7991 output_operand_lossage ("invalid expression for output "
7992 "modifier '%c'", code);
4673c1a0 7993 break;
7994 }
7995}
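
/* A few illustrative cases for the CONST_INT modifiers handled above:
   %x applied to (const_int 0xfffe) prints 65534 while %h prints -2 (the
   value read as a signed halfword); %b applied to (const_int 0x1ff) prints
   255 while %c prints -1.  */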
7996
58356836 7997/* Target hook for assembling integer objects. We need to define it
7998 here to work a round a bug in some versions of GAS, which couldn't
7999 handle values smaller than INT_MIN when printed in decimal. */
8000
8001static bool
b40da9a7 8002s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
58356836 8003{
8004 if (size == 8 && aligned_p
8005 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
8006 {
4840a03a 8007 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
8008 INTVAL (x));
58356836 8009 return true;
8010 }
8011 return default_assemble_integer (x, size, aligned_p);
8012}
8013
f81e845f 8014/* Returns true if register REGNO is used for forming
56769981 8015 a memory address in expression X. */
4673c1a0 8016
e5537457 8017static bool
b40da9a7 8018reg_used_in_mem_p (int regno, rtx x)
4673c1a0 8019{
8020 enum rtx_code code = GET_CODE (x);
8021 int i, j;
8022 const char *fmt;
f81e845f 8023
4673c1a0 8024 if (code == MEM)
8025 {
2ec77a7c 8026 if (refers_to_regno_p (regno, XEXP (x, 0)))
e5537457 8027 return true;
4673c1a0 8028 }
f81e845f 8029 else if (code == SET
8b4a4127 8030 && GET_CODE (SET_DEST (x)) == PC)
8031 {
2ec77a7c 8032 if (refers_to_regno_p (regno, SET_SRC (x)))
e5537457 8033 return true;
8b4a4127 8034 }
4673c1a0 8035
8036 fmt = GET_RTX_FORMAT (code);
8037 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8038 {
8039 if (fmt[i] == 'e'
8040 && reg_used_in_mem_p (regno, XEXP (x, i)))
e5537457 8041 return true;
f81e845f 8042
4673c1a0 8043 else if (fmt[i] == 'E')
8044 for (j = 0; j < XVECLEN (x, i); j++)
8045 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
e5537457 8046 return true;
4673c1a0 8047 }
e5537457 8048 return false;
4673c1a0 8049}
8050
0c034860 8051/* Returns true if expression DEP_RTX sets an address register
56769981 8052 used by instruction INSN to address memory. */
4673c1a0 8053
e5537457 8054static bool
ed3e6e5d 8055addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
4673c1a0 8056{
8b4a4127 8057 rtx target, pat;
4673c1a0 8058
aa90bb35 8059 if (NONJUMP_INSN_P (dep_rtx))
77985f1a 8060 dep_rtx = PATTERN (dep_rtx);
71343e6b 8061
4673c1a0 8062 if (GET_CODE (dep_rtx) == SET)
8063 {
8064 target = SET_DEST (dep_rtx);
147b6a2d 8065 if (GET_CODE (target) == STRICT_LOW_PART)
8066 target = XEXP (target, 0);
8067 while (GET_CODE (target) == SUBREG)
8068 target = SUBREG_REG (target);
8069
4673c1a0 8070 if (GET_CODE (target) == REG)
8071 {
8072 int regno = REGNO (target);
8073
71343e6b 8074 if (s390_safe_attr_type (insn) == TYPE_LA)
8b4a4127 8075 {
8076 pat = PATTERN (insn);
8077 if (GET_CODE (pat) == PARALLEL)
8078 {
32eda510 8079 gcc_assert (XVECLEN (pat, 0) == 2);
8b4a4127 8080 pat = XVECEXP (pat, 0, 0);
8081 }
32eda510 8082 gcc_assert (GET_CODE (pat) == SET);
2ec77a7c 8083 return refers_to_regno_p (regno, SET_SRC (pat));
8b4a4127 8084 }
71343e6b 8085 else if (get_attr_atype (insn) == ATYPE_AGEN)
8b4a4127 8086 return reg_used_in_mem_p (regno, PATTERN (insn));
8087 }
4673c1a0 8088 }
e5537457 8089 return false;
4673c1a0 8090}
8091
71343e6b 8092/* Return 1 if DEP_INSN sets a register used by INSN in the agen unit.  */
8093
f81e845f 8094int
ed3e6e5d 8095s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
f81e845f 8096{
71343e6b 8097 rtx dep_rtx = PATTERN (dep_insn);
8098 int i;
f81e845f 8099
8100 if (GET_CODE (dep_rtx) == SET
71343e6b 8101 && addr_generation_dependency_p (dep_rtx, insn))
8102 return 1;
8103 else if (GET_CODE (dep_rtx) == PARALLEL)
8104 {
8105 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8106 {
8107 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8108 return 1;
8109 }
8110 }
8111 return 0;
8112}
8113
510c2327 8114
e51ae8ff 8115/* A C statement (sans semicolon) to update the integer scheduling priority
8116 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
8117 reduce the priority to execute INSN later. Do not define this macro if
f81e845f 8118 you do not need to adjust the scheduling priorities of insns.
e51ae8ff 8119
f81e845f 8120 A STD instruction should be scheduled earlier,
e51ae8ff 8121 in order to use the bypass. */
e51ae8ff 8122static int
18282db0 8123s390_adjust_priority (rtx_insn *insn, int priority)
e51ae8ff 8124{
8125 if (! INSN_P (insn))
8126 return priority;
8127
9aae2901 8128 if (s390_tune <= PROCESSOR_2064_Z900)
e51ae8ff 8129 return priority;
8130
8131 switch (s390_safe_attr_type (insn))
8132 {
11f88fec 8133 case TYPE_FSTOREDF:
8134 case TYPE_FSTORESF:
e51ae8ff 8135 priority = priority << 3;
8136 break;
8137 case TYPE_STORE:
76dbb8df 8138 case TYPE_STM:
e51ae8ff 8139 priority = priority << 1;
8140 break;
8141 default:
8142 break;
8143 }
8144 return priority;
8145}
369293ed 8146
b0eacf26 8147
71343e6b 8148/* The number of instructions that can be issued per cycle. */
369293ed 8149
71343e6b 8150static int
b40da9a7 8151s390_issue_rate (void)
71343e6b 8152{
a850370e 8153 switch (s390_tune)
8154 {
8155 case PROCESSOR_2084_Z990:
8156 case PROCESSOR_2094_Z9_109:
9aae2901 8157 case PROCESSOR_2094_Z9_EC:
33d033da 8158 case PROCESSOR_2817_Z196:
a850370e 8159 return 3;
8160 case PROCESSOR_2097_Z10:
8161 return 2;
117d67d0 8162 case PROCESSOR_9672_G5:
8163 case PROCESSOR_9672_G6:
8164 case PROCESSOR_2064_Z900:
5ed1f72b 8165 /* Starting with EC12 we use the sched_reorder hook to take care
8166 of instruction dispatch constraints. The algorithm only
8167 picks the best instruction and assumes only a single
8168 instruction gets issued per cycle. */
8169 case PROCESSOR_2827_ZEC12:
117d67d0 8170 case PROCESSOR_2964_Z13:
a168a775 8171 case PROCESSOR_3906_Z14:
a850370e 8172 default:
8173 return 1;
8174 }
71343e6b 8175}
369293ed 8176
e51ae8ff 8177static int
b40da9a7 8178s390_first_cycle_multipass_dfa_lookahead (void)
e51ae8ff 8179{
a65ea517 8180 return 4;
e51ae8ff 8181}
8182
20074f87 8183/* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8184 Fix up MEMs as required. */
8185
8186static void
8187annotate_constant_pool_refs (rtx *x)
8188{
8189 int i, j;
8190 const char *fmt;
8191
32eda510 8192 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8193 || !CONSTANT_POOL_ADDRESS_P (*x));
20074f87 8194
8195 /* Literal pool references can only occur inside a MEM ... */
8196 if (GET_CODE (*x) == MEM)
8197 {
8198 rtx memref = XEXP (*x, 0);
8199
8200 if (GET_CODE (memref) == SYMBOL_REF
8201 && CONSTANT_POOL_ADDRESS_P (memref))
8202 {
8203 rtx base = cfun->machine->base_reg;
8204 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8205 UNSPEC_LTREF);
8206
8207 *x = replace_equiv_address (*x, addr);
8208 return;
8209 }
8210
8211 if (GET_CODE (memref) == CONST
8212 && GET_CODE (XEXP (memref, 0)) == PLUS
8213 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8214 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8215 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8216 {
8217 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8218 rtx sym = XEXP (XEXP (memref, 0), 0);
8219 rtx base = cfun->machine->base_reg;
8220 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8221 UNSPEC_LTREF);
8222
29c05e22 8223 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
20074f87 8224 return;
8225 }
8226 }
8227
8228 /* ... or a load-address type pattern. */
8229 if (GET_CODE (*x) == SET)
8230 {
8231 rtx addrref = SET_SRC (*x);
8232
8233 if (GET_CODE (addrref) == SYMBOL_REF
8234 && CONSTANT_POOL_ADDRESS_P (addrref))
8235 {
8236 rtx base = cfun->machine->base_reg;
8237 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8238 UNSPEC_LTREF);
8239
8240 SET_SRC (*x) = addr;
8241 return;
8242 }
8243
8244 if (GET_CODE (addrref) == CONST
8245 && GET_CODE (XEXP (addrref, 0)) == PLUS
8246 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8247 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8248 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8249 {
8250 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8251 rtx sym = XEXP (XEXP (addrref, 0), 0);
8252 rtx base = cfun->machine->base_reg;
8253 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8254 UNSPEC_LTREF);
8255
29c05e22 8256 SET_SRC (*x) = plus_constant (Pmode, addr, off);
20074f87 8257 return;
8258 }
8259 }
8260
8261 /* Annotate LTREL_BASE as well. */
8262 if (GET_CODE (*x) == UNSPEC
8263 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8264 {
8265 rtx base = cfun->machine->base_reg;
8266 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8267 UNSPEC_LTREL_BASE);
8268 return;
8269 }
8270
8271 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8272 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8273 {
8274 if (fmt[i] == 'e')
8275 {
8276 annotate_constant_pool_refs (&XEXP (*x, i));
8277 }
8278 else if (fmt[i] == 'E')
8279 {
8280 for (j = 0; j < XVECLEN (*x, i); j++)
8281 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
8282 }
8283 }
8284}
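
/* Schematically, this turns a literal pool access such as

     (mem (symbol_ref ".LC0"))    ; CONSTANT_POOL_ADDRESS_P is set

   into

     (mem (unspec [(symbol_ref ".LC0") (reg <base>)] UNSPEC_LTREF))

   so that the dependence on the literal pool base register is explicit in
   the RTL.  (".LC0" is only a placeholder name.)  */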
8285
875862bf 8286/* Split all branches that exceed the maximum distance.
8287 Returns true if this created a new literal pool entry. */
8288
8289static int
8290s390_split_branches (void)
8291{
8292 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
32eda510 8293 int new_literal = 0, ret;
93e0956b 8294 rtx_insn *insn;
ed7591be 8295 rtx pat, target;
875862bf 8296 rtx *label;
8297
8298 /* We need correct insn addresses. */
8299
8300 shorten_branches (get_insns ());
8301
8302 /* Find all branches that exceed 64KB, and split them. */
8303
8304 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8305 {
245402e7 8306 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
875862bf 8307 continue;
8308
8309 pat = PATTERN (insn);
245402e7 8310 if (GET_CODE (pat) == PARALLEL)
875862bf 8311 pat = XVECEXP (pat, 0, 0);
8312 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8313 continue;
8314
8315 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8316 {
8317 label = &SET_SRC (pat);
8318 }
8319 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8320 {
8321 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8322 label = &XEXP (SET_SRC (pat), 1);
8323 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8324 label = &XEXP (SET_SRC (pat), 2);
8325 else
8326 continue;
8327 }
8328 else
8329 continue;
8330
8331 if (get_attr_length (insn) <= 4)
8332 continue;
8333
77beec48 8334 /* We are going to use the return register as scratch register,
8335 make sure it will be saved/restored by the prologue/epilogue. */
8336 cfun_frame_layout.save_return_addr_p = 1;
8337
875862bf 8338 if (!flag_pic)
8339 {
8340 new_literal = 1;
ed7591be 8341 rtx mem = force_const_mem (Pmode, *label);
d1f9b275 8342 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8343 insn);
ed7591be 8344 INSN_ADDRESSES_NEW (set_insn, -1);
8345 annotate_constant_pool_refs (&PATTERN (set_insn));
875862bf 8346
8347 target = temp_reg;
8348 }
8349 else
8350 {
8351 new_literal = 1;
8352 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8353 UNSPEC_LTREL_OFFSET);
8354 target = gen_rtx_CONST (Pmode, target);
8355 target = force_const_mem (Pmode, target);
d1f9b275 8356 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8357 insn);
ed7591be 8358 INSN_ADDRESSES_NEW (set_insn, -1);
8359 annotate_constant_pool_refs (&PATTERN (set_insn));
875862bf 8360
8361 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8362 cfun->machine->base_reg),
8363 UNSPEC_LTREL_BASE);
8364 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8365 }
8366
32eda510 8367 ret = validate_change (insn, label, target, 0);
8368 gcc_assert (ret);
875862bf 8369 }
8370
8371 return new_literal;
8372}
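
/* In other words (sketch only): a conditional branch whose target lies more
   than 64KB away is rewritten so that the target address is first loaded
   from the literal pool into the return register and the branch then jumps
   indirectly through that register; for PIC code the address is instead
   composed from the literal pool base plus an offset taken from the pool.  */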
8373
0756cebb 8374
ffead1ca 8375/* Find an annotated literal pool symbol referenced in RTX X,
8376 and store it at REF. Will abort if X contains references to
20074f87 8377 more than one such pool symbol; multiple references to the same
8378 symbol are allowed, however.
0756cebb 8379
f81e845f 8380 The rtx pointed to by REF must be initialized to NULL_RTX
0756cebb 8381 by the caller before calling this routine. */
8382
8383static void
b40da9a7 8384find_constant_pool_ref (rtx x, rtx *ref)
0756cebb 8385{
8386 int i, j;
8387 const char *fmt;
8388
12ef3745 8389 /* Ignore LTREL_BASE references. */
8390 if (GET_CODE (x) == UNSPEC
8391 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8392 return;
c2c1332a 8393 /* Likewise POOL_ENTRY insns. */
8394 if (GET_CODE (x) == UNSPEC_VOLATILE
8395 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8396 return;
12ef3745 8397
32eda510 8398 gcc_assert (GET_CODE (x) != SYMBOL_REF
8399 || !CONSTANT_POOL_ADDRESS_P (x));
20074f87 8400
8401 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
0756cebb 8402 {
20074f87 8403 rtx sym = XVECEXP (x, 0, 0);
32eda510 8404 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8405 && CONSTANT_POOL_ADDRESS_P (sym));
20074f87 8406
0756cebb 8407 if (*ref == NULL_RTX)
20074f87 8408 *ref = sym;
ffead1ca 8409 else
32eda510 8410 gcc_assert (*ref == sym);
20074f87 8411
8412 return;
0756cebb 8413 }
8414
8415 fmt = GET_RTX_FORMAT (GET_CODE (x));
8416 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8417 {
8418 if (fmt[i] == 'e')
8419 {
8420 find_constant_pool_ref (XEXP (x, i), ref);
8421 }
8422 else if (fmt[i] == 'E')
8423 {
8424 for (j = 0; j < XVECLEN (x, i); j++)
8425 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8426 }
8427 }
8428}
8429
ffead1ca 8430/* Replace every reference to the annotated literal pool
20074f87 8431 symbol REF in X by its base plus OFFSET. */
0756cebb 8432
8433static void
20074f87 8434replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
0756cebb 8435{
8436 int i, j;
8437 const char *fmt;
8438
32eda510 8439 gcc_assert (*x != ref);
0756cebb 8440
20074f87 8441 if (GET_CODE (*x) == UNSPEC
8442 && XINT (*x, 1) == UNSPEC_LTREF
8443 && XVECEXP (*x, 0, 0) == ref)
0756cebb 8444 {
20074f87 8445 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8446 return;
0756cebb 8447 }
8448
20074f87 8449 if (GET_CODE (*x) == PLUS
8450 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8451 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8452 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8453 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
0756cebb 8454 {
20074f87 8455 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
29c05e22 8456 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
20074f87 8457 return;
0756cebb 8458 }
8459
8460 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8461 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8462 {
8463 if (fmt[i] == 'e')
8464 {
20074f87 8465 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
0756cebb 8466 }
8467 else if (fmt[i] == 'E')
8468 {
8469 for (j = 0; j < XVECLEN (*x, i); j++)
20074f87 8470 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
0756cebb 8471 }
8472 }
8473}
8474
f81e845f 8475/* Check whether X contains an UNSPEC_LTREL_BASE.
12ef3745 8476 Return its constant pool symbol if found, NULL_RTX otherwise. */
96be3ab6 8477
12ef3745 8478static rtx
b40da9a7 8479find_ltrel_base (rtx x)
96be3ab6 8480{
96be3ab6 8481 int i, j;
8482 const char *fmt;
8483
12ef3745 8484 if (GET_CODE (x) == UNSPEC
8485 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8486 return XVECEXP (x, 0, 0);
96be3ab6 8487
8488 fmt = GET_RTX_FORMAT (GET_CODE (x));
8489 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8490 {
8491 if (fmt[i] == 'e')
8492 {
12ef3745 8493 rtx fnd = find_ltrel_base (XEXP (x, i));
8494 if (fnd)
8495 return fnd;
96be3ab6 8496 }
8497 else if (fmt[i] == 'E')
8498 {
8499 for (j = 0; j < XVECLEN (x, i); j++)
12ef3745 8500 {
8501 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8502 if (fnd)
8503 return fnd;
8504 }
96be3ab6 8505 }
8506 }
8507
12ef3745 8508 return NULL_RTX;
96be3ab6 8509}
8510
20074f87 8511/* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
96be3ab6 8512
8513static void
20074f87 8514replace_ltrel_base (rtx *x)
96be3ab6 8515{
12ef3745 8516 int i, j;
96be3ab6 8517 const char *fmt;
8518
12ef3745 8519 if (GET_CODE (*x) == UNSPEC
8520 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
96be3ab6 8521 {
20074f87 8522 *x = XVECEXP (*x, 0, 1);
12ef3745 8523 return;
96be3ab6 8524 }
8525
8526 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8527 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8528 {
8529 if (fmt[i] == 'e')
8530 {
20074f87 8531 replace_ltrel_base (&XEXP (*x, i));
96be3ab6 8532 }
8533 else if (fmt[i] == 'E')
8534 {
8535 for (j = 0; j < XVECLEN (*x, i); j++)
20074f87 8536 replace_ltrel_base (&XVECEXP (*x, i, j));
96be3ab6 8537 }
8538 }
8539}
8540
8541
12ef3745 8542/* We keep a list of constants which we have to add to internal
0756cebb 8543 constant tables in the middle of large functions. */
8544
02b901ef 8545#define NR_C_MODES 32
3754d046 8546machine_mode constant_modes[NR_C_MODES] =
0756cebb 8547{
36868490 8548 TFmode, TImode, TDmode,
02b901ef 8549 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8550 V4SFmode, V2DFmode, V1TFmode,
36868490 8551 DFmode, DImode, DDmode,
76a4c804 8552 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
36868490 8553 SFmode, SImode, SDmode,
76a4c804 8554 V4QImode, V2HImode, V1SImode, V1SFmode,
0756cebb 8555 HImode,
76a4c804 8556 V2QImode, V1HImode,
8557 QImode,
8558 V1QImode
0756cebb 8559};
8560
0756cebb 8561struct constant
8562{
8563 struct constant *next;
8564 rtx value;
93e0956b 8565 rtx_code_label *label;
0756cebb 8566};
8567
8568struct constant_pool
8569{
8570 struct constant_pool *next;
93e0956b 8571 rtx_insn *first_insn;
8572 rtx_insn *pool_insn;
96be3ab6 8573 bitmap insns;
93e0956b 8574 rtx_insn *emit_pool_after;
0756cebb 8575
8576 struct constant *constants[NR_C_MODES];
d345b493 8577 struct constant *execute;
93e0956b 8578 rtx_code_label *label;
0756cebb 8579 int size;
8580};
8581
875862bf 8582/* Allocate new constant_pool structure. */
8583
8584static struct constant_pool *
8585s390_alloc_pool (void)
8586{
8587 struct constant_pool *pool;
8588 int i;
8589
8590 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8591 pool->next = NULL;
8592 for (i = 0; i < NR_C_MODES; i++)
8593 pool->constants[i] = NULL;
8594
8595 pool->execute = NULL;
8596 pool->label = gen_label_rtx ();
93e0956b 8597 pool->first_insn = NULL;
8598 pool->pool_insn = NULL;
875862bf 8599 pool->insns = BITMAP_ALLOC (NULL);
8600 pool->size = 0;
93e0956b 8601 pool->emit_pool_after = NULL;
875862bf 8602
8603 return pool;
8604}
0756cebb 8605
8606/* Create new constant pool covering instructions starting at INSN
8607 and chain it to the end of POOL_LIST. */
8608
8609static struct constant_pool *
93e0956b 8610s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
0756cebb 8611{
8612 struct constant_pool *pool, **prev;
0756cebb 8613
c2c1332a 8614 pool = s390_alloc_pool ();
0756cebb 8615 pool->first_insn = insn;
96be3ab6 8616
0756cebb 8617 for (prev = pool_list; *prev; prev = &(*prev)->next)
8618 ;
8619 *prev = pool;
8620
8621 return pool;
8622}
8623
96be3ab6 8624/* End range of instructions covered by POOL at INSN and emit
8625 placeholder insn representing the pool. */
0756cebb 8626
8627static void
93e0956b 8628s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
0756cebb 8629{
96be3ab6 8630 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8631
8632 if (!insn)
8633 insn = get_last_insn ();
8634
8635 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8636 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8637}
8638
8639/* Add INSN to the list of insns covered by POOL. */
8640
8641static void
b40da9a7 8642s390_add_pool_insn (struct constant_pool *pool, rtx insn)
96be3ab6 8643{
8644 bitmap_set_bit (pool->insns, INSN_UID (insn));
0756cebb 8645}
8646
8647/* Return pool out of POOL_LIST that covers INSN. */
8648
8649static struct constant_pool *
b40da9a7 8650s390_find_pool (struct constant_pool *pool_list, rtx insn)
0756cebb 8651{
0756cebb 8652 struct constant_pool *pool;
8653
0756cebb 8654 for (pool = pool_list; pool; pool = pool->next)
96be3ab6 8655 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
0756cebb 8656 break;
8657
8658 return pool;
8659}
8660
96be3ab6 8661/* Add constant VAL of mode MODE to the constant pool POOL. */
0756cebb 8662
96be3ab6 8663static void
3754d046 8664s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
0756cebb 8665{
8666 struct constant *c;
0756cebb 8667 int i;
8668
8669 for (i = 0; i < NR_C_MODES; i++)
8670 if (constant_modes[i] == mode)
8671 break;
32eda510 8672 gcc_assert (i != NR_C_MODES);
0756cebb 8673
8674 for (c = pool->constants[i]; c != NULL; c = c->next)
8675 if (rtx_equal_p (val, c->value))
8676 break;
8677
8678 if (c == NULL)
8679 {
8680 c = (struct constant *) xmalloc (sizeof *c);
8681 c->value = val;
8682 c->label = gen_label_rtx ();
8683 c->next = pool->constants[i];
8684 pool->constants[i] = c;
8685 pool->size += GET_MODE_SIZE (mode);
8686 }
96be3ab6 8687}
0756cebb 8688
1ed7a160 8689/* Return an rtx that represents the offset of X from the start of
8690 pool POOL. */
8691
8692static rtx
8693s390_pool_offset (struct constant_pool *pool, rtx x)
8694{
8695 rtx label;
8696
8697 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8698 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8699 UNSPEC_POOL_OFFSET);
8700 return gen_rtx_CONST (GET_MODE (x), x);
8701}
8702
96be3ab6 8703/* Find constant VAL of mode MODE in the constant pool POOL.
8704 Return an RTX describing the distance from the start of
8705 the pool to the location of the new constant. */
f81e845f 8706
96be3ab6 8707static rtx
b40da9a7 8708s390_find_constant (struct constant_pool *pool, rtx val,
3754d046 8709 machine_mode mode)
96be3ab6 8710{
8711 struct constant *c;
96be3ab6 8712 int i;
f81e845f 8713
96be3ab6 8714 for (i = 0; i < NR_C_MODES; i++)
8715 if (constant_modes[i] == mode)
8716 break;
32eda510 8717 gcc_assert (i != NR_C_MODES);
f81e845f 8718
96be3ab6 8719 for (c = pool->constants[i]; c != NULL; c = c->next)
8720 if (rtx_equal_p (val, c->value))
8721 break;
f81e845f 8722
32eda510 8723 gcc_assert (c);
f81e845f 8724
1ed7a160 8725 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
0756cebb 8726}
8727
875862bf 8728/* Check whether INSN is an execute. Return the label_ref to its
8729 execute target template if so, NULL_RTX otherwise. */
8730
8731static rtx
8732s390_execute_label (rtx insn)
8733{
aa90bb35 8734 if (NONJUMP_INSN_P (insn)
875862bf 8735 && GET_CODE (PATTERN (insn)) == PARALLEL
8736 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8737 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8738 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8739
8740 return NULL_RTX;
8741}
8742
d345b493 8743/* Add execute target for INSN to the constant pool POOL. */
8744
8745static void
8746s390_add_execute (struct constant_pool *pool, rtx insn)
8747{
8748 struct constant *c;
8749
8750 for (c = pool->execute; c != NULL; c = c->next)
8751 if (INSN_UID (insn) == INSN_UID (c->value))
8752 break;
8753
8754 if (c == NULL)
8755 {
d345b493 8756 c = (struct constant *) xmalloc (sizeof *c);
8757 c->value = insn;
babfdedf 8758 c->label = gen_label_rtx ();
d345b493 8759 c->next = pool->execute;
8760 pool->execute = c;
babfdedf 8761 pool->size += 6;
d345b493 8762 }
8763}
8764
8765/* Find execute target for INSN in the constant pool POOL.
8766 Return an RTX describing the distance from the start of
8767 the pool to the location of the execute target. */
8768
8769static rtx
8770s390_find_execute (struct constant_pool *pool, rtx insn)
8771{
8772 struct constant *c;
d345b493 8773
8774 for (c = pool->execute; c != NULL; c = c->next)
8775 if (INSN_UID (insn) == INSN_UID (c->value))
8776 break;
8777
32eda510 8778 gcc_assert (c);
d345b493 8779
1ed7a160 8780 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
d345b493 8781}
8782
875862bf 8783/* For an execute INSN, extract the execute target template. */
d345b493 8784
8785static rtx
875862bf 8786s390_execute_target (rtx insn)
d345b493 8787{
875862bf 8788 rtx pattern = PATTERN (insn);
8789 gcc_assert (s390_execute_label (insn));
d345b493 8790
8791 if (XVECLEN (pattern, 0) == 2)
8792 {
8793 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8794 }
8795 else
8796 {
8797 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8798 int i;
8799
8800 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8801 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8802
8803 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8804 }
8805
8806 return pattern;
8807}
8808
8809/* Indicate that INSN cannot be duplicated. This is the case for
8810 execute insns that carry a unique label. */
8811
8812static bool
18282db0 8813s390_cannot_copy_insn_p (rtx_insn *insn)
d345b493 8814{
8815 rtx label = s390_execute_label (insn);
8816 return label && label != const0_rtx;
8817}
8818
c2c1332a 8819/* Dump out the constants in POOL. If REMOTE_LABEL is true,
8820 do not emit the pool base label. */
0756cebb 8821
d345b493 8822static void
c2c1332a 8823s390_dump_pool (struct constant_pool *pool, bool remote_label)
0756cebb 8824{
8825 struct constant *c;
93e0956b 8826 rtx_insn *insn = pool->pool_insn;
0756cebb 8827 int i;
8828
d345b493 8829 /* Switch to rodata section. */
8830 if (TARGET_CPU_ZARCH)
8831 {
8832 insn = emit_insn_after (gen_pool_section_start (), insn);
8833 INSN_ADDRESSES_NEW (insn, -1);
8834 }
8835
8836 /* Ensure minimum pool alignment. */
dafc8d45 8837 if (TARGET_CPU_ZARCH)
d345b493 8838 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
0756cebb 8839 else
d345b493 8840 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
0756cebb 8841 INSN_ADDRESSES_NEW (insn, -1);
8842
d345b493 8843 /* Emit pool base label. */
c2c1332a 8844 if (!remote_label)
8845 {
8846 insn = emit_label_after (pool->label, insn);
8847 INSN_ADDRESSES_NEW (insn, -1);
8848 }
0756cebb 8849
8850 /* Dump constants in descending alignment requirement order,
8851 ensuring proper alignment for every constant. */
8852 for (i = 0; i < NR_C_MODES; i++)
8853 for (c = pool->constants[i]; c; c = c->next)
8854 {
12ef3745 8855 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
b2ed6df1 8856 rtx value = copy_rtx (c->value);
96be3ab6 8857 if (GET_CODE (value) == CONST
8858 && GET_CODE (XEXP (value, 0)) == UNSPEC
12ef3745 8859 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
96be3ab6 8860 && XVECLEN (XEXP (value, 0), 0) == 1)
1ed7a160 8861 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
96be3ab6 8862
0756cebb 8863 insn = emit_label_after (c->label, insn);
8864 INSN_ADDRESSES_NEW (insn, -1);
df82fb76 8865
f588eb9f 8866 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
df82fb76 8867 gen_rtvec (1, value),
8868 UNSPECV_POOL_ENTRY);
8869 insn = emit_insn_after (value, insn);
0756cebb 8870 INSN_ADDRESSES_NEW (insn, -1);
8871 }
8872
d345b493 8873 /* Ensure minimum alignment for instructions. */
8874 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
0756cebb 8875 INSN_ADDRESSES_NEW (insn, -1);
8876
d345b493 8877 /* Output in-pool execute template insns. */
8878 for (c = pool->execute; c; c = c->next)
8879 {
d345b493 8880 insn = emit_label_after (c->label, insn);
8881 INSN_ADDRESSES_NEW (insn, -1);
8882
8883 insn = emit_insn_after (s390_execute_target (c->value), insn);
8884 INSN_ADDRESSES_NEW (insn, -1);
8885 }
8886
8887 /* Switch back to previous section. */
8888 if (TARGET_CPU_ZARCH)
8889 {
8890 insn = emit_insn_after (gen_pool_section_end (), insn);
8891 INSN_ADDRESSES_NEW (insn, -1);
8892 }
8893
0756cebb 8894 insn = emit_barrier_after (insn);
8895 INSN_ADDRESSES_NEW (insn, -1);
8896
96be3ab6 8897 /* Remove placeholder insn. */
8898 remove_insn (pool->pool_insn);
d345b493 8899}
8900
0756cebb 8901/* Free all memory used by POOL. */
8902
8903static void
b40da9a7 8904s390_free_pool (struct constant_pool *pool)
0756cebb 8905{
d345b493 8906 struct constant *c, *next;
0756cebb 8907 int i;
8908
8909 for (i = 0; i < NR_C_MODES; i++)
d345b493 8910 for (c = pool->constants[i]; c; c = next)
8911 {
8912 next = c->next;
8913 free (c);
8914 }
8915
8916 for (c = pool->execute; c; c = next)
0756cebb 8917 {
d345b493 8918 next = c->next;
8919 free (c);
0756cebb 8920 }
8921
4d6e8511 8922 BITMAP_FREE (pool->insns);
0756cebb 8923 free (pool);
f81e845f 8924}
0756cebb 8925
0756cebb 8926
c2c1332a 8927/* Collect main literal pool. Return NULL on overflow. */
8928
8929static struct constant_pool *
8930s390_mainpool_start (void)
8931{
8932 struct constant_pool *pool;
93e0956b 8933 rtx_insn *insn;
c2c1332a 8934
8935 pool = s390_alloc_pool ();
8936
8937 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8938 {
aa90bb35 8939 if (NONJUMP_INSN_P (insn)
20074f87 8940 && GET_CODE (PATTERN (insn)) == SET
8941 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8942 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
c2c1332a 8943 {
7a64c761 8944 /* There might be two main_pool instructions if base_reg
8945 is call-clobbered; one for shrink-wrapped code and one
8946 for the rest. We want to keep the first. */
8947 if (pool->pool_insn)
8948 {
8949 insn = PREV_INSN (insn);
8950 delete_insn (NEXT_INSN (insn));
8951 continue;
8952 }
c2c1332a 8953 pool->pool_insn = insn;
8954 }
8955
babfdedf 8956 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
d345b493 8957 {
8958 s390_add_execute (pool, insn);
8959 }
aa90bb35 8960 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
c2c1332a 8961 {
8962 rtx pool_ref = NULL_RTX;
8963 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8964 if (pool_ref)
8965 {
8966 rtx constant = get_pool_constant (pool_ref);
3754d046 8967 machine_mode mode = get_pool_mode (pool_ref);
c2c1332a 8968 s390_add_constant (pool, constant, mode);
8969 }
8970 }
86428198 8971
8972 /* If hot/cold partitioning is enabled we have to make sure that
8973 the literal pool is emitted in the same section where the
8974 initialization of the literal pool base pointer takes place.
 8975 emit_pool_after is only used in the non-overflow case on
 8976 non-Z CPUs where we can emit the literal pool at the end of the
8977 function body within the text section. */
8978 if (NOTE_P (insn)
7338c728 8979 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8980 && !pool->emit_pool_after)
8981 pool->emit_pool_after = PREV_INSN (insn);
c2c1332a 8982 }
8983
32eda510 8984 gcc_assert (pool->pool_insn || pool->size == 0);
c2c1332a 8985
8986 if (pool->size >= 4096)
8987 {
7de9f7aa 8988 /* We're going to chunkify the pool, so remove the main
8989 pool placeholder insn. */
8990 remove_insn (pool->pool_insn);
8991
c2c1332a 8992 s390_free_pool (pool);
8993 pool = NULL;
8994 }
8995
86428198 8996 /* If the function ends with the section where the literal pool
 8997 should be emitted, set the marker to its end. */
7338c728 8998 if (pool && !pool->emit_pool_after)
86428198 8999 pool->emit_pool_after = get_last_insn ();
9000
c2c1332a 9001 return pool;
9002}
9003
9004/* POOL holds the main literal pool as collected by s390_mainpool_start.
9005 Modify the current function to output the pool constants as well as
20074f87 9006 the pool register setup instruction. */
c2c1332a 9007
9008static void
20074f87 9009s390_mainpool_finish (struct constant_pool *pool)
c2c1332a 9010{
4fed3f99 9011 rtx base_reg = cfun->machine->base_reg;
c2c1332a 9012
9013 /* If the pool is empty, we're done. */
9014 if (pool->size == 0)
9015 {
4fed3f99 9016 /* We don't actually need a base register after all. */
9017 cfun->machine->base_reg = NULL_RTX;
9018
9019 if (pool->pool_insn)
9020 remove_insn (pool->pool_insn);
c2c1332a 9021 s390_free_pool (pool);
9022 return;
9023 }
9024
9025 /* We need correct insn addresses. */
9026 shorten_branches (get_insns ());
9027
dafc8d45 9028 /* On zSeries, we use a LARL to load the pool register. The pool is
c2c1332a 9029 located in the .rodata section, so we emit it after the function. */
dafc8d45 9030 if (TARGET_CPU_ZARCH)
c2c1332a 9031 {
ed7591be 9032 rtx set = gen_main_base_64 (base_reg, pool->label);
9033 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
c2c1332a 9034 INSN_ADDRESSES_NEW (insn, -1);
9035 remove_insn (pool->pool_insn);
f588eb9f 9036
9037 insn = get_last_insn ();
c2c1332a 9038 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9039 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9040
9041 s390_dump_pool (pool, 0);
9042 }
9043
dafc8d45 9044 /* On S/390, if the total size of the function's code plus literal pool
c2c1332a 9045 does not exceed 4096 bytes, we use BASR to set up a function base
9046 pointer, and emit the literal pool at the end of the function. */
86428198 9047 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
c2c1332a 9048 + pool->size + 8 /* alignment slop */ < 4096)
9049 {
ed7591be 9050 rtx set = gen_main_base_31_small (base_reg, pool->label);
9051 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
c2c1332a 9052 INSN_ADDRESSES_NEW (insn, -1);
9053 remove_insn (pool->pool_insn);
9054
9055 insn = emit_label_after (pool->label, insn);
9056 INSN_ADDRESSES_NEW (insn, -1);
9057
86428198 9058 /* emit_pool_after will be set by s390_mainpool_start to the
9059 last insn of the section where the literal pool should be
9060 emitted. */
9061 insn = pool->emit_pool_after;
9062
c2c1332a 9063 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9064 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9065
9066 s390_dump_pool (pool, 1);
9067 }
9068
9069 /* Otherwise, we emit an inline literal pool and use BASR to branch
9070 over it, setting up the pool register at the same time. */
9071 else
9072 {
ed7591be 9073 rtx_code_label *pool_end = gen_label_rtx ();
c2c1332a 9074
ed7591be 9075 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9076 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
12f0f6d7 9077 JUMP_LABEL (insn) = pool_end;
c2c1332a 9078 INSN_ADDRESSES_NEW (insn, -1);
9079 remove_insn (pool->pool_insn);
9080
9081 insn = emit_label_after (pool->label, insn);
9082 INSN_ADDRESSES_NEW (insn, -1);
9083
9084 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9085 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9086
9087 insn = emit_label_after (pool_end, pool->pool_insn);
9088 INSN_ADDRESSES_NEW (insn, -1);
9089
9090 s390_dump_pool (pool, 1);
9091 }
9092
9093
9094 /* Replace all literal pool references. */
9095
91a55c11 9096 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
c2c1332a 9097 {
9098 if (INSN_P (insn))
20074f87 9099 replace_ltrel_base (&PATTERN (insn));
c2c1332a 9100
aa90bb35 9101 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
c2c1332a 9102 {
9103 rtx addr, pool_ref = NULL_RTX;
9104 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9105 if (pool_ref)
9106 {
d345b493 9107 if (s390_execute_label (insn))
9108 addr = s390_find_execute (pool, insn);
9109 else
9110 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9111 get_pool_mode (pool_ref));
9112
c2c1332a 9113 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9114 INSN_CODE (insn) = -1;
9115 }
9116 }
9117 }
9118
9119
9120 /* Free the pool. */
9121 s390_free_pool (pool);
9122}
9123
9124/* POOL holds the main literal pool as collected by s390_mainpool_start.
9125 We have decided we cannot use this pool, so revert all changes
9126 to the current function that were done by s390_mainpool_start. */
9127static void
9128s390_mainpool_cancel (struct constant_pool *pool)
9129{
9130 /* We didn't actually change the instruction stream, so simply
9131 free the pool memory. */
9132 s390_free_pool (pool);
9133}
9134
9135
20074f87 9136/* Chunkify the literal pool. */
4673c1a0 9137
0756cebb 9138#define S390_POOL_CHUNK_MIN 0xc00
9139#define S390_POOL_CHUNK_MAX 0xe00
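/* For illustration: both limits lie well below the 4096 byte range
   that a 12-bit unsigned displacement from the base register can
   address (0xc00 == 3072, 0xe00 == 3584).  The slack presumably covers
   alignment padding, execute templates and the base register reload
   insns inserted later, whose effect on the addresses is only
   estimated.  */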
9140
f81e845f 9141static struct constant_pool *
20074f87 9142s390_chunkify_start (void)
4673c1a0 9143{
0756cebb 9144 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9145 int extra_size = 0;
9146 bitmap far_labels;
12ef3745 9147 rtx pending_ltrel = NULL_RTX;
93e0956b 9148 rtx_insn *insn;
4673c1a0 9149
b40da9a7 9150 rtx (*gen_reload_base) (rtx, rtx) =
dafc8d45 9151 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
96be3ab6 9152
9153
9a2a66ae 9154 /* We need correct insn addresses. */
9155
9156 shorten_branches (get_insns ());
9157
12ef3745 9158 /* Scan all insns and move literals to pool chunks. */
479ca6e8 9159
479ca6e8 9160 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4673c1a0 9161 {
86428198 9162 bool section_switch_p = false;
9163
12ef3745 9164 /* Check for pending LTREL_BASE. */
9165 if (INSN_P (insn))
9166 {
9167 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9168 if (ltrel_base)
9169 {
32eda510 9170 gcc_assert (ltrel_base == pending_ltrel);
9171 pending_ltrel = NULL_RTX;
12ef3745 9172 }
9173 }
9174
babfdedf 9175 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
d345b493 9176 {
9177 if (!curr_pool)
9178 curr_pool = s390_start_pool (&pool_list, insn);
9179
9180 s390_add_execute (curr_pool, insn);
9181 s390_add_pool_insn (curr_pool, insn);
9182 }
aa90bb35 9183 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
0756cebb 9184 {
96be3ab6 9185 rtx pool_ref = NULL_RTX;
0756cebb 9186 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9187 if (pool_ref)
9188 {
12ef3745 9189 rtx constant = get_pool_constant (pool_ref);
3754d046 9190 machine_mode mode = get_pool_mode (pool_ref);
12ef3745 9191
0756cebb 9192 if (!curr_pool)
9193 curr_pool = s390_start_pool (&pool_list, insn);
9194
12ef3745 9195 s390_add_constant (curr_pool, constant, mode);
96be3ab6 9196 s390_add_pool_insn (curr_pool, insn);
96be3ab6 9197
12ef3745 9198 /* Don't split the pool chunk between a LTREL_OFFSET load
9199 and the corresponding LTREL_BASE. */
9200 if (GET_CODE (constant) == CONST
9201 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9202 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9203 {
32eda510 9204 gcc_assert (!pending_ltrel);
12ef3745 9205 pending_ltrel = pool_ref;
9206 }
0756cebb 9207 }
9208 }
9209
91f71fa3 9210 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
12ef3745 9211 {
9212 if (curr_pool)
9213 s390_add_pool_insn (curr_pool, insn);
9214 /* An LTREL_BASE must follow within the same basic block. */
32eda510 9215 gcc_assert (!pending_ltrel);
12ef3745 9216 }
96be3ab6 9217
414bc417 9218 if (NOTE_P (insn))
9219 switch (NOTE_KIND (insn))
9220 {
9221 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9222 section_switch_p = true;
9223 break;
9224 case NOTE_INSN_VAR_LOCATION:
9225 case NOTE_INSN_CALL_ARG_LOCATION:
9226 continue;
9227 default:
9228 break;
9229 }
86428198 9230
f81e845f 9231 if (!curr_pool
0756cebb 9232 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9233 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
4673c1a0 9234 continue;
479ca6e8 9235
dafc8d45 9236 if (TARGET_CPU_ZARCH)
4673c1a0 9237 {
0756cebb 9238 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9239 continue;
479ca6e8 9240
93e0956b 9241 s390_end_pool (curr_pool, NULL);
0756cebb 9242 curr_pool = NULL;
9243 }
9244 else
4673c1a0 9245 {
0756cebb 9246 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
b40da9a7 9247 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
0756cebb 9248 + extra_size;
9249
9250 /* We will later have to insert base register reload insns.
9251 Those will have an effect on code size, which we need to
9252 consider here. This calculation makes rather pessimistic
9253 worst-case assumptions. */
aa90bb35 9254 if (LABEL_P (insn))
0756cebb 9255 extra_size += 6;
0756cebb 9256
9257 if (chunk_size < S390_POOL_CHUNK_MIN
86428198 9258 && curr_pool->size < S390_POOL_CHUNK_MIN
9259 && !section_switch_p)
0756cebb 9260 continue;
9261
9262 /* Pool chunks can only be inserted after BARRIERs ... */
aa90bb35 9263 if (BARRIER_P (insn))
0756cebb 9264 {
9265 s390_end_pool (curr_pool, insn);
9266 curr_pool = NULL;
9267 extra_size = 0;
9268 }
9269
9270 /* ... so if we don't find one in time, create one. */
86428198 9271 else if (chunk_size > S390_POOL_CHUNK_MAX
9272 || curr_pool->size > S390_POOL_CHUNK_MAX
9273 || section_switch_p)
0756cebb 9274 {
93e0956b 9275 rtx_insn *label, *jump, *barrier, *next, *prev;
0756cebb 9276
86428198 9277 if (!section_switch_p)
9278 {
9279 /* We can insert the barrier only after a 'real' insn. */
aa90bb35 9280 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
86428198 9281 continue;
9282 if (get_attr_length (insn) == 0)
9283 continue;
9284 /* Don't separate LTREL_BASE from the corresponding
414bc417 9285 LTREL_OFFSET load. */
86428198 9286 if (pending_ltrel)
9287 continue;
414bc417 9288 next = insn;
9289 do
9290 {
9291 insn = next;
9292 next = NEXT_INSN (insn);
9293 }
9294 while (next
9295 && NOTE_P (next)
9296 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9297 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
86428198 9298 }
9299 else
9300 {
9301 gcc_assert (!pending_ltrel);
9302
9303 /* The old pool has to end before the section switch
9304 note in order to make it part of the current
9305 section. */
9306 insn = PREV_INSN (insn);
9307 }
96be3ab6 9308
b40da9a7 9309 label = gen_label_rtx ();
414bc417 9310 prev = insn;
9311 if (prev && NOTE_P (prev))
9312 prev = prev_nonnote_insn (prev);
9313 if (prev)
9314 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
d53c050c 9315 INSN_LOCATION (prev));
414bc417 9316 else
9317 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
0756cebb 9318 barrier = emit_barrier_after (jump);
9319 insn = emit_label_after (label, barrier);
9320 JUMP_LABEL (jump) = label;
9321 LABEL_NUSES (label) = 1;
9322
96be3ab6 9323 INSN_ADDRESSES_NEW (jump, -1);
9324 INSN_ADDRESSES_NEW (barrier, -1);
0756cebb 9325 INSN_ADDRESSES_NEW (insn, -1);
9326
9327 s390_end_pool (curr_pool, barrier);
9328 curr_pool = NULL;
9329 extra_size = 0;
9330 }
479ca6e8 9331 }
4673c1a0 9332 }
9fa6d5d9 9333
96be3ab6 9334 if (curr_pool)
93e0956b 9335 s390_end_pool (curr_pool, NULL);
32eda510 9336 gcc_assert (!pending_ltrel);
0756cebb 9337
f81e845f 9338 /* Find all labels that are branched into
479ca6e8 9339 from an insn belonging to a different chunk. */
9fa6d5d9 9340
4d6e8511 9341 far_labels = BITMAP_ALLOC (NULL);
a8ef833a 9342
479ca6e8 9343 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4673c1a0 9344 {
c86d86ff 9345 rtx_jump_table_data *table;
245402e7 9346
0756cebb 9347 /* Labels marked with LABEL_PRESERVE_P can be the target
9348 of non-local jumps, so we have to mark them.
9349 The same holds for named labels.
9350
9351 Don't do that, however, if it is the label before
9352 a jump table. */
9353
aa90bb35 9354 if (LABEL_P (insn)
0756cebb 9355 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9356 {
93e0956b 9357 rtx_insn *vec_insn = NEXT_INSN (insn);
77985f1a 9358 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
0756cebb 9359 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9360 }
245402e7 9361 /* Check potential targets in a table jump (casesi_jump). */
9362 else if (tablejump_p (insn, NULL, &table))
9363 {
9364 rtx vec_pat = PATTERN (table);
9365 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9366
9367 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9368 {
9369 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
0756cebb 9370
245402e7 9371 if (s390_find_pool (pool_list, label)
9372 != s390_find_pool (pool_list, insn))
9373 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9374 }
9375 }
9376 /* If we have a direct jump (conditional or unconditional),
9377 check all potential targets. */
aa90bb35 9378 else if (JUMP_P (insn))
479ca6e8 9379 {
245402e7 9380 rtx pat = PATTERN (insn);
0cd9a9a9 9381
245402e7 9382 if (GET_CODE (pat) == PARALLEL)
3c482144 9383 pat = XVECEXP (pat, 0, 0);
9384
245402e7 9385 if (GET_CODE (pat) == SET)
9386 {
96be3ab6 9387 rtx label = JUMP_LABEL (insn);
7a64c761 9388 if (label && !ANY_RETURN_P (label))
479ca6e8 9389 {
245402e7 9390 if (s390_find_pool (pool_list, label)
0756cebb 9391 != s390_find_pool (pool_list, insn))
9392 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
479ca6e8 9393 }
0756cebb 9394 }
245402e7 9395 }
4673c1a0 9396 }
9fa6d5d9 9397
0756cebb 9398 /* Insert base register reload insns before every pool. */
9399
9400 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
96be3ab6 9401 {
ffead1ca 9402 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
20074f87 9403 curr_pool->label);
93e0956b 9404 rtx_insn *insn = curr_pool->first_insn;
96be3ab6 9405 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9406 }
0756cebb 9407
9408 /* Insert base register reload insns at every far label. */
479ca6e8 9409
479ca6e8 9410 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
aa90bb35 9411 if (LABEL_P (insn)
0756cebb 9412 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9413 {
9414 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9415 if (pool)
9416 {
ffead1ca 9417 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
20074f87 9418 pool->label);
96be3ab6 9419 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
0756cebb 9420 }
9421 }
9422
96be3ab6 9423
4d6e8511 9424 BITMAP_FREE (far_labels);
479ca6e8 9425
479ca6e8 9426
9427 /* Recompute insn addresses. */
9428
9429 init_insn_lengths ();
9430 shorten_branches (get_insns ());
4673c1a0 9431
96be3ab6 9432 return pool_list;
9433}
4673c1a0 9434
96be3ab6 9435/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
f81e845f 9436 After we have decided to use this list, finish implementing
20074f87 9437 all changes to the current function as required. */
f81e845f 9438
96be3ab6 9439static void
20074f87 9440s390_chunkify_finish (struct constant_pool *pool_list)
96be3ab6 9441{
96be3ab6 9442 struct constant_pool *curr_pool = NULL;
93e0956b 9443 rtx_insn *insn;
f81e845f 9444
9445
96be3ab6 9446 /* Replace all literal pool references. */
9447
f81e845f 9448 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
96be3ab6 9449 {
12ef3745 9450 if (INSN_P (insn))
20074f87 9451 replace_ltrel_base (&PATTERN (insn));
12ef3745 9452
96be3ab6 9453 curr_pool = s390_find_pool (pool_list, insn);
9454 if (!curr_pool)
9455 continue;
9456
aa90bb35 9457 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
96be3ab6 9458 {
9459 rtx addr, pool_ref = NULL_RTX;
9460 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9461 if (pool_ref)
9462 {
d345b493 9463 if (s390_execute_label (insn))
9464 addr = s390_find_execute (curr_pool, insn);
9465 else
9466 addr = s390_find_constant (curr_pool,
9467 get_pool_constant (pool_ref),
9468 get_pool_mode (pool_ref));
9469
96be3ab6 9470 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9471 INSN_CODE (insn) = -1;
9472 }
96be3ab6 9473 }
9474 }
9475
9476 /* Dump out all literal pools. */
f81e845f 9477
96be3ab6 9478 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
c2c1332a 9479 s390_dump_pool (curr_pool, 0);
f81e845f 9480
96be3ab6 9481 /* Free pool list. */
9482
9483 while (pool_list)
9484 {
9485 struct constant_pool *next = pool_list->next;
9486 s390_free_pool (pool_list);
9487 pool_list = next;
9488 }
9489}
9490
9491/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9492 We have decided we cannot use this list, so revert all changes
9493 to the current function that were done by s390_chunkify_start. */
f81e845f 9494
96be3ab6 9495static void
b40da9a7 9496s390_chunkify_cancel (struct constant_pool *pool_list)
96be3ab6 9497{
9498 struct constant_pool *curr_pool = NULL;
93e0956b 9499 rtx_insn *insn;
96be3ab6 9500
9501 /* Remove all pool placeholder insns. */
9502
9503 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9504 {
9505 /* Did we insert an extra barrier? Remove it. */
93e0956b 9506 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9507 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9508 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
96be3ab6 9509
aa90bb35 9510 if (jump && JUMP_P (jump)
9511 && barrier && BARRIER_P (barrier)
9512 && label && LABEL_P (label)
96be3ab6 9513 && GET_CODE (PATTERN (jump)) == SET
9514 && SET_DEST (PATTERN (jump)) == pc_rtx
9515 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9516 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9517 {
9518 remove_insn (jump);
9519 remove_insn (barrier);
9520 remove_insn (label);
0756cebb 9521 }
4673c1a0 9522
96be3ab6 9523 remove_insn (curr_pool->pool_insn);
9524 }
9525
12ef3745 9526 /* Remove all base register reload insns. */
96be3ab6 9527
9528 for (insn = get_insns (); insn; )
9529 {
93e0956b 9530 rtx_insn *next_insn = NEXT_INSN (insn);
96be3ab6 9531
aa90bb35 9532 if (NONJUMP_INSN_P (insn)
96be3ab6 9533 && GET_CODE (PATTERN (insn)) == SET
9534 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
12ef3745 9535 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
96be3ab6 9536 remove_insn (insn);
4673c1a0 9537
96be3ab6 9538 insn = next_insn;
9539 }
9540
9541 /* Free pool list. */
4673c1a0 9542
0756cebb 9543 while (pool_list)
4673c1a0 9544 {
0756cebb 9545 struct constant_pool *next = pool_list->next;
9546 s390_free_pool (pool_list);
9547 pool_list = next;
4673c1a0 9548 }
4673c1a0 9549}
9550
74d2529d 9551/* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
df82fb76 9552
9553void
3754d046 9554s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
df82fb76 9555{
df82fb76 9556 switch (GET_MODE_CLASS (mode))
9557 {
9558 case MODE_FLOAT:
36868490 9559 case MODE_DECIMAL_FLOAT:
32eda510 9560 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
df82fb76 9561
0f97e0f5 9562 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9563 as_a <scalar_float_mode> (mode), align);
df82fb76 9564 break;
9565
9566 case MODE_INT:
74d2529d 9567 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
af2a449c 9568 mark_symbol_refs_as_used (exp);
df82fb76 9569 break;
9570
76a4c804 9571 case MODE_VECTOR_INT:
9572 case MODE_VECTOR_FLOAT:
9573 {
9574 int i;
9575 machine_mode inner_mode;
9576 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9577
9578 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9579 for (i = 0; i < XVECLEN (exp, 0); i++)
9580 s390_output_pool_entry (XVECEXP (exp, 0, i),
9581 inner_mode,
9582 i == 0
9583 ? align
9584 : GET_MODE_BITSIZE (inner_mode));
9585 }
9586 break;
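      /* For example, a V4SImode pool entry recurses into four SImode
         entries: the first element is emitted with the alignment passed
         in for the whole vector, the remaining ones only with the
         32-bit alignment of the element mode.  */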
9587
df82fb76 9588 default:
32eda510 9589 gcc_unreachable ();
df82fb76 9590 }
9591}
9592
9593
875862bf 9594/* Return an RTL expression representing the value of the return address
9595 for the frame COUNT steps up from the current frame. FRAME is the
9596 frame pointer of that frame. */
0756cebb 9597
875862bf 9598rtx
9599s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
0756cebb 9600{
875862bf 9601 int offset;
9602 rtx addr;
96be3ab6 9603
875862bf 9604 /* Without backchain, we fail for all but the current frame. */
9a2a66ae 9605
875862bf 9606 if (!TARGET_BACKCHAIN && count > 0)
9607 return NULL_RTX;
9a2a66ae 9608
875862bf 9609 /* For the current frame, we need to make sure the initial
9610 value of RETURN_REGNUM is actually saved. */
9a2a66ae 9611
875862bf 9612 if (count == 0)
9a2a66ae 9613 {
1e639cb0 9614 /* On non-z architectures branch splitting could overwrite r14. */
9615 if (TARGET_CPU_ZARCH)
9616 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9617 else
9618 {
9619 cfun_frame_layout.save_return_addr_p = true;
9620 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9621 }
875862bf 9622 }
9a2a66ae 9623
875862bf 9624 if (TARGET_PACKED_STACK)
b5fdc416 9625 offset = -2 * UNITS_PER_LONG;
875862bf 9626 else
b5fdc416 9627 offset = RETURN_REGNUM * UNITS_PER_LONG;
9a2a66ae 9628
29c05e22 9629 addr = plus_constant (Pmode, frame, offset);
875862bf 9630 addr = memory_address (Pmode, addr);
9631 return gen_rtx_MEM (Pmode, addr);
9632}
9a2a66ae 9633
875862bf 9634/* Return an RTL expression representing the back chain stored in
9635 the current stack frame. */
5fe74ca1 9636
875862bf 9637rtx
9638s390_back_chain_rtx (void)
9639{
9640 rtx chain;
5fe74ca1 9641
875862bf 9642 gcc_assert (TARGET_BACKCHAIN);
5fe74ca1 9643
875862bf 9644 if (TARGET_PACKED_STACK)
29c05e22 9645 chain = plus_constant (Pmode, stack_pointer_rtx,
b5fdc416 9646 STACK_POINTER_OFFSET - UNITS_PER_LONG);
875862bf 9647 else
9648 chain = stack_pointer_rtx;
5fe74ca1 9649
875862bf 9650 chain = gen_rtx_MEM (Pmode, chain);
9651 return chain;
9652}
9a2a66ae 9653
875862bf 9654/* Find first call clobbered register unused in a function.
9655 This could be used as base register in a leaf function
9656 or for holding the return address before epilogue. */
9a2a66ae 9657
875862bf 9658static int
9659find_unused_clobbered_reg (void)
9660{
9661 int i;
9662 for (i = 0; i < 6; i++)
3072d30e 9663 if (!df_regs_ever_live_p (i))
875862bf 9664 return i;
9665 return 0;
9666}
9a2a66ae 9667
1e639cb0 9668
ffead1ca 9669/* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
1e639cb0 9670 clobbered hard regs in SETREG. */
9671
9672static void
81a410b1 9673s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
1e639cb0 9674{
ff4ce128 9675 char *regs_ever_clobbered = (char *)data;
1e639cb0 9676 unsigned int i, regno;
3754d046 9677 machine_mode mode = GET_MODE (setreg);
1e639cb0 9678
9679 if (GET_CODE (setreg) == SUBREG)
9680 {
9681 rtx inner = SUBREG_REG (setreg);
5ada7a14 9682 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
1e639cb0 9683 return;
9684 regno = subreg_regno (setreg);
9685 }
5ada7a14 9686 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
1e639cb0 9687 regno = REGNO (setreg);
9688 else
9689 return;
9690
9691 for (i = regno;
16b9e38b 9692 i < end_hard_regno (mode, regno);
1e639cb0 9693 i++)
9694 regs_ever_clobbered[i] = 1;
9695}
9696
9697/* Walks through all basic blocks of the current function looking
 9698 for clobbered hard regs using s390_reg_clobbered_rtx. The elements
 9699 of the passed char array REGS_EVER_CLOBBERED are set to one for
9700 each of those regs. */
9701
9702static void
ff4ce128 9703s390_regs_ever_clobbered (char regs_ever_clobbered[])
1e639cb0 9704{
9705 basic_block cur_bb;
93e0956b 9706 rtx_insn *cur_insn;
1e639cb0 9707 unsigned int i;
9708
ff4ce128 9709 memset (regs_ever_clobbered, 0, 32);
1e639cb0 9710
9711 /* For non-leaf functions we have to consider all call clobbered regs to be
9712 clobbered. */
d5bf7b64 9713 if (!crtl->is_leaf)
1e639cb0 9714 {
5ada7a14 9715 for (i = 0; i < 32; i++)
1e639cb0 9716 regs_ever_clobbered[i] = call_really_used_regs[i];
9717 }
9718
9719 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9720 this work is done by liveness analysis (mark_regs_live_at_end).
9721 Special care is needed for functions containing landing pads. Landing pads
9722 may use the eh registers, but the code which sets these registers is not
9723 contained in that function. Hence s390_regs_ever_clobbered is not able to
9724 deal with this automatically. */
18d50ae6 9725 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
1e639cb0 9726 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
ffead1ca 9727 if (crtl->calls_eh_return
9728 || (cfun->machine->has_landing_pad_p
3072d30e 9729 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
220be973 9730 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
1e639cb0 9731
9732 /* For nonlocal gotos all call-saved registers have to be saved.
9733 This flag is also set for the unwinding code in libgcc.
9734 See expand_builtin_unwind_init. For regs_ever_live this is done by
9735 reload. */
ff4ce128 9736 if (crtl->saves_all_registers)
5ada7a14 9737 for (i = 0; i < 32; i++)
1e639cb0 9738 if (!call_really_used_regs[i])
9739 regs_ever_clobbered[i] = 1;
9740
fc00614f 9741 FOR_EACH_BB_FN (cur_bb, cfun)
1e639cb0 9742 {
9743 FOR_BB_INSNS (cur_bb, cur_insn)
9744 {
ff4ce128 9745 rtx pat;
9746
9747 if (!INSN_P (cur_insn))
9748 continue;
9749
9750 pat = PATTERN (cur_insn);
9751
9752 /* Ignore GPR restore insns. */
9753 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9754 {
9755 if (GET_CODE (pat) == SET
9756 && GENERAL_REG_P (SET_DEST (pat)))
9757 {
9758 /* lgdr */
9759 if (GET_MODE (SET_SRC (pat)) == DImode
9760 && FP_REG_P (SET_SRC (pat)))
9761 continue;
9762
9763 /* l / lg */
9764 if (GET_CODE (SET_SRC (pat)) == MEM)
9765 continue;
9766 }
9767
9768 /* lm / lmg */
9769 if (GET_CODE (pat) == PARALLEL
9770 && load_multiple_operation (pat, VOIDmode))
9771 continue;
9772 }
9773
9774 note_stores (pat,
9775 s390_reg_clobbered_rtx,
9776 regs_ever_clobbered);
1e639cb0 9777 }
9778 }
9779}
9780
ffead1ca 9781/* Determine the frame area which actually has to be accessed
9782 in the function epilogue. The values are stored at the
875862bf 9783 given pointers AREA_BOTTOM (address of the lowest used stack
ffead1ca 9784 address) and AREA_TOP (address of the first item which does
875862bf 9785 not belong to the stack frame). */
5fe74ca1 9786
875862bf 9787static void
9788s390_frame_area (int *area_bottom, int *area_top)
9789{
9790 int b, t;
5fe74ca1 9791
875862bf 9792 b = INT_MAX;
9793 t = INT_MIN;
67928721 9794
9795 if (cfun_frame_layout.first_restore_gpr != -1)
9796 {
9797 b = (cfun_frame_layout.gprs_offset
b5fdc416 9798 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
67928721 9799 t = b + (cfun_frame_layout.last_restore_gpr
b5fdc416 9800 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
67928721 9801 }
9802
9803 if (TARGET_64BIT && cfun_save_high_fprs_p)
9804 {
9805 b = MIN (b, cfun_frame_layout.f8_offset);
9806 t = MAX (t, (cfun_frame_layout.f8_offset
9807 + cfun_frame_layout.high_fprs * 8));
9808 }
9809
9810 if (!TARGET_64BIT)
29439367 9811 {
6a2469fe 9812 if (cfun_fpr_save_p (FPR4_REGNUM))
67928721 9813 {
29439367 9814 b = MIN (b, cfun_frame_layout.f4_offset);
9815 t = MAX (t, cfun_frame_layout.f4_offset + 8);
67928721 9816 }
6a2469fe 9817 if (cfun_fpr_save_p (FPR6_REGNUM))
29439367 9818 {
9819 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9820 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9821 }
9822 }
67928721 9823 *area_bottom = b;
9824 *area_top = t;
9825}
ff4ce128 9826/* Update gpr_save_slots in the frame layout trying to make use of
9827 FPRs as GPR save slots.
9828 This is a helper routine of s390_register_info. */
8b4a4127 9829
9830static void
ff4ce128 9831s390_register_info_gprtofpr ()
8b4a4127 9832{
ff4ce128 9833 int save_reg_slot = FPR0_REGNUM;
8b4a4127 9834 int i, j;
8b4a4127 9835
ff4ce128 9836 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9837 return;
1e639cb0 9838
a8078ffb 9839 /* builtin_eh_return needs to be able to modify the return address
9840 on the stack. It could also adjust the FPR save slot instead but
9841 is it worth the trouble?! */
9842 if (crtl->calls_eh_return)
9843 return;
9844
ff4ce128 9845 for (i = 15; i >= 6; i--)
5ada7a14 9846 {
1d3cea74 9847 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
ff4ce128 9848 continue;
68bc0408 9849
ff4ce128 9850 /* Advance to the next FP register which can be used as a
9851 GPR save slot. */
9852 while ((!call_really_used_regs[save_reg_slot]
9853 || df_regs_ever_live_p (save_reg_slot)
9854 || cfun_fpr_save_p (save_reg_slot))
9855 && FP_REGNO_P (save_reg_slot))
9856 save_reg_slot++;
9857 if (!FP_REGNO_P (save_reg_slot))
9858 {
9859 /* We only want to use ldgr/lgdr if we can get rid of
9860 stm/lm entirely. So undo the gpr slot allocation in
9861 case we ran out of FPR save slots. */
9862 for (j = 6; j <= 15; j++)
9863 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
1d3cea74 9864 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
ff4ce128 9865 break;
68bc0408 9866 }
ff4ce128 9867 cfun_gpr_save_slot (i) = save_reg_slot++;
5ada7a14 9868 }
ff4ce128 9869}
5ada7a14 9870
ff4ce128 9871/* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9872 stdarg.
9873 This is a helper routine for s390_register_info. */
1e639cb0 9874
ff4ce128 9875static void
9876s390_register_info_stdarg_fpr ()
9877{
9878 int i;
9879 int min_fpr;
9880 int max_fpr;
9881
 9882 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
 9883 f0, f2, f4, f6 for 64 bit. */
9884 if (!cfun->stdarg
9885 || !TARGET_HARD_FLOAT
9886 || !cfun->va_list_fpr_size
9887 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9888 return;
9889
9890 min_fpr = crtl->args.info.fprs;
1d3cea74 9891 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9892 if (max_fpr >= FP_ARG_NUM_REG)
9893 max_fpr = FP_ARG_NUM_REG - 1;
ff4ce128 9894
1d3cea74 9895 /* FPR argument regs start at f0. */
9896 min_fpr += FPR0_REGNUM;
9897 max_fpr += FPR0_REGNUM;
9898
9899 for (i = min_fpr; i <= max_fpr; i++)
9900 cfun_set_fpr_save (i);
ff4ce128 9901}
9902
9903/* Reserve the GPR save slots for GPRs which need to be saved due to
9904 stdarg.
9905 This is a helper routine for s390_register_info. */
9906
9907static void
9908s390_register_info_stdarg_gpr ()
9909{
9910 int i;
9911 int min_gpr;
9912 int max_gpr;
9913
9914 if (!cfun->stdarg
9915 || !cfun->va_list_gpr_size
9916 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9917 return;
9918
9919 min_gpr = crtl->args.info.gprs;
1d3cea74 9920 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9921 if (max_gpr >= GP_ARG_NUM_REG)
9922 max_gpr = GP_ARG_NUM_REG - 1;
9923
9924 /* GPR argument regs start at r2. */
9925 min_gpr += GPR2_REGNUM;
9926 max_gpr += GPR2_REGNUM;
9927
9928 /* If r6 was supposed to be saved into an FPR and now needs to go to
 9929 the stack for vararg, we have to adjust the restore range to make
 9930 sure that the restore is done from the stack as well. */
9931 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9932 && min_gpr <= GPR6_REGNUM
9933 && max_gpr >= GPR6_REGNUM)
9934 {
9935 if (cfun_frame_layout.first_restore_gpr == -1
9936 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9937 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9938 if (cfun_frame_layout.last_restore_gpr == -1
9939 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9940 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9941 }
9942
9943 if (cfun_frame_layout.first_save_gpr == -1
9944 || cfun_frame_layout.first_save_gpr > min_gpr)
9945 cfun_frame_layout.first_save_gpr = min_gpr;
9946
9947 if (cfun_frame_layout.last_save_gpr == -1
9948 || cfun_frame_layout.last_save_gpr < max_gpr)
9949 cfun_frame_layout.last_save_gpr = max_gpr;
9950
9951 for (i = min_gpr; i <= max_gpr; i++)
9952 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9953}
9954
9955/* Calculate the save and restore ranges for stm(g) and lm(g) in the
9956 prologue and epilogue. */
ff4ce128 9957
1d3cea74 9958static void
9959s390_register_info_set_ranges ()
9960{
9961 int i, j;
9962
 9963 /* Find the first and the last save slot that are supposed to use the
 9964 stack, and use them to set the restore range.
9965 Vararg regs might be marked as save to stack but only the
9966 call-saved regs really need restoring (i.e. r6). This code
9967 assumes that the vararg regs have not yet been recorded in
9968 cfun_gpr_save_slot. */
9969 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9970 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9971 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9972 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
1d3cea74 9973 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9974 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
ff4ce128 9975}
9976
9977/* The GPR and FPR save slots in cfun->machine->frame_layout are set
9978 for registers which need to be saved in function prologue.
9979 This function can be used until the insns emitted for save/restore
9980 of the regs are visible in the RTL stream. */
9981
9982static void
9983s390_register_info ()
9984{
1d3cea74 9985 int i;
ff4ce128 9986 char clobbered_regs[32];
9987
9988 gcc_assert (!epilogue_completed);
9989
9990 if (reload_completed)
9991 /* After reload we rely on our own routine to determine which
9992 registers need saving. */
9993 s390_regs_ever_clobbered (clobbered_regs);
9994 else
9995 /* During reload we use regs_ever_live as a base since reload
9996 does changes in there which we otherwise would not be aware
9997 of. */
9998 for (i = 0; i < 32; i++)
9999 clobbered_regs[i] = df_regs_ever_live_p (i);
10000
10001 for (i = 0; i < 32; i++)
10002 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10003
10004 /* Mark the call-saved FPRs which need to be saved.
10005 This needs to be done before checking the special GPRs since the
10006 stack pointer usage depends on whether high FPRs have to be saved
10007 or not. */
10008 cfun_frame_layout.fpr_bitmap = 0;
10009 cfun_frame_layout.high_fprs = 0;
10010 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10011 if (clobbered_regs[i] && !call_really_used_regs[i])
10012 {
10013 cfun_set_fpr_save (i);
10014 if (i >= FPR8_REGNUM)
10015 cfun_frame_layout.high_fprs++;
10016 }
9a2a66ae 10017
c6d481f7 10018 /* Register 12 is used for GOT address, but also as temp in prologue
10019 for split-stack stdarg functions (unless r14 is available). */
10020 clobbered_regs[12]
10021 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10022 || (flag_split_stack && cfun->stdarg
10023 && (crtl->is_leaf || TARGET_TPF_PROFILING
10024 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
4fed3f99 10025
ffead1ca 10026 clobbered_regs[BASE_REGNUM]
77beec48 10027 |= (cfun->machine->base_reg
ff4ce128 10028 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
4fed3f99 10029
ff4ce128 10030 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
10031 |= !!frame_pointer_needed;
10032
10033 /* On pre z900 machines this might take until machine dependent
10034 reorg to decide.
10035 save_return_addr_p will only be set on non-zarch machines so
10036 there is no risk that r14 goes into an FPR instead of a stack
10037 slot. */
1e639cb0 10038 clobbered_regs[RETURN_REGNUM]
d5bf7b64 10039 |= (!crtl->is_leaf
9bee2845 10040 || TARGET_TPF_PROFILING
77beec48 10041 || cfun->machine->split_branches_pending_p
10042 || cfun_frame_layout.save_return_addr_p
ff4ce128 10043 || crtl->calls_eh_return);
4fed3f99 10044
1e639cb0 10045 clobbered_regs[STACK_POINTER_REGNUM]
d5bf7b64 10046 |= (!crtl->is_leaf
77beec48 10047 || TARGET_TPF_PROFILING
10048 || cfun_save_high_fprs_p
10049 || get_frame_size () > 0
68bc0408 10050 || (reload_completed && cfun_frame_layout.frame_size > 0)
ff4ce128 10051 || cfun->calls_alloca);
10052
1d3cea74 10053 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
1e639cb0 10054
beee1f75 10055 for (i = 6; i < 16; i++)
ff4ce128 10056 if (clobbered_regs[i])
1d3cea74 10057 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9a2a66ae 10058
ff4ce128 10059 s390_register_info_stdarg_fpr ();
10060 s390_register_info_gprtofpr ();
1d3cea74 10061 s390_register_info_set_ranges ();
ff4ce128 10062 /* stdarg functions might need to save GPRs 2 to 6. This might
1d3cea74 10063 override the GPR->FPR save decision made by
10064 s390_register_info_gprtofpr for r6 since vararg regs must go to
10065 the stack. */
ff4ce128 10066 s390_register_info_stdarg_gpr ();
ff4ce128 10067}
9a2a66ae 10068
ff4ce128 10069/* This function is called by s390_optimize_prologue in order to get
10070 rid of unnecessary GPR save/restore instructions. The register info
10071 for the GPRs is re-computed and the ranges are re-calculated. */
6902d973 10072
ff4ce128 10073static void
10074s390_optimize_register_info ()
10075{
10076 char clobbered_regs[32];
1d3cea74 10077 int i;
6902d973 10078
ff4ce128 10079 gcc_assert (epilogue_completed);
10080 gcc_assert (!cfun->machine->split_branches_pending_p);
beee1f75 10081
ff4ce128 10082 s390_regs_ever_clobbered (clobbered_regs);
6902d973 10083
ff4ce128 10084 for (i = 0; i < 32; i++)
10085 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
6902d973 10086
ff4ce128 10087 /* There is still special treatment needed for cases invisible to
10088 s390_regs_ever_clobbered. */
10089 clobbered_regs[RETURN_REGNUM]
10090 |= (TARGET_TPF_PROFILING
10091 /* When expanding builtin_return_addr in ESA mode we do not
10092 know whether r14 will later be needed as scratch reg when
10093 doing branch splitting. So the builtin always accesses the
10094 r14 save slot and we need to stick to the save/restore
10095 decision for r14 even if it turns out that it didn't get
10096 clobbered. */
10097 || cfun_frame_layout.save_return_addr_p
10098 || crtl->calls_eh_return);
10099
1d3cea74 10100 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
ff4ce128 10101
10102 for (i = 6; i < 16; i++)
10103 if (!clobbered_regs[i])
1d3cea74 10104 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
ff4ce128 10105
1d3cea74 10106 s390_register_info_set_ranges ();
ff4ce128 10107 s390_register_info_stdarg_gpr ();
67928721 10108}
10109
4fed3f99 10110/* Fill cfun->machine with info about frame of current function. */
67928721 10111
10112static void
4fed3f99 10113s390_frame_info (void)
67928721 10114{
62eb9236 10115 HOST_WIDE_INT lowest_offset;
67928721 10116
ff4ce128 10117 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10118 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10119
10120 /* The va_arg builtin uses a constant distance of 16 *
10121 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10122 pointer. So even if we are going to save the stack pointer in an
10123 FPR we need the stack space in order to keep the offsets
10124 correct. */
10125 if (cfun->stdarg && cfun_save_arg_fprs_p)
10126 {
10127 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10128
10129 if (cfun_frame_layout.first_save_gpr_slot == -1)
10130 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10131 }
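  /* Worked example for the comment above, assuming UNITS_PER_LONG == 8
     (64-bit code): va_arg expects the FPRs to start 16 * 8 == 128 bytes
     beyond the reg_save_area pointer; on 31 bit (UNITS_PER_LONG == 4)
     the distance is 16 * 4 == 64 bytes.  */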
10132
67928721 10133 cfun_frame_layout.frame_size = get_frame_size ();
67928721 10134 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
c05be867 10135 fatal_error (input_location,
10136 "total size of local variables exceeds architecture limit");
ffead1ca 10137
646a946e 10138 if (!TARGET_PACKED_STACK)
67928721 10139 {
62eb9236 10140 /* Fixed stack layout. */
67928721 10141 cfun_frame_layout.backchain_offset = 0;
b5fdc416 10142 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
67928721 10143 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10144 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
5214e6ae 10145 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
b5fdc416 10146 * UNITS_PER_LONG);
67928721 10147 }
62eb9236 10148 else if (TARGET_BACKCHAIN)
67928721 10149 {
62eb9236 10150 /* Kernel stack layout - packed stack, backchain, no float */
10151 gcc_assert (TARGET_SOFT_FLOAT);
67928721 10152 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
b5fdc416 10153 - UNITS_PER_LONG);
62eb9236 10154
10155 /* The distance between the backchain and the return address
10156 save slot must not change. So we always need a slot for the
10157 stack pointer which resides in between. */
10158 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10159
ffead1ca 10160 cfun_frame_layout.gprs_offset
62eb9236 10161 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
ffead1ca 10162
62eb9236 10163 /* FPRs will not be saved. Nevertheless pick sane values to
10164 keep area calculations valid. */
10165 cfun_frame_layout.f0_offset =
10166 cfun_frame_layout.f4_offset =
10167 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
67928721 10168 }
62eb9236 10169 else
67928721 10170 {
031bdf83 10171 int num_fprs;
10172
62eb9236 10173 /* Packed stack layout without backchain. */
ffead1ca 10174
031bdf83 10175 /* With stdarg FPRs need their dedicated slots. */
10176 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10177 : (cfun_fpr_save_p (FPR4_REGNUM) +
10178 cfun_fpr_save_p (FPR6_REGNUM)));
10179 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10180
10181 num_fprs = (cfun->stdarg ? 2
10182 : (cfun_fpr_save_p (FPR0_REGNUM)
10183 + cfun_fpr_save_p (FPR2_REGNUM)));
10184 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
ffead1ca 10185
10186 cfun_frame_layout.gprs_offset
67928721 10187 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
62eb9236 10188
10189 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10190 - cfun_frame_layout.high_fprs * 8);
67928721 10191 }
10192
62eb9236 10193 if (cfun_save_high_fprs_p)
10194 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10195
10196 if (!crtl->is_leaf)
10197 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10198
10199 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
10200 sized area at the bottom of the stack. This is required also for
10201 leaf functions. When GCC generates a local stack reference it
10202 will always add STACK_POINTER_OFFSET to all these references. */
d5bf7b64 10203 if (crtl->is_leaf
67928721 10204 && !TARGET_TPF_PROFILING
10205 && cfun_frame_layout.frame_size == 0
ff4ce128 10206 && !cfun->calls_alloca)
67928721 10207 return;
10208
62eb9236 10209 /* Calculate the number of bytes we have used in our own register
10210 save area. With the packed stack layout we can re-use the
10211 remaining bytes for normal stack elements. */
67928721 10212
62eb9236 10213 if (TARGET_PACKED_STACK)
10214 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10215 cfun_frame_layout.f4_offset),
10216 cfun_frame_layout.gprs_offset);
10217 else
10218 lowest_offset = 0;
ffead1ca 10219
62eb9236 10220 if (TARGET_BACKCHAIN)
10221 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
ffead1ca 10222
62eb9236 10223 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
67928721 10224
62eb9236 10225 /* Under 31 bit, if an odd number of GPRs has to be saved, we have to
 10226 adjust the frame size to sustain 8-byte alignment of stack
 10227 frames. */
10228 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10229 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10230 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
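  /* Worked example, assuming STACK_BOUNDARY == 64 (8-byte stack
     alignment): a frame_size of 100 becomes (100 + 7) & ~7 == 104.  */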
8b4a4127 10231}
10232
4fed3f99 10233/* Generate frame layout. Fills in register and frame data for the current
10234 function in cfun->machine. This routine can be called multiple times;
10235 it will re-do the complete frame layout every time. */
8b4a4127 10236
4fed3f99 10237static void
10238s390_init_frame_layout (void)
4673c1a0 10239{
4fed3f99 10240 HOST_WIDE_INT frame_size;
10241 int base_used;
ff4ce128 10242
b85ca4c8 10243 /* After LRA the frame layout is supposed to be read-only and should
10244 not be re-computed. */
10245 if (reload_completed)
10246 return;
beee1f75 10247
4fed3f99 10248 /* On S/390 machines, we may need to perform branch splitting, which
 10249 will require both the base and the return address register. We have no
 10250 choice but to assume we're going to need them until right at the
 10251 end of the machine-dependent reorg phase. */
10252 if (!TARGET_CPU_ZARCH)
10253 cfun->machine->split_branches_pending_p = true;
10254
10255 do
10256 {
10257 frame_size = cfun_frame_layout.frame_size;
10258
10259 /* Try to predict whether we'll need the base register. */
10260 base_used = cfun->machine->split_branches_pending_p
18d50ae6 10261 || crtl->uses_const_pool
3ea2a559 10262 || (!DISP_IN_RANGE (frame_size)
10263 && !CONST_OK_FOR_K (frame_size));
4fed3f99 10264
10265 /* Decide which register to use as literal pool base. In small
10266 leaf functions, try to use an unused call-clobbered register
10267 as base register to avoid save/restore overhead. */
10268 if (!base_used)
10269 cfun->machine->base_reg = NULL_RTX;
4fed3f99 10270 else
fee9fc9f 10271 {
10272 int br = 0;
10273
10274 if (crtl->is_leaf)
10275 /* Prefer r5 (most likely to be free). */
10276 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10277 ;
10278 cfun->machine->base_reg =
009c4697 10279 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
fee9fc9f 10280 }
67928721 10281
ff4ce128 10282 s390_register_info ();
4fed3f99 10283 s390_frame_info ();
10284 }
10285 while (frame_size != cfun_frame_layout.frame_size);
4673c1a0 10286}
10287
5ada7a14 10288/* Remove the FPR clobbers from a tbegin insn if it can be proven that
10289 the TX is nonescaping. A transaction is considered escaping if
10290 there is at least one path from tbegin returning CC0 to the
 10291 function exit block without a tend.
10292
10293 The check so far has some limitations:
10294 - only single tbegin/tend BBs are supported
10295 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10296 - when CC is copied to a GPR and the CC0 check is done with the GPR
10297 this is not supported
10298*/
10299
10300static void
10301s390_optimize_nonescaping_tx (void)
10302{
10303 const unsigned int CC0 = 1 << 3;
10304 basic_block tbegin_bb = NULL;
10305 basic_block tend_bb = NULL;
10306 basic_block bb;
93e0956b 10307 rtx_insn *insn;
5ada7a14 10308 bool result = true;
10309 int bb_index;
93e0956b 10310 rtx_insn *tbegin_insn = NULL;
5ada7a14 10311
10312 if (!cfun->machine->tbegin_p)
10313 return;
10314
a28770e1 10315 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
5ada7a14 10316 {
f5a6b05f 10317 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
5ada7a14 10318
91dfd73e 10319 if (!bb)
10320 continue;
10321
5ada7a14 10322 FOR_BB_INSNS (bb, insn)
10323 {
10324 rtx ite, cc, pat, target;
10325 unsigned HOST_WIDE_INT mask;
10326
10327 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10328 continue;
10329
10330 pat = PATTERN (insn);
10331
10332 if (GET_CODE (pat) == PARALLEL)
10333 pat = XVECEXP (pat, 0, 0);
10334
10335 if (GET_CODE (pat) != SET
10336 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10337 continue;
10338
10339 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10340 {
91a55c11 10341 rtx_insn *tmp;
5ada7a14 10342
10343 tbegin_insn = insn;
10344
10345 /* Just return if the tbegin doesn't have clobbers. */
10346 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10347 return;
10348
10349 if (tbegin_bb != NULL)
10350 return;
10351
10352 /* Find the next conditional jump. */
10353 for (tmp = NEXT_INSN (insn);
10354 tmp != NULL_RTX;
10355 tmp = NEXT_INSN (tmp))
10356 {
10357 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10358 return;
10359 if (!JUMP_P (tmp))
10360 continue;
10361
10362 ite = SET_SRC (PATTERN (tmp));
10363 if (GET_CODE (ite) != IF_THEN_ELSE)
10364 continue;
10365
10366 cc = XEXP (XEXP (ite, 0), 0);
10367 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10368 || GET_MODE (cc) != CCRAWmode
10369 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10370 return;
10371
10372 if (bb->succs->length () != 2)
10373 return;
10374
10375 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10376 if (GET_CODE (XEXP (ite, 0)) == NE)
10377 mask ^= 0xf;
10378
10379 if (mask == CC0)
10380 target = XEXP (ite, 1);
10381 else if (mask == (CC0 ^ 0xf))
10382 target = XEXP (ite, 2);
10383 else
10384 return;
10385
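		  /* Determine which successor edge is the fall-through edge
		     so that, together with the jump target, we can tell
		     which basic block is entered on the CC0 path.  */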
10386 {
10387 edge_iterator ei;
10388 edge e1, e2;
10389
10390 ei = ei_start (bb->succs);
10391 e1 = ei_safe_edge (ei);
10392 ei_next (&ei);
10393 e2 = ei_safe_edge (ei);
10394
10395 if (e2->flags & EDGE_FALLTHRU)
10396 {
10397 e2 = e1;
10398 e1 = ei_safe_edge (ei);
10399 }
10400
10401 if (!(e1->flags & EDGE_FALLTHRU))
10402 return;
10403
10404 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10405 }
10406 if (tmp == BB_END (bb))
10407 break;
10408 }
10409 }
10410
10411 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10412 {
10413 if (tend_bb != NULL)
10414 return;
10415 tend_bb = bb;
10416 }
10417 }
10418 }
10419
10420 /* Either we successfully remove the FPR clobbers here or we are not
10421 able to do anything for this TX. Both cases don't qualify for
10422 another look. */
10423 cfun->machine->tbegin_p = false;
10424
10425 if (tbegin_bb == NULL || tend_bb == NULL)
10426 return;
10427
10428 calculate_dominance_info (CDI_POST_DOMINATORS);
10429 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10430 free_dominance_info (CDI_POST_DOMINATORS);
10431
10432 if (!result)
10433 return;
10434
91dfd73e 10435 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10436 gen_rtvec (2,
10437 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10438 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
5ada7a14 10439 INSN_CODE (tbegin_insn) = -1;
10440 df_insn_rescan (tbegin_insn);
10441
10442 return;
10443}
10444
74f68e49 10445/* Implement TARGET_HARD_REGNO_NREGS. Because all registers in a class
10446 have the same size, this is equivalent to CLASS_MAX_NREGS. */
10447
10448static unsigned int
10449s390_hard_regno_nregs (unsigned int regno, machine_mode mode)
10450{
10451 return s390_class_max_nregs (REGNO_REG_CLASS (regno), mode);
10452}
10453
10454/* Implement TARGET_HARD_REGNO_MODE_OK.
10455
10456 Integer modes <= word size fit into any GPR.
10457 Integer modes > word size fit into successive GPRs, starting with
10458 an even-numbered register.
10459 SImode and DImode fit into FPRs as well.
10460
10461 Floating point modes <= word size fit into any FPR or GPR.
10462 Floating point modes > word size (i.e. DFmode on 32-bit) fit
10463 into any FPR, or an even-odd GPR pair.
10464 TFmode fits only into an even-odd FPR pair.
10465
10466 Complex floating point modes fit either into two FPRs, or into
10467 successive GPRs (again starting with an even number).
10468 TCmode fits only into two successive even-odd FPR pairs.
10469
10470 Condition code modes fit only into the CC register. */
8f1128bb 10471
b395382f 10472static bool
3754d046 10473s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
8f1128bb 10474{
76a4c804 10475 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10476 return false;
10477
8f1128bb 10478 switch (REGNO_REG_CLASS (regno))
10479 {
76a4c804 10480 case VEC_REGS:
10481 return ((GET_MODE_CLASS (mode) == MODE_INT
10482 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10483 || mode == DFmode
80912819 10484 || (TARGET_VXE && mode == SFmode)
76a4c804 10485 || s390_vector_mode_supported_p (mode));
10486 break;
8f1128bb 10487 case FP_REGS:
76a4c804 10488 if (TARGET_VX
10489 && ((GET_MODE_CLASS (mode) == MODE_INT
10490 && s390_class_max_nregs (FP_REGS, mode) == 1)
10491 || mode == DFmode
10492 || s390_vector_mode_supported_p (mode)))
10493 return true;
10494
8f1128bb 10495 if (REGNO_PAIR_OK (regno, mode))
10496 {
10497 if (mode == SImode || mode == DImode)
10498 return true;
10499
10500 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10501 return true;
10502 }
10503 break;
10504 case ADDR_REGS:
10505 if (FRAME_REGNO_P (regno) && mode == Pmode)
10506 return true;
10507
10508 /* fallthrough */
10509 case GENERAL_REGS:
10510 if (REGNO_PAIR_OK (regno, mode))
10511 {
b5fdc416 10512 if (TARGET_ZARCH
36868490 10513 || (mode != TFmode && mode != TCmode && mode != TDmode))
8f1128bb 10514 return true;
ffead1ca 10515 }
8f1128bb 10516 break;
10517 case CC_REGS:
10518 if (GET_MODE_CLASS (mode) == MODE_CC)
10519 return true;
10520 break;
10521 case ACCESS_REGS:
10522 if (REGNO_PAIR_OK (regno, mode))
10523 {
10524 if (mode == SImode || mode == Pmode)
10525 return true;
10526 }
10527 break;
10528 default:
10529 return false;
10530 }
ffead1ca 10531
8f1128bb 10532 return false;
10533}
10534
5f6dcf1a 10535/* Implement TARGET_MODES_TIEABLE_P. */
10536
10537static bool
10538s390_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10539{
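  /* SFmode and DFmode are tieable only with each other; all remaining
     modes are tieable among themselves.  */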
10540 return ((mode1 == SFmode || mode1 == DFmode)
10541 == (mode2 == SFmode || mode2 == DFmode));
10542}
10543
d1a5573e 10544/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10545
10546bool
10547s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10548{
10549 /* Once we've decided upon a register to use as base register, it must
10550 no longer be used for any other purpose. */
10551 if (cfun->machine->base_reg)
10552 if (REGNO (cfun->machine->base_reg) == old_reg
10553 || REGNO (cfun->machine->base_reg) == new_reg)
10554 return false;
10555
ff4ce128 10556 /* Prevent regrename from using call-saved regs which haven't
10557 actually been saved. This is necessary since regrename assumes
10558 the backend save/restore decisions are based on
10559 df_regs_ever_live. Since we have our own routine we have to tell
10560 regrename manually about it. */
10561 if (GENERAL_REGNO_P (new_reg)
10562 && !call_really_used_regs[new_reg]
1d3cea74 10563 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
ff4ce128 10564 return false;
10565
10566 return true;
10567}
10568
10569/* Return nonzero if register REGNO can be used as a scratch register
10570 in peephole2. */
10571
10572static bool
10573s390_hard_regno_scratch_ok (unsigned int regno)
10574{
10575 /* See s390_hard_regno_rename_ok. */
10576 if (GENERAL_REGNO_P (regno)
10577 && !call_really_used_regs[regno]
1d3cea74 10578 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
ff4ce128 10579 return false;
10580
d1a5573e 10581 return true;
10582}
10583
5da94e60 10584/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. When generating
10585 code that runs in z/Architecture mode, but conforms to the 31-bit
10586 ABI, GPRs can hold 8 bytes; the ABI guarantees only that the lower 4
10587 bytes are saved across calls, however. */
10588
10589static bool
10590s390_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
10591{
10592 if (!TARGET_64BIT
10593 && TARGET_ZARCH
10594 && GET_MODE_SIZE (mode) > 4
10595 && ((regno >= 6 && regno <= 15) || regno == 32))
10596 return true;
10597
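  /* With the vector ABI only the FPR part (the low 8 bytes) of the
     call-saved FPRs f8-f15 (64 bit) resp. f4/f6 (31 bit) is preserved,
     so wider values residing there are partially clobbered.  */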
10598 if (TARGET_VX
10599 && GET_MODE_SIZE (mode) > 8
10600 && (((TARGET_64BIT && regno >= 24 && regno <= 31))
10601 || (!TARGET_64BIT && (regno == 18 || regno == 19))))
10602 return true;
10603
10604 return false;
10605}
10606
8f1128bb 10607/* Maximum number of registers to represent a value of mode MODE
8deb3959 10608 in a register of class RCLASS. */
8f1128bb 10609
6c2d82ab 10610int
3754d046 10611s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
8f1128bb 10612{
76a4c804 10613 int reg_size;
10614 bool reg_pair_required_p = false;
10615
8deb3959 10616 switch (rclass)
8f1128bb 10617 {
10618 case FP_REGS:
76a4c804 10619 case VEC_REGS:
10620 reg_size = TARGET_VX ? 16 : 8;
10621
10622 /* TF and TD modes would fit into a VR but we put them into a
10623 register pair since we do not have 128bit FP instructions on
10624 full VRs. */
10625 if (TARGET_VX
10626 && SCALAR_FLOAT_MODE_P (mode)
10627 && GET_MODE_SIZE (mode) >= 16)
10628 reg_pair_required_p = true;
10629
10630 /* Even if complex types would fit into a single FPR/VR we force
10631 them into a register pair to deal with the parts more easily.
10632 (FIXME: What about complex ints?) */
8f1128bb 10633 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
76a4c804 10634 reg_pair_required_p = true;
10635 break;
8f1128bb 10636 case ACCESS_REGS:
76a4c804 10637 reg_size = 4;
10638 break;
8f1128bb 10639 default:
76a4c804 10640 reg_size = UNITS_PER_WORD;
8f1128bb 10641 break;
10642 }
76a4c804 10643
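  /* For a register pair each half of the value is rounded up to a full
     register of reg_size bytes and the result is doubled.  */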
10644 if (reg_pair_required_p)
10645 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10646
10647 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10648}
10649
b56a9dbc 10650/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
76a4c804 10651
b56a9dbc 10652static bool
10653s390_can_change_mode_class (machine_mode from_mode,
10654 machine_mode to_mode,
10655 reg_class_t rclass)
76a4c804 10656{
10657 machine_mode small_mode;
10658 machine_mode big_mode;
10659
80912819 10660 /* V1TF and TF have different representations in vector
10661 registers. */
10662 if (reg_classes_intersect_p (VEC_REGS, rclass)
10663 && ((from_mode == V1TFmode && to_mode == TFmode)
10664 || (from_mode == TFmode && to_mode == V1TFmode)))
b56a9dbc 10665 return false;
80912819 10666
76a4c804 10667 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
b56a9dbc 10668 return true;
76a4c804 10669
10670 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10671 {
10672 small_mode = from_mode;
10673 big_mode = to_mode;
10674 }
10675 else
10676 {
10677 small_mode = to_mode;
10678 big_mode = from_mode;
10679 }
10680
10681 /* Values residing in VRs are little-endian style. All modes are
	 10682      placed left-aligned in a VR.  This means that we cannot allow
10683 switching between modes with differing sizes. Also if the vector
10684 facility is available we still place TFmode values in VR register
10685 pairs, since the only instructions we have operating on TFmodes
10686 only deal with register pairs. Therefore we have to allow DFmode
10687 subregs of TFmodes to enable the TFmode splitters. */
10688 if (reg_classes_intersect_p (VEC_REGS, rclass)
10689 && (GET_MODE_SIZE (small_mode) < 8
10690 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
b56a9dbc 10691 return false;
76a4c804 10692
10693 /* Likewise for access registers, since they have only half the
10694 word size on 64-bit. */
10695 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
b56a9dbc 10696 return false;
76a4c804 10697
b56a9dbc 10698 return true;
8f1128bb 10699}
10700
7b1bda1c 10701/* Return true if we use LRA instead of reload pass. */
10702static bool
10703s390_lra_p (void)
10704{
10705 return s390_lra_flag;
10706}
10707
4fed3f99 10708/* Return true if register FROM can be eliminated via register TO. */
10709
cd90919d 10710static bool
10711s390_can_eliminate (const int from, const int to)
4fed3f99 10712{
d1a5573e 10713 /* On zSeries machines, we have not marked the base register as fixed.
10714 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10715 If a function requires the base register, we say here that this
10716 elimination cannot be performed. This will cause reload to free
10717 up the base register (as if it were fixed). On the other hand,
10718 if the current function does *not* require the base register, we
10719 say here the elimination succeeds, which in turn allows reload
10720 to allocate the base register for any other purpose. */
10721 if (from == BASE_REGNUM && to == BASE_REGNUM)
10722 {
10723 if (TARGET_CPU_ZARCH)
10724 {
10725 s390_init_frame_layout ();
10726 return cfun->machine->base_reg == NULL_RTX;
10727 }
10728
10729 return false;
10730 }
10731
10732 /* Everything else must point into the stack frame. */
4fed3f99 10733 gcc_assert (to == STACK_POINTER_REGNUM
10734 || to == HARD_FRAME_POINTER_REGNUM);
10735
10736 gcc_assert (from == FRAME_POINTER_REGNUM
10737 || from == ARG_POINTER_REGNUM
10738 || from == RETURN_ADDRESS_POINTER_REGNUM);
10739
10740 /* Make sure we actually saved the return address. */
10741 if (from == RETURN_ADDRESS_POINTER_REGNUM)
18d50ae6 10742 if (!crtl->calls_eh_return
10743 && !cfun->stdarg
4fed3f99 10744 && !cfun_frame_layout.save_return_addr_p)
10745 return false;
10746
10747 return true;
10748}
10749
10750/* Return offset between register FROM and TO initially after prolog. */
7cbfc974 10751
10752HOST_WIDE_INT
4fed3f99 10753s390_initial_elimination_offset (int from, int to)
7cbfc974 10754{
4fed3f99 10755 HOST_WIDE_INT offset;
7cbfc974 10756
4fed3f99 10757 /* ??? Why are we called for non-eliminable pairs? */
10758 if (!s390_can_eliminate (from, to))
10759 return 0;
10760
10761 switch (from)
10762 {
10763 case FRAME_POINTER_REGNUM:
ffead1ca 10764 offset = (get_frame_size()
119114cb 10765 + STACK_POINTER_OFFSET
abe32cce 10766 + crtl->outgoing_args_size);
4fed3f99 10767 break;
67928721 10768
4fed3f99 10769 case ARG_POINTER_REGNUM:
10770 s390_init_frame_layout ();
10771 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10772 break;
10773
10774 case RETURN_ADDRESS_POINTER_REGNUM:
10775 s390_init_frame_layout ();
ff4ce128 10776
10777 if (cfun_frame_layout.first_save_gpr_slot == -1)
10778 {
10779 /* If it turns out that for stdarg nothing went into the reg
10780 save area we also do not need the return address
10781 pointer. */
10782 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10783 return 0;
10784
10785 gcc_unreachable ();
10786 }
10787
10788 /* In order to make the following work it is not necessary for
10789 r14 to have a save slot. It is sufficient if one other GPR
10790 got one. Since the GPRs are always stored without gaps we
10791 are able to calculate where the r14 save slot would
10792 reside. */
10793 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10794 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10795 UNITS_PER_LONG);
4fed3f99 10796 break;
10797
d1a5573e 10798 case BASE_REGNUM:
10799 offset = 0;
10800 break;
10801
4fed3f99 10802 default:
10803 gcc_unreachable ();
10804 }
10805
10806 return offset;
7cbfc974 10807}
10808
8b4a4127 10809/* Emit insn to save fpr REGNUM at offset OFFSET relative
f81e845f 10810 to register BASE. Return generated insn. */
56769981 10811
4673c1a0 10812static rtx
b40da9a7 10813save_fpr (rtx base, int offset, int regnum)
4673c1a0 10814{
8b4a4127 10815 rtx addr;
29c05e22 10816 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
ce1d5a67 10817
10818 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10819 set_mem_alias_set (addr, get_varargs_alias_set ());
10820 else
10821 set_mem_alias_set (addr, get_frame_alias_set ());
4673c1a0 10822
8b4a4127 10823 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10824}
4673c1a0 10825
8b4a4127 10826/* Emit insn to restore fpr REGNUM from offset OFFSET relative
f81e845f 10827 to register BASE. Return generated insn. */
4673c1a0 10828
8b4a4127 10829static rtx
b40da9a7 10830restore_fpr (rtx base, int offset, int regnum)
8b4a4127 10831{
10832 rtx addr;
29c05e22 10833 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
ce1d5a67 10834 set_mem_alias_set (addr, get_frame_alias_set ());
4673c1a0 10835
8b4a4127 10836 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
4673c1a0 10837}
10838
a3cd0f6a 10839/* Return true if REGNO is a global register, but not one
	 10840    of the special ones that need to be saved/restored anyway.  */
10841
10842static inline bool
10843global_not_special_regno_p (int regno)
10844{
10845 return (global_regs[regno]
10846 /* These registers are special and need to be
10847 restored in any case. */
10848 && !(regno == STACK_POINTER_REGNUM
10849 || regno == RETURN_REGNUM
10850 || regno == BASE_REGNUM
10851 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10852}
10853
9a2a66ae 10854/* Generate insn to save registers FIRST to LAST into
f81e845f 10855 the register save area located at offset OFFSET
9a2a66ae 10856 relative to register BASE. */
4673c1a0 10857
9a2a66ae 10858static rtx
b40da9a7 10859save_gprs (rtx base, int offset, int first, int last)
4673c1a0 10860{
9a2a66ae 10861 rtx addr, insn, note;
10862 int i;
10863
29c05e22 10864 addr = plus_constant (Pmode, base, offset);
9a2a66ae 10865 addr = gen_rtx_MEM (Pmode, addr);
ce1d5a67 10866
10867 set_mem_alias_set (addr, get_frame_alias_set ());
9a2a66ae 10868
10869 /* Special-case single register. */
10870 if (first == last)
10871 {
10872 if (TARGET_64BIT)
10873 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10874 else
10875 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10876
a3cd0f6a 10877 if (!global_not_special_regno_p (first))
10878 RTX_FRAME_RELATED_P (insn) = 1;
9a2a66ae 10879 return insn;
10880 }
10881
10882
10883 insn = gen_store_multiple (addr,
10884 gen_rtx_REG (Pmode, first),
10885 GEN_INT (last - first + 1));
10886
18d50ae6 10887 if (first <= 6 && cfun->stdarg)
ce1d5a67 10888 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10889 {
10890 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
ffead1ca 10891
ce1d5a67 10892 if (first + i <= 6)
10893 set_mem_alias_set (mem, get_varargs_alias_set ());
10894 }
9a2a66ae 10895
10896 /* We need to set the FRAME_RELATED flag on all SETs
10897 inside the store-multiple pattern.
10898
10899 However, we must not emit DWARF records for registers 2..5
f81e845f 10900 if they are stored for use by variable arguments ...
9a2a66ae 10901
3ce7ff97 10902      ??? Unfortunately, it is not enough to simply not set the
9a2a66ae 10903 FRAME_RELATED flags for those SETs, because the first SET
10904 of the PARALLEL is always treated as if it had the flag
10905 set, even if it does not. Therefore we emit a new pattern
10906 without those registers as REG_FRAME_RELATED_EXPR note. */
10907
a3cd0f6a 10908 if (first >= 6 && !global_not_special_regno_p (first))
9a2a66ae 10909 {
10910 rtx pat = PATTERN (insn);
10911
10912 for (i = 0; i < XVECLEN (pat, 0); i++)
a3cd0f6a 10913 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10914 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10915 0, i)))))
9a2a66ae 10916 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10917
10918 RTX_FRAME_RELATED_P (insn) = 1;
10919 }
10920 else if (last >= 6)
10921 {
a3cd0f6a 10922 int start;
10923
10924 for (start = first >= 6 ? first : 6; start <= last; start++)
10925 if (!global_not_special_regno_p (start))
10926 break;
10927
10928 if (start > last)
10929 return insn;
10930
29c05e22 10931 addr = plus_constant (Pmode, base,
10932 offset + (start - first) * UNITS_PER_LONG);
ff4ce128 10933
10934 if (start == last)
10935 {
10936 if (TARGET_64BIT)
10937 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10938 gen_rtx_REG (Pmode, start));
10939 else
10940 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10941 gen_rtx_REG (Pmode, start));
10942 note = PATTERN (note);
10943
10944 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10945 RTX_FRAME_RELATED_P (insn) = 1;
10946
10947 return insn;
10948 }
10949
f81e845f 10950 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
a3cd0f6a 10951 gen_rtx_REG (Pmode, start),
10952 GEN_INT (last - start + 1));
9a2a66ae 10953 note = PATTERN (note);
10954
b9c74b4d 10955 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
9a2a66ae 10956
10957 for (i = 0; i < XVECLEN (note, 0); i++)
a3cd0f6a 10958 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10959 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10960 0, i)))))
9a2a66ae 10961 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10962
10963 RTX_FRAME_RELATED_P (insn) = 1;
10964 }
10965
10966 return insn;
8b4a4127 10967}
4673c1a0 10968
9a2a66ae 10969/* Generate insn to restore registers FIRST to LAST from
f81e845f 10970 the register save area located at offset OFFSET
9a2a66ae 10971 relative to register BASE. */
4673c1a0 10972
9a2a66ae 10973static rtx
b40da9a7 10974restore_gprs (rtx base, int offset, int first, int last)
8b4a4127 10975{
9a2a66ae 10976 rtx addr, insn;
10977
29c05e22 10978 addr = plus_constant (Pmode, base, offset);
9a2a66ae 10979 addr = gen_rtx_MEM (Pmode, addr);
ce1d5a67 10980 set_mem_alias_set (addr, get_frame_alias_set ());
9a2a66ae 10981
10982 /* Special-case single register. */
10983 if (first == last)
10984 {
10985 if (TARGET_64BIT)
10986 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10987 else
10988 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10989
ff4ce128 10990 RTX_FRAME_RELATED_P (insn) = 1;
9a2a66ae 10991 return insn;
10992 }
10993
10994 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10995 addr,
10996 GEN_INT (last - first + 1));
ff4ce128 10997 RTX_FRAME_RELATED_P (insn) = 1;
9a2a66ae 10998 return insn;
8b4a4127 10999}
4673c1a0 11000
20074f87 11001/* Return insn sequence to load the GOT register. */
12ef3745 11002
93e0956b 11003rtx_insn *
20074f87 11004s390_load_got (void)
12ef3745 11005{
93e0956b 11006 rtx_insn *insns;
20074f87 11007
c60a7572 11008 /* We cannot use pic_offset_table_rtx here since we use this
11009 function also for non-pic if __tls_get_offset is called and in
11010 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
11011 aren't usable. */
11012 rtx got_rtx = gen_rtx_REG (Pmode, 12);
11013
20074f87 11014 start_sequence ();
11015
dafc8d45 11016 if (TARGET_CPU_ZARCH)
12ef3745 11017 {
9852c8ae 11018 emit_move_insn (got_rtx, s390_got_symbol ());
12ef3745 11019 }
11020 else
11021 {
20074f87 11022 rtx offset;
12ef3745 11023
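      /* Without z/Architecture instructions the GOT pointer is built in
	 two steps: load the literal-pool-relative GOT offset from the
	 literal pool and add the literal pool base (UNSPEC_LTREL_BASE).  */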
9852c8ae 11024 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
12ef3745 11025 UNSPEC_LTREL_OFFSET);
11026 offset = gen_rtx_CONST (Pmode, offset);
11027 offset = force_const_mem (Pmode, offset);
11028
c60a7572 11029 emit_move_insn (got_rtx, offset);
12ef3745 11030
f81e845f 11031 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
12ef3745 11032 UNSPEC_LTREL_BASE);
c60a7572 11033 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
12ef3745 11034
c60a7572 11035 emit_move_insn (got_rtx, offset);
12ef3745 11036 }
20074f87 11037
11038 insns = get_insns ();
11039 end_sequence ();
11040 return insns;
12ef3745 11041}
11042
062c49fd 11043/* This ties together stack memory (MEM with an alias set of frame_alias_set)
11044 and the change to the stack pointer. */
11045
11046static void
11047s390_emit_stack_tie (void)
11048{
11049 rtx mem = gen_frame_mem (BLKmode,
11050 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
11051
11052 emit_insn (gen_stack_tie (mem));
11053}
11054
ff4ce128 11055/* Copy GPRS into FPR save slots. */
11056
11057static void
11058s390_save_gprs_to_fprs (void)
11059{
11060 int i;
11061
11062 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11063 return;
11064
11065 for (i = 6; i < 16; i++)
11066 {
11067 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
11068 {
93e0956b 11069 rtx_insn *insn =
ff4ce128 11070 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
11071 gen_rtx_REG (DImode, i));
11072 RTX_FRAME_RELATED_P (insn) = 1;
c5dad799 11073 	  /* This prevents dwarf2cfi from interpreting the set.  Otherwise
	 11074 	     it might emit def_cfa_register info setting an FPR as the
	 11075 	     new CFA.  */
9e165059 11076 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
ff4ce128 11077 }
11078 }
11079}
11080
11081/* Restore GPRs from FPR save slots. */
11082
11083static void
11084s390_restore_gprs_from_fprs (void)
11085{
11086 int i;
11087
11088 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11089 return;
11090
11091 for (i = 6; i < 16; i++)
11092 {
54530437 11093 rtx_insn *insn;
11094
11095 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
11096 continue;
11097
11098 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
11099
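      /* The stack pointer itself is restored via the dedicated
	 stack_restore_from_fpr pattern; all other GPRs are restored with
	 a plain register move.  */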
11100 if (i == STACK_POINTER_REGNUM)
11101 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
11102 else
11103 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
11104
11105 df_set_regs_ever_live (i, true);
11106 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
11107 if (i == STACK_POINTER_REGNUM)
11108 add_reg_note (insn, REG_CFA_DEF_CFA,
11109 plus_constant (Pmode, stack_pointer_rtx,
11110 STACK_POINTER_OFFSET));
11111 RTX_FRAME_RELATED_P (insn) = 1;
ff4ce128 11112 }
11113}
11114
4673c1a0 11115
0b8be04c 11116/* A pass run immediately before shrink-wrapping and prologue and epilogue
11117 generation. */
11118
0b8be04c 11119namespace {
11120
11121const pass_data pass_data_s390_early_mach =
11122{
11123 RTL_PASS, /* type */
11124 "early_mach", /* name */
11125 OPTGROUP_NONE, /* optinfo_flags */
0b8be04c 11126 TV_MACH_DEP, /* tv_id */
11127 0, /* properties_required */
11128 0, /* properties_provided */
11129 0, /* properties_destroyed */
11130 0, /* todo_flags_start */
8b88439e 11131 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
0b8be04c 11132};
20074f87 11133
0b8be04c 11134class pass_s390_early_mach : public rtl_opt_pass
11135{
11136public:
11137 pass_s390_early_mach (gcc::context *ctxt)
11138 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11139 {}
11140
11141 /* opt_pass methods: */
65b0537f 11142 virtual unsigned int execute (function *);
0b8be04c 11143
11144}; // class pass_s390_early_mach
11145
65b0537f 11146unsigned int
11147pass_s390_early_mach::execute (function *fun)
11148{
93e0956b 11149 rtx_insn *insn;
65b0537f 11150
11151 /* Try to get rid of the FPR clobbers. */
11152 s390_optimize_nonescaping_tx ();
11153
11154 /* Re-compute register info. */
11155 s390_register_info ();
11156
11157 /* If we're using a base register, ensure that it is always valid for
11158 the first non-prologue instruction. */
11159 if (fun->machine->base_reg)
11160 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11161
11162 /* Annotate all constant pool references to let the scheduler know
11163 they implicitly use the base register. */
11164 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11165 if (INSN_P (insn))
11166 {
11167 annotate_constant_pool_refs (&PATTERN (insn));
11168 df_insn_rescan (insn);
11169 }
11170 return 0;
11171}
11172
0b8be04c 11173} // anon namespace
11174
82e9b0b4 11175/* Calculate TARGET = REG + OFFSET as s390_emit_prologue would do it.
11176 - push too big immediates to the literal pool and annotate the refs
11177 - emit frame related notes for stack pointer changes. */
11178
11179static rtx
11180s390_prologue_plus_offset (rtx target, rtx reg, rtx offset, bool frame_related_p)
11181{
11182 rtx insn;
11183 rtx orig_offset = offset;
11184
11185 gcc_assert (REG_P (target));
11186 gcc_assert (REG_P (reg));
11187 gcc_assert (CONST_INT_P (offset));
11188
11189 if (offset == const0_rtx) /* lr/lgr */
11190 {
11191 insn = emit_move_insn (target, reg);
11192 }
11193 else if (DISP_IN_RANGE (INTVAL (offset))) /* la */
11194 {
11195 insn = emit_move_insn (target, gen_rtx_PLUS (Pmode, reg,
11196 offset));
11197 }
11198 else
11199 {
11200 if (!satisfies_constraint_K (offset) /* ahi/aghi */
11201 && (!TARGET_EXTIMM
11202 || (!satisfies_constraint_Op (offset) /* alfi/algfi */
11203 && !satisfies_constraint_On (offset)))) /* slfi/slgfi */
11204 offset = force_const_mem (Pmode, offset);
11205
11206 if (target != reg)
11207 {
11208 insn = emit_move_insn (target, reg);
11209 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11210 }
11211
11212 insn = emit_insn (gen_add2_insn (target, offset));
11213
11214 if (!CONST_INT_P (offset))
11215 {
11216 annotate_constant_pool_refs (&PATTERN (insn));
11217
11218 if (frame_related_p)
11219 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11220 gen_rtx_SET (target,
11221 gen_rtx_PLUS (Pmode, target,
11222 orig_offset)));
11223 }
11224 }
11225
11226 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11227
11228 /* If this is a stack adjustment and we are generating a stack clash
11229 prologue, then add a REG_STACK_CHECK note to signal that this insn
11230 should be left alone. */
11231 if (flag_stack_clash_protection && target == stack_pointer_rtx)
11232 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
11233
11234 return insn;
11235}
11236
11237/* Emit a compare instruction with a volatile memory access as stack
11238 probe. It does not waste store tags and does not clobber any
11239 registers apart from the condition code. */
11240static void
11241s390_emit_stack_probe (rtx addr)
11242{
11243 rtx tmp = gen_rtx_MEM (Pmode, addr);
11244 MEM_VOLATILE_P (tmp) = 1;
11245 s390_emit_compare (EQ, gen_rtx_REG (Pmode, 0), tmp);
11246 emit_insn (gen_blockage ());
11247}
11248
11249/* Use a runtime loop if we have to emit more probes than this. */
11250#define MIN_UNROLL_PROBES 3
11251
11252/* Allocate SIZE bytes of stack space, using TEMP_REG as a temporary
11253 if necessary. LAST_PROBE_OFFSET contains the offset of the closest
11254 probe relative to the stack pointer.
11255
11256 Note that SIZE is negative.
11257
11258 The return value is true if TEMP_REG has been clobbered. */
11259static bool
11260allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
11261 rtx temp_reg)
11262{
11263 bool temp_reg_clobbered_p = false;
11264 HOST_WIDE_INT probe_interval
11265 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11266 HOST_WIDE_INT guard_size
11267 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
11268
11269 if (flag_stack_clash_protection)
11270 {
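      /* If even after this allocation the closest known reference stays
	 within the guard area, no probing is necessary.  */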
11271 if (last_probe_offset + -INTVAL (size) < guard_size)
11272 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
11273 else
11274 {
11275 rtx offset = GEN_INT (probe_interval - UNITS_PER_LONG);
11276 HOST_WIDE_INT rounded_size = -INTVAL (size) & -probe_interval;
11277 HOST_WIDE_INT num_probes = rounded_size / probe_interval;
11278 HOST_WIDE_INT residual = -INTVAL (size) - rounded_size;
11279
11280 if (num_probes < MIN_UNROLL_PROBES)
11281 {
11282 /* Emit unrolled probe statements. */
11283
11284 for (unsigned int i = 0; i < num_probes; i++)
11285 {
11286 s390_prologue_plus_offset (stack_pointer_rtx,
11287 stack_pointer_rtx,
11288 GEN_INT (-probe_interval), true);
11289 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11290 stack_pointer_rtx,
11291 offset));
11292 }
11293 dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
11294 }
11295 else
11296 {
11297 /* Emit a loop probing the pages. */
11298
11299 rtx_code_label *loop_start_label = gen_label_rtx ();
11300
11301 /* From now on temp_reg will be the CFA register. */
11302 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11303 GEN_INT (-rounded_size), true);
11304 emit_label (loop_start_label);
11305
11306 s390_prologue_plus_offset (stack_pointer_rtx,
11307 stack_pointer_rtx,
11308 GEN_INT (-probe_interval), false);
11309 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11310 stack_pointer_rtx,
11311 offset));
11312 emit_cmp_and_jump_insns (stack_pointer_rtx, temp_reg,
11313 GT, NULL_RTX,
11314 Pmode, 1, loop_start_label);
11315
	 11316 	      /* Without this, make_edges ICEs.  */
11317 JUMP_LABEL (get_last_insn ()) = loop_start_label;
11318 LABEL_NUSES (loop_start_label) = 1;
11319
11320 /* That's going to be a NOP since stack pointer and
11321 temp_reg are supposed to be the same here. We just
11322 emit it to set the CFA reg back to r15. */
11323 s390_prologue_plus_offset (stack_pointer_rtx, temp_reg,
11324 const0_rtx, true);
11325 temp_reg_clobbered_p = true;
11326 dump_stack_clash_frame_info (PROBE_LOOP, residual != 0);
11327 }
11328
11329 /* Handle any residual allocation request. */
11330 s390_prologue_plus_offset (stack_pointer_rtx,
11331 stack_pointer_rtx,
11332 GEN_INT (-residual), true);
11333 last_probe_offset += residual;
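	  /* If the residual allocation moved us a full probe interval away
	     from the last probe, probe once into the residual area.  */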
11334 if (last_probe_offset >= probe_interval)
11335 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11336 stack_pointer_rtx,
11337 GEN_INT (residual
11338 - UNITS_PER_LONG)));
11339
11340 return temp_reg_clobbered_p;
11341 }
11342 }
11343
11344 /* Subtract frame size from stack pointer. */
11345 s390_prologue_plus_offset (stack_pointer_rtx,
11346 stack_pointer_rtx,
11347 size, true);
11348
11349 return temp_reg_clobbered_p;
11350}
11351
0b8be04c 11352/* Expand the prologue into a bunch of separate insns. */
11353
11354void
11355s390_emit_prologue (void)
11356{
11357 rtx insn, addr;
11358 rtx temp_reg;
11359 int i;
11360 int offset;
11361 int next_fpr = 0;
20074f87 11362
f81e845f 11363 /* Choose best register to use for temp use within prologue.
c6d481f7 11364      TPF with profiling must avoid register 14 - the tracing function
11365 needs the original contents of r14 to be preserved. */
f81e845f 11366
ffead1ca 11367 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
d5bf7b64 11368 && !crtl->is_leaf
1e639cb0 11369 && !TARGET_TPF_PROFILING)
8b4a4127 11370 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
c6d481f7 11371 else if (flag_split_stack && cfun->stdarg)
11372 temp_reg = gen_rtx_REG (Pmode, 12);
4673c1a0 11373 else
8b4a4127 11374 temp_reg = gen_rtx_REG (Pmode, 1);
4673c1a0 11375
82e9b0b4 11376 /* When probing for stack-clash mitigation, we have to track the distance
11377 between the stack pointer and closest known reference.
11378
5714ec4b 11379 Most of the time we have to make a worst case assumption. The
82e9b0b4 11380 only exception is when TARGET_BACKCHAIN is active, in which case
11381 we know *sp (offset 0) was written. */
11382 HOST_WIDE_INT probe_interval
11383 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11384 HOST_WIDE_INT last_probe_offset
11385 = (TARGET_BACKCHAIN
11386 ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
11387 : probe_interval - (STACK_BOUNDARY / UNITS_PER_WORD));
11388
ff4ce128 11389 s390_save_gprs_to_fprs ();
11390
8b4a4127 11391 /* Save call saved gprs. */
67928721 11392 if (cfun_frame_layout.first_save_gpr != -1)
4ac7fd98 11393 {
ffead1ca 11394 insn = save_gprs (stack_pointer_rtx,
11395 cfun_frame_layout.gprs_offset +
b5fdc416 11396 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
5214e6ae 11397 - cfun_frame_layout.first_save_gpr_slot),
ffead1ca 11398 cfun_frame_layout.first_save_gpr,
4ac7fd98 11399 cfun_frame_layout.last_save_gpr);
82e9b0b4 11400
11401 /* This is not 100% correct. If we have more than one register saved,
11402 then LAST_PROBE_OFFSET can move even closer to sp. */
11403 last_probe_offset
11404 = (cfun_frame_layout.gprs_offset +
11405 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11406 - cfun_frame_layout.first_save_gpr_slot));
11407
4ac7fd98 11408 emit_insn (insn);
11409 }
8b4a4127 11410
c2c1332a 11411 /* Dummy insn to mark literal pool slot. */
f81e845f 11412
4fed3f99 11413 if (cfun->machine->base_reg)
11414 emit_insn (gen_main_pool (cfun->machine->base_reg));
f81e845f 11415
67928721 11416 offset = cfun_frame_layout.f0_offset;
8b4a4127 11417
67928721 11418 /* Save f0 and f2. */
6a2469fe 11419 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
67928721 11420 {
29439367 11421 if (cfun_fpr_save_p (i))
67928721 11422 {
29439367 11423 save_fpr (stack_pointer_rtx, offset, i);
82e9b0b4 11424 if (offset < last_probe_offset)
11425 last_probe_offset = offset;
67928721 11426 offset += 8;
11427 }
031bdf83 11428 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11429 offset += 8;
67928721 11430 }
4673c1a0 11431
67928721 11432 /* Save f4 and f6. */
11433 offset = cfun_frame_layout.f4_offset;
6a2469fe 11434 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
67928721 11435 {
29439367 11436 if (cfun_fpr_save_p (i))
8b4a4127 11437 {
29439367 11438 insn = save_fpr (stack_pointer_rtx, offset, i);
82e9b0b4 11439 if (offset < last_probe_offset)
11440 last_probe_offset = offset;
67928721 11441 offset += 8;
11442
031bdf83 11443 /* If f4 and f6 are call clobbered they are saved due to
11444 stdargs and therefore are not frame related. */
29439367 11445 if (!call_really_used_regs[i])
67928721 11446 RTX_FRAME_RELATED_P (insn) = 1;
8b4a4127 11447 }
031bdf83 11448 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
67928721 11449 offset += 8;
11450 }
11451
646a946e 11452 if (TARGET_PACKED_STACK
67928721 11453 && cfun_save_high_fprs_p
11454 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11455 {
11456 offset = (cfun_frame_layout.f8_offset
11457 + (cfun_frame_layout.high_fprs - 1) * 8);
11458
6a2469fe 11459 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
29439367 11460 if (cfun_fpr_save_p (i))
67928721 11461 {
29439367 11462 insn = save_fpr (stack_pointer_rtx, offset, i);
82e9b0b4 11463 if (offset < last_probe_offset)
11464 last_probe_offset = offset;
ffead1ca 11465
67928721 11466 RTX_FRAME_RELATED_P (insn) = 1;
11467 offset -= 8;
11468 }
11469 if (offset >= cfun_frame_layout.f8_offset)
29439367 11470 next_fpr = i;
67928721 11471 }
ffead1ca 11472
646a946e 11473 if (!TARGET_PACKED_STACK)
6a2469fe 11474 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
4673c1a0 11475
8c0dd614 11476 if (flag_stack_usage_info)
7810b579 11477 current_function_static_stack_size = cfun_frame_layout.frame_size;
11478
8b4a4127 11479 /* Decrement stack pointer. */
4673c1a0 11480
67928721 11481 if (cfun_frame_layout.frame_size > 0)
8b4a4127 11482 {
67928721 11483 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
82e9b0b4 11484 rtx_insn *stack_pointer_backup_loc;
11485 bool temp_reg_clobbered_p;
4673c1a0 11486
cbb300e8 11487 if (s390_stack_size)
82e9b0b4 11488 {
00d233e6 11489 HOST_WIDE_INT stack_guard;
cbb300e8 11490
00d233e6 11491 if (s390_stack_guard)
11492 stack_guard = s390_stack_guard;
cbb300e8 11493 else
00d233e6 11494 {
	 11495 	  /* If no value for the stack guard is provided, the smallest power
	 11496 	     of 2 larger than the current frame size is chosen.  */
11497 stack_guard = 1;
11498 while (stack_guard < cfun_frame_layout.frame_size)
11499 stack_guard <<= 1;
11500 }
cbb300e8 11501
00d233e6 11502 if (cfun_frame_layout.frame_size >= s390_stack_size)
11503 {
8ad6fff9 11504 warning (0, "frame size of function %qs is %wd"
00d233e6 11505 " bytes exceeding user provided stack limit of "
8ad6fff9 11506 "%d bytes. "
00d233e6 11507 "An unconditional trap is added.",
11508 current_function_name(), cfun_frame_layout.frame_size,
11509 s390_stack_size);
11510 emit_insn (gen_trap ());
482869e7 11511 emit_barrier ();
00d233e6 11512 }
11513 else
11514 {
b437383e 11515 /* stack_guard has to be smaller than s390_stack_size.
11516 Otherwise we would emit an AND with zero which would
11517 not match the test under mask pattern. */
11518 if (stack_guard >= s390_stack_size)
11519 {
7fe62d25 11520 warning (0, "frame size of function %qs is %wd"
b437383e 11521 " bytes which is more than half the stack size. "
11522 "The dynamic check would not be reliable. "
11523 "No check emitted for this function.",
11524 current_function_name(),
11525 cfun_frame_layout.frame_size);
11526 }
00d233e6 11527 else
b437383e 11528 {
11529 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11530 & ~(stack_guard - 1));
11531
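		  /* Trap when the stack pointer bits selected by the mask
		     are all zero (test under mask); this implements the
		     runtime stack size check.  */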
11532 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11533 GEN_INT (stack_check_mask));
11534 if (TARGET_64BIT)
11535 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11536 t, const0_rtx),
11537 t, const0_rtx, const0_rtx));
11538 else
11539 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11540 t, const0_rtx),
11541 t, const0_rtx, const0_rtx));
11542 }
00d233e6 11543 }
cbb300e8 11544 }
11545
ffead1ca 11546 if (s390_warn_framesize > 0
cbb300e8 11547 && cfun_frame_layout.frame_size >= s390_warn_framesize)
7fe62d25 11548 warning (0, "frame size of %qs is %wd bytes",
cbb300e8 11549 current_function_name (), cfun_frame_layout.frame_size);
11550
11551 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
c3ceba8e 11552 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
cbb300e8 11553
82e9b0b4 11554       /* Save the location where we could back up the incoming stack
11555 pointer. */
11556 stack_pointer_backup_loc = get_last_insn ();
f81e845f 11557
82e9b0b4 11558 temp_reg_clobbered_p = allocate_stack_space (frame_off, last_probe_offset,
11559 temp_reg);
8b4a4127 11560
82e9b0b4 11561 if (TARGET_BACKCHAIN || next_fpr)
51aa1e9c 11562 {
82e9b0b4 11563 if (temp_reg_clobbered_p)
11564 {
11565 /* allocate_stack_space had to make use of temp_reg and
11566 we need it to hold a backup of the incoming stack
	 11567 	 pointer.  Recompute that value from the current
11568 stack pointer. */
11569 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11570 GEN_INT (cfun_frame_layout.frame_size),
11571 false);
11572 }
11573 else
11574 {
	 11575 	      /* allocate_stack_space didn't actually require
11576 temp_reg. Insert the stack pointer backup insn
11577 before the stack pointer decrement code - knowing now
11578 that the value will survive. */
11579 emit_insn_after (gen_move_insn (temp_reg, stack_pointer_rtx),
11580 stack_pointer_backup_loc);
11581 }
51aa1e9c 11582 }
8b4a4127 11583
8b4a4127 11584 /* Set backchain. */
f81e845f 11585
e5c64bfc 11586 if (TARGET_BACKCHAIN)
4673c1a0 11587 {
67928721 11588 if (cfun_frame_layout.backchain_offset)
ffead1ca 11589 addr = gen_rtx_MEM (Pmode,
29c05e22 11590 plus_constant (Pmode, stack_pointer_rtx,
67928721 11591 cfun_frame_layout.backchain_offset));
11592 else
ffead1ca 11593 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
ce1d5a67 11594 set_mem_alias_set (addr, get_frame_alias_set ());
8b4a4127 11595 insn = emit_insn (gen_move_insn (addr, temp_reg));
4673c1a0 11596 }
90524d70 11597
cbeb677e 11598 /* If we support non-call exceptions (e.g. for Java),
90524d70 11599 we need to make sure the backchain pointer is set up
11600 before any possibly trapping memory access. */
cbeb677e 11601 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
90524d70 11602 {
11603 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
18b42941 11604 emit_clobber (addr);
90524d70 11605 }
8b4a4127 11606 }
82e9b0b4 11607 else if (flag_stack_clash_protection)
11608 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
4673c1a0 11609
8b4a4127 11610 /* Save fprs 8 - 15 (64 bit ABI). */
f81e845f 11611
67928721 11612 if (cfun_save_high_fprs_p && next_fpr)
8b4a4127 11613 {
062c49fd 11614 /* If the stack might be accessed through a different register
11615 we have to make sure that the stack pointer decrement is not
11616 moved below the use of the stack slots. */
11617 s390_emit_stack_tie ();
11618
ffead1ca 11619 insn = emit_insn (gen_add2_insn (temp_reg,
67928721 11620 GEN_INT (cfun_frame_layout.f8_offset)));
11621
11622 offset = 0;
4673c1a0 11623
6a2469fe 11624 for (i = FPR8_REGNUM; i <= next_fpr; i++)
29439367 11625 if (cfun_fpr_save_p (i))
8b4a4127 11626 {
29c05e22 11627 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
67928721 11628 cfun_frame_layout.frame_size
11629 + cfun_frame_layout.f8_offset
11630 + offset);
ffead1ca 11631
67928721 11632 insn = save_fpr (temp_reg, offset, i);
11633 offset += 8;
8b4a4127 11634 RTX_FRAME_RELATED_P (insn) = 1;
b9c74b4d 11635 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
d1f9b275 11636 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
b9c74b4d 11637 gen_rtx_REG (DFmode, i)));
8b4a4127 11638 }
11639 }
f81e845f 11640
8b4a4127 11641 /* Set frame pointer, if needed. */
f81e845f 11642
5a5e802f 11643 if (frame_pointer_needed)
8b4a4127 11644 {
11645 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11646 RTX_FRAME_RELATED_P (insn) = 1;
11647 }
4673c1a0 11648
8b4a4127 11649 /* Set up got pointer, if needed. */
f81e845f 11650
3072d30e 11651 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
20074f87 11652 {
93e0956b 11653 rtx_insn *insns = s390_load_got ();
20074f87 11654
91a55c11 11655 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
3072d30e 11656 annotate_constant_pool_refs (&PATTERN (insn));
20074f87 11657
11658 emit_insn (insns);
11659 }
f81e845f 11660
de253666 11661 if (TARGET_TPF_PROFILING)
f81e845f 11662 {
11663 /* Generate a BAS instruction to serve as a function
11664 entry intercept to facilitate the use of tracing
346fecd5 11665 algorithms located at the branch target. */
11666 emit_insn (gen_prologue_tpf ());
f81e845f 11667
11668 /* Emit a blockage here so that all code
11669 lies between the profiling mechanisms. */
11670 emit_insn (gen_blockage ());
11671 }
8b4a4127 11672}
4673c1a0 11673
d2833c15 11674/* Expand the epilogue into a bunch of separate insns. */
4673c1a0 11675
8b4a4127 11676void
7346ca58 11677s390_emit_epilogue (bool sibcall)
8b4a4127 11678{
a3cd0f6a 11679 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
abd8f04d 11680 int area_bottom, area_top, offset = 0;
67928721 11681 int next_offset;
8b4a4127 11682 rtvec p;
78c2b526 11683 int i;
4673c1a0 11684
de253666 11685 if (TARGET_TPF_PROFILING)
f81e845f 11686 {
11687
11688 /* Generate a BAS instruction to serve as a function
11689 entry intercept to facilitate the use of tracing
346fecd5 11690 algorithms located at the branch target. */
f81e845f 11691
f81e845f 11692 /* Emit a blockage here so that all code
11693 lies between the profiling mechanisms. */
11694 emit_insn (gen_blockage ());
11695
346fecd5 11696 emit_insn (gen_epilogue_tpf ());
f81e845f 11697 }
11698
8b4a4127 11699 /* Check whether to use frame or stack pointer for restore. */
4673c1a0 11700
ffead1ca 11701 frame_pointer = (frame_pointer_needed
67928721 11702 ? hard_frame_pointer_rtx : stack_pointer_rtx);
4673c1a0 11703
67928721 11704 s390_frame_area (&area_bottom, &area_top);
4673c1a0 11705
f81e845f 11706 /* Check whether we can access the register save area.
8b4a4127 11707 If not, increment the frame pointer as required. */
4673c1a0 11708
8b4a4127 11709 if (area_top <= area_bottom)
11710 {
11711 /* Nothing to restore. */
11712 }
67928721 11713 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11714 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
8b4a4127 11715 {
11716 /* Area is in range. */
67928721 11717 offset = cfun_frame_layout.frame_size;
8b4a4127 11718 }
11719 else
11720 {
a3cd0f6a 11721 rtx insn, frame_off, cfa;
4673c1a0 11722
f81e845f 11723 offset = area_bottom < 0 ? -area_bottom : 0;
67928721 11724 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
4673c1a0 11725
d1f9b275 11726 cfa = gen_rtx_SET (frame_pointer,
a3cd0f6a 11727 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
51aa1e9c 11728 if (DISP_IN_RANGE (INTVAL (frame_off)))
11729 {
d1f9b275 11730 insn = gen_rtx_SET (frame_pointer,
51aa1e9c 11731 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11732 insn = emit_insn (insn);
11733 }
11734 else
11735 {
cb888f33 11736 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
51aa1e9c 11737 frame_off = force_const_mem (Pmode, frame_off);
4673c1a0 11738
51aa1e9c 11739 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
20074f87 11740 annotate_constant_pool_refs (&PATTERN (insn));
51aa1e9c 11741 }
a3cd0f6a 11742 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11743 RTX_FRAME_RELATED_P (insn) = 1;
8b4a4127 11744 }
4673c1a0 11745
8b4a4127 11746 /* Restore call saved fprs. */
11747
11748 if (TARGET_64BIT)
4673c1a0 11749 {
67928721 11750 if (cfun_save_high_fprs_p)
11751 {
11752 next_offset = cfun_frame_layout.f8_offset;
6a2469fe 11753 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
67928721 11754 {
29439367 11755 if (cfun_fpr_save_p (i))
67928721 11756 {
11757 restore_fpr (frame_pointer,
11758 offset + next_offset, i);
a3cd0f6a 11759 cfa_restores
11760 = alloc_reg_note (REG_CFA_RESTORE,
11761 gen_rtx_REG (DFmode, i), cfa_restores);
67928721 11762 next_offset += 8;
11763 }
11764 }
11765 }
ffead1ca 11766
4673c1a0 11767 }
11768 else
11769 {
67928721 11770 next_offset = cfun_frame_layout.f4_offset;
29439367 11771 /* f4, f6 */
6a2469fe 11772 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
67928721 11773 {
29439367 11774 if (cfun_fpr_save_p (i))
67928721 11775 {
11776 restore_fpr (frame_pointer,
11777 offset + next_offset, i);
a3cd0f6a 11778 cfa_restores
11779 = alloc_reg_note (REG_CFA_RESTORE,
11780 gen_rtx_REG (DFmode, i), cfa_restores);
67928721 11781 next_offset += 8;
11782 }
646a946e 11783 else if (!TARGET_PACKED_STACK)
67928721 11784 next_offset += 8;
11785 }
ffead1ca 11786
8b4a4127 11787 }
4673c1a0 11788
8b4a4127 11789 /* Return register. */
11790
f81e845f 11791 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8b4a4127 11792
11793 /* Restore call saved gprs. */
11794
67928721 11795 if (cfun_frame_layout.first_restore_gpr != -1)
8b4a4127 11796 {
9a2a66ae 11797 rtx insn, addr;
43935856 11798 int i;
11799
f81e845f 11800 /* Check for global register and save them
43935856 11801 to stack location from where they get restored. */
11802
67928721 11803 for (i = cfun_frame_layout.first_restore_gpr;
11804 i <= cfun_frame_layout.last_restore_gpr;
43935856 11805 i++)
11806 {
a3cd0f6a 11807 if (global_not_special_regno_p (i))
43935856 11808 {
29c05e22 11809 addr = plus_constant (Pmode, frame_pointer,
ffead1ca 11810 offset + cfun_frame_layout.gprs_offset
5214e6ae 11811 + (i - cfun_frame_layout.first_save_gpr_slot)
b5fdc416 11812 * UNITS_PER_LONG);
43935856 11813 addr = gen_rtx_MEM (Pmode, addr);
ce1d5a67 11814 set_mem_alias_set (addr, get_frame_alias_set ());
43935856 11815 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
f81e845f 11816 }
a3cd0f6a 11817 else
11818 cfa_restores
11819 = alloc_reg_note (REG_CFA_RESTORE,
11820 gen_rtx_REG (Pmode, i), cfa_restores);
43935856 11821 }
8b4a4127 11822
b5e83b9b 11823 /* Fetch return address from stack before load multiple,
11824 this will do good for scheduling.
11825
11826 Only do this if we already decided that r14 needs to be
11827 saved to a stack slot. (And not just because r14 happens to
11828 be in between two GPRs which need saving.) Otherwise it
11829 would be difficult to take that decision back in
11830 s390_optimize_prologue.
11831
11832 This optimization is only helpful on in-order machines. */
11833 if (! sibcall
11834 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11835 && s390_tune <= PROCESSOR_2097_Z10)
11836 {
11837 int return_regnum = find_unused_clobbered_reg();
11838 if (!return_regnum)
11839 return_regnum = 4;
11840 return_reg = gen_rtx_REG (Pmode, return_regnum);
11841
11842 addr = plus_constant (Pmode, frame_pointer,
11843 offset + cfun_frame_layout.gprs_offset
11844 + (RETURN_REGNUM
11845 - cfun_frame_layout.first_save_gpr_slot)
11846 * UNITS_PER_LONG);
11847 addr = gen_rtx_MEM (Pmode, addr);
11848 set_mem_alias_set (addr, get_frame_alias_set ());
11849 emit_move_insn (return_reg, addr);
d7c99e1a 11850
b5e83b9b 11851 /* Once we did that optimization we have to make sure
11852 s390_optimize_prologue does not try to remove the store
11853 of r14 since we will not be able to find the load issued
11854 here. */
11855 cfun_frame_layout.save_return_addr_p = true;
4673c1a0 11856 }
8b4a4127 11857
67928721 11858 insn = restore_gprs (frame_pointer,
11859 offset + cfun_frame_layout.gprs_offset
ffead1ca 11860 + (cfun_frame_layout.first_restore_gpr
5214e6ae 11861 - cfun_frame_layout.first_save_gpr_slot)
b5fdc416 11862 * UNITS_PER_LONG,
67928721 11863 cfun_frame_layout.first_restore_gpr,
11864 cfun_frame_layout.last_restore_gpr);
a3cd0f6a 11865 insn = emit_insn (insn);
11866 REG_NOTES (insn) = cfa_restores;
11867 add_reg_note (insn, REG_CFA_DEF_CFA,
29c05e22 11868 plus_constant (Pmode, stack_pointer_rtx,
11869 STACK_POINTER_OFFSET));
a3cd0f6a 11870 RTX_FRAME_RELATED_P (insn) = 1;
8b4a4127 11871 }
4673c1a0 11872
ff4ce128 11873 s390_restore_gprs_from_fprs ();
11874
7346ca58 11875 if (! sibcall)
11876 {
f81e845f 11877
7346ca58 11878 /* Return to caller. */
f588eb9f 11879
7346ca58 11880 p = rtvec_alloc (2);
f588eb9f 11881
1a860023 11882 RTVEC_ELT (p, 0) = ret_rtx;
7346ca58 11883 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11884 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11885 }
4673c1a0 11886}
11887
7a64c761 11888/* Implement TARGET_SET_UP_BY_PROLOGUE. */
11889
11890static void
11891s300_set_up_by_prologue (hard_reg_set_container *regs)
11892{
11893 if (cfun->machine->base_reg
11894 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11895 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11896}
11897
c6d481f7 11898/* -fsplit-stack support. */
11899
11900/* A SYMBOL_REF for __morestack. */
11901static GTY(()) rtx morestack_ref;
11902
11903/* When using -fsplit-stack, the allocation routines set a field in
11904 the TCB to the bottom of the stack plus this much space, measured
11905 in bytes. */
11906
11907#define SPLIT_STACK_AVAILABLE 1024
11908
11909/* Emit -fsplit-stack prologue, which goes before the regular function
11910 prologue. */
11911
11912void
11913s390_expand_split_stack_prologue (void)
11914{
11915 rtx r1, guard, cc = NULL;
11916 rtx_insn *insn;
11917 /* Offset from thread pointer to __private_ss. */
11918 int psso = TARGET_64BIT ? 0x38 : 0x20;
11919 /* Pointer size in bytes. */
11920 /* Frame size and argument size - the two parameters to __morestack. */
11921 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11922 /* Align argument size to 8 bytes - simplifies __morestack code. */
11923 HOST_WIDE_INT args_size = crtl->args.size >= 0
11924 ? ((crtl->args.size + 7) & ~7)
11925 : 0;
11926 /* Label to be called by __morestack. */
11927 rtx_code_label *call_done = NULL;
11928 rtx_code_label *parm_base = NULL;
11929 rtx tmp;
11930
11931 gcc_assert (flag_split_stack && reload_completed);
11932 if (!TARGET_CPU_ZARCH)
11933 {
11934 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11935 return;
11936 }
11937
11938 r1 = gen_rtx_REG (Pmode, 1);
11939
11940 /* If no stack frame will be allocated, don't do anything. */
11941 if (!frame_size)
11942 {
11943 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11944 {
11945 /* If va_start is used, just use r15. */
11946 emit_move_insn (r1,
11947 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11948 GEN_INT (STACK_POINTER_OFFSET)));
11949
11950 }
11951 return;
11952 }
11953
11954 if (morestack_ref == NULL_RTX)
11955 {
11956 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11957 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11958 | SYMBOL_FLAG_FUNCTION);
11959 }
11960
11961 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11962 {
11963 /* If frame_size will fit in an add instruction, do a stack space
11964 check, and only call __morestack if there's not enough space. */
11965
11966 /* Get thread pointer. r1 is the only register we can always destroy - r0
11967 could contain a static chain (and cannot be used to address memory
11968 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11969 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11970 /* Aim at __private_ss. */
11971 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11972
	 11973       /* If less than 1kiB is used, skip the addition and compare directly with
11974 __private_ss. */
11975 if (frame_size > SPLIT_STACK_AVAILABLE)
11976 {
11977 emit_move_insn (r1, guard);
11978 if (TARGET_64BIT)
11979 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11980 else
11981 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11982 guard = r1;
11983 }
11984
11985 /* Compare the (maybe adjusted) guard with the stack pointer. */
11986 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11987 }
11988
11989 call_done = gen_label_rtx ();
11990 parm_base = gen_label_rtx ();
11991
11992 /* Emit the parameter block. */
11993 tmp = gen_split_stack_data (parm_base, call_done,
11994 GEN_INT (frame_size),
11995 GEN_INT (args_size));
11996 insn = emit_insn (tmp);
11997 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11998 LABEL_NUSES (call_done)++;
11999 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12000 LABEL_NUSES (parm_base)++;
12001
12002 /* %r1 = litbase. */
12003 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
12004 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12005 LABEL_NUSES (parm_base)++;
12006
12007 /* Now, we need to call __morestack. It has very special calling
12008 conventions: it preserves param/return/static chain registers for
12009 calling main function body, and looks for its own parameters at %r1. */
12010
12011 if (cc != NULL)
12012 {
12013 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
12014
12015 insn = emit_jump_insn (tmp);
12016 JUMP_LABEL (insn) = call_done;
12017 LABEL_NUSES (call_done)++;
12018
12019 /* Mark the jump as very unlikely to be taken. */
61cb1816 12020 add_reg_br_prob_note (insn,
12021 profile_probability::very_unlikely ());
c6d481f7 12022
12023 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12024 {
12025 /* If va_start is used, and __morestack was not called, just use
12026 r15. */
12027 emit_move_insn (r1,
12028 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
12029 GEN_INT (STACK_POINTER_OFFSET)));
12030 }
12031 }
12032 else
12033 {
12034 tmp = gen_split_stack_call (morestack_ref, call_done);
12035 insn = emit_jump_insn (tmp);
12036 JUMP_LABEL (insn) = call_done;
12037 LABEL_NUSES (call_done)++;
12038 emit_barrier ();
12039 }
12040
12041 /* __morestack will call us here. */
12042
12043 emit_label (call_done);
12044}
12045
12046/* We may have to tell the dataflow pass that the split stack prologue
12047 is initializing a register. */
12048
12049static void
12050s390_live_on_entry (bitmap regs)
12051{
12052 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12053 {
12054 gcc_assert (flag_split_stack);
12055 bitmap_set_bit (regs, 1);
12056 }
12057}
12058
7a64c761 12059/* Return true if the function can use simple_return to return outside
12060 of a shrink-wrapped region. At present shrink-wrapping is supported
12061 in all cases. */
12062
12063bool
12064s390_can_use_simple_return_insn (void)
12065{
12066 return true;
12067}
12068
12069/* Return true if the epilogue is guaranteed to contain only a return
12070 instruction and if a direct return can therefore be used instead.
12071 One of the main advantages of using direct return instructions
12072 is that we can then use conditional returns. */
12073
12074bool
12075s390_can_use_return_insn (void)
12076{
12077 int i;
12078
12079 if (!reload_completed)
12080 return false;
12081
12082 if (crtl->profile)
12083 return false;
12084
12085 if (TARGET_TPF_PROFILING)
12086 return false;
12087
12088 for (i = 0; i < 16; i++)
1d3cea74 12089 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
7a64c761 12090 return false;
12091
06fa0630 12092 /* For 31 bit this is not covered by the frame_size check below
12093 since f4, f6 are saved in the register save area without needing
12094 additional stack space. */
12095 if (!TARGET_64BIT
12096 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
12097 return false;
12098
7a64c761 12099 if (cfun->machine->base_reg
12100 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
12101 return false;
12102
12103 return cfun_frame_layout.frame_size == 0;
12104}
4673c1a0 12105
76a4c804 12106/* The VX ABI differs for vararg functions. Therefore we need the
12107 prototype of the callee to be available when passing vector type
12108 values. */
12109static const char *
12110s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
12111{
12112 return ((TARGET_VX_ABI
12113 && typelist == 0
12114 && VECTOR_TYPE_P (TREE_TYPE (val))
12115 && (funcdecl == NULL_TREE
12116 || (TREE_CODE (funcdecl) == FUNCTION_DECL
12117 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
19abb0ad 12118 ? N_("vector argument passed to unprototyped function")
76a4c804 12119 : NULL);
12120}
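
/* Editorial example (illustration only, not part of the compiler; the
   identifiers are made up and __vector needs -mzvector):

       extern int f ();        // no prototype
       __vector int v;
       ...
       f (v);                  // "vector argument passed to unprototyped function"

   The prototype is required because, under the VX ABI, the caller must
   know how the callee expects vector arguments to be passed.  */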
12121
12122
f81e845f 12123/* Return the size in bytes of a function argument of
56769981 12124 type TYPE and/or mode MODE. At least one of TYPE or
12125 MODE must be specified. */
4673c1a0 12126
12127static int
3754d046 12128s390_function_arg_size (machine_mode mode, const_tree type)
4673c1a0 12129{
12130 if (type)
12131 return int_size_in_bytes (type);
12132
0c034860 12133 /* No type info available for some library calls ... */
4673c1a0 12134 if (mode != BLKmode)
12135 return GET_MODE_SIZE (mode);
12136
12137 /* If we have neither type nor mode, abort */
32eda510 12138 gcc_unreachable ();
4673c1a0 12139}
12140
76a4c804 12141/* Return true if a function argument of type TYPE and mode MODE
12142 is to be passed in a vector register, if available. */
12143
12144bool
12145s390_function_arg_vector (machine_mode mode, const_tree type)
12146{
12147 if (!TARGET_VX_ABI)
12148 return false;
12149
12150 if (s390_function_arg_size (mode, type) > 16)
12151 return false;
12152
12153 /* No type info available for some library calls ... */
12154 if (!type)
12155 return VECTOR_MODE_P (mode);
12156
12157 /* The ABI says that record types with a single member are treated
12158 just like that member would be. */
12159 while (TREE_CODE (type) == RECORD_TYPE)
12160 {
12161 tree field, single = NULL_TREE;
12162
12163 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12164 {
12165 if (TREE_CODE (field) != FIELD_DECL)
12166 continue;
12167
12168 if (single == NULL_TREE)
12169 single = TREE_TYPE (field);
12170 else
12171 return false;
12172 }
12173
12174 if (single == NULL_TREE)
12175 return false;
12176 else
12177 {
12178		  /* If the field declaration adds extra bytes due to
12179		     e.g. padding, this is not accepted as a vector type.  */
12180 if (int_size_in_bytes (single) <= 0
12181 || int_size_in_bytes (single) != int_size_in_bytes (type))
12182 return false;
12183 type = single;
12184 }
12185 }
12186
12187 return VECTOR_TYPE_P (type);
12188}
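
/* Editorial illustration of the single-member record rule above (not
   compiled; type names are made up):

       struct wrap   { __vector int v; };            // passed like __vector int
       struct padded { __vector int v; int tail; };  // not a vector argument

   The second struct has a second member, so it is not passed in a
   vector register.  */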
12189
59652f3f 12190/* Return true if a function argument of type TYPE and mode MODE
12191 is to be passed in a floating-point register, if available. */
12192
12193static bool
3754d046 12194s390_function_arg_float (machine_mode mode, const_tree type)
59652f3f 12195{
76a4c804 12196 if (s390_function_arg_size (mode, type) > 8)
201e502c 12197 return false;
12198
59652f3f 12199 /* Soft-float changes the ABI: no floating-point registers are used. */
12200 if (TARGET_SOFT_FLOAT)
12201 return false;
12202
12203 /* No type info available for some library calls ... */
12204 if (!type)
36868490 12205 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
59652f3f 12206
12207 /* The ABI says that record types with a single member are treated
12208 just like that member would be. */
12209 while (TREE_CODE (type) == RECORD_TYPE)
12210 {
12211 tree field, single = NULL_TREE;
12212
1767a056 12213 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
59652f3f 12214 {
12215 if (TREE_CODE (field) != FIELD_DECL)
12216 continue;
12217
12218 if (single == NULL_TREE)
12219 single = TREE_TYPE (field);
12220 else
12221 return false;
12222 }
12223
12224 if (single == NULL_TREE)
12225 return false;
12226 else
12227 type = single;
12228 }
12229
12230 return TREE_CODE (type) == REAL_TYPE;
12231}
12232
201e502c 12233/* Return true if a function argument of type TYPE and mode MODE
12234 is to be passed in an integer register, or a pair of integer
12235 registers, if available. */
12236
12237static bool
3754d046 12238s390_function_arg_integer (machine_mode mode, const_tree type)
201e502c 12239{
12240 int size = s390_function_arg_size (mode, type);
12241 if (size > 8)
12242 return false;
12243
12244 /* No type info available for some library calls ... */
12245 if (!type)
12246 return GET_MODE_CLASS (mode) == MODE_INT
36868490 12247 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
201e502c 12248
12249 /* We accept small integral (and similar) types. */
12250 if (INTEGRAL_TYPE_P (type)
f588eb9f 12251 || POINTER_TYPE_P (type)
bd3e12e5 12252 || TREE_CODE (type) == NULLPTR_TYPE
201e502c 12253 || TREE_CODE (type) == OFFSET_TYPE
12254 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
12255 return true;
12256
12257 /* We also accept structs of size 1, 2, 4, 8 that are not
f588eb9f 12258 passed in floating-point registers. */
201e502c 12259 if (AGGREGATE_TYPE_P (type)
12260 && exact_log2 (size) >= 0
12261 && !s390_function_arg_float (mode, type))
12262 return true;
12263
12264 return false;
12265}
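
/* Editorial illustration (not compiled; type names are made up):

       struct s4 { int a; };        // size 4, power of two -> GPR
       struct s8 { int a, b; };     // size 8 -> GPR (register pair on 31 bit)
       struct s6 { char c[6]; };    // size 6, not a power of two -> rejected

   Rejected aggregates end up being passed by reference, see
   s390_pass_by_reference below.  */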
12266
56769981 12267/* Return 1 if a function argument of type TYPE and mode MODE
12268 is to be passed by reference. The ABI specifies that only
12269 structures of size 1, 2, 4, or 8 bytes are passed by value,
12270 all other structures (and complex numbers) are passed by
12271 reference. */
12272
b981d932 12273static bool
39cba157 12274s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
3754d046 12275 machine_mode mode, const_tree type,
b981d932 12276 bool named ATTRIBUTE_UNUSED)
4673c1a0 12277{
12278 int size = s390_function_arg_size (mode, type);
76a4c804 12279
12280 if (s390_function_arg_vector (mode, type))
12281 return false;
12282
201e502c 12283 if (size > 8)
12284 return true;
4673c1a0 12285
12286 if (type)
12287 {
201e502c 12288 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
76a4c804 12289 return true;
4673c1a0 12290
201e502c 12291 if (TREE_CODE (type) == COMPLEX_TYPE
12292 || TREE_CODE (type) == VECTOR_TYPE)
76a4c804 12293 return true;
4673c1a0 12294 }
f81e845f 12295
76a4c804 12296 return false;
4673c1a0 12297}
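
/* Editorial illustration (not compiled; type names are made up):

       struct small { long a; };        // size 8  -> passed by value
       struct big   { long a, b, c; };  // size 24 -> passed by reference
       _Complex double z;               // complex -> passed by reference

   A 16 byte vector under the VX ABI, by contrast, is excluded above and
   remains a (vector) register argument.  */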
12298
12299/* Update the data in CUM to advance over an argument of mode MODE and
12300 data type TYPE. (TYPE is null for libcalls where that information
56769981 12301 may not be available.). The boolean NAMED specifies whether the
12302 argument is a named argument (as opposed to an unnamed argument
12303 matching an ellipsis). */
4673c1a0 12304
12bc26aa 12305static void
3754d046 12306s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
76a4c804 12307 const_tree type, bool named)
4673c1a0 12308{
39cba157 12309 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12310
76a4c804 12311 if (s390_function_arg_vector (mode, type))
12312 {
12313 /* We are called for unnamed vector stdarg arguments which are
12314 passed on the stack. In this case this hook does not have to
12315 do anything since stack arguments are tracked by common
12316 code. */
12317 if (!named)
12318 return;
12319 cum->vrs += 1;
12320 }
12321 else if (s390_function_arg_float (mode, type))
4673c1a0 12322 {
59652f3f 12323 cum->fprs += 1;
4673c1a0 12324 }
201e502c 12325 else if (s390_function_arg_integer (mode, type))
4673c1a0 12326 {
12327 int size = s390_function_arg_size (mode, type);
b5fdc416 12328 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
4673c1a0 12329 }
201e502c 12330 else
32eda510 12331 gcc_unreachable ();
4673c1a0 12332}
12333
56769981 12334/* Define where to put the arguments to a function.
12335 Value is zero to push the argument on the stack,
12336 or a hard register in which to store the argument.
12337
12338 MODE is the argument's machine mode.
12339 TYPE is the data type of the argument (as a tree).
12340 This is null for libcalls where that information may
12341 not be available.
12342 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12343 the preceding args and about the function being called.
12344 NAMED is nonzero if this argument is a named parameter
f81e845f 12345 (otherwise it is an extra parameter matching an ellipsis).
56769981 12346
12347 On S/390, we use general purpose registers 2 through 6 to
12348 pass integer, pointer, and certain structure arguments, and
12349 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12350 to pass floating point arguments. All remaining arguments
12351 are pushed to the stack. */
4673c1a0 12352
12bc26aa 12353static rtx
3754d046 12354s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
76a4c804 12355 const_tree type, bool named)
4673c1a0 12356{
39cba157 12357 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12358
6b7cfb9c 12359 if (!named)
12360 s390_check_type_for_vector_abi (type, true, false);
76a4c804 12361
12362 if (s390_function_arg_vector (mode, type))
12363 {
12364 /* Vector arguments being part of the ellipsis are passed on the
12365 stack. */
12366 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12367 return NULL_RTX;
12368
12369 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12370 }
12371 else if (s390_function_arg_float (mode, type))
4673c1a0 12372 {
6902d973 12373 if (cum->fprs + 1 > FP_ARG_NUM_REG)
76a4c804 12374 return NULL_RTX;
4673c1a0 12375 else
1a83b3ff 12376 return gen_rtx_REG (mode, cum->fprs + 16);
4673c1a0 12377 }
201e502c 12378 else if (s390_function_arg_integer (mode, type))
4673c1a0 12379 {
12380 int size = s390_function_arg_size (mode, type);
b5fdc416 12381 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
4673c1a0 12382
6902d973 12383 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
76a4c804 12384 return NULL_RTX;
b5fdc416 12385 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
1a83b3ff 12386 return gen_rtx_REG (mode, cum->gprs + 2);
b5fdc416 12387 else if (n_gprs == 2)
12388 {
12389 rtvec p = rtvec_alloc (2);
12390
12391 RTVEC_ELT (p, 0)
12392 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12393 const0_rtx);
12394 RTVEC_ELT (p, 1)
12395 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12396 GEN_INT (4));
12397
12398 return gen_rtx_PARALLEL (mode, p);
12399 }
4673c1a0 12400 }
201e502c 12401
12402 /* After the real arguments, expand_call calls us once again
12403 with a void_type_node type. Whatever we return here is
12404 passed as operand 2 to the call expanders.
12405
12406 We don't need this feature ... */
12407 else if (type == void_type_node)
12408 return const0_rtx;
12409
32eda510 12410 gcc_unreachable ();
201e502c 12411}
12412
d7ab0e3d 12413/* Implement TARGET_FUNCTION_ARG_PADDING.  Vector arguments are
12414 left-justified when placed on the stack during parameter passing. */
12415
12416static pad_direction
12417s390_function_arg_padding (machine_mode mode, const_tree type)
12418{
12419 if (s390_function_arg_vector (mode, type))
12420 return PAD_UPWARD;
12421
12422 return default_function_arg_padding (mode, type);
12423}
12424
201e502c 12425/* Return true if return values of type TYPE should be returned
12426 in a memory buffer whose address is passed by the caller as
12427 hidden first argument. */
12428
12429static bool
fb80456a 12430s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
201e502c 12431{
12432 /* We accept small integral (and similar) types. */
12433 if (INTEGRAL_TYPE_P (type)
f588eb9f 12434 || POINTER_TYPE_P (type)
201e502c 12435 || TREE_CODE (type) == OFFSET_TYPE
12436 || TREE_CODE (type) == REAL_TYPE)
12437 return int_size_in_bytes (type) > 8;
12438
76a4c804 12439 /* vector types which fit into a VR. */
12440 if (TARGET_VX_ABI
12441 && VECTOR_TYPE_P (type)
12442 && int_size_in_bytes (type) <= 16)
12443 return false;
12444
201e502c 12445 /* Aggregates and similar constructs are always returned
12446 in memory. */
12447 if (AGGREGATE_TYPE_P (type)
12448 || TREE_CODE (type) == COMPLEX_TYPE
76a4c804 12449 || VECTOR_TYPE_P (type))
201e502c 12450 return true;
12451
12452 /* ??? We get called on all sorts of random stuff from
12453 aggregate_value_p. We can't abort, but it's not clear
12454 what's safe to return. Pretend it's a struct I guess. */
12455 return true;
12456}
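
/* Editorial illustration (not compiled):

       long f1 (void);                          // scalar, 8 bytes -> register
       struct pair { long a, b; } f2 (void);    // aggregate -> memory
       __vector int f3 (void);                  // VX ABI: vector register, not memory
*/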
12457
3b2411a8 12458/* Function arguments and return values are promoted to word size. */
12459
3754d046 12460static machine_mode
12461s390_promote_function_mode (const_tree type, machine_mode mode,
3b2411a8 12462 int *punsignedp,
12463 const_tree fntype ATTRIBUTE_UNUSED,
12464 int for_return ATTRIBUTE_UNUSED)
12465{
12466 if (INTEGRAL_MODE_P (mode)
b5fdc416 12467 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
3b2411a8 12468 {
adaf4ef0 12469 if (type != NULL_TREE && POINTER_TYPE_P (type))
3b2411a8 12470 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12471 return Pmode;
12472 }
12473
12474 return mode;
12475}
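
/* Editorial illustration (not compiled): with 64-bit registers a
   function such as

       short f (short x);

   receives and returns its value widened to a full GPR (Pmode), since
   HImode is smaller than UNITS_PER_LONG.  */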
12476
dc3b3062 12477/* Define where to return a (scalar) value of type RET_TYPE.
12478 If RET_TYPE is null, define where to return a (scalar)
201e502c 12479 value of mode MODE from a libcall. */
12480
dc3b3062 12481static rtx
3754d046 12482s390_function_and_libcall_value (machine_mode mode,
dc3b3062 12483 const_tree ret_type,
12484 const_tree fntype_or_decl,
12485 bool outgoing ATTRIBUTE_UNUSED)
201e502c 12486{
76a4c804 12487 /* For vector return types it is important to use the RET_TYPE
12488 argument whenever available since the middle-end might have
12489 changed the mode to a scalar mode. */
12490 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12491 || (!ret_type && VECTOR_MODE_P (mode)));
12492
dc3b3062 12493 /* For normal functions perform the promotion as
12494 promote_function_mode would do. */
12495 if (ret_type)
201e502c 12496 {
dc3b3062 12497 int unsignedp = TYPE_UNSIGNED (ret_type);
12498 mode = promote_function_mode (ret_type, mode, &unsignedp,
12499 fntype_or_decl, 1);
201e502c 12500 }
12501
76a4c804 12502 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12503 || SCALAR_FLOAT_MODE_P (mode)
12504 || (TARGET_VX_ABI && vector_ret_type_p));
12505 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
201e502c 12506
76a4c804 12507 if (TARGET_VX_ABI && vector_ret_type_p)
12508 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12509 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
201e502c 12510 return gen_rtx_REG (mode, 16);
b5fdc416 12511 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12512 || UNITS_PER_LONG == UNITS_PER_WORD)
201e502c 12513 return gen_rtx_REG (mode, 2);
b5fdc416 12514 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12515 {
dc3b3062 12516 /* This case is triggered when returning a 64 bit value with
12517 -m31 -mzarch. Although the value would fit into a single
12518 register it has to be forced into a 32 bit register pair in
12519 order to match the ABI. */
b5fdc416 12520 rtvec p = rtvec_alloc (2);
12521
12522 RTVEC_ELT (p, 0)
12523 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12524 RTVEC_ELT (p, 1)
12525 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12526
12527 return gen_rtx_PARALLEL (mode, p);
12528 }
12529
12530 gcc_unreachable ();
4673c1a0 12531}
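
/* Editorial illustration (not compiled): return register selection under
   the rules above:

       double                      -> %f0 (register 16) with hardware float
       int                         -> %r2
       64-bit value, -m31 -mzarch  -> %r2/%r3 register pair (PARALLEL)
       __vector int, VX ABI        -> %v24 (FIRST_VEC_ARG_REGNO)
*/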
12532
dc3b3062 12533/* Define where to return a scalar return value of type RET_TYPE. */
12534
12535static rtx
12536s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12537 bool outgoing)
12538{
12539 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12540 fn_decl_or_type, outgoing);
12541}
12542
12543/* Define where to return a scalar libcall return value of mode
12544 MODE. */
12545
12546static rtx
3754d046 12547s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
dc3b3062 12548{
12549 return s390_function_and_libcall_value (mode, NULL_TREE,
12550 NULL_TREE, true);
12551}
12552
4673c1a0 12553
56769981 12554/* Create and return the va_list datatype.
12555
12556 On S/390, va_list is an array type equivalent to
12557
12558 typedef struct __va_list_tag
12559 {
12560 long __gpr;
12561 long __fpr;
12562 void *__overflow_arg_area;
12563 void *__reg_save_area;
56769981 12564 } va_list[1];
12565
12566 where __gpr and __fpr hold the number of general purpose
12567 or floating point arguments used up to now, respectively,
f81e845f 12568 __overflow_arg_area points to the stack location of the
56769981 12569 next argument passed on the stack, and __reg_save_area
12570 always points to the start of the register area in the
12571 call frame of the current function. The function prologue
12572 saves all registers used for argument passing into this
12573 area if the function uses variable arguments. */
4673c1a0 12574
2e15d750 12575static tree
12576s390_build_builtin_va_list (void)
4673c1a0 12577{
12578 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12579
5ebb663d 12580 record = lang_hooks.types.make_type (RECORD_TYPE);
4673c1a0 12581
12582 type_decl =
54e46243 12583 build_decl (BUILTINS_LOCATION,
12584 TYPE_DECL, get_identifier ("__va_list_tag"), record);
4673c1a0 12585
54e46243 12586 f_gpr = build_decl (BUILTINS_LOCATION,
12587 FIELD_DECL, get_identifier ("__gpr"),
4673c1a0 12588 long_integer_type_node);
54e46243 12589 f_fpr = build_decl (BUILTINS_LOCATION,
12590 FIELD_DECL, get_identifier ("__fpr"),
4673c1a0 12591 long_integer_type_node);
54e46243 12592 f_ovf = build_decl (BUILTINS_LOCATION,
12593 FIELD_DECL, get_identifier ("__overflow_arg_area"),
4673c1a0 12594 ptr_type_node);
54e46243 12595 f_sav = build_decl (BUILTINS_LOCATION,
12596 FIELD_DECL, get_identifier ("__reg_save_area"),
4673c1a0 12597 ptr_type_node);
12598
6902d973 12599 va_list_gpr_counter_field = f_gpr;
12600 va_list_fpr_counter_field = f_fpr;
12601
4673c1a0 12602 DECL_FIELD_CONTEXT (f_gpr) = record;
12603 DECL_FIELD_CONTEXT (f_fpr) = record;
12604 DECL_FIELD_CONTEXT (f_ovf) = record;
12605 DECL_FIELD_CONTEXT (f_sav) = record;
12606
bc907808 12607 TYPE_STUB_DECL (record) = type_decl;
4673c1a0 12608 TYPE_NAME (record) = type_decl;
12609 TYPE_FIELDS (record) = f_gpr;
1767a056 12610 DECL_CHAIN (f_gpr) = f_fpr;
12611 DECL_CHAIN (f_fpr) = f_ovf;
12612 DECL_CHAIN (f_ovf) = f_sav;
4673c1a0 12613
12614 layout_type (record);
12615
12616 /* The correct type is an array type of one element. */
12617 return build_array_type (record, build_index_type (size_zero_node));
12618}
12619
56769981 12620/* Implement va_start by filling the va_list structure VALIST.
7ccc713a 12621 STDARG_P is always true, and ignored.
12622 NEXTARG points to the first anonymous stack argument.
56769981 12623
8ef587dc 12624 The following global variables are used to initialize
56769981 12625 the va_list structure:
12626
abe32cce 12627 crtl->args.info:
56769981 12628 holds number of gprs and fprs used for named arguments.
abe32cce 12629 crtl->args.arg_offset_rtx:
56769981 12630 holds the offset of the first anonymous stack argument
12631 (relative to the virtual arg pointer). */
4673c1a0 12632
8a58ed0a 12633static void
b40da9a7 12634s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
4673c1a0 12635{
12636 HOST_WIDE_INT n_gpr, n_fpr;
12637 int off;
12638 tree f_gpr, f_fpr, f_ovf, f_sav;
12639 tree gpr, fpr, ovf, sav, t;
12640
12641 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
1767a056 12642 f_fpr = DECL_CHAIN (f_gpr);
12643 f_ovf = DECL_CHAIN (f_fpr);
12644 f_sav = DECL_CHAIN (f_ovf);
4673c1a0 12645
170efcd4 12646 valist = build_simple_mem_ref (valist);
ed03eadb 12647 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12648 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12649 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12650 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4673c1a0 12651
12652 /* Count number of gp and fp argument registers used. */
12653
abe32cce 12654 n_gpr = crtl->args.info.gprs;
12655 n_fpr = crtl->args.info.fprs;
4673c1a0 12656
6902d973 12657 if (cfun->va_list_gpr_size)
12658 {
75a70cf9 12659 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12660 build_int_cst (NULL_TREE, n_gpr));
6902d973 12661 TREE_SIDE_EFFECTS (t) = 1;
12662 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12663 }
4673c1a0 12664
6902d973 12665 if (cfun->va_list_fpr_size)
12666 {
75a70cf9 12667 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
ed03eadb 12668 build_int_cst (NULL_TREE, n_fpr));
6902d973 12669 TREE_SIDE_EFFECTS (t) = 1;
12670 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12671 }
4673c1a0 12672
c6d481f7 12673 if (flag_split_stack
12674 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12675 == NULL)
12676 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12677 {
12678 rtx reg;
12679 rtx_insn *seq;
12680
12681 reg = gen_reg_rtx (Pmode);
12682 cfun->machine->split_stack_varargs_pointer = reg;
12683
12684 start_sequence ();
12685 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12686 seq = get_insns ();
12687 end_sequence ();
12688
12689 push_topmost_sequence ();
12690 emit_insn_after (seq, entry_of_function ());
12691 pop_topmost_sequence ();
12692 }
12693
76a4c804 12694 /* Find the overflow area.
12695 FIXME: This currently is too pessimistic when the vector ABI is
12696 enabled. In that case we *always* set up the overflow area
12697 pointer. */
6902d973 12698 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
76a4c804 12699 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12700 || TARGET_VX_ABI)
6902d973 12701 {
c6d481f7 12702 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12703 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12704 else
12705 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
4673c1a0 12706
abe32cce 12707 off = INTVAL (crtl->args.arg_offset_rtx);
6902d973 12708 off = off < 0 ? 0 : off;
12709 if (TARGET_DEBUG_ARG)
12710 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12711 (int)n_gpr, (int)n_fpr, off);
4673c1a0 12712
2cc66f2a 12713 t = fold_build_pointer_plus_hwi (t, off);
4673c1a0 12714
75a70cf9 12715 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
6902d973 12716 TREE_SIDE_EFFECTS (t) = 1;
12717 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12718 }
4673c1a0 12719
12720 /* Find the register save area. */
6902d973 12721 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12722 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12723 {
12724 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
2cc66f2a 12725 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
ffead1ca 12726
75a70cf9 12727 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
6902d973 12728 TREE_SIDE_EFFECTS (t) = 1;
12729 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12730 }
4673c1a0 12731}
12732
f81e845f 12733/* Implement va_arg by updating the va_list structure
56769981 12734 VALIST as required to retrieve an argument of type
f81e845f 12735 TYPE, and returning that argument.
12736
56769981 12737 Generates code equivalent to:
f81e845f 12738
4673c1a0 12739 if (integral value) {
12740 if (size <= 4 && args.gpr < 5 ||
f81e845f 12741 size > 4 && args.gpr < 4 )
4673c1a0 12742 ret = args.reg_save_area[args.gpr+8]
12743 else
12744 ret = *args.overflow_arg_area++;
76a4c804 12745 } else if (vector value) {
12746 ret = *args.overflow_arg_area;
12747 args.overflow_arg_area += size / 8;
4673c1a0 12748 } else if (float value) {
12749       if (args.fpr < 2)
12750 ret = args.reg_save_area[args.fpr+64]
12751 else
12752 ret = *args.overflow_arg_area++;
12753 } else if (aggregate value) {
12754 if (args.gpr < 5)
12755 ret = *args.reg_save_area[args.gpr]
12756 else
12757 ret = **args.overflow_arg_area++;
12758 } */
12759
875862bf 12760static tree
ffead1ca 12761s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
75a70cf9 12762 gimple_seq *post_p ATTRIBUTE_UNUSED)
4673c1a0 12763{
12764 tree f_gpr, f_fpr, f_ovf, f_sav;
12765 tree gpr, fpr, ovf, sav, reg, t, u;
12766 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
883b2519 12767 tree lab_false, lab_over = NULL_TREE;
76a4c804 12768 tree addr = create_tmp_var (ptr_type_node, "addr");
12769 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12770 a stack slot. */
4673c1a0 12771
12772 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
1767a056 12773 f_fpr = DECL_CHAIN (f_gpr);
12774 f_ovf = DECL_CHAIN (f_fpr);
12775 f_sav = DECL_CHAIN (f_ovf);
4673c1a0 12776
ed03eadb 12777 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12778 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
ed03eadb 12779 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4673c1a0 12780
75a70cf9 12781 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12782 both appear on a lhs. */
12783 valist = unshare_expr (valist);
12784 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12785
4673c1a0 12786 size = int_size_in_bytes (type);
12787
6b7cfb9c 12788 s390_check_type_for_vector_abi (type, true, false);
12789
b981d932 12790 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4673c1a0 12791 {
12792 if (TARGET_DEBUG_ARG)
12793 {
12794 fprintf (stderr, "va_arg: aggregate type");
12795 debug_tree (type);
12796 }
12797
12798 /* Aggregates are passed by reference. */
12799 indirect_p = 1;
12800 reg = gpr;
12801 n_reg = 1;
99e8a714 12802
646a946e 12803      /* kernel stack layout on 31 bit: It is assumed here that no padding
99e8a714 12804	 will be added by s390_frame_info because for va_args an even number
12805	 of GPRs always has to be saved (r2-r15 = 14 regs).  */
b5fdc416 12806 sav_ofs = 2 * UNITS_PER_LONG;
12807 sav_scale = UNITS_PER_LONG;
12808 size = UNITS_PER_LONG;
6902d973 12809 max_reg = GP_ARG_NUM_REG - n_reg;
76a4c804 12810 left_align_p = false;
12811 }
12812 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12813 {
12814 if (TARGET_DEBUG_ARG)
12815 {
12816 fprintf (stderr, "va_arg: vector type");
12817 debug_tree (type);
12818 }
12819
12820 indirect_p = 0;
12821 reg = NULL_TREE;
12822 n_reg = 0;
12823 sav_ofs = 0;
12824 sav_scale = 8;
12825 max_reg = 0;
12826 left_align_p = true;
4673c1a0 12827 }
59652f3f 12828 else if (s390_function_arg_float (TYPE_MODE (type), type))
4673c1a0 12829 {
12830 if (TARGET_DEBUG_ARG)
12831 {
12832 fprintf (stderr, "va_arg: float type");
12833 debug_tree (type);
12834 }
12835
12836 /* FP args go in FP registers, if present. */
12837 indirect_p = 0;
12838 reg = fpr;
12839 n_reg = 1;
b5fdc416 12840 sav_ofs = 16 * UNITS_PER_LONG;
4673c1a0 12841 sav_scale = 8;
6902d973 12842 max_reg = FP_ARG_NUM_REG - n_reg;
76a4c804 12843 left_align_p = false;
4673c1a0 12844 }
12845 else
12846 {
12847 if (TARGET_DEBUG_ARG)
12848 {
12849 fprintf (stderr, "va_arg: other type");
12850 debug_tree (type);
12851 }
12852
12853 /* Otherwise into GP registers. */
12854 indirect_p = 0;
12855 reg = gpr;
b5fdc416 12856 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
99e8a714 12857
646a946e 12858      /* kernel stack layout on 31 bit: It is assumed here that no padding
12859	 will be added by s390_frame_info because for va_args an even number
12860	 of GPRs always has to be saved (r2-r15 = 14 regs).  */
b5fdc416 12861 sav_ofs = 2 * UNITS_PER_LONG;
f81e845f 12862
b5fdc416 12863 if (size < UNITS_PER_LONG)
12864 sav_ofs += UNITS_PER_LONG - size;
4673c1a0 12865
b5fdc416 12866 sav_scale = UNITS_PER_LONG;
6902d973 12867 max_reg = GP_ARG_NUM_REG - n_reg;
76a4c804 12868 left_align_p = false;
4673c1a0 12869 }
12870
12871 /* Pull the value out of the saved registers ... */
12872
76a4c804 12873 if (reg != NULL_TREE)
12874 {
12875 /*
12876 if (reg > ((typeof (reg))max_reg))
12877 goto lab_false;
4673c1a0 12878
76a4c804 12879	 addr = sav + sav_ofs + reg * sav_scale;
4673c1a0 12880
76a4c804 12881 goto lab_over;
4673c1a0 12882
76a4c804 12883 lab_false:
12884 */
12885
12886 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12887 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12888
12889 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12890 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12891 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12892 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12893 gimplify_and_add (t, pre_p);
4673c1a0 12894
76a4c804 12895 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12896 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12897 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12898 t = fold_build_pointer_plus (t, u);
4673c1a0 12899
76a4c804 12900 gimplify_assign (addr, t, pre_p);
4673c1a0 12901
76a4c804 12902 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12903
12904 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12905 }
4673c1a0 12906
12907 /* ... Otherwise out of the overflow area. */
12908
875862bf 12909 t = ovf;
76a4c804 12910 if (size < UNITS_PER_LONG && !left_align_p)
2cc66f2a 12911 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
875862bf 12912
12913 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12914
75a70cf9 12915 gimplify_assign (addr, t, pre_p);
875862bf 12916
76a4c804 12917 if (size < UNITS_PER_LONG && left_align_p)
12918 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12919 else
12920 t = fold_build_pointer_plus_hwi (t, size);
12921
75a70cf9 12922 gimplify_assign (ovf, t, pre_p);
875862bf 12923
76a4c804 12924 if (reg != NULL_TREE)
12925 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
875862bf 12926
12927
12928 /* Increment register save count. */
12929
76a4c804 12930 if (n_reg > 0)
12931 {
12932 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12933 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12934 gimplify_and_add (u, pre_p);
12935 }
875862bf 12936
12937 if (indirect_p)
12938 {
8115f0af 12939 t = build_pointer_type_for_mode (build_pointer_type (type),
12940 ptr_mode, true);
875862bf 12941 addr = fold_convert (t, addr);
12942 addr = build_va_arg_indirect_ref (addr);
12943 }
12944 else
12945 {
8115f0af 12946 t = build_pointer_type_for_mode (type, ptr_mode, true);
875862bf 12947 addr = fold_convert (t, addr);
12948 }
12949
12950 return build_va_arg_indirect_ref (addr);
12951}
12952
5ada7a14 12953/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12954 expanders.
12955 DEST - Register location where CC will be stored.
12956   TDB - Pointer to a 256 byte area where to store the transaction
12957	  diagnostic block.  NULL if TDB is not needed.
12958   RETRY - Retry count value.  If non-NULL, a retry loop for CC2
12959	  is emitted.
12960   CLOBBER_FPRS_P - If true, clobbers for all FPRs are emitted as part
12961	  of the tbegin instruction pattern.  */
12962
12963void
12964s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12965{
91dfd73e 12966 rtx retry_plus_two = gen_reg_rtx (SImode);
5ada7a14 12967 rtx retry_reg = gen_reg_rtx (SImode);
79f6a8ed 12968 rtx_code_label *retry_label = NULL;
5ada7a14 12969
12970 if (retry != NULL_RTX)
12971 {
12972 emit_move_insn (retry_reg, retry);
91dfd73e 12973 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12974 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
5ada7a14 12975 retry_label = gen_label_rtx ();
12976 emit_label (retry_label);
12977 }
12978
12979 if (clobber_fprs_p)
044a78dc 12980 {
12981 if (TARGET_VX)
12982 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12983 tdb));
12984 else
12985 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12986 tdb));
12987 }
5ada7a14 12988 else
91dfd73e 12989 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12990 tdb));
5ada7a14 12991
91dfd73e 12992 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12993 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12994 CC_REGNUM)),
12995 UNSPEC_CC_TO_INT));
5ada7a14 12996 if (retry != NULL_RTX)
12997 {
d089210f 12998 const int CC0 = 1 << 3;
12999 const int CC1 = 1 << 2;
13000 const int CC3 = 1 << 0;
13001 rtx jump;
5ada7a14 13002 rtx count = gen_reg_rtx (SImode);
93e0956b 13003 rtx_code_label *leave_label = gen_label_rtx ();
d089210f 13004
13005 /* Exit for success and permanent failures. */
5ada7a14 13006 jump = s390_emit_jump (leave_label,
13007 gen_rtx_EQ (VOIDmode,
13008 gen_rtx_REG (CCRAWmode, CC_REGNUM),
d089210f 13009 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
13010 LABEL_NUSES (leave_label) = 1;
5ada7a14 13011
13012 /* CC2 - transient failure. Perform retry with ppa. */
91dfd73e 13013 emit_move_insn (count, retry_plus_two);
5ada7a14 13014 emit_insn (gen_subsi3 (count, count, retry_reg));
13015 emit_insn (gen_tx_assist (count));
13016 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
13017 retry_reg,
13018 retry_reg));
13019 JUMP_LABEL (jump) = retry_label;
13020 LABEL_NUSES (retry_label) = 1;
d089210f 13021 emit_label (leave_label);
5ada7a14 13022 }
5ada7a14 13023}
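
/* Editorial sketch (not part of the compiler): at the source level this
   expander is reached through the HTM builtins, roughly as in

       #include <htmintrin.h>

       if (__builtin_tbegin (0) == _HTM_TBEGIN_STARTED)
	 {
	   // transactional code
	   __builtin_tend ();
	 }

   assuming -mhtm; the retry variant corresponds to
   __builtin_tbegin_retry.  */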
13024
5ada7a14 13025
751c914e 13026/* Return the decl for the target specific builtin with the function
13027 code FCODE. */
13028
13029static tree
13030s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
13031{
13032 if (fcode >= S390_BUILTIN_MAX)
13033 return error_mark_node;
13034
13035 return s390_builtin_decls[fcode];
13036}
13037
d44f2f7c 13038/* We call mcount before the function prologue. So a profiled leaf
13039 function should stay a leaf function. */
13040
13041static bool
13042s390_keep_leaf_when_profiled ()
13043{
13044 return true;
13045}
5ada7a14 13046
875862bf 13047/* Output assembly code for the trampoline template to
13048 stdio stream FILE.
13049
13050 On S/390, we use gpr 1 internally in the trampoline code;
13051 gpr 0 is used to hold the static chain. */
13052
4d946732 13053static void
13054s390_asm_trampoline_template (FILE *file)
875862bf 13055{
13056 rtx op[2];
13057 op[0] = gen_rtx_REG (Pmode, 0);
13058 op[1] = gen_rtx_REG (Pmode, 1);
13059
13060 if (TARGET_64BIT)
13061 {
29335855 13062 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13063 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
13064 output_asm_insn ("br\t%1", op); /* 2 byte */
875862bf 13065 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
13066 }
13067 else
13068 {
29335855 13069 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13070 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
13071 output_asm_insn ("br\t%1", op); /* 2 byte */
875862bf 13072 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
13073 }
13074}
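
/* Editorial sketch of the 64-bit trampoline that results (byte offsets;
   illustration only):

       0:  basr %r1,0            ; %r1 = trampoline address + 2
       2:  lmg  %r0,%r1,14(%r1)  ; %r0 = static chain, %r1 = target
       8:  br   %r1              ; jump to target
      16:  <static chain>        ; stored by s390_trampoline_init
      24:  <function address>    ; stored by s390_trampoline_init

   The displacement 14(%r1) reaches offset 16 because %r1 points to
   offset 2 after the basr.  */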
13075
13076/* Emit RTL insns to initialize the variable parts of a trampoline.
13077 FNADDR is an RTX for the address of the function's pure code.
13078 CXT is an RTX for the static chain value for the function. */
13079
4d946732 13080static void
13081s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
875862bf 13082{
4d946732 13083 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
13084 rtx mem;
8a2a84e3 13085
4d946732 13086 emit_block_move (m_tramp, assemble_trampoline_template (),
29335855 13087 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
4d946732 13088
29335855 13089 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
4d946732 13090 emit_move_insn (mem, cxt);
29335855 13091 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
4d946732 13092 emit_move_insn (mem, fnaddr);
875862bf 13093}
13094
875862bf 13095/* Output assembler code to FILE to increment profiler label # LABELNO
13096 for profiling a function entry. */
13097
13098void
13099s390_function_profiler (FILE *file, int labelno)
13100{
13101 rtx op[7];
13102
13103 char label[128];
13104 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
13105
13106 fprintf (file, "# function profiler \n");
13107
13108 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
13109 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29c05e22 13110 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
875862bf 13111
13112 op[2] = gen_rtx_REG (Pmode, 1);
13113 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
13114 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
13115
13116 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
13117 if (flag_pic)
13118 {
13119 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
13120 op[4] = gen_rtx_CONST (Pmode, op[4]);
13121 }
13122
13123 if (TARGET_64BIT)
13124 {
13125 output_asm_insn ("stg\t%0,%1", op);
13126 output_asm_insn ("larl\t%2,%3", op);
13127 output_asm_insn ("brasl\t%0,%4", op);
13128 output_asm_insn ("lg\t%0,%1", op);
13129 }
4bc40d24 13130 else if (TARGET_CPU_ZARCH)
13131 {
13132 output_asm_insn ("st\t%0,%1", op);
13133 output_asm_insn ("larl\t%2,%3", op);
13134 output_asm_insn ("brasl\t%0,%4", op);
13135 output_asm_insn ("l\t%0,%1", op);
13136 }
875862bf 13137 else if (!flag_pic)
13138 {
13139 op[6] = gen_label_rtx ();
13140
13141 output_asm_insn ("st\t%0,%1", op);
13142 output_asm_insn ("bras\t%2,%l6", op);
13143 output_asm_insn (".long\t%4", op);
13144 output_asm_insn (".long\t%3", op);
13145 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13146 output_asm_insn ("l\t%0,0(%2)", op);
13147 output_asm_insn ("l\t%2,4(%2)", op);
13148 output_asm_insn ("basr\t%0,%0", op);
13149 output_asm_insn ("l\t%0,%1", op);
13150 }
13151 else
13152 {
13153 op[5] = gen_label_rtx ();
13154 op[6] = gen_label_rtx ();
13155
13156 output_asm_insn ("st\t%0,%1", op);
13157 output_asm_insn ("bras\t%2,%l6", op);
13158 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
13159 output_asm_insn (".long\t%4-%l5", op);
13160 output_asm_insn (".long\t%3-%l5", op);
13161 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13162 output_asm_insn ("lr\t%0,%2", op);
13163 output_asm_insn ("a\t%0,0(%2)", op);
13164 output_asm_insn ("a\t%2,4(%2)", op);
13165 output_asm_insn ("basr\t%0,%0", op);
13166 output_asm_insn ("l\t%0,%1", op);
13167 }
13168}
13169
13170/* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
13171 into its SYMBOL_REF_FLAGS. */
13172
13173static void
13174s390_encode_section_info (tree decl, rtx rtl, int first)
13175{
13176 default_encode_section_info (decl, rtl, first);
13177
e68d6a13 13178 if (TREE_CODE (decl) == VAR_DECL)
13179 {
78affa36 13180 /* Store the alignment to be able to check if we can use
13181 a larl/load-relative instruction. We only handle the cases
ea283725 13182 that can go wrong (i.e. no FUNC_DECLs). */
09d899d1 13183 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
78affa36 13184 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
ea283725 13185 else if (DECL_ALIGN (decl) % 32)
13186 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13187 else if (DECL_ALIGN (decl) % 64)
13188 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
e68d6a13 13189 }
13190
13191 /* Literal pool references don't have a decl so they are handled
13192 differently here. We rely on the information in the MEM_ALIGN
78affa36 13193 entry to decide upon the alignment. */
e68d6a13 13194 if (MEM_P (rtl)
13195 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
ea283725 13196 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
78affa36 13197 {
09d899d1 13198 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
78affa36 13199 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
ea283725 13200 else if (MEM_ALIGN (rtl) % 32)
13201 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13202 else if (MEM_ALIGN (rtl) % 64)
13203 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
78affa36 13204 }
875862bf 13205}
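
/* Editorial illustration (not compiled): DECL_ALIGN is measured in bits,
   so for example

       char c __attribute__ ((aligned (1)));   // 8 bits  -> NOTALIGN2 set
       int i;                                   // 32 bits -> NOTALIGN8 set

   and larl/load-relative accesses, which require at least 2 byte
   alignment, are avoided for symbols marked NOTALIGN2.  */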
13206
13207/* Output thunk to FILE that implements a C++ virtual function call (with
13208 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
13209 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
13210 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
13211 relative to the resulting this pointer. */
13212
13213static void
13214s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13215 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13216 tree function)
13217{
13218 rtx op[10];
13219 int nonlocal = 0;
13220
21a38800 13221 /* Make sure unwind info is emitted for the thunk if needed. */
13222 final_start_function (emit_barrier (), file, 1);
13223
875862bf 13224 /* Operand 0 is the target function. */
13225 op[0] = XEXP (DECL_RTL (function), 0);
13226 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
13227 {
13228 nonlocal = 1;
13229 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
13230 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
13231 op[0] = gen_rtx_CONST (Pmode, op[0]);
13232 }
13233
13234 /* Operand 1 is the 'this' pointer. */
13235 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13236 op[1] = gen_rtx_REG (Pmode, 3);
13237 else
13238 op[1] = gen_rtx_REG (Pmode, 2);
13239
13240 /* Operand 2 is the delta. */
13241 op[2] = GEN_INT (delta);
13242
13243 /* Operand 3 is the vcall_offset. */
13244 op[3] = GEN_INT (vcall_offset);
13245
13246 /* Operand 4 is the temporary register. */
13247 op[4] = gen_rtx_REG (Pmode, 1);
13248
13249 /* Operands 5 to 8 can be used as labels. */
13250 op[5] = NULL_RTX;
13251 op[6] = NULL_RTX;
13252 op[7] = NULL_RTX;
13253 op[8] = NULL_RTX;
13254
13255 /* Operand 9 can be used for temporary register. */
13256 op[9] = NULL_RTX;
13257
13258 /* Generate code. */
13259 if (TARGET_64BIT)
13260 {
13261 /* Setup literal pool pointer if required. */
13262 if ((!DISP_IN_RANGE (delta)
163277cf 13263 && !CONST_OK_FOR_K (delta)
13264 && !CONST_OK_FOR_Os (delta))
875862bf 13265 || (!DISP_IN_RANGE (vcall_offset)
163277cf 13266 && !CONST_OK_FOR_K (vcall_offset)
13267 && !CONST_OK_FOR_Os (vcall_offset)))
875862bf 13268 {
13269 op[5] = gen_label_rtx ();
13270 output_asm_insn ("larl\t%4,%5", op);
13271 }
13272
13273 /* Add DELTA to this pointer. */
13274 if (delta)
13275 {
cb888f33 13276 if (CONST_OK_FOR_J (delta))
875862bf 13277 output_asm_insn ("la\t%1,%2(%1)", op);
13278 else if (DISP_IN_RANGE (delta))
13279 output_asm_insn ("lay\t%1,%2(%1)", op);
cb888f33 13280 else if (CONST_OK_FOR_K (delta))
875862bf 13281 output_asm_insn ("aghi\t%1,%2", op);
163277cf 13282 else if (CONST_OK_FOR_Os (delta))
13283 output_asm_insn ("agfi\t%1,%2", op);
875862bf 13284 else
13285 {
13286 op[6] = gen_label_rtx ();
13287 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
13288 }
13289 }
13290
13291 /* Perform vcall adjustment. */
13292 if (vcall_offset)
13293 {
13294 if (DISP_IN_RANGE (vcall_offset))
13295 {
13296 output_asm_insn ("lg\t%4,0(%1)", op);
13297 output_asm_insn ("ag\t%1,%3(%4)", op);
13298 }
cb888f33 13299 else if (CONST_OK_FOR_K (vcall_offset))
875862bf 13300 {
13301 output_asm_insn ("lghi\t%4,%3", op);
13302 output_asm_insn ("ag\t%4,0(%1)", op);
13303 output_asm_insn ("ag\t%1,0(%4)", op);
13304 }
163277cf 13305 else if (CONST_OK_FOR_Os (vcall_offset))
13306 {
13307 output_asm_insn ("lgfi\t%4,%3", op);
13308 output_asm_insn ("ag\t%4,0(%1)", op);
13309 output_asm_insn ("ag\t%1,0(%4)", op);
13310 }
875862bf 13311 else
13312 {
13313 op[7] = gen_label_rtx ();
13314 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
13315 output_asm_insn ("ag\t%4,0(%1)", op);
13316 output_asm_insn ("ag\t%1,0(%4)", op);
13317 }
13318 }
13319
13320 /* Jump to target. */
13321 output_asm_insn ("jg\t%0", op);
13322
13323 /* Output literal pool if required. */
13324 if (op[5])
13325 {
13326 output_asm_insn (".align\t4", op);
13327 targetm.asm_out.internal_label (file, "L",
13328 CODE_LABEL_NUMBER (op[5]));
13329 }
13330 if (op[6])
13331 {
13332 targetm.asm_out.internal_label (file, "L",
13333 CODE_LABEL_NUMBER (op[6]));
13334 output_asm_insn (".long\t%2", op);
13335 }
13336 if (op[7])
13337 {
13338 targetm.asm_out.internal_label (file, "L",
13339 CODE_LABEL_NUMBER (op[7]));
13340 output_asm_insn (".long\t%3", op);
13341 }
13342 }
13343 else
13344 {
13345 /* Setup base pointer if required. */
13346 if (!vcall_offset
13347 || (!DISP_IN_RANGE (delta)
163277cf 13348 && !CONST_OK_FOR_K (delta)
13349 && !CONST_OK_FOR_Os (delta))
875862bf 13350 || (!DISP_IN_RANGE (delta)
163277cf 13351 && !CONST_OK_FOR_K (vcall_offset)
13352 && !CONST_OK_FOR_Os (vcall_offset)))
875862bf 13353 {
13354 op[5] = gen_label_rtx ();
13355 output_asm_insn ("basr\t%4,0", op);
13356 targetm.asm_out.internal_label (file, "L",
13357 CODE_LABEL_NUMBER (op[5]));
13358 }
13359
13360 /* Add DELTA to this pointer. */
13361 if (delta)
13362 {
cb888f33 13363 if (CONST_OK_FOR_J (delta))
875862bf 13364 output_asm_insn ("la\t%1,%2(%1)", op);
13365 else if (DISP_IN_RANGE (delta))
13366 output_asm_insn ("lay\t%1,%2(%1)", op);
cb888f33 13367 else if (CONST_OK_FOR_K (delta))
875862bf 13368 output_asm_insn ("ahi\t%1,%2", op);
163277cf 13369 else if (CONST_OK_FOR_Os (delta))
13370 output_asm_insn ("afi\t%1,%2", op);
875862bf 13371 else
13372 {
13373 op[6] = gen_label_rtx ();
13374 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13375 }
13376 }
13377
13378 /* Perform vcall adjustment. */
13379 if (vcall_offset)
13380 {
cb888f33 13381 if (CONST_OK_FOR_J (vcall_offset))
875862bf 13382 {
0451e449 13383 output_asm_insn ("l\t%4,0(%1)", op);
875862bf 13384 output_asm_insn ("a\t%1,%3(%4)", op);
13385 }
13386 else if (DISP_IN_RANGE (vcall_offset))
13387 {
0451e449 13388 output_asm_insn ("l\t%4,0(%1)", op);
875862bf 13389 output_asm_insn ("ay\t%1,%3(%4)", op);
13390 }
cb888f33 13391 else if (CONST_OK_FOR_K (vcall_offset))
875862bf 13392 {
13393 output_asm_insn ("lhi\t%4,%3", op);
13394 output_asm_insn ("a\t%4,0(%1)", op);
13395 output_asm_insn ("a\t%1,0(%4)", op);
13396 }
163277cf 13397 else if (CONST_OK_FOR_Os (vcall_offset))
13398 {
13399 output_asm_insn ("iilf\t%4,%3", op);
13400 output_asm_insn ("a\t%4,0(%1)", op);
13401 output_asm_insn ("a\t%1,0(%4)", op);
13402 }
875862bf 13403 else
13404 {
13405 op[7] = gen_label_rtx ();
13406 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13407 output_asm_insn ("a\t%4,0(%1)", op);
13408 output_asm_insn ("a\t%1,0(%4)", op);
13409 }
4673c1a0 13410
875862bf 13411 /* We had to clobber the base pointer register.
13412 Re-setup the base pointer (with a different base). */
13413 op[5] = gen_label_rtx ();
13414 output_asm_insn ("basr\t%4,0", op);
13415 targetm.asm_out.internal_label (file, "L",
13416 CODE_LABEL_NUMBER (op[5]));
13417 }
4673c1a0 13418
875862bf 13419 /* Jump to target. */
13420 op[8] = gen_label_rtx ();
4673c1a0 13421
875862bf 13422 if (!flag_pic)
13423 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13424 else if (!nonlocal)
13425 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13426 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13427 else if (flag_pic == 1)
13428 {
13429 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13430 output_asm_insn ("l\t%4,%0(%4)", op);
13431 }
13432 else if (flag_pic == 2)
13433 {
13434 op[9] = gen_rtx_REG (Pmode, 0);
13435 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13436 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13437 output_asm_insn ("ar\t%4,%9", op);
13438 output_asm_insn ("l\t%4,0(%4)", op);
13439 }
4673c1a0 13440
875862bf 13441 output_asm_insn ("br\t%4", op);
4673c1a0 13442
875862bf 13443 /* Output literal pool. */
13444 output_asm_insn (".align\t4", op);
4673c1a0 13445
875862bf 13446 if (nonlocal && flag_pic == 2)
13447 output_asm_insn (".long\t%0", op);
13448 if (nonlocal)
13449 {
13450 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13451 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13452 }
d93e0d9f 13453
875862bf 13454 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13455 if (!flag_pic)
13456 output_asm_insn (".long\t%0", op);
13457 else
13458 output_asm_insn (".long\t%0-%5", op);
4673c1a0 13459
875862bf 13460 if (op[6])
13461 {
13462 targetm.asm_out.internal_label (file, "L",
13463 CODE_LABEL_NUMBER (op[6]));
13464 output_asm_insn (".long\t%2", op);
13465 }
13466 if (op[7])
13467 {
13468 targetm.asm_out.internal_label (file, "L",
13469 CODE_LABEL_NUMBER (op[7]));
13470 output_asm_insn (".long\t%3", op);
13471 }
4673c1a0 13472 }
21a38800 13473 final_end_function ();
4673c1a0 13474}
13475
875862bf 13476static bool
f77c4496 13477s390_valid_pointer_mode (scalar_int_mode mode)
875862bf 13478{
13479 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13480}
56769981 13481
347301d6 13482/* Checks whether the given CALL_EXPR would use a caller
875862bf 13483 saved register. This is used to decide whether sibling call
13484 optimization could be performed on the respective function
13485 call. */
be00aaa8 13486
875862bf 13487static bool
347301d6 13488s390_call_saved_register_used (tree call_expr)
be00aaa8 13489{
39cba157 13490 CUMULATIVE_ARGS cum_v;
13491 cumulative_args_t cum;
875862bf 13492 tree parameter;
3754d046 13493 machine_mode mode;
875862bf 13494 tree type;
13495 rtx parm_rtx;
347301d6 13496 int reg, i;
be00aaa8 13497
39cba157 13498 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13499 cum = pack_cumulative_args (&cum_v);
be00aaa8 13500
347301d6 13501 for (i = 0; i < call_expr_nargs (call_expr); i++)
875862bf 13502 {
347301d6 13503 parameter = CALL_EXPR_ARG (call_expr, i);
32eda510 13504 gcc_assert (parameter);
be00aaa8 13505
875862bf 13506 /* For an undeclared variable passed as parameter we will get
13507 an ERROR_MARK node here. */
13508 if (TREE_CODE (parameter) == ERROR_MARK)
13509 return true;
be00aaa8 13510
32eda510 13511 type = TREE_TYPE (parameter);
13512 gcc_assert (type);
be00aaa8 13513
32eda510 13514 mode = TYPE_MODE (type);
13515 gcc_assert (mode);
be00aaa8 13516
76a4c804 13517 /* We assume that in the target function all parameters are
13518 named. This only has an impact on vector argument register
13519 usage none of which is call-saved. */
39cba157 13520 if (pass_by_reference (&cum_v, mode, type, true))
875862bf 13521 {
13522 mode = Pmode;
13523 type = build_pointer_type (type);
13524 }
be00aaa8 13525
76a4c804 13526 parm_rtx = s390_function_arg (cum, mode, type, true);
be00aaa8 13527
76a4c804 13528 s390_function_arg_advance (cum, mode, type, true);
be00aaa8 13529
b5fdc416 13530 if (!parm_rtx)
13531 continue;
13532
13533 if (REG_P (parm_rtx))
13534 {
10fa8f76 13535 for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
b5fdc416 13536 if (!call_used_regs[reg + REGNO (parm_rtx)])
13537 return true;
13538 }
13539
13540 if (GET_CODE (parm_rtx) == PARALLEL)
875862bf 13541 {
b5fdc416 13542 int i;
cc6a115b 13543
b5fdc416 13544 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13545 {
13546 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
b5fdc416 13547
13548 gcc_assert (REG_P (r));
13549
10fa8f76 13550 for (reg = 0; reg < REG_NREGS (r); reg++)
b5fdc416 13551 if (!call_used_regs[reg + REGNO (r)])
13552 return true;
13553 }
875862bf 13554 }
b5fdc416 13555
875862bf 13556 }
13557 return false;
13558}
be00aaa8 13559
875862bf 13560/* Return true if the given call expression can be
13561 turned into a sibling call.
13562 DECL holds the declaration of the function to be called whereas
13563 EXP is the call expression itself. */
be00aaa8 13564
875862bf 13565static bool
13566s390_function_ok_for_sibcall (tree decl, tree exp)
13567{
13568 /* The TPF epilogue uses register 1. */
13569 if (TARGET_TPF_PROFILING)
13570 return false;
be00aaa8 13571
875862bf 13572 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13573 which would have to be restored before the sibcall. */
a47b0dc3 13574 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
875862bf 13575 return false;
be00aaa8 13576
875862bf 13577  /* Register 6 on s390 is available as an argument register but is
13578     call-saved.  This makes functions needing this register for arguments
13579     not suitable for sibcalls.  */
347301d6 13580 return !s390_call_saved_register_used (exp);
875862bf 13581}
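
/* Editorial illustration (not compiled; function names are made up): a
   callee taking five integer arguments receives the fifth one in %r6;
   %r6 is call-saved, so the check above rejects the sibcall:

       extern void callee (int, int, int, int, int);  // fifth argument in %r6

       void caller (void) { callee (1, 2, 3, 4, 5); } // not tail-called
*/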
be00aaa8 13582
875862bf 13583/* Return the fixed registers used for condition codes. */
be00aaa8 13584
875862bf 13585static bool
13586s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13587{
13588 *p1 = CC_REGNUM;
13589 *p2 = INVALID_REGNUM;
ffead1ca 13590
875862bf 13591 return true;
13592}
be00aaa8 13593
875862bf 13594/* This function is used by the call expanders of the machine description.
13595 It emits the call insn itself together with the necessary operations
13596 to adjust the target address and returns the emitted insn.
13597 ADDR_LOCATION is the target address rtx
13598 TLS_CALL the location of the thread-local symbol
13599 RESULT_REG the register where the result of the call should be stored
13600 RETADDR_REG the register where the return address should be stored
13601 If this parameter is NULL_RTX the call is considered
13602 to be a sibling call. */
be00aaa8 13603
93e0956b 13604rtx_insn *
875862bf 13605s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13606 rtx retaddr_reg)
4673c1a0 13607{
875862bf 13608 bool plt_call = false;
93e0956b 13609 rtx_insn *insn;
875862bf 13610 rtx call;
13611 rtx clobber;
13612 rtvec vec;
4a1c604e 13613
875862bf 13614 /* Direct function calls need special treatment. */
13615 if (GET_CODE (addr_location) == SYMBOL_REF)
4673c1a0 13616 {
875862bf 13617 /* When calling a global routine in PIC mode, we must
13618 replace the symbol itself with the PLT stub. */
13619 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13620 {
aa5b4778 13621 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
9c7185f7 13622 {
13623 addr_location = gen_rtx_UNSPEC (Pmode,
13624 gen_rtvec (1, addr_location),
13625 UNSPEC_PLT);
13626 addr_location = gen_rtx_CONST (Pmode, addr_location);
13627 plt_call = true;
13628 }
13629 else
13630 /* For -fpic code the PLT entries might use r12 which is
13631 call-saved. Therefore we cannot do a sibcall when
13632 calling directly using a symbol ref. When reaching
13633 this point we decided (in s390_function_ok_for_sibcall)
13634 to do a sibcall for a function pointer but one of the
13635 optimizers was able to get rid of the function pointer
13636 by propagating the symbol ref into the call. This
13637 optimization is illegal for S/390 so we turn the direct
13638 call into a indirect call again. */
13639 addr_location = force_reg (Pmode, addr_location);
875862bf 13640 }
13641
13642 /* Unless we can use the bras(l) insn, force the
13643 routine address into a register. */
13644 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13645 {
13646 if (flag_pic)
13647 addr_location = legitimize_pic_address (addr_location, 0);
13648 else
13649 addr_location = force_reg (Pmode, addr_location);
13650 }
4673c1a0 13651 }
875862bf 13652
13653 /* If it is already an indirect call or the code above moved the
13654 SYMBOL_REF to somewhere else make sure the address can be found in
13655 register 1. */
13656 if (retaddr_reg == NULL_RTX
13657 && GET_CODE (addr_location) != SYMBOL_REF
13658 && !plt_call)
4673c1a0 13659 {
875862bf 13660 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13661 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
4673c1a0 13662 }
4673c1a0 13663
875862bf 13664 addr_location = gen_rtx_MEM (QImode, addr_location);
13665 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
8b4a4127 13666
875862bf 13667 if (result_reg != NULL_RTX)
d1f9b275 13668 call = gen_rtx_SET (result_reg, call);
8b4a4127 13669
875862bf 13670 if (retaddr_reg != NULL_RTX)
13671 {
13672 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
f81e845f 13673
875862bf 13674 if (tls_call != NULL_RTX)
13675 vec = gen_rtvec (3, call, clobber,
13676 gen_rtx_USE (VOIDmode, tls_call));
13677 else
13678 vec = gen_rtvec (2, call, clobber);
8b4a4127 13679
875862bf 13680 call = gen_rtx_PARALLEL (VOIDmode, vec);
13681 }
8b4a4127 13682
875862bf 13683 insn = emit_call_insn (call);
8b4a4127 13684
875862bf 13685 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13686 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13687 {
13688 /* s390_function_ok_for_sibcall should
13689 have denied sibcalls in this case. */
32eda510 13690 gcc_assert (retaddr_reg != NULL_RTX);
c60a7572 13691 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
875862bf 13692 }
13693 return insn;
13694}
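
/* A minimal usage sketch (hypothetical caller; the real users are the
   call expanders in s390.md and s390_emit_tpf_eh_return below):

     rtx sym = gen_rtx_SYMBOL_REF (Pmode, "foo");
     rtx_insn *call_insn
       = s390_emit_call (sym, NULL_RTX, NULL_RTX,
			 gen_rtx_REG (Pmode, RETURN_REGNUM));

   emits a plain call to foo whose result is ignored.  Passing NULL_RTX
   as RETADDR_REG instead would mark the call as a sibling call.  */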
8b4a4127 13695
b2d7ede1 13696/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
8b4a4127 13697
b2d7ede1 13698static void
875862bf 13699s390_conditional_register_usage (void)
13700{
13701 int i;
8b4a4127 13702
8b4a4127 13703 if (flag_pic)
13704 {
875862bf 13705 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13706 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
8b4a4127 13707 }
875862bf 13708 if (TARGET_CPU_ZARCH)
8b4a4127 13709 {
d1a5573e 13710 fixed_regs[BASE_REGNUM] = 0;
13711 call_used_regs[BASE_REGNUM] = 0;
875862bf 13712 fixed_regs[RETURN_REGNUM] = 0;
13713 call_used_regs[RETURN_REGNUM] = 0;
8b4a4127 13714 }
875862bf 13715 if (TARGET_64BIT)
8b4a4127 13716 {
6a2469fe 13717 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
875862bf 13718 call_used_regs[i] = call_really_used_regs[i] = 0;
8b4a4127 13719 }
13720 else
13721 {
6a2469fe 13722 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13723 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
875862bf 13724 }
8b4a4127 13725
875862bf 13726 if (TARGET_SOFT_FLOAT)
13727 {
6a2469fe 13728 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
875862bf 13729 call_used_regs[i] = fixed_regs[i] = 1;
8b4a4127 13730 }
76a4c804 13731
13732 /* Disable v16 - v31 for non-vector target. */
13733 if (!TARGET_VX)
13734 {
13735 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13736 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13737 }
8b4a4127 13738}
13739
875862bf 13740/* Corresponding function to eh_return expander. */
7811991d 13741
875862bf 13742static GTY(()) rtx s390_tpf_eh_return_symbol;
13743void
13744s390_emit_tpf_eh_return (rtx target)
7811991d 13745{
93e0956b 13746 rtx_insn *insn;
13747 rtx reg, orig_ra;
525d1294 13748
875862bf 13749 if (!s390_tpf_eh_return_symbol)
13750 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13751
13752 reg = gen_rtx_REG (Pmode, 2);
bcd3133e 13753 orig_ra = gen_rtx_REG (Pmode, 3);
875862bf 13754
13755 emit_move_insn (reg, target);
bcd3133e 13756 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
875862bf 13757 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13758 gen_rtx_REG (Pmode, RETURN_REGNUM));
13759 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
bcd3133e 13760 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
875862bf 13761
13762 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
be00aaa8 13763}
13764
875862bf 13765/* Rework the prologue/epilogue to avoid saving/restoring
13766 registers unnecessarily. */
c20f8a1d 13767
6988553d 13768static void
875862bf 13769s390_optimize_prologue (void)
c6933ba6 13770{
93e0956b 13771 rtx_insn *insn, *new_insn, *next_insn;
c20f8a1d 13772
875862bf 13773 /* Do a final recompute of the frame-related data. */
ff4ce128 13774 s390_optimize_register_info ();
c20f8a1d 13775
875862bf 13776 /* If all special registers are in fact used, there's nothing we
13777 can do, so no point in walking the insn list. */
c20f8a1d 13778
ffead1ca 13779 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
875862bf 13780 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
ffead1ca 13781 && (TARGET_CPU_ZARCH
13782 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
875862bf 13783 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13784 return;
c20f8a1d 13785
875862bf 13786 /* Search for prologue/epilogue insns and replace them. */
c20f8a1d 13787
875862bf 13788 for (insn = get_insns (); insn; insn = next_insn)
13789 {
13790 int first, last, off;
13791 rtx set, base, offset;
ff4ce128 13792 rtx pat;
c20f8a1d 13793
875862bf 13794 next_insn = NEXT_INSN (insn);
d7bec695 13795
ff4ce128 13796 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
875862bf 13797 continue;
c20f8a1d 13798
ff4ce128 13799 pat = PATTERN (insn);
13800
13801      /* Remove ldgr/lgdr instructions used for saving and restoring
13802 GPRs if possible. */
54530437 13803 if (TARGET_Z10)
13804 {
13805 rtx tmp_pat = pat;
ff4ce128 13806
54530437 13807 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13808 tmp_pat = XVECEXP (pat, 0, 0);
ff4ce128 13809
54530437 13810 if (GET_CODE (tmp_pat) == SET
13811 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13812 && REG_P (SET_SRC (tmp_pat))
13813 && REG_P (SET_DEST (tmp_pat)))
13814 {
13815 int src_regno = REGNO (SET_SRC (tmp_pat));
13816 int dest_regno = REGNO (SET_DEST (tmp_pat));
13817 int gpr_regno;
13818 int fpr_regno;
13819
13820 if (!((GENERAL_REGNO_P (src_regno)
13821 && FP_REGNO_P (dest_regno))
13822 || (FP_REGNO_P (src_regno)
13823 && GENERAL_REGNO_P (dest_regno))))
13824 continue;
ff4ce128 13825
54530437 13826 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13827 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
ff4ce128 13828
54530437 13829 /* GPR must be call-saved, FPR must be call-clobbered. */
13830 if (!call_really_used_regs[fpr_regno]
13831 || call_really_used_regs[gpr_regno])
13832 continue;
13833
13834 /* It must not happen that what we once saved in an FPR now
13835 needs a stack slot. */
13836 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13837
13838 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13839 {
13840 remove_insn (insn);
13841 continue;
13842 }
ff4ce128 13843 }
13844 }
13845
13846 if (GET_CODE (pat) == PARALLEL
13847 && store_multiple_operation (pat, VOIDmode))
c20f8a1d 13848 {
ff4ce128 13849 set = XVECEXP (pat, 0, 0);
875862bf 13850 first = REGNO (SET_SRC (set));
ff4ce128 13851 last = first + XVECLEN (pat, 0) - 1;
875862bf 13852 offset = const0_rtx;
13853 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13854 off = INTVAL (offset);
c20f8a1d 13855
875862bf 13856 if (GET_CODE (base) != REG || off < 0)
13857 continue;
43944aa4 13858 if (cfun_frame_layout.first_save_gpr != -1
13859 && (cfun_frame_layout.first_save_gpr < first
13860 || cfun_frame_layout.last_save_gpr > last))
13861 continue;
875862bf 13862 if (REGNO (base) != STACK_POINTER_REGNUM
13863 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13864 continue;
13865 if (first > BASE_REGNUM || last < BASE_REGNUM)
13866 continue;
13867
13868 if (cfun_frame_layout.first_save_gpr != -1)
c20f8a1d 13869 {
93e0956b 13870 rtx s_pat = save_gprs (base,
875862bf 13871 off + (cfun_frame_layout.first_save_gpr
b5fdc416 13872 - first) * UNITS_PER_LONG,
875862bf 13873 cfun_frame_layout.first_save_gpr,
13874 cfun_frame_layout.last_save_gpr);
93e0956b 13875 new_insn = emit_insn_before (s_pat, insn);
875862bf 13876 INSN_ADDRESSES_NEW (new_insn, -1);
c20f8a1d 13877 }
c20f8a1d 13878
875862bf 13879 remove_insn (insn);
13880 continue;
c20f8a1d 13881 }
13882
43944aa4 13883 if (cfun_frame_layout.first_save_gpr == -1
ff4ce128 13884 && GET_CODE (pat) == SET
13885 && GENERAL_REG_P (SET_SRC (pat))
13886 && GET_CODE (SET_DEST (pat)) == MEM)
c20f8a1d 13887 {
ff4ce128 13888 set = pat;
875862bf 13889 first = REGNO (SET_SRC (set));
13890 offset = const0_rtx;
13891 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13892 off = INTVAL (offset);
c20f8a1d 13893
875862bf 13894 if (GET_CODE (base) != REG || off < 0)
13895 continue;
13896 if (REGNO (base) != STACK_POINTER_REGNUM
13897 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13898 continue;
c20f8a1d 13899
875862bf 13900 remove_insn (insn);
13901 continue;
c20f8a1d 13902 }
13903
ff4ce128 13904 if (GET_CODE (pat) == PARALLEL
13905 && load_multiple_operation (pat, VOIDmode))
d7bec695 13906 {
ff4ce128 13907 set = XVECEXP (pat, 0, 0);
875862bf 13908 first = REGNO (SET_DEST (set));
ff4ce128 13909 last = first + XVECLEN (pat, 0) - 1;
875862bf 13910 offset = const0_rtx;
13911 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13912 off = INTVAL (offset);
d7bec695 13913
875862bf 13914 if (GET_CODE (base) != REG || off < 0)
13915 continue;
ff4ce128 13916
43944aa4 13917 if (cfun_frame_layout.first_restore_gpr != -1
13918 && (cfun_frame_layout.first_restore_gpr < first
13919 || cfun_frame_layout.last_restore_gpr > last))
13920 continue;
875862bf 13921 if (REGNO (base) != STACK_POINTER_REGNUM
13922 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13923 continue;
13924 if (first > BASE_REGNUM || last < BASE_REGNUM)
13925 continue;
c20f8a1d 13926
875862bf 13927 if (cfun_frame_layout.first_restore_gpr != -1)
13928 {
93e0956b 13929 rtx rpat = restore_gprs (base,
875862bf 13930 off + (cfun_frame_layout.first_restore_gpr
b5fdc416 13931 - first) * UNITS_PER_LONG,
875862bf 13932 cfun_frame_layout.first_restore_gpr,
13933 cfun_frame_layout.last_restore_gpr);
8240ec0e 13934
13935 /* Remove REG_CFA_RESTOREs for registers that we no
13936 longer need to save. */
93e0956b 13937 REG_NOTES (rpat) = REG_NOTES (insn);
13938 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
8240ec0e 13939 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13940 && ((int) REGNO (XEXP (*ptr, 0))
13941 < cfun_frame_layout.first_restore_gpr))
13942 *ptr = XEXP (*ptr, 1);
13943 else
13944 ptr = &XEXP (*ptr, 1);
93e0956b 13945 new_insn = emit_insn_before (rpat, insn);
8240ec0e 13946 RTX_FRAME_RELATED_P (new_insn) = 1;
875862bf 13947 INSN_ADDRESSES_NEW (new_insn, -1);
13948 }
d7bec695 13949
875862bf 13950 remove_insn (insn);
13951 continue;
d7bec695 13952 }
13953
43944aa4 13954 if (cfun_frame_layout.first_restore_gpr == -1
ff4ce128 13955 && GET_CODE (pat) == SET
13956 && GENERAL_REG_P (SET_DEST (pat))
13957 && GET_CODE (SET_SRC (pat)) == MEM)
c20f8a1d 13958 {
ff4ce128 13959 set = pat;
875862bf 13960 first = REGNO (SET_DEST (set));
13961 offset = const0_rtx;
13962 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13963 off = INTVAL (offset);
f81e845f 13964
875862bf 13965 if (GET_CODE (base) != REG || off < 0)
13966 continue;
ff4ce128 13967
875862bf 13968 if (REGNO (base) != STACK_POINTER_REGNUM
13969 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13970 continue;
5a5e802f 13971
875862bf 13972 remove_insn (insn);
13973 continue;
13974 }
13975 }
5a5e802f 13976}
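
/* Illustrative effect of the pass above (a sketch, assuming the usual
   64-bit save slots where GPR n is kept at offset 16 + (n - 2) * 8):
   a prologue that conservatively saved all call-saved GPRs with

     stmg	%r6,%r15,48(%r15)

   is rewritten, once the final frame layout shows that only r14 and
   r15 actually need saving, into

     stmg	%r14,%r15,112(%r15)

   and the matching lmg in the epilogue is narrowed the same way.  */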
13977
33d033da 13978/* On z10 and later the dynamic branch prediction must see the
13979   backward jump within a certain window.  If not, it falls back to
13980 the static prediction. This function rearranges the loop backward
13981 branch in a way which makes the static prediction always correct.
13982 The function returns true if it added an instruction. */
73df8a45 13983static bool
93e0956b 13984s390_fix_long_loop_prediction (rtx_insn *insn)
73df8a45 13985{
13986 rtx set = single_set (insn);
db7dd023 13987 rtx code_label, label_ref;
158a522b 13988 rtx_insn *uncond_jump;
93e0956b 13989 rtx_insn *cur_insn;
73df8a45 13990 rtx tmp;
13991 int distance;
13992
13993 /* This will exclude branch on count and branch on index patterns
13994 since these are correctly statically predicted. */
13995 if (!set
13996 || SET_DEST (set) != pc_rtx
13997 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13998 return false;
13999
7a64c761 14000 /* Skip conditional returns. */
14001 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
14002 && XEXP (SET_SRC (set), 2) == pc_rtx)
14003 return false;
14004
73df8a45 14005 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
14006 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
14007
14008 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
14009
14010 code_label = XEXP (label_ref, 0);
14011
14012 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
14013 || INSN_ADDRESSES (INSN_UID (insn)) == -1
14014 || (INSN_ADDRESSES (INSN_UID (insn))
33d033da 14015 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
73df8a45 14016 return false;
14017
14018 for (distance = 0, cur_insn = PREV_INSN (insn);
33d033da 14019 distance < PREDICT_DISTANCE - 6;
73df8a45 14020 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
14021 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
14022 return false;
14023
db7dd023 14024 rtx_code_label *new_label = gen_label_rtx ();
73df8a45 14025 uncond_jump = emit_jump_insn_after (
d1f9b275 14026 gen_rtx_SET (pc_rtx,
73df8a45 14027 gen_rtx_LABEL_REF (VOIDmode, code_label)),
14028 insn);
14029 emit_label_after (new_label, uncond_jump);
14030
14031 tmp = XEXP (SET_SRC (set), 1);
14032 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
14033 XEXP (SET_SRC (set), 2) = tmp;
14034 INSN_CODE (insn) = -1;
14035
14036 XEXP (label_ref, 0) = new_label;
14037 JUMP_LABEL (insn) = new_label;
14038 JUMP_LABEL (uncond_jump) = code_label;
14039
14040 return true;
14041}
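
/* Sketch of the rewrite performed above (label names are
   illustrative).  A loop whose backward branch is more than
   PREDICT_DISTANCE away from its target

     .Lloop:
	...
	jne	.Lloop

   becomes

     .Lloop:
	...
	je	.Lnew
	j	.Lloop
     .Lnew:

   The far backward branch is now unconditional and thus always
   predicted correctly, while the inverted conditional branch only
   jumps forward over a single instruction.  */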
14042
3b14a2e6 14043/* Returns 1 if INSN reads the value of REG for purposes not related
14044 to addressing of memory, and 0 otherwise. */
14045static int
93e0956b 14046s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
3b14a2e6 14047{
14048 return reg_referenced_p (reg, PATTERN (insn))
14049 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
14050}
14051
512d9edf 14052/* Starting from INSN find_cond_jump looks downwards in the insn
14053 stream for a single jump insn which is the last user of the
14054 condition code set in INSN. */
93e0956b 14055static rtx_insn *
14056find_cond_jump (rtx_insn *insn)
512d9edf 14057{
14058 for (; insn; insn = NEXT_INSN (insn))
14059 {
14060 rtx ite, cc;
14061
14062 if (LABEL_P (insn))
14063 break;
14064
14065 if (!JUMP_P (insn))
14066 {
14067 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
14068 break;
14069 continue;
14070 }
14071
14072 /* This will be triggered by a return. */
14073 if (GET_CODE (PATTERN (insn)) != SET)
14074 break;
14075
14076 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
14077 ite = SET_SRC (PATTERN (insn));
14078
14079 if (GET_CODE (ite) != IF_THEN_ELSE)
14080 break;
14081
14082 cc = XEXP (XEXP (ite, 0), 0);
14083 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
14084 break;
14085
14086 if (find_reg_note (insn, REG_DEAD, cc))
14087 return insn;
14088 break;
14089 }
14090
93e0956b 14091 return NULL;
512d9edf 14092}
14093
14094/* Swap the condition in COND and the operands in OP0 and OP1 so that
14095 the semantics does not change. If NULL_RTX is passed as COND the
14096 function tries to find the conditional jump starting with INSN. */
14097static void
93e0956b 14098s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
512d9edf 14099{
14100 rtx tmp = *op0;
14101
14102 if (cond == NULL_RTX)
14103 {
50fc2d35 14104 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
14105 rtx set = jump ? single_set (jump) : NULL_RTX;
512d9edf 14106
50fc2d35 14107 if (set == NULL_RTX)
512d9edf 14108 return;
14109
50fc2d35 14110 cond = XEXP (SET_SRC (set), 0);
512d9edf 14111 }
14112
14113 *op0 = *op1;
14114 *op1 = tmp;
14115 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
14116}
3b14a2e6 14117
14118/* On z10, instructions of the compare-and-branch family have the
14119   property of accessing the register occurring as second operand with
14120 its bits complemented. If such a compare is grouped with a second
14121 instruction that accesses the same register non-complemented, and
14122 if that register's value is delivered via a bypass, then the
14123 pipeline recycles, thereby causing significant performance decline.
14124 This function locates such situations and exchanges the two
73df8a45 14125   operands of the compare.  The function returns true whenever it
14126 added an insn. */
14127static bool
93e0956b 14128s390_z10_optimize_cmp (rtx_insn *insn)
3b14a2e6 14129{
93e0956b 14130 rtx_insn *prev_insn, *next_insn;
73df8a45 14131 bool insn_added_p = false;
14132 rtx cond, *op0, *op1;
3b14a2e6 14133
73df8a45 14134 if (GET_CODE (PATTERN (insn)) == PARALLEL)
3b14a2e6 14135 {
73df8a45 14136 /* Handle compare and branch and branch on count
14137 instructions. */
14138 rtx pattern = single_set (insn);
512d9edf 14139
73df8a45 14140 if (!pattern
14141 || SET_DEST (pattern) != pc_rtx
14142 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
14143 return false;
3b14a2e6 14144
73df8a45 14145 cond = XEXP (SET_SRC (pattern), 0);
14146 op0 = &XEXP (cond, 0);
14147 op1 = &XEXP (cond, 1);
14148 }
14149 else if (GET_CODE (PATTERN (insn)) == SET)
14150 {
14151 rtx src, dest;
3b14a2e6 14152
73df8a45 14153 /* Handle normal compare instructions. */
14154 src = SET_SRC (PATTERN (insn));
14155 dest = SET_DEST (PATTERN (insn));
512d9edf 14156
73df8a45 14157 if (!REG_P (dest)
14158 || !CC_REGNO_P (REGNO (dest))
14159 || GET_CODE (src) != COMPARE)
14160 return false;
512d9edf 14161
73df8a45 14162 /* s390_swap_cmp will try to find the conditional
14163 jump when passing NULL_RTX as condition. */
14164 cond = NULL_RTX;
14165 op0 = &XEXP (src, 0);
14166 op1 = &XEXP (src, 1);
14167 }
14168 else
14169 return false;
512d9edf 14170
73df8a45 14171 if (!REG_P (*op0) || !REG_P (*op1))
14172 return false;
512d9edf 14173
cc6056e1 14174 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
14175 return false;
14176
73df8a45 14177 /* Swap the COMPARE arguments and its mask if there is a
14178 conflicting access in the previous insn. */
bc1c8bc5 14179 prev_insn = prev_active_insn (insn);
73df8a45 14180 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14181 && reg_referenced_p (*op1, PATTERN (prev_insn)))
14182 s390_swap_cmp (cond, op0, op1, insn);
14183
14184 /* Check if there is a conflict with the next insn. If there
14185 was no conflict with the previous insn, then swap the
14186 COMPARE arguments and its mask. If we already swapped
14187 the operands, or if swapping them would cause a conflict
14188 with the previous insn, issue a NOP after the COMPARE in
14189      order to separate the two instructions.  */
bc1c8bc5 14190 next_insn = next_active_insn (insn);
73df8a45 14191 if (next_insn != NULL_RTX && INSN_P (next_insn)
14192 && s390_non_addr_reg_read_p (*op1, next_insn))
14193 {
512d9edf 14194 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
73df8a45 14195 && s390_non_addr_reg_read_p (*op0, prev_insn))
512d9edf 14196 {
73df8a45 14197 if (REGNO (*op1) == 0)
14198 emit_insn_after (gen_nop1 (), insn);
512d9edf 14199 else
73df8a45 14200 emit_insn_after (gen_nop (), insn);
14201 insn_added_p = true;
3b14a2e6 14202 }
73df8a45 14203 else
14204 s390_swap_cmp (cond, op0, op1, insn);
3b14a2e6 14205 }
73df8a45 14206 return insn_added_p;
3b14a2e6 14207}
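
/* Sketch of the operand swap done above (extended mnemonics are only
   illustrative): if the preceding insn reads %r3 and the compare uses
   it as second operand, as in

     ar	%r5,%r3
     crjh	%r2,%r3,.Ltarget

   the operands and the condition mask are exchanged

     ar	%r5,%r3
     crjl	%r3,%r2,.Ltarget

   so that %r3 is no longer accessed in complemented form right after
   its value has been delivered via a bypass.  */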
14208
987860a9 14209/* Number of INSNs to be scanned backward in the last BB of the loop
14210 and forward in the first BB of the loop. This usually should be a
14211 bit more than the number of INSNs which could go into one
14212 group. */
14213#define S390_OSC_SCAN_INSN_NUM 5
14214
14215/* Scan LOOP for static OSC collisions and return true if an osc_break
14216 should be issued for this loop. */
14217static bool
14218s390_adjust_loop_scan_osc (struct loop* loop)
14219
14220{
14221 HARD_REG_SET modregs, newregs;
14222 rtx_insn *insn, *store_insn = NULL;
14223 rtx set;
14224 struct s390_address addr_store, addr_load;
14225 subrtx_iterator::array_type array;
14226 int insn_count;
14227
14228 CLEAR_HARD_REG_SET (modregs);
14229
14230 insn_count = 0;
14231 FOR_BB_INSNS_REVERSE (loop->latch, insn)
14232 {
14233 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14234 continue;
14235
14236 insn_count++;
14237 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14238 return false;
14239
14240 find_all_hard_reg_sets (insn, &newregs, true);
14241 IOR_HARD_REG_SET (modregs, newregs);
14242
14243 set = single_set (insn);
14244 if (!set)
14245 continue;
14246
14247 if (MEM_P (SET_DEST (set))
14248 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
14249 {
14250 store_insn = insn;
14251 break;
14252 }
14253 }
14254
14255 if (store_insn == NULL_RTX)
14256 return false;
14257
14258 insn_count = 0;
14259 FOR_BB_INSNS (loop->header, insn)
14260 {
14261 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14262 continue;
14263
14264 if (insn == store_insn)
14265 return false;
14266
14267 insn_count++;
14268 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14269 return false;
14270
14271 find_all_hard_reg_sets (insn, &newregs, true);
14272 IOR_HARD_REG_SET (modregs, newregs);
14273
14274 set = single_set (insn);
14275 if (!set)
14276 continue;
14277
14278 /* An intermediate store disrupts static OSC checking
14279 anyway. */
14280 if (MEM_P (SET_DEST (set))
14281 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
14282 return false;
14283
14284 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
14285 if (MEM_P (*iter)
14286 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
14287 && rtx_equal_p (addr_load.base, addr_store.base)
14288 && rtx_equal_p (addr_load.indx, addr_store.indx)
14289 && rtx_equal_p (addr_load.disp, addr_store.disp))
14290 {
14291 if ((addr_load.base != NULL_RTX
14292 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
14293 || (addr_load.indx != NULL_RTX
14294 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
14295 return true;
14296 }
14297 }
14298 return false;
14299}
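
/* Illustrative pattern the scan above looks for (registers and
   displacement are arbitrary): the loop latch ends with a store such
   as

     stg	%r1,8(%r9)

   and within the first few insns of the header a load

     lg	%r2,8(%r9)

   uses an rtx-identical address while %r9 has been modified by one of
   the scanned insns; in that case the function asks for an osc_break
   to be emitted for the loop.  */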
14300
14301/* Look for adjustments which can be done on simple innermost
14302 loops. */
14303static void
14304s390_adjust_loops ()
14305{
14306 struct loop *loop = NULL;
14307
14308 df_analyze ();
14309 compute_bb_for_insn ();
14310
14311 /* Find the loops. */
14312 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
14313
14314 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
14315 {
14316 if (dump_file)
14317 {
14318 flow_loop_dump (loop, dump_file, NULL, 0);
14319 fprintf (dump_file, ";; OSC loop scan Loop: ");
14320 }
14321 if (loop->latch == NULL
14322 || pc_set (BB_END (loop->latch)) == NULL_RTX
14323 || !s390_adjust_loop_scan_osc (loop))
14324 {
14325 if (dump_file)
14326 {
14327 if (loop->latch == NULL)
14328		  fprintf (dump_file, " multiple backward jumps\n");
14329 else
14330 {
14331 fprintf (dump_file, " header insn: %d latch insn: %d ",
14332 INSN_UID (BB_HEAD (loop->header)),
14333 INSN_UID (BB_END (loop->latch)));
14334 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14335 fprintf (dump_file, " loop does not end with jump\n");
14336 else
14337 fprintf (dump_file, " not instrumented\n");
14338 }
14339 }
14340 }
14341 else
14342 {
14343 rtx_insn *new_insn;
14344
14345 if (dump_file)
14346 fprintf (dump_file, " adding OSC break insn: ");
14347 new_insn = emit_insn_before (gen_osc_break (),
14348 BB_END (loop->latch));
14349 INSN_ADDRESSES_NEW (new_insn, -1);
14350 }
14351 }
14352
14353 loop_optimizer_finalize ();
14354
14355 df_finish_pass (false);
14356}
14357
875862bf 14358/* Perform machine-dependent processing. */
7346ca58 14359
875862bf 14360static void
14361s390_reorg (void)
7346ca58 14362{
875862bf 14363 bool pool_overflow = false;
f4252e72 14364 int hw_before, hw_after;
7346ca58 14365
987860a9 14366 if (s390_tune == PROCESSOR_2964_Z13)
14367 s390_adjust_loops ();
14368
875862bf 14369 /* Make sure all splits have been performed; splits after
14370 machine_dependent_reorg might confuse insn length counts. */
14371 split_all_insns_noflow ();
f588eb9f 14372
875862bf 14373 /* Install the main literal pool and the associated base
14374 register load insns.
f588eb9f 14375
875862bf 14376 In addition, there are two problematic situations we need
14377 to correct:
7346ca58 14378
875862bf 14379 - the literal pool might be > 4096 bytes in size, so that
14380 some of its elements cannot be directly accessed
7346ca58 14381
875862bf 14382 - a branch target might be > 64K away from the branch, so that
14383 it is not possible to use a PC-relative instruction.
7346ca58 14384
875862bf 14385 To fix those, we split the single literal pool into multiple
14386 pool chunks, reloading the pool base register at various
14387 points throughout the function to ensure it always points to
14388 the pool chunk the following code expects, and / or replace
14389 PC-relative branches by absolute branches.
7346ca58 14390
875862bf 14391 However, the two problems are interdependent: splitting the
14392 literal pool can move a branch further away from its target,
14393 causing the 64K limit to overflow, and on the other hand,
14394 replacing a PC-relative branch by an absolute branch means
14395 we need to put the branch target address into the literal
14396 pool, possibly causing it to overflow.
44a61e21 14397
875862bf 14398 So, we loop trying to fix up both problems until we manage
14399 to satisfy both conditions at the same time. Note that the
14400 loop is guaranteed to terminate as every pass of the loop
14401 strictly decreases the total number of PC-relative branches
14402 in the function. (This is not completely true as there
14403 might be branch-over-pool insns introduced by chunkify_start.
14404 Those never need to be split however.) */
44a61e21 14405
875862bf 14406 for (;;)
14407 {
14408 struct constant_pool *pool = NULL;
80aaaa56 14409
875862bf 14410 /* Collect the literal pool. */
14411 if (!pool_overflow)
14412 {
14413 pool = s390_mainpool_start ();
14414 if (!pool)
14415 pool_overflow = true;
14416 }
80aaaa56 14417
875862bf 14418 /* If literal pool overflowed, start to chunkify it. */
14419 if (pool_overflow)
14420 pool = s390_chunkify_start ();
80aaaa56 14421
875862bf 14422 /* Split out-of-range branches. If this has created new
14423 literal pool entries, cancel current chunk list and
14424 recompute it. zSeries machines have large branch
14425 instructions, so we never need to split a branch. */
14426 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14427 {
14428 if (pool_overflow)
14429 s390_chunkify_cancel (pool);
14430 else
14431 s390_mainpool_cancel (pool);
80aaaa56 14432
875862bf 14433 continue;
14434 }
14435
14436 /* If we made it up to here, both conditions are satisfied.
14437 Finish up literal pool related changes. */
14438 if (pool_overflow)
14439 s390_chunkify_finish (pool);
14440 else
14441 s390_mainpool_finish (pool);
14442
14443 /* We're done splitting branches. */
14444 cfun->machine->split_branches_pending_p = false;
14445 break;
80aaaa56 14446 }
80aaaa56 14447
babfdedf 14448 /* Generate out-of-pool execute target insns. */
14449 if (TARGET_CPU_ZARCH)
14450 {
93e0956b 14451 rtx_insn *insn, *target;
14452 rtx label;
babfdedf 14453
14454 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14455 {
14456 label = s390_execute_label (insn);
14457 if (!label)
14458 continue;
14459
14460 gcc_assert (label != const0_rtx);
14461
14462 target = emit_label (XEXP (label, 0));
14463 INSN_ADDRESSES_NEW (target, -1);
14464
14465 target = emit_insn (s390_execute_target (insn));
14466 INSN_ADDRESSES_NEW (target, -1);
14467 }
14468 }
14469
14470 /* Try to optimize prologue and epilogue further. */
875862bf 14471 s390_optimize_prologue ();
3b14a2e6 14472
33d033da 14473 /* Walk over the insns and do some >=z10 specific changes. */
117d67d0 14474 if (s390_tune >= PROCESSOR_2097_Z10)
73df8a45 14475 {
93e0956b 14476 rtx_insn *insn;
73df8a45 14477 bool insn_added_p = false;
14478
14479 /* The insn lengths and addresses have to be up to date for the
14480 following manipulations. */
14481 shorten_branches (get_insns ());
14482
14483 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14484 {
14485 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14486 continue;
14487
14488 if (JUMP_P (insn))
33d033da 14489 insn_added_p |= s390_fix_long_loop_prediction (insn);
73df8a45 14490
33d033da 14491 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14492 || GET_CODE (PATTERN (insn)) == SET)
14493 && s390_tune == PROCESSOR_2097_Z10)
73df8a45 14494 insn_added_p |= s390_z10_optimize_cmp (insn);
14495 }
14496
14497 /* Adjust branches if we added new instructions. */
14498 if (insn_added_p)
14499 shorten_branches (get_insns ());
14500 }
f4252e72 14501
14502 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14503 if (hw_after > 0)
14504 {
14505 rtx_insn *insn;
14506
06877232 14507 /* Insert NOPs for hotpatching. */
f4252e72 14508 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8ae6e291 14509 /* Emit NOPs
14510 1. inside the area covered by debug information to allow setting
14511 breakpoints at the NOPs,
14512 2. before any insn which results in an asm instruction,
14513 3. before in-function labels to avoid jumping to the NOPs, for
14514 example as part of a loop,
14515 4. before any barrier in case the function is completely empty
14516 (__builtin_unreachable ()) and has neither internal labels nor
14517 active insns.
14518 */
14519 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14520 break;
14521 /* Output a series of NOPs before the first active insn. */
14522 while (insn && hw_after > 0)
f4252e72 14523 {
14524 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14525 {
8ae6e291 14526 emit_insn_before (gen_nop_6_byte (), insn);
f4252e72 14527 hw_after -= 3;
14528 }
14529 else if (hw_after >= 2)
14530 {
8ae6e291 14531 emit_insn_before (gen_nop_4_byte (), insn);
f4252e72 14532 hw_after -= 2;
14533 }
14534 else
14535 {
8ae6e291 14536 emit_insn_before (gen_nop_2_byte (), insn);
f4252e72 14537 hw_after -= 1;
14538 }
14539 }
f4252e72 14540 }
875862bf 14541}
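
/* Worked example for the hotpatch NOPs emitted above: a request for
   five halfwords after the label (hw_after == 5) is covered on
   z/Architecture by one six-byte NOP followed by one four-byte NOP,
   i.e. 3 + 2 halfwords, placed in front of the first active insn of
   the function.  */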
7346ca58 14542
8a2a84e3 14543/* Return true if INSN is an fp load insn writing register REGNO.  */
14544static inline bool
ed3e6e5d 14545s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
8a2a84e3 14546{
14547 rtx set;
14548 enum attr_type flag = s390_safe_attr_type (insn);
14549
14550 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14551 return false;
14552
14553 set = single_set (insn);
14554
14555 if (set == NULL_RTX)
14556 return false;
14557
14558 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14559 return false;
14560
14561 if (REGNO (SET_DEST (set)) != regno)
14562 return false;
14563
14564 return true;
14565}
14566
14567/* This value describes the distance to be avoided between an
2fbe7a32 14568 arithmetic fp instruction and an fp load writing the same register.
8a2a84e3 14569 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
14570 fine but the exact value has to be avoided. Otherwise the FP
14571 pipeline will throw an exception causing a major penalty. */
14572#define Z10_EARLYLOAD_DISTANCE 7
14573
14574/* Rearrange the ready list in order to avoid the situation described
14575 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14576 moved to the very end of the ready list. */
14577static void
b24ef467 14578s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
8a2a84e3 14579{
14580 unsigned int regno;
14581 int nready = *nready_p;
b24ef467 14582 rtx_insn *tmp;
8a2a84e3 14583 int i;
93e0956b 14584 rtx_insn *insn;
8a2a84e3 14585 rtx set;
14586 enum attr_type flag;
14587 int distance;
14588
14589 /* Skip DISTANCE - 1 active insns. */
14590 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14591 distance > 0 && insn != NULL_RTX;
14592 distance--, insn = prev_active_insn (insn))
14593 if (CALL_P (insn) || JUMP_P (insn))
14594 return;
14595
14596 if (insn == NULL_RTX)
14597 return;
14598
14599 set = single_set (insn);
14600
14601 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14602 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14603 return;
14604
14605 flag = s390_safe_attr_type (insn);
14606
14607 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14608 return;
14609
14610 regno = REGNO (SET_DEST (set));
14611 i = nready - 1;
14612
14613 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14614 i--;
14615
14616 if (!i)
14617 return;
14618
14619 tmp = ready[i];
b24ef467 14620 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
8a2a84e3 14621 ready[0] = tmp;
14622}
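
/* Worked example for the reordering above: with
   Z10_EARLYLOAD_DISTANCE == 7 the scan walks back six active insns
   from the last scheduled insn.  If it ends on an arithmetic fp
   instruction writing, say, %f0, then issuing an fp load of %f0 right
   now would place the load exactly seven insns behind it - the one
   distance that must be avoided - so such a load is moved to the
   lowest-priority slot (ready[0]) and another insn is issued
   first.  */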
14623
b3041173 14624/* Returns TRUE if BB is entered via a fallthru edge and all other
14625   incoming edges have a probability below profile_probability::unlikely.  */
14626static bool
14627s390_bb_fallthru_entry_likely (basic_block bb)
14628{
14629 edge e, fallthru_edge;
14630 edge_iterator ei;
14631
14632 if (!bb)
14633 return false;
14634
14635 fallthru_edge = find_fallthru_edge (bb->preds);
14636 if (!fallthru_edge)
14637 return false;
14638
14639 FOR_EACH_EDGE (e, ei, bb->preds)
14640 if (e != fallthru_edge
14641 && e->probability >= profile_probability::unlikely ())
14642 return false;
14643
14644 return true;
14645}
81769881 14646
14647/* The s390_sched_state variable tracks the state of the current or
14648 the last instruction group.
14649
14650 0,1,2 number of instructions scheduled in the current group
14651 3 the last group is complete - normal insns
14652 4 the last group was a cracked/expanded insn */
14653
b3041173 14654static int s390_sched_state = 0;
81769881 14655
0cb69051 14656#define S390_SCHED_STATE_NORMAL 3
14657#define S390_SCHED_STATE_CRACKED 4
81769881 14658
0cb69051 14659#define S390_SCHED_ATTR_MASK_CRACKED 0x1
14660#define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14661#define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14662#define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
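
/* Example of how s390_sched_state evolves (see
   s390_sched_variable_issue below): three ordinary insns in a row
   advance the state 0 -> 1 -> 2 -> 3 (S390_SCHED_STATE_NORMAL, a
   complete group), an endgroup or group-alone insn completes the
   group at once, and a cracked or expanded insn puts the state into
   S390_SCHED_STATE_CRACKED.  */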
81769881 14663
14664static unsigned int
d3ffa7b4 14665s390_get_sched_attrmask (rtx_insn *insn)
81769881 14666{
14667 unsigned int mask = 0;
14668
0cb69051 14669 switch (s390_tune)
14670 {
14671 case PROCESSOR_2827_ZEC12:
14672 if (get_attr_zEC12_cracked (insn))
14673 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14674 if (get_attr_zEC12_expanded (insn))
14675 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14676 if (get_attr_zEC12_endgroup (insn))
14677 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14678 if (get_attr_zEC12_groupalone (insn))
14679 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14680 break;
14681 case PROCESSOR_2964_Z13:
a168a775 14682 case PROCESSOR_3906_Z14:
0cb69051 14683 if (get_attr_z13_cracked (insn))
14684 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14685 if (get_attr_z13_expanded (insn))
14686 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14687 if (get_attr_z13_endgroup (insn))
14688 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14689 if (get_attr_z13_groupalone (insn))
14690 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14691 break;
14692 default:
14693 gcc_unreachable ();
14694 }
14695 return mask;
14696}
14697
14698static unsigned int
14699s390_get_unit_mask (rtx_insn *insn, int *units)
14700{
14701 unsigned int mask = 0;
14702
14703 switch (s390_tune)
14704 {
14705 case PROCESSOR_2964_Z13:
a168a775 14706 case PROCESSOR_3906_Z14:
0cb69051 14707 *units = 3;
14708 if (get_attr_z13_unit_lsu (insn))
14709 mask |= 1 << 0;
14710 if (get_attr_z13_unit_fxu (insn))
14711 mask |= 1 << 1;
14712 if (get_attr_z13_unit_vfu (insn))
14713 mask |= 1 << 2;
14714 break;
14715 default:
14716 gcc_unreachable ();
14717 }
81769881 14718 return mask;
14719}
14720
14721/* Return the scheduling score for INSN. The higher the score the
14722 better. The score is calculated from the OOO scheduling attributes
14723 of INSN and the scheduling state s390_sched_state. */
14724static int
d3ffa7b4 14725s390_sched_score (rtx_insn *insn)
81769881 14726{
14727 unsigned int mask = s390_get_sched_attrmask (insn);
14728 int score = 0;
14729
14730 switch (s390_sched_state)
14731 {
14732 case 0:
14733 /* Try to put insns into the first slot which would otherwise
14734 break a group. */
0cb69051 14735 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14736 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
81769881 14737 score += 5;
0cb69051 14738 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
81769881 14739 score += 10;
0903985d 14740 /* fallthrough */
81769881 14741 case 1:
14742 /* Prefer not cracked insns while trying to put together a
14743 group. */
0cb69051 14744 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14745 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14746 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
81769881 14747 score += 10;
0cb69051 14748 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
81769881 14749 score += 5;
14750 break;
14751 case 2:
14752 /* Prefer not cracked insns while trying to put together a
14753 group. */
0cb69051 14754 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14755 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14756 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
81769881 14757 score += 10;
14758 /* Prefer endgroup insns in the last slot. */
0cb69051 14759 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
81769881 14760 score += 10;
14761 break;
0cb69051 14762 case S390_SCHED_STATE_NORMAL:
81769881 14763 /* Prefer not cracked insns if the last was not cracked. */
0cb69051 14764 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14765 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
81769881 14766 score += 5;
0cb69051 14767 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
81769881 14768 score += 10;
14769 break;
0cb69051 14770 case S390_SCHED_STATE_CRACKED:
81769881 14771 /* Try to keep cracked insns together to prevent them from
14772 interrupting groups. */
0cb69051 14773 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14774 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
81769881 14775 score += 5;
14776 break;
14777 }
0cb69051 14778
c9213ca0 14779 if (s390_tune >= PROCESSOR_2964_Z13)
0cb69051 14780 {
14781 int units, i;
14782 unsigned unit_mask, m = 1;
14783
14784 unit_mask = s390_get_unit_mask (insn, &units);
14785 gcc_assert (units <= MAX_SCHED_UNITS);
14786
14787 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14788 ago the last insn of this unit type got scheduled. This is
14789 supposed to help providing a proper instruction mix to the
14790 CPU. */
14791 for (i = 0; i < units; i++, m <<= 1)
14792 if (m & unit_mask)
14793 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14794 MAX_SCHED_MIX_DISTANCE);
9f3ec181 14795
14796 unsigned latency = insn_default_latency (insn);
14797
14798 int other_side = 1 - current_side;
14799
14800 /* Try to delay long-running insns when side is busy. */
14801 if (latency > LONGRUNNING_THRESHOLD)
14802 {
14803 if (get_attr_z13_unit_fxu (insn) && fxu_longrunning[current_side]
14804 && fxu_longrunning[other_side] <= fxu_longrunning[current_side])
14805 score = MAX (0, score - 10);
14806
14807 if (get_attr_z13_unit_vfu (insn) && vfu_longrunning[current_side]
14808 && vfu_longrunning[other_side] <= vfu_longrunning[current_side])
14809 score = MAX (0, score - 10);
14810 }
0cb69051 14811 }
9f3ec181 14812
81769881 14813 return score;
14814}
14815
8a2a84e3 14816/* This function is called via hook TARGET_SCHED_REORDER before
4246a5c7 14817 issuing one insn from list READY which contains *NREADYP entries.
8a2a84e3 14818 For target z10 it reorders load instructions to avoid early load
14819   conflicts in the floating point pipeline.  */
14820static int
81769881 14821s390_sched_reorder (FILE *file, int verbose,
b24ef467 14822 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
8a2a84e3 14823{
117d67d0 14824 if (s390_tune == PROCESSOR_2097_Z10
14825 && reload_completed
14826 && *nreadyp > 1)
14827 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
8a2a84e3 14828
117d67d0 14829 if (s390_tune >= PROCESSOR_2827_ZEC12
81769881 14830 && reload_completed
14831 && *nreadyp > 1)
14832 {
14833 int i;
14834 int last_index = *nreadyp - 1;
14835 int max_index = -1;
14836 int max_score = -1;
b24ef467 14837 rtx_insn *tmp;
81769881 14838
14839 /* Just move the insn with the highest score to the top (the
14840 end) of the list. A full sort is not needed since a conflict
14841 in the hazard recognition cannot happen. So the top insn in
14842 the ready list will always be taken. */
14843 for (i = last_index; i >= 0; i--)
14844 {
14845 int score;
14846
14847 if (recog_memoized (ready[i]) < 0)
14848 continue;
14849
14850 score = s390_sched_score (ready[i]);
14851 if (score > max_score)
14852 {
14853 max_score = score;
14854 max_index = i;
14855 }
14856 }
14857
14858 if (max_index != -1)
14859 {
14860 if (max_index != last_index)
14861 {
14862 tmp = ready[max_index];
14863 ready[max_index] = ready[last_index];
14864 ready[last_index] = tmp;
14865
14866 if (verbose > 5)
14867 fprintf (file,
0cb69051 14868 ";;\t\tBACKEND: move insn %d to the top of list\n",
81769881 14869 INSN_UID (ready[last_index]));
14870 }
14871 else if (verbose > 5)
14872 fprintf (file,
0cb69051 14873 ";;\t\tBACKEND: best insn %d already on top\n",
81769881 14874 INSN_UID (ready[last_index]));
14875 }
14876
14877 if (verbose > 5)
14878 {
14879 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14880 s390_sched_state);
14881
14882 for (i = last_index; i >= 0; i--)
14883 {
0cb69051 14884 unsigned int sched_mask;
14885 rtx_insn *insn = ready[i];
14886
14887 if (recog_memoized (insn) < 0)
81769881 14888 continue;
0cb69051 14889
14890 sched_mask = s390_get_sched_attrmask (insn);
14891 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14892 INSN_UID (insn),
14893 s390_sched_score (insn));
14894#define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14895 ((M) & sched_mask) ? #ATTR : "");
14896 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14897 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14898 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14899 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14900#undef PRINT_SCHED_ATTR
c9213ca0 14901 if (s390_tune >= PROCESSOR_2964_Z13)
0cb69051 14902 {
14903 unsigned int unit_mask, m = 1;
14904 int units, j;
14905
14906 unit_mask = s390_get_unit_mask (insn, &units);
14907 fprintf (file, "(units:");
14908 for (j = 0; j < units; j++, m <<= 1)
14909 if (m & unit_mask)
14910 fprintf (file, " u%d", j);
14911 fprintf (file, ")");
14912 }
81769881 14913 fprintf (file, "\n");
14914 }
14915 }
14916 }
14917
8a2a84e3 14918 return s390_issue_rate ();
14919}
14920
81769881 14921
8a2a84e3 14922/* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14923 the scheduler has issued INSN. It stores the last issued insn into
14924 last_scheduled_insn in order to make it available for
14925 s390_sched_reorder. */
14926static int
18282db0 14927s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
8a2a84e3 14928{
14929 last_scheduled_insn = insn;
14930
9f3ec181 14931 bool starts_group = false;
14932
117d67d0 14933 if (s390_tune >= PROCESSOR_2827_ZEC12
81769881 14934 && reload_completed
14935 && recog_memoized (insn) >= 0)
14936 {
14937 unsigned int mask = s390_get_sched_attrmask (insn);
14938
9f3ec181 14939 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14940 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0
14941 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14942 starts_group = true;
14943
0cb69051 14944 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14945 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14946 s390_sched_state = S390_SCHED_STATE_CRACKED;
14947 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14948 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14949 s390_sched_state = S390_SCHED_STATE_NORMAL;
81769881 14950 else
14951 {
14952 /* Only normal insns are left (mask == 0). */
14953 switch (s390_sched_state)
14954 {
14955 case 0:
9f3ec181 14956 starts_group = true;
14957 /* fallthrough */
81769881 14958 case 1:
14959 case 2:
9f3ec181 14960 s390_sched_state++;
14961 break;
0cb69051 14962 case S390_SCHED_STATE_NORMAL:
9f3ec181 14963 starts_group = true;
14964 s390_sched_state = 1;
81769881 14965 break;
0cb69051 14966 case S390_SCHED_STATE_CRACKED:
14967 s390_sched_state = S390_SCHED_STATE_NORMAL;
81769881 14968 break;
14969 }
14970 }
0cb69051 14971
c9213ca0 14972 if (s390_tune >= PROCESSOR_2964_Z13)
0cb69051 14973 {
14974 int units, i;
14975 unsigned unit_mask, m = 1;
14976
14977 unit_mask = s390_get_unit_mask (insn, &units);
14978 gcc_assert (units <= MAX_SCHED_UNITS);
14979
14980 for (i = 0; i < units; i++, m <<= 1)
14981 if (m & unit_mask)
14982 last_scheduled_unit_distance[i] = 0;
14983 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14984 last_scheduled_unit_distance[i]++;
14985 }
14986
9f3ec181 14987 /* If this insn started a new group, the side flipped. */
14988 if (starts_group)
14989 current_side = current_side ? 0 : 1;
14990
14991 for (int i = 0; i < 2; i++)
14992 {
14993 if (fxu_longrunning[i] >= 1)
14994 fxu_longrunning[i] -= 1;
14995 if (vfu_longrunning[i] >= 1)
14996 vfu_longrunning[i] -= 1;
14997 }
14998
14999 unsigned latency = insn_default_latency (insn);
15000 if (latency > LONGRUNNING_THRESHOLD)
15001 {
15002 if (get_attr_z13_unit_fxu (insn))
15003 fxu_longrunning[current_side] = latency * LATENCY_FACTOR;
15004 else
15005 vfu_longrunning[current_side] = latency * LATENCY_FACTOR;
15006 }
15007
81769881 15008 if (verbose > 5)
15009 {
0cb69051 15010 unsigned int sched_mask;
15011
15012 sched_mask = s390_get_sched_attrmask (insn);
15013
15014 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
15015#define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
15016 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15017 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15018 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15019 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15020#undef PRINT_SCHED_ATTR
15021
c9213ca0 15022 if (s390_tune >= PROCESSOR_2964_Z13)
0cb69051 15023 {
15024 unsigned int unit_mask, m = 1;
15025 int units, j;
15026
15027 unit_mask = s390_get_unit_mask (insn, &units);
15028 fprintf (file, "(units:");
15029 for (j = 0; j < units; j++, m <<= 1)
15030 if (m & unit_mask)
15031 fprintf (file, " %d", j);
15032 fprintf (file, ")");
15033 }
15034 fprintf (file, " sched state: %d\n", s390_sched_state);
15035
c9213ca0 15036 if (s390_tune >= PROCESSOR_2964_Z13)
0cb69051 15037 {
15038 int units, j;
15039
15040 s390_get_unit_mask (insn, &units);
15041
15042 fprintf (file, ";;\t\tBACKEND: units unused for: ");
15043 for (j = 0; j < units; j++)
15044 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
15045 fprintf (file, "\n");
15046 }
81769881 15047 }
15048 }
15049
8a2a84e3 15050 if (GET_CODE (PATTERN (insn)) != USE
15051 && GET_CODE (PATTERN (insn)) != CLOBBER)
15052 return more - 1;
15053 else
15054 return more;
15055}
7346ca58 15056
494d0169 15057static void
15058s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
15059 int verbose ATTRIBUTE_UNUSED,
15060 int max_ready ATTRIBUTE_UNUSED)
15061{
93e0956b 15062 last_scheduled_insn = NULL;
0cb69051 15063 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
b3041173 15064
15065 /* If the next basic block is most likely entered via a fallthru edge
15066 we keep the last sched state. Otherwise we start a new group.
15067 The scheduler traverses basic blocks in "instruction stream" ordering
15068      so if we see a fallthru edge here, s390_sched_state will be that of its
15069 source block.
15070
15071 current_sched_info->prev_head is the insn before the first insn of the
15072 block of insns to be scheduled.
15073 */
15074 rtx_insn *insn = current_sched_info->prev_head
15075 ? NEXT_INSN (current_sched_info->prev_head) : NULL;
15076 basic_block bb = insn ? BLOCK_FOR_INSN (insn) : NULL;
15077 if (s390_tune < PROCESSOR_2964_Z13 || !s390_bb_fallthru_entry_likely (bb))
15078 s390_sched_state = 0;
494d0169 15079}
15080
9ccaa774 15081/* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
33d033da 15082   a new number of times struct loop *loop should be unrolled if tuned for cpus with
15083 a built-in stride prefetcher.
15084 The loop is analyzed for memory accesses by calling check_dpu for
9ccaa774 15085   each rtx of the loop.  Depending on the loop_depth and the number of
15086 memory accesses a new number <=nunroll is returned to improve the
67cf9b55 15087 behavior of the hardware prefetch unit. */
9ccaa774 15088static unsigned
15089s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
15090{
15091 basic_block *bbs;
93e0956b 15092 rtx_insn *insn;
9ccaa774 15093 unsigned i;
15094 unsigned mem_count = 0;
15095
117d67d0 15096 if (s390_tune < PROCESSOR_2097_Z10)
9ccaa774 15097 return nunroll;
15098
15099 /* Count the number of memory references within the loop body. */
15100 bbs = get_loop_body (loop);
15e472ec 15101 subrtx_iterator::array_type array;
9ccaa774 15102 for (i = 0; i < loop->num_nodes; i++)
15e472ec 15103 FOR_BB_INSNS (bbs[i], insn)
15104 if (INSN_P (insn) && INSN_CODE (insn) != -1)
15105 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
15106 if (MEM_P (*iter))
15107 mem_count += 1;
9ccaa774 15108 free (bbs);
15109
15110 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
15111 if (mem_count == 0)
15112 return nunroll;
15113
15114 switch (loop_depth(loop))
15115 {
15116 case 1:
15117 return MIN (nunroll, 28 / mem_count);
15118 case 2:
15119 return MIN (nunroll, 22 / mem_count);
15120 default:
15121 return MIN (nunroll, 16 / mem_count);
15122 }
15123}
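
/* Worked example for the heuristic above: a depth-1 loop body
   containing seven memory references gets its unroll factor capped at
   MIN (nunroll, 28 / 7), so a requested unroll factor of 8 is reduced
   to 4.  */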
15124
7a0cee35 15125/* Restore the current options. This is a hook function and also called
15126 internally. */
15127
0b8be04c 15128static void
7a0cee35 15129s390_function_specific_restore (struct gcc_options *opts,
15130 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
0b8be04c 15131{
7a0cee35 15132 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
15133}
0b8be04c 15134
7a0cee35 15135static void
3bd8520f 15136s390_option_override_internal (bool main_args_p,
15137 struct gcc_options *opts,
7a0cee35 15138 const struct gcc_options *opts_set)
15139{
3bd8520f 15140 const char *prefix;
15141 const char *suffix;
15142
15143 /* Set up prefix/suffix so the error messages refer to either the command
15144 line argument, or the attribute(target). */
15145 if (main_args_p)
15146 {
15147 prefix = "-m";
15148 suffix = "";
15149 }
15150 else
15151 {
15152 prefix = "option(\"";
15153 suffix = "\")";
15154 }
15155
15156
0b8be04c 15157 /* Architecture mode defaults according to ABI. */
7a0cee35 15158 if (!(opts_set->x_target_flags & MASK_ZARCH))
0b8be04c 15159 {
15160 if (TARGET_64BIT)
7a0cee35 15161 opts->x_target_flags |= MASK_ZARCH;
0b8be04c 15162 else
7a0cee35 15163 opts->x_target_flags &= ~MASK_ZARCH;
0b8be04c 15164 }
15165
7a0cee35 15166 /* Set the march default in case it hasn't been specified on cmdline. */
15167 if (!opts_set->x_s390_arch)
3bd8520f 15168 opts->x_s390_arch = PROCESSOR_2064_Z900;
15169 else if (opts->x_s390_arch == PROCESSOR_9672_G5
15170 || opts->x_s390_arch == PROCESSOR_9672_G6)
15171 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
15172 "in future releases; use at least %sarch=z900%s",
15173 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
15174 suffix, prefix, suffix);
15175
7a0cee35 15176 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
0b8be04c 15177
15178 /* Determine processor to tune for. */
7a0cee35 15179 if (!opts_set->x_s390_tune)
15180 opts->x_s390_tune = opts->x_s390_arch;
3bd8520f 15181 else if (opts->x_s390_tune == PROCESSOR_9672_G5
15182 || opts->x_s390_tune == PROCESSOR_9672_G6)
15183 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
15184 "in future releases; use at least %stune=z900%s",
15185 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
15186 suffix, prefix, suffix);
15187
7a0cee35 15188 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
0b8be04c 15189
15190 /* Sanity checks. */
7a0cee35 15191 if (opts->x_s390_arch == PROCESSOR_NATIVE
15192 || opts->x_s390_tune == PROCESSOR_NATIVE)
db249f37 15193 gcc_unreachable ();
7a0cee35 15194 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
15195 error ("z/Architecture mode not supported on %s",
15196 processor_table[(int)opts->x_s390_arch].name);
15197 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
0b8be04c 15198 error ("64-bit ABI not supported in ESA/390 mode");
15199
0b8be04c 15200 /* Enable hardware transactions if available and not explicitly
15201 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
7a0cee35 15202 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
15203 {
15204 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
15205 opts->x_target_flags |= MASK_OPT_HTM;
15206 else
15207 opts->x_target_flags &= ~MASK_OPT_HTM;
15208 }
0b8be04c 15209
7a0cee35 15210 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
cc79fcc9 15211 {
7a0cee35 15212 if (TARGET_OPT_VX_P (opts->x_target_flags))
cc79fcc9 15213 {
7a0cee35 15214 if (!TARGET_CPU_VX_P (opts))
cc79fcc9 15215 error ("hardware vector support not available on %s",
7a0cee35 15216 processor_table[(int)opts->x_s390_arch].name);
15217 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
cc79fcc9 15218 error ("hardware vector support not available with -msoft-float");
15219 }
15220 }
7a0cee35 15221 else
15222 {
15223 if (TARGET_CPU_VX_P (opts))
15224 /* Enable vector support if available and not explicitly disabled
15225 by user. E.g. with -m31 -march=z13 -mzarch */
15226 opts->x_target_flags |= MASK_OPT_VX;
15227 else
15228 opts->x_target_flags &= ~MASK_OPT_VX;
15229 }
cc79fcc9 15230
7a0cee35 15231 /* Use hardware DFP if available and not explicitly disabled by
15232 user. E.g. with -m31 -march=z10 -mzarch */
15233 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
15234 {
15235 if (TARGET_DFP_P (opts))
15236 opts->x_target_flags |= MASK_HARD_DFP;
15237 else
15238 opts->x_target_flags &= ~MASK_HARD_DFP;
15239 }
15240
15241 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
0b8be04c 15242 {
7a0cee35 15243 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
0b8be04c 15244 {
7a0cee35 15245 if (!TARGET_CPU_DFP_P (opts))
0b8be04c 15246 error ("hardware decimal floating point instructions"
7a0cee35 15247 " not available on %s",
15248 processor_table[(int)opts->x_s390_arch].name);
15249 if (!TARGET_ZARCH_P (opts->x_target_flags))
0b8be04c 15250 error ("hardware decimal floating point instructions"
15251 " not available in ESA/390 mode");
15252 }
15253 else
7a0cee35 15254 opts->x_target_flags &= ~MASK_HARD_DFP;
0b8be04c 15255 }
15256
7a0cee35 15257 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
15258 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
0b8be04c 15259 {
7a0cee35 15260 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
15261 && TARGET_HARD_DFP_P (opts->x_target_flags))
0b8be04c 15262 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
15263
7a0cee35 15264 opts->x_target_flags &= ~MASK_HARD_DFP;
0b8be04c 15265 }
15266
7a0cee35 15267 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
15268 && TARGET_PACKED_STACK_P (opts->x_target_flags)
15269 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
0b8be04c 15270 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
15271 "in combination");
15272
7a0cee35 15273 if (opts->x_s390_stack_size)
0b8be04c 15274 {
7a0cee35 15275 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
0b8be04c 15276 error ("stack size must be greater than the stack guard value");
7a0cee35 15277 else if (opts->x_s390_stack_size > 1 << 16)
0b8be04c 15278 error ("stack size must not be greater than 64k");
15279 }
7a0cee35 15280 else if (opts->x_s390_stack_guard)
0b8be04c 15281 error ("-mstack-guard implies use of -mstack-size");
15282
15283#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
7a0cee35 15284 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
15285 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
0b8be04c 15286#endif
15287
7a0cee35 15288 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
0b8be04c 15289 {
15290 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
7a0cee35 15291 opts->x_param_values,
15292 opts_set->x_param_values);
0b8be04c 15293 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
7a0cee35 15294 opts->x_param_values,
15295 opts_set->x_param_values);
0b8be04c 15296 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
7a0cee35 15297 opts->x_param_values,
15298 opts_set->x_param_values);
0b8be04c 15299 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
7a0cee35 15300 opts->x_param_values,
15301 opts_set->x_param_values);
0b8be04c 15302 }
15303
15304 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
7a0cee35 15305 opts->x_param_values,
15306 opts_set->x_param_values);
0b8be04c 15307 /* Values for loop prefetching.  */
15308 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
7a0cee35 15309 opts->x_param_values,
15310 opts_set->x_param_values);
0b8be04c 15311 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
7a0cee35 15312 opts->x_param_values,
15313 opts_set->x_param_values);
0b8be04c 15314 /* s390 has more than two cache levels and their sizes are much
15315 larger.  Since we are always running virtualized, assume that we
15316 only get a small part of the caches above L1.  */
15317 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
7a0cee35 15318 opts->x_param_values,
15319 opts_set->x_param_values);
0b8be04c 15320 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
7a0cee35 15321 opts->x_param_values,
15322 opts_set->x_param_values);
0b8be04c 15323 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
7a0cee35 15324 opts->x_param_values,
15325 opts_set->x_param_values);
15326
15327 /* Use the alternative scheduling-pressure algorithm by default. */
15328 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
15329 opts->x_param_values,
15330 opts_set->x_param_values);
15331
e328d74f 15332 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
15333 opts->x_param_values,
15334 opts_set->x_param_values);
15335
7a0cee35 15336 /* Call the target-specific restore function to do post-init work. At the moment,
15337 this just sets opts->x_s390_cost_pointer. */
15338 s390_function_specific_restore (opts, NULL);
15339}
15340
15341static void
15342s390_option_override (void)
15343{
15344 unsigned int i;
15345 cl_deferred_option *opt;
15346 vec<cl_deferred_option> *v =
15347 (vec<cl_deferred_option> *) s390_deferred_options;
15348
15349 if (v)
15350 FOR_EACH_VEC_ELT (*v, i, opt)
15351 {
15352 switch (opt->opt_index)
15353 {
15354 case OPT_mhotpatch_:
15355 {
15356 int val1;
15357 int val2;
15358 char s[256];
15359 char *t;
15360
15361 strncpy (s, opt->arg, 256);
15362 s[255] = 0;
15363 t = strchr (s, ',');
15364 if (t != NULL)
15365 {
15366 *t = 0;
15367 t++;
15368 val1 = integral_argument (s);
15369 val2 = integral_argument (t);
15370 }
15371 else
15372 {
15373 val1 = -1;
15374 val2 = -1;
15375 }
15376 if (val1 == -1 || val2 == -1)
15377 {
15378 /* Argument is not a plain number.  */
15379 error ("arguments to %qs should be non-negative integers",
15380 "-mhotpatch=n,m");
15381 break;
15382 }
15383 else if (val1 > s390_hotpatch_hw_max
15384 || val2 > s390_hotpatch_hw_max)
15385 {
15386 error ("argument to %qs is too large (max. %d)",
15387 "-mhotpatch=n,m", s390_hotpatch_hw_max);
15388 break;
15389 }
15390 s390_hotpatch_hw_before_label = val1;
15391 s390_hotpatch_hw_after_label = val2;
15392 break;
15393 }
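
/* Illustrative sketch of the parsing above, with hypothetical values:
   "-mhotpatch=1,2" yields val1 == 1 and val2 == 2, i.e. one halfword of
   padding before the function label and two halfwords after it ("hw" in
   the variable names stands for halfwords).  "-mhotpatch=1" (no comma)
   and "-mhotpatch=a,b" fail the integral_argument checks above and are
   diagnosed.  */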
15394 default:
15395 gcc_unreachable ();
15396 }
15397 }
15398
15399 /* Set up function hooks. */
15400 init_machine_status = s390_init_machine_status;
15401
3bd8520f 15402 s390_option_override_internal (true, &global_options, &global_options_set);
7a0cee35 15403
15404 /* Save the initial options in case the user does function specific
15405 options. */
15406 target_option_default_node = build_target_option_node (&global_options);
15407 target_option_current_node = target_option_default_node;
0b8be04c 15408
15409 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
15410 requires the arch flags to be evaluated already. Since prefetching
15411 is beneficial on s390, we enable it if available. */
15412 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
15413 flag_prefetch_loop_arrays = 1;
15414
9852c8ae 15415 if (!s390_pic_data_is_text_relative && !flag_pic)
15416 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15417
0b8be04c 15418 if (TARGET_TPF)
15419 {
15420 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15421 debuggers do not yet support DWARF 3/4. */
15422 if (!global_options_set.x_dwarf_strict)
15423 dwarf_strict = 1;
15424 if (!global_options_set.x_dwarf_version)
15425 dwarf_version = 2;
15426 }
15427
15428 /* Register a target-specific optimization-and-lowering pass
15429 to run immediately before prologue and epilogue generation.
15430
15431 Registering the pass must be done at start-up.  It's
15432 convenient to do it here. */
15433 opt_pass *new_pass = new pass_s390_early_mach (g);
15434 struct register_pass_info insert_pass_s390_early_mach =
15435 {
15436 new_pass, /* pass */
15437 "pro_and_epilogue", /* reference_pass_name */
15438 1, /* ref_pass_instance_number */
15439 PASS_POS_INSERT_BEFORE /* pos_op */
15440 };
15441 register_pass (&insert_pass_s390_early_mach);
15442}
15443
7a0cee35 15444#if S390_USE_TARGET_ATTRIBUTE
15445/* Inner function to process the attribute((target(...))), take an argument and
15446 set the current options from the argument. If we have a list, recursively go
15447 over the list. */
15448
15449static bool
15450s390_valid_target_attribute_inner_p (tree args,
15451 struct gcc_options *opts,
15452 struct gcc_options *new_opts_set,
15453 bool force_pragma)
15454{
15455 char *next_optstr;
15456 bool ret = true;
15457
15458#define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15459#define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15460 static const struct
15461 {
15462 const char *string;
15463 size_t len;
15464 int opt;
15465 int has_arg;
15466 int only_as_pragma;
15467 } attrs[] = {
15468 /* enum options */
15469 S390_ATTRIB ("arch=", OPT_march_, 1),
15470 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15471 /* uinteger options */
15472 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15473 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15474 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15475 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15476 /* flag options */
15477 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15478 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15479 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15480 S390_ATTRIB ("htm", OPT_mhtm, 0),
15481 S390_ATTRIB ("vx", OPT_mvx, 0),
15482 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15483 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15484 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15485 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15486 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15487 /* boolean options */
15488 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15489 };
15490#undef S390_ATTRIB
15491#undef S390_PRAGMA
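
/* A hedged usage sketch for the table above (hypothetical declarations):

     __attribute__ ((target ("arch=z13,no-htm"))) void f (void);
     __attribute__ ((target ("stack-guard=128"))) void g (void);
     #pragma GCC target ("zvector")

   "arch=" takes an argument, "no-htm" negates a flag option, and
   "zvector" is accepted only via the pragma because its only_as_pragma
   field is set through S390_PRAGMA.  */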
15492
15493 /* If this is a list, recurse to get the options. */
15494 if (TREE_CODE (args) == TREE_LIST)
15495 {
15496 bool ret = true;
15497 int num_pragma_values;
15498 int i;
15499
15500 /* Note: attribs.c:decl_attributes prepends the values from
15501 current_target_pragma to the list of target attributes. To determine
15502 whether we're looking at a value of the attribute or the pragma we
15503 assume that the first [list_length (current_target_pragma)] values in
15504 the list are the values from the pragma. */
15505 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15506 ? list_length (current_target_pragma) : 0;
15507 for (i = 0; args; args = TREE_CHAIN (args), i++)
15508 {
15509 bool is_pragma;
15510
15511 is_pragma = (force_pragma || i < num_pragma_values);
15512 if (TREE_VALUE (args)
15513 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15514 opts, new_opts_set,
15515 is_pragma))
15516 {
15517 ret = false;
15518 }
15519 }
15520 return ret;
15521 }
15522
15523 else if (TREE_CODE (args) != STRING_CST)
15524 {
15525 error ("attribute %<target%> argument not a string");
15526 return false;
15527 }
15528
15529 /* Handle multiple arguments separated by commas. */
15530 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15531
15532 while (next_optstr && *next_optstr != '\0')
15533 {
15534 char *p = next_optstr;
15535 char *orig_p = p;
15536 char *comma = strchr (next_optstr, ',');
15537 size_t len, opt_len;
15538 int opt;
15539 bool opt_set_p;
15540 char ch;
15541 unsigned i;
15542 int mask = 0;
15543 enum cl_var_type var_type;
15544 bool found;
15545
15546 if (comma)
15547 {
15548 *comma = '\0';
15549 len = comma - next_optstr;
15550 next_optstr = comma + 1;
15551 }
15552 else
15553 {
15554 len = strlen (p);
15555 next_optstr = NULL;
15556 }
15557
15558 /* Recognize no-xxx. */
15559 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15560 {
15561 opt_set_p = false;
15562 p += 3;
15563 len -= 3;
15564 }
15565 else
15566 opt_set_p = true;
15567
15568 /* Find the option. */
15569 ch = *p;
15570 found = false;
15571 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15572 {
15573 opt_len = attrs[i].len;
15574 if (ch == attrs[i].string[0]
15575 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15576 && memcmp (p, attrs[i].string, opt_len) == 0)
15577 {
15578 opt = attrs[i].opt;
15579 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15580 continue;
15581 mask = cl_options[opt].var_value;
15582 var_type = cl_options[opt].var_type;
15583 found = true;
15584 break;
15585 }
15586 }
15587
15588 /* Process the option. */
15589 if (!found)
15590 {
15591 error ("attribute(target(\"%s\")) is unknown", orig_p);
15592 return false;
15593 }
15594 else if (attrs[i].only_as_pragma && !force_pragma)
15595 {
15596 /* Value is not allowed for the target attribute. */
19abb0ad 15597 error ("value %qs is not supported by attribute %<target%>",
7a0cee35 15598 attrs[i].string);
15599 return false;
15600 }
15601
15602 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15603 {
15604 if (var_type == CLVC_BIT_CLEAR)
15605 opt_set_p = !opt_set_p;
15606
15607 if (opt_set_p)
15608 opts->x_target_flags |= mask;
15609 else
15610 opts->x_target_flags &= ~mask;
15611 new_opts_set->x_target_flags |= mask;
15612 }
15613
15614 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15615 {
15616 int value;
15617
15618 if (cl_options[opt].cl_uinteger)
15619 {
15620 /* Unsigned integer argument. Code based on the function
15621 decode_cmdline_option () in opts-common.c. */
15622 value = integral_argument (p + opt_len);
15623 }
15624 else
15625 value = (opt_set_p) ? 1 : 0;
15626
15627 if (value != -1)
15628 {
15629 struct cl_decoded_option decoded;
15630
15631 /* Value range check; only implemented for numeric and boolean
15632 options at the moment. */
15633 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15634 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15635 set_option (opts, new_opts_set, opt, value,
15636 p + opt_len, DK_UNSPECIFIED, input_location,
15637 global_dc);
15638 }
15639 else
15640 {
15641 error ("attribute(target(\"%s\")) is unknown", orig_p);
15642 ret = false;
15643 }
15644 }
15645
15646 else if (cl_options[opt].var_type == CLVC_ENUM)
15647 {
15648 bool arg_ok;
15649 int value;
15650
15651 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15652 if (arg_ok)
15653 set_option (opts, new_opts_set, opt, value,
15654 p + opt_len, DK_UNSPECIFIED, input_location,
15655 global_dc);
15656 else
15657 {
15658 error ("attribute(target(\"%s\")) is unknown", orig_p);
15659 ret = false;
15660 }
15661 }
15662
15663 else
15664 gcc_unreachable ();
15665 }
15666 return ret;
15667}
15668
15669/* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15670
15671tree
15672s390_valid_target_attribute_tree (tree args,
15673 struct gcc_options *opts,
15674 const struct gcc_options *opts_set,
15675 bool force_pragma)
15676{
15677 tree t = NULL_TREE;
15678 struct gcc_options new_opts_set;
15679
15680 memset (&new_opts_set, 0, sizeof (new_opts_set));
15681
15682 /* Process each of the options on the chain. */
15683 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15684 force_pragma))
15685 return error_mark_node;
15686
15687 /* If some option was set (even if it has not changed), rerun
15688 s390_option_override_internal, and then save the options away. */
15689 if (new_opts_set.x_target_flags
15690 || new_opts_set.x_s390_arch
15691 || new_opts_set.x_s390_tune
15692 || new_opts_set.x_s390_stack_guard
15693 || new_opts_set.x_s390_stack_size
15694 || new_opts_set.x_s390_branch_cost
15695 || new_opts_set.x_s390_warn_framesize
15696 || new_opts_set.x_s390_warn_dynamicstack_p)
15697 {
15698 const unsigned char *src = (const unsigned char *)opts_set;
15699 unsigned char *dest = (unsigned char *)&new_opts_set;
15700 unsigned int i;
15701
15702 /* Merge the original option flags into the new ones. */
15703 for (i = 0; i < sizeof(*opts_set); i++)
15704 dest[i] |= src[i];
15705
15706 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3bd8520f 15707 s390_option_override_internal (false, opts, &new_opts_set);
7a0cee35 15708 /* Save the current options unless we are validating options for
15709 #pragma. */
15710 t = build_target_option_node (opts);
15711 }
15712 return t;
15713}
15714
15715/* Hook to validate attribute((target("string"))). */
15716
15717static bool
15718s390_valid_target_attribute_p (tree fndecl,
15719 tree ARG_UNUSED (name),
15720 tree args,
15721 int ARG_UNUSED (flags))
15722{
15723 struct gcc_options func_options;
15724 tree new_target, new_optimize;
15725 bool ret = true;
15726
15727 /* attribute((target("default"))) does nothing, beyond
15728 affecting multi-versioning. */
15729 if (TREE_VALUE (args)
15730 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15731 && TREE_CHAIN (args) == NULL_TREE
15732 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15733 return true;
15734
15735 tree old_optimize = build_optimization_node (&global_options);
15736
15737 /* Get the optimization options of the current function. */
15738 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15739
15740 if (!func_optimize)
15741 func_optimize = old_optimize;
15742
15743 /* Init func_options. */
15744 memset (&func_options, 0, sizeof (func_options));
15745 init_options_struct (&func_options, NULL);
15746 lang_hooks.init_options_struct (&func_options);
15747
15748 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15749
15750 /* Initialize func_options to the default before its target options can
15751 be set. */
15752 cl_target_option_restore (&func_options,
15753 TREE_TARGET_OPTION (target_option_default_node));
15754
15755 new_target = s390_valid_target_attribute_tree (args, &func_options,
15756 &global_options_set,
15757 (args ==
15758 current_target_pragma));
15759 new_optimize = build_optimization_node (&func_options);
15760 if (new_target == error_mark_node)
15761 ret = false;
15762 else if (fndecl && new_target)
15763 {
15764 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15765 if (old_optimize != new_optimize)
15766 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15767 }
15768 return ret;
15769}
15770
d5a90e99 15771/* Hook to determine if one function can safely inline another. */
15772
15773static bool
15774s390_can_inline_p (tree caller, tree callee)
15775{
15776 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
15777 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
15778
15779 if (!callee_tree)
15780 callee_tree = target_option_default_node;
15781 if (!caller_tree)
15782 caller_tree = target_option_default_node;
15783 if (callee_tree == caller_tree)
15784 return true;
15785
15786 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
15787 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
15788 bool ret = true;
15789
15790 if ((caller_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP))
15791 != (callee_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP)))
15792 ret = false;
15793
15794 /* Don't inline functions to be compiled for a more recent arch into a
15795 function for an older arch. */
15796 else if (caller_opts->x_s390_arch < callee_opts->x_s390_arch)
15797 ret = false;
15798
15799 /* Inlining a hard float function into a soft float function is only
15800 allowed if the hard float function doesn't actually make use of
15801 floating point.
15802
15803 We are called from FEs for multi-versioning call optimization, so
15804 beware that ipa_fn_summaries may not be available yet.
15805 else if (((TARGET_SOFT_FLOAT_P (caller_opts->x_target_flags)
15806 && !TARGET_SOFT_FLOAT_P (callee_opts->x_target_flags))
15807 || (!TARGET_HARD_DFP_P (caller_opts->x_target_flags)
15808 && TARGET_HARD_DFP_P (callee_opts->x_target_flags)))
15809 && (! ipa_fn_summaries
15810 || ipa_fn_summaries->get
15811 (cgraph_node::get (callee))->fp_expressions))
15812 ret = false;
15813
15814 return ret;
15815}
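
/* For example (assuming attributes parsed as above): a callee marked
   __attribute__ ((target ("arch=z13"))) is not inlined into a caller
   compiled with -march=z10, and a hard-float callee is inlined into a
   -msoft-float caller only if its IPA summary shows that it contains no
   floating point expressions.  */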
15816
7a0cee35 15817/* Restore target globals from NEW_TREE and invalidate the s390_previous_fndecl
15818 cache. */
15819
15820void
15821s390_activate_target_options (tree new_tree)
15822{
15823 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15824 if (TREE_TARGET_GLOBALS (new_tree))
15825 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15826 else if (new_tree == target_option_default_node)
15827 restore_target_globals (&default_target_globals);
15828 else
15829 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15830 s390_previous_fndecl = NULL_TREE;
15831}
15832
15833/* Establish appropriate back-end context for processing the function
15834 FNDECL. The argument might be NULL to indicate processing at top
15835 level, outside of any function scope. */
15836static void
15837s390_set_current_function (tree fndecl)
15838{
15839 /* Only change the context if the function changes. This hook is called
15840 several times in the course of compiling a function, and we don't want to
15841 slow things down too much or call target_reinit when it isn't safe. */
15842 if (fndecl == s390_previous_fndecl)
15843 return;
15844
15845 tree old_tree;
15846 if (s390_previous_fndecl == NULL_TREE)
15847 old_tree = target_option_current_node;
15848 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15849 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15850 else
15851 old_tree = target_option_default_node;
15852
15853 if (fndecl == NULL_TREE)
15854 {
15855 if (old_tree != target_option_current_node)
15856 s390_activate_target_options (target_option_current_node);
15857 return;
15858 }
15859
15860 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15861 if (new_tree == NULL_TREE)
15862 new_tree = target_option_default_node;
15863
15864 if (old_tree != new_tree)
15865 s390_activate_target_options (new_tree);
15866 s390_previous_fndecl = fndecl;
15867}
15868#endif
15869
a83f0e2c 15870/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15871
15872static bool
89da42b6 15873s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
a83f0e2c 15874 unsigned int align ATTRIBUTE_UNUSED,
15875 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15876 bool speed_p ATTRIBUTE_UNUSED)
15877{
15878 return (size == 1 || size == 2
15879 || size == 4 || (TARGET_ZARCH && size == 8));
15880}
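
/* E.g. a 1-, 2- or 4-byte piece may be expanded inline by the by-pieces
   framework on any configuration, while an 8-byte piece is accepted only
   in z/Architecture mode (TARGET_ZARCH); for other sizes this hook
   declines and the generic code chooses another strategy.  */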
15881
90f58e2a 15882/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15883
15884static void
15885s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15886{
07f32359 15887 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15888 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
90f58e2a 15889 tree call_efpc = build_call_expr (efpc, 0);
9550ce87 15890 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
90f58e2a 15891
15892#define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15893#define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15894#define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15895#define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15896#define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15897#define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15898
15899 /* Generates the equivalent of feholdexcept (&fenv_var)
15900
15901 fenv_var = __builtin_s390_efpc ();
15902 __builtin_s390_sfpc (fenv_var & mask) */
15903 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15904 tree new_fpc =
15905 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15906 build_int_cst (unsigned_type_node,
15907 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15908 FPC_EXCEPTION_MASK)));
15909 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15910 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15911
15912 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15913
15914 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15915 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15916 build_int_cst (unsigned_type_node,
15917 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15918 *clear = build_call_expr (sfpc, 1, new_fpc);
15919
15920 /* Generates the equivalent of feupdateenv (fenv_var)
15921
15922 old_fpc = __builtin_s390_efpc ();
15923 __builtin_s390_sfpc (fenv_var);
15924 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15925
9550ce87 15926 old_fpc = create_tmp_var_raw (unsigned_type_node);
90f58e2a 15927 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15928 old_fpc, call_efpc);
15929
15930 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15931
15932 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15933 build_int_cst (unsigned_type_node,
15934 FPC_FLAGS_MASK));
15935 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15936 build_int_cst (unsigned_type_node,
15937 FPC_FLAGS_SHIFT));
15938 tree atomic_feraiseexcept
15939 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15940 raise_old_except = build_call_expr (atomic_feraiseexcept,
15941 1, raise_old_except);
15942
15943 *update = build2 (COMPOUND_EXPR, void_type_node,
15944 build2 (COMPOUND_EXPR, void_type_node,
15945 store_old_fpc, set_new_fpc),
15946 raise_old_except);
15947
15948#undef FPC_EXCEPTION_MASK
15949#undef FPC_FLAGS_MASK
15950#undef FPC_DXC_MASK
15951#undef FPC_EXCEPTION_MASK_SHIFT
15952#undef FPC_FLAGS_SHIFT
15953#undef FPC_DXC_SHIFT
15954}
15955
76a4c804 15956/* Return the vector mode to be used for inner mode MODE when doing
15957 vectorization. */
15958static machine_mode
4c1a1be2 15959s390_preferred_simd_mode (scalar_mode mode)
76a4c804 15960{
4de9f101 15961 if (TARGET_VXE)
15962 switch (mode)
15963 {
15964 case E_SFmode:
15965 return V4SFmode;
15966 default:;
15967 }
15968
76a4c804 15969 if (TARGET_VX)
15970 switch (mode)
15971 {
916ace94 15972 case E_DFmode:
76a4c804 15973 return V2DFmode;
916ace94 15974 case E_DImode:
76a4c804 15975 return V2DImode;
916ace94 15976 case E_SImode:
76a4c804 15977 return V4SImode;
916ace94 15978 case E_HImode:
76a4c804 15979 return V8HImode;
916ace94 15980 case E_QImode:
76a4c804 15981 return V16QImode;
15982 default:;
15983 }
15984 return word_mode;
15985}
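
/* Illustrative mapping, assuming vector support (-mvx): DFmode loops are
   vectorized with V2DFmode, DImode with V2DImode, SImode with V4SImode,
   HImode with V8HImode and QImode with V16QImode; SFmode only gets
   V4SFmode once the vector enhancements facility (TARGET_VXE) is
   available.  Otherwise word_mode is returned, i.e. no SIMD mode is
   preferred.  */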
15986
15987/* Our hardware does not require vectors to be strictly aligned. */
15988static bool
15989s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15990 const_tree type ATTRIBUTE_UNUSED,
15991 int misalignment ATTRIBUTE_UNUSED,
15992 bool is_packed ATTRIBUTE_UNUSED)
15993{
6bb09dc9 15994 if (TARGET_VX)
15995 return true;
15996
15997 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15998 is_packed);
76a4c804 15999}
16000
16001/* The vector ABI requires vector types to be aligned on an 8-byte
16002 boundary (our stack alignment).  However, we allow this to be
16003 overridden by the user, although doing so breaks the ABI.  */
16004static HOST_WIDE_INT
16005s390_vector_alignment (const_tree type)
16006{
16007 if (!TARGET_VX_ABI)
16008 return default_vector_alignment (type);
16009
16010 if (TYPE_USER_ALIGN (type))
16011 return TYPE_ALIGN (type);
16012
16013 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
16014}
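
/* E.g. a 16-byte vector of doubles is limited to 8-byte (64-bit)
   alignment under the vector ABI, a 4-byte vector keeps its natural
   4-byte alignment, and an explicit __attribute__ ((aligned (16))) on
   the type is honored even though it deviates from the ABI.  */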
16015
579d67ba 16016/* Implement TARGET_CONSTANT_ALIGNMENT.  Ensure alignment on even addresses
16017 so constants can be addressed with the LARL instruction.  */
16018
16019static HOST_WIDE_INT
16020s390_constant_alignment (const_tree, HOST_WIDE_INT align)
16021{
16022 return MAX (align, 16);
16023}
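
/* E.g. a string literal that would otherwise be byte aligned gets at
   least 2-byte (16-bit) alignment here, since LARL can only form
   addresses of even (halfword-aligned) symbols.  */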
16024
14d7e7e6 16025#ifdef HAVE_AS_MACHINE_MACHINEMODE
16026/* Implement TARGET_ASM_FILE_START. */
16027static void
16028s390_asm_file_start (void)
16029{
b904831d 16030 default_file_start ();
14d7e7e6 16031 s390_asm_output_machine_for_arch (asm_out_file);
16032}
16033#endif
16034
6b7cfb9c 16035/* Implement TARGET_ASM_FILE_END. */
16036static void
16037s390_asm_file_end (void)
16038{
16039#ifdef HAVE_AS_GNU_ATTRIBUTE
16040 varpool_node *vnode;
16041 cgraph_node *cnode;
16042
16043 FOR_EACH_VARIABLE (vnode)
16044 if (TREE_PUBLIC (vnode->decl))
16045 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
16046
16047 FOR_EACH_FUNCTION (cnode)
16048 if (TREE_PUBLIC (cnode->decl))
16049 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
16050
16051
16052 if (s390_vector_abi != 0)
16053 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
16054 s390_vector_abi);
16055#endif
16056 file_end_indicate_exec_stack ();
c6d481f7 16057
16058 if (flag_split_stack)
16059 file_end_indicate_split_stack ();
6b7cfb9c 16060}
76a4c804 16061
f0c550e7 16062/* Return true if TYPE is a vector bool type. */
16063static inline bool
16064s390_vector_bool_type_p (const_tree type)
16065{
16066 return TYPE_VECTOR_OPAQUE (type);
16067}
16068
16069/* Return the diagnostic message string if the binary operation OP is
16070 not permitted on TYPE1 and TYPE2, NULL otherwise. */
16071static const char*
16072s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
16073{
16074 bool bool1_p, bool2_p;
16075 bool plusminus_p;
16076 bool muldiv_p;
16077 bool compare_p;
16078 machine_mode mode1, mode2;
16079
16080 if (!TARGET_ZVECTOR)
16081 return NULL;
16082
16083 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
16084 return NULL;
16085
16086 bool1_p = s390_vector_bool_type_p (type1);
16087 bool2_p = s390_vector_bool_type_p (type2);
16088
16089 /* Mixing signed and unsigned types is forbidden for all
16090 operators. */
16091 if (!bool1_p && !bool2_p
16092 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
d0abd9e0 16093 return N_("types differ in signedness");
f0c550e7 16094
16095 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
16096 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
16097 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
16098 || op == ROUND_DIV_EXPR);
16099 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
16100 || op == EQ_EXPR || op == NE_EXPR);
16101
16102 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
16103 return N_("binary operator does not support two vector bool operands");
16104
16105 if (bool1_p != bool2_p && (muldiv_p || compare_p))
16106 return N_("binary operator does not support vector bool operand");
16107
16108 mode1 = TYPE_MODE (type1);
16109 mode2 = TYPE_MODE (type2);
16110
16111 if (bool1_p != bool2_p && plusminus_p
16112 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
16113 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
16114 return N_("binary operator does not support mixing vector "
16115 "bool with floating point vector operands");
16116
16117 return NULL;
16118}
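
/* For instance (with -mzvector): adding a vector signed int to a vector
   unsigned int is diagnosed with the signedness message above, and
   adding two vector bool operands is rejected because plus does not
   accept two bool vectors.  */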
16119
1de60655 16120/* Implement TARGET_C_EXCESS_PRECISION.
16121
16122 FIXME: For historical reasons, float_t and double_t are typedef'ed to
16123 double on s390, causing operations on float_t to operate in a higher
16124 precision than is necessary. However, it is not the case that SFmode
16125 operations have implicit excess precision, and we generate more optimal
16126 code if we let the compiler know no implicit extra precision is added.
16127
16128 That means when we are compiling with -fexcess-precision=fast, the value
16129 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
16130 float_t (though they would be correct for -fexcess-precision=standard).
16131
16132 A complete fix would modify glibc to remove the unnecessary typedef
16133 of float_t to double. */
16134
16135static enum flt_eval_method
16136s390_excess_precision (enum excess_precision_type type)
16137{
16138 switch (type)
16139 {
16140 case EXCESS_PRECISION_TYPE_IMPLICIT:
16141 case EXCESS_PRECISION_TYPE_FAST:
16142 /* The fastest type to promote to will always be the native type,
16143 whether that occurs with implicit excess precision or
16144 otherwise. */
16145 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
16146 case EXCESS_PRECISION_TYPE_STANDARD:
16147 /* Otherwise, when we are in a standards compliant mode, to
16148 ensure consistency with the implementation in glibc, report that
16149 float is evaluated to the range and precision of double. */
16150 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
16151 default:
16152 gcc_unreachable ();
16153 }
16154 return FLT_EVAL_METHOD_UNPREDICTABLE;
16155}
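
/* Sketch of the effect, given the float_t/double_t typedefs described
   above: with -fexcess-precision=standard an expression such as
   "float a, b; ... a + b" is evaluated in double precision
   (corresponding to FLT_EVAL_METHOD == 1), whereas with
   -fexcess-precision=fast it stays in single precision.  */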
16156
fff1179b 16157/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
16158
16159static unsigned HOST_WIDE_INT
16160s390_asan_shadow_offset (void)
16161{
16162 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
16163}
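
/* With the usual ASan mapping shadow = (addr >> ASAN_SHADOW_SHIFT) + offset
   and the default shift of 3, a 64-bit address A is checked through the
   shadow byte at (A >> 3) + (1 << 52); the 31-bit offset 0x20000000
   plays the same role there.  */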
16164
875862bf 16165/* Initialize GCC target structure. */
f588eb9f 16166
875862bf 16167#undef TARGET_ASM_ALIGNED_HI_OP
16168#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
16169#undef TARGET_ASM_ALIGNED_DI_OP
16170#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
16171#undef TARGET_ASM_INTEGER
16172#define TARGET_ASM_INTEGER s390_assemble_integer
7346ca58 16173
875862bf 16174#undef TARGET_ASM_OPEN_PAREN
16175#define TARGET_ASM_OPEN_PAREN ""
f588eb9f 16176
875862bf 16177#undef TARGET_ASM_CLOSE_PAREN
16178#define TARGET_ASM_CLOSE_PAREN ""
7346ca58 16179
4c834714 16180#undef TARGET_OPTION_OVERRIDE
16181#define TARGET_OPTION_OVERRIDE s390_option_override
16182
8a23256f 16183#ifdef TARGET_THREAD_SSP_OFFSET
16184#undef TARGET_STACK_PROTECT_GUARD
16185#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
16186#endif
16187
875862bf 16188#undef TARGET_ENCODE_SECTION_INFO
16189#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
7346ca58 16190
b5fdc416 16191#undef TARGET_SCALAR_MODE_SUPPORTED_P
16192#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
16193
875862bf 16194#ifdef HAVE_AS_TLS
16195#undef TARGET_HAVE_TLS
16196#define TARGET_HAVE_TLS true
16197#endif
16198#undef TARGET_CANNOT_FORCE_CONST_MEM
16199#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
7346ca58 16200
875862bf 16201#undef TARGET_DELEGITIMIZE_ADDRESS
16202#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
7346ca58 16203
41e3a0c7 16204#undef TARGET_LEGITIMIZE_ADDRESS
16205#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
16206
875862bf 16207#undef TARGET_RETURN_IN_MEMORY
16208#define TARGET_RETURN_IN_MEMORY s390_return_in_memory
f588eb9f 16209
5ada7a14 16210#undef TARGET_INIT_BUILTINS
16211#define TARGET_INIT_BUILTINS s390_init_builtins
16212#undef TARGET_EXPAND_BUILTIN
16213#define TARGET_EXPAND_BUILTIN s390_expand_builtin
751c914e 16214#undef TARGET_BUILTIN_DECL
16215#define TARGET_BUILTIN_DECL s390_builtin_decl
5ada7a14 16216
1a561788 16217#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
16218#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
16219
875862bf 16220#undef TARGET_ASM_OUTPUT_MI_THUNK
16221#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
16222#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
a9f1838b 16223#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
7346ca58 16224
1de60655 16225#undef TARGET_C_EXCESS_PRECISION
16226#define TARGET_C_EXCESS_PRECISION s390_excess_precision
16227
875862bf 16228#undef TARGET_SCHED_ADJUST_PRIORITY
16229#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
16230#undef TARGET_SCHED_ISSUE_RATE
16231#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
16232#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
16233#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
f588eb9f 16234
8a2a84e3 16235#undef TARGET_SCHED_VARIABLE_ISSUE
16236#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
16237#undef TARGET_SCHED_REORDER
16238#define TARGET_SCHED_REORDER s390_sched_reorder
494d0169 16239#undef TARGET_SCHED_INIT
16240#define TARGET_SCHED_INIT s390_sched_init
8a2a84e3 16241
875862bf 16242#undef TARGET_CANNOT_COPY_INSN_P
16243#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
16244#undef TARGET_RTX_COSTS
16245#define TARGET_RTX_COSTS s390_rtx_costs
16246#undef TARGET_ADDRESS_COST
16247#define TARGET_ADDRESS_COST s390_address_cost
fa7a995b 16248#undef TARGET_REGISTER_MOVE_COST
16249#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
16250#undef TARGET_MEMORY_MOVE_COST
16251#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
292e369f 16252#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
16253#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
16254 s390_builtin_vectorization_cost
f588eb9f 16255
875862bf 16256#undef TARGET_MACHINE_DEPENDENT_REORG
16257#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
71597dac 16258
875862bf 16259#undef TARGET_VALID_POINTER_MODE
16260#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
f588eb9f 16261
875862bf 16262#undef TARGET_BUILD_BUILTIN_VA_LIST
16263#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
8a58ed0a 16264#undef TARGET_EXPAND_BUILTIN_VA_START
16265#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
fff1179b 16266#undef TARGET_ASAN_SHADOW_OFFSET
16267#define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
875862bf 16268#undef TARGET_GIMPLIFY_VA_ARG_EXPR
16269#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
b33c41a1 16270
3b2411a8 16271#undef TARGET_PROMOTE_FUNCTION_MODE
16272#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
875862bf 16273#undef TARGET_PASS_BY_REFERENCE
16274#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
b33c41a1 16275
875862bf 16276#undef TARGET_FUNCTION_OK_FOR_SIBCALL
16277#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
12bc26aa 16278#undef TARGET_FUNCTION_ARG
16279#define TARGET_FUNCTION_ARG s390_function_arg
16280#undef TARGET_FUNCTION_ARG_ADVANCE
16281#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
d7ab0e3d 16282#undef TARGET_FUNCTION_ARG_PADDING
16283#define TARGET_FUNCTION_ARG_PADDING s390_function_arg_padding
dc3b3062 16284#undef TARGET_FUNCTION_VALUE
16285#define TARGET_FUNCTION_VALUE s390_function_value
16286#undef TARGET_LIBCALL_VALUE
16287#define TARGET_LIBCALL_VALUE s390_libcall_value
76a4c804 16288#undef TARGET_STRICT_ARGUMENT_NAMING
16289#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
b33c41a1 16290
d44f2f7c 16291#undef TARGET_KEEP_LEAF_WHEN_PROFILED
16292#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
16293
875862bf 16294#undef TARGET_FIXED_CONDITION_CODE_REGS
16295#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
b33c41a1 16296
875862bf 16297#undef TARGET_CC_MODES_COMPATIBLE
16298#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
b33c41a1 16299
1606e68a 16300#undef TARGET_INVALID_WITHIN_DOLOOP
18282db0 16301#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
e75dabf7 16302
40af64cc 16303#ifdef HAVE_AS_TLS
16304#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
16305#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
16306#endif
16307
76a4c804 16308#undef TARGET_DWARF_FRAME_REG_MODE
16309#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
16310
4257b08a 16311#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
eddcdde1 16312#undef TARGET_MANGLE_TYPE
16313#define TARGET_MANGLE_TYPE s390_mangle_type
4257b08a 16314#endif
16315
36868490 16316#undef TARGET_SCALAR_MODE_SUPPORTED_P
16317#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
16318
76a4c804 16319#undef TARGET_VECTOR_MODE_SUPPORTED_P
16320#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
16321
3359ccfd 16322#undef TARGET_PREFERRED_RELOAD_CLASS
16323#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
16324
328d5423 16325#undef TARGET_SECONDARY_RELOAD
16326#define TARGET_SECONDARY_RELOAD s390_secondary_reload
c836e75b 16327#undef TARGET_SECONDARY_MEMORY_NEEDED
16328#define TARGET_SECONDARY_MEMORY_NEEDED s390_secondary_memory_needed
1041f930 16329#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
16330#define TARGET_SECONDARY_MEMORY_NEEDED_MODE s390_secondary_memory_needed_mode
328d5423 16331
0ef89dfd 16332#undef TARGET_LIBGCC_CMP_RETURN_MODE
16333#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
16334
16335#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
16336#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
16337
fd50b071 16338#undef TARGET_LEGITIMATE_ADDRESS_P
16339#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
16340
ca316360 16341#undef TARGET_LEGITIMATE_CONSTANT_P
16342#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
16343
7b1bda1c 16344#undef TARGET_LRA_P
16345#define TARGET_LRA_P s390_lra_p
16346
cd90919d 16347#undef TARGET_CAN_ELIMINATE
16348#define TARGET_CAN_ELIMINATE s390_can_eliminate
16349
b2d7ede1 16350#undef TARGET_CONDITIONAL_REGISTER_USAGE
16351#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
16352
9ccaa774 16353#undef TARGET_LOOP_UNROLL_ADJUST
16354#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
16355
4d946732 16356#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
16357#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
16358#undef TARGET_TRAMPOLINE_INIT
16359#define TARGET_TRAMPOLINE_INIT s390_trampoline_init
16360
8805debf 16361/* PR 79421 */
16362#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
16363#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
16364
b5fdc416 16365#undef TARGET_UNWIND_WORD_MODE
16366#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
16367
d5065e6e 16368#undef TARGET_CANONICALIZE_COMPARISON
16369#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
16370
ff4ce128 16371#undef TARGET_HARD_REGNO_SCRATCH_OK
16372#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
16373
74f68e49 16374#undef TARGET_HARD_REGNO_NREGS
16375#define TARGET_HARD_REGNO_NREGS s390_hard_regno_nregs
b395382f 16376#undef TARGET_HARD_REGNO_MODE_OK
16377#define TARGET_HARD_REGNO_MODE_OK s390_hard_regno_mode_ok
5f6dcf1a 16378#undef TARGET_MODES_TIEABLE_P
16379#define TARGET_MODES_TIEABLE_P s390_modes_tieable_p
b395382f 16380
5da94e60 16381#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
16382#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
16383 s390_hard_regno_call_part_clobbered
16384
77bc9912 16385#undef TARGET_ATTRIBUTE_TABLE
16386#define TARGET_ATTRIBUTE_TABLE s390_attribute_table
16387
11762b83 16388#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
16389#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
77bc9912 16390
7a64c761 16391#undef TARGET_SET_UP_BY_PROLOGUE
16392#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
16393
c6d481f7 16394#undef TARGET_EXTRA_LIVE_ON_ENTRY
16395#define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
16396
a83f0e2c 16397#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
16398#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
16399 s390_use_by_pieces_infrastructure_p
16400
90f58e2a 16401#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
16402#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
16403
76a4c804 16404#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
16405#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
16406
16407#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
16408#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
16409
16410#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
16411#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
16412
16413#undef TARGET_VECTOR_ALIGNMENT
16414#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
16415
f0c550e7 16416#undef TARGET_INVALID_BINARY_OP
16417#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
16418
14d7e7e6 16419#ifdef HAVE_AS_MACHINE_MACHINEMODE
16420#undef TARGET_ASM_FILE_START
16421#define TARGET_ASM_FILE_START s390_asm_file_start
16422#endif
16423
6b7cfb9c 16424#undef TARGET_ASM_FILE_END
16425#define TARGET_ASM_FILE_END s390_asm_file_end
16426
7a0cee35 16427#if S390_USE_TARGET_ATTRIBUTE
16428#undef TARGET_SET_CURRENT_FUNCTION
16429#define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
16430
16431#undef TARGET_OPTION_VALID_ATTRIBUTE_P
16432#define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
d5a90e99 16433
16434#undef TARGET_CAN_INLINE_P
16435#define TARGET_CAN_INLINE_P s390_can_inline_p
7a0cee35 16436#endif
16437
16438#undef TARGET_OPTION_RESTORE
16439#define TARGET_OPTION_RESTORE s390_function_specific_restore
16440
b56a9dbc 16441#undef TARGET_CAN_CHANGE_MODE_CLASS
16442#define TARGET_CAN_CHANGE_MODE_CLASS s390_can_change_mode_class
16443
579d67ba 16444#undef TARGET_CONSTANT_ALIGNMENT
16445#define TARGET_CONSTANT_ALIGNMENT s390_constant_alignment
16446
875862bf 16447struct gcc_target targetm = TARGET_INITIALIZER;
f588eb9f 16448
5a5e802f 16449#include "gt-s390.h"