gcc/gimple-ssa-store-merging.c
1/* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21/* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
24
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
33
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by single 4-byte store.
41
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
55
56 The algorithm is applied to each basic block in three phases:
57
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in store_operand_info struct.
74
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and
81 the following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
83
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
94
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that correspond to it. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
103
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
110
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
120
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
125
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
129
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
136
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
140
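/* As a concrete illustration (a hypothetical example, not taken from any
   particular testcase), a byte-wise structure initialization such as:

     struct S { unsigned char a, b, c, d; };

     void
     init (struct S *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   can be merged by this pass into a single 32-bit store of the constant
   0x04030201 on a little-endian target (0x01020304 on big-endian),
   provided the target allows an unaligned or suitably aligned 4-byte
   store.  */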
141#include "config.h"
142#include "system.h"
143#include "coretypes.h"
144#include "backend.h"
145#include "tree.h"
146#include "gimple.h"
147#include "builtins.h"
148#include "fold-const.h"
149#include "tree-pass.h"
150#include "ssa.h"
151#include "gimple-pretty-print.h"
152#include "alias.h"
153#include "fold-const.h"
154#include "print-tree.h"
155#include "tree-hash-traits.h"
156#include "gimple-iterator.h"
157#include "gimplify.h"
158#include "gimple-fold.h"
159#include "stor-layout.h"
160#include "timevar.h"
161#include "cfganal.h"
162#include "cfgcleanup.h"
163#include "tree-cfg.h"
164#include "except.h"
165#include "tree-eh.h"
166#include "target.h"
167#include "gimplify-me.h"
168#include "rtl.h"
169#include "expr.h" /* For get_bit_range. */
170#include "optabs-tree.h"
171#include "dbgcnt.h"
172#include "selftest.h"
173
174/* The maximum size (in bits) of the stores this pass should generate. */
175#define MAX_STORE_BITSIZE (BITS_PER_WORD)
176#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
177
178/* Limit to bound the number of aliasing checks for loads with the same
179 vuse as the corresponding store. */
180#define MAX_STORE_ALIAS_CHECKS 64
181
182namespace {
183
184struct bswap_stat
185{
186 /* Number of hand-written 16-bit nop / bswaps found. */
187 int found_16bit;
188
189 /* Number of hand-written 32-bit nop / bswaps found. */
190 int found_32bit;
191
192 /* Number of hand-written 64-bit nop / bswaps found. */
193 int found_64bit;
194} nop_stats, bswap_stats;
195
196/* A symbolic number structure is used to detect byte permutation and selection
197 patterns of a source. To achieve that, its field N contains an artificial
198 number consisting of BITS_PER_MARKER sized markers tracking where each
199 byte comes from in the source:
200
201 0 - target byte has the value 0
202 FF - target byte has an unknown value (eg. due to sign extension)
203 1..size - marker value is the byte index in the source (0 for lsb).
204
205 To detect permutations on memory sources (arrays and structures), a symbolic
206 number is also associated:
207 - a base address BASE_ADDR and an OFFSET giving the address of the source;
208 - a range which gives the difference between the highest and lowest accessed
209 memory location to make such a symbolic number;
210 - the address SRC of the source element of lowest address as a convenience
211 to easily get BASE_ADDR + offset + lowest bytepos;
212 - number of expressions N_OPS bitwise ored together to represent
213 approximate cost of the computation.
214
215 Note 1: the range is different from size as size reflects the size of the
216 type of the current expression. For instance, for an array char a[],
217 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
218 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
219 time a range of 1.
220
221 Note 2: for non-memory sources, range holds the same value as size.
222
223 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
224
225struct symbolic_number {
226 uint64_t n;
227 tree type;
228 tree base_addr;
229 tree offset;
230 poly_int64_pod bytepos;
231 tree src;
232 tree alias_set;
233 tree vuse;
234 unsigned HOST_WIDE_INT range;
235 int n_ops;
236};
237
238#define BITS_PER_MARKER 8
239#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
240#define MARKER_BYTE_UNKNOWN MARKER_MASK
241#define HEAD_MARKER(n, size) \
242 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
243
244/* The number which the find_bswap_or_nop_1 result should match in
245 order to have a nop. The number is masked according to the size of
246 the symbolic number before using it. */
247#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
248 (uint64_t)0x08070605 << 32 | 0x04030201)
249
250/* The number which the find_bswap_or_nop_1 result should match in
251 order to have a byte swap. The number is masked according to the
252 size of the symbolic number before using it. */
253#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
254 (uint64_t)0x01020304 << 32 | 0x05060708)
255
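/* A short worked example of the marker encoding above (for illustration
   only, not part of the original comment): for a 4-byte value,
   init_symbolic_number sets the symbolic number to CMPNOP masked to four
   markers, i.e. 0x04030201, meaning result byte 0 comes from source byte 1
   (the lsb), byte 1 from source byte 2, and so on.  If the statements then
   recombine the bytes in reverse order, e.g. for a uint32_t x:

     (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24)

   the markers end up as 0x01020304, which is CMPXCHG masked to four
   markers and is therefore recognized as a 32-bit byte swap.  */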
256/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
259
260inline bool
261do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
264{
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 unsigned head_marker;
267
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
273
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
278
279 switch (code)
280 {
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
301 }
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
306}
307
308/* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
310
311inline bool
312verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
313{
314 tree lhs_type;
315
316 lhs_type = gimple_expr_type (stmt);
317
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
321
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
324
325 return true;
326}
327
328/* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
330
331bool
332init_symbolic_number (struct symbolic_number *n, tree src)
333{
334 int size;
335
336 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
337 return false;
338
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
341
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of rhs1. The highest order byte is set to n->size and the
344 lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
355
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
358
359 return true;
360}
361
362/* Check if STMT might be a byte swap or a nop from a memory source and return
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
365
366bool
367find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
368{
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
375
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
379
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
382
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
385
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
390 {
391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
393
394 if (!integer_zerop (off))
395 {
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
399 }
400
401 base_addr = TREE_OPERAND (base_addr, 0);
402
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset, 0))
405 {
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
409 if (offset)
410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
411 else
412 offset = byte_offset;
413 }
414
415 bitpos += bit_offset.force_shwi ();
416 }
417 else
418 base_addr = build_fold_addr_expr (base_addr);
419
420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
421 return false;
422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
426
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
435}
436
437/* Compute the symbolic number N representing the result of a bitwise OR on
438 the two symbolic numbers N1 and N2 whose source statements are respectively
439 SOURCE_STMT1 and SOURCE_STMT2. */
440
441gimple *
442perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n)
445{
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
450
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
459
460 /* Sources are different, cancel bswap if they are not memory locations with
461 the same base (array, structure, ...). */
462 if (rhs1 != rhs2)
463 {
464 uint64_t inc;
465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
468
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
472
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
476
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
480
481 if (start1 < start2)
482 {
483 n_start = n1;
484 start_sub = start2 - start1;
485 }
486 else
487 {
488 n_start = n2;
489 start_sub = start1 - start2;
490 }
491
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
498
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
504 {
505 end = end2;
506 end_sub = end2 - end1;
507 }
508 else
509 {
510 end = end1;
511 end_sub = end1 - end2;
512 }
513 n_end = (end2 > end1) ? n2 : n1;
514
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
520
521 n->range = end - MIN (start1, start2) + 1;
522
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
527
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
533 {
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
538 }
539 }
540 else
541 {
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
545 }
546
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
559
560 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
561 {
562 uint64_t masked1, masked2;
563
564 masked1 = n1->n & mask;
565 masked2 = n2->n & mask;
566 if (masked1 && masked2 && masked1 != masked2)
567 return NULL;
568 }
569 n->n = n1->n | n2->n;
570 n->n_ops = n1->n_ops + n2->n_ops;
571
572 return source_stmt;
573}
574
575/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
576 the operation given by the rhs of STMT on the result. If the operation
577 could successfully be executed, the function returns a gimple stmt whose
578 rhs's first tree is the expression of the source operand, and NULL
579 otherwise. */
580
581gimple *
582find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
583{
584 enum tree_code code;
585 tree rhs1, rhs2 = NULL;
586 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
587 enum gimple_rhs_class rhs_class;
588
589 if (!limit || !is_gimple_assign (stmt))
590 return NULL;
591
592 rhs1 = gimple_assign_rhs1 (stmt);
593
594 if (find_bswap_or_nop_load (stmt, rhs1, n))
595 return stmt;
596
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
600 {
601 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
602 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
603 return NULL;
604
605 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
606 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
607 if (bitpos % BITS_PER_UNIT == 0
608 && bitsize % BITS_PER_UNIT == 0
609 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
610 {
611 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
612 if (BYTES_BIG_ENDIAN)
613 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
614
615 /* Shift. */
616 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
617 return NULL;
618
619 /* Mask. */
620 uint64_t mask = 0;
621 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
622 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
623 i++, tmp <<= BITS_PER_UNIT)
624 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
625 n->n &= mask;
626
627 /* Convert. */
628 n->type = TREE_TYPE (rhs1);
629 if (!n->base_addr)
630 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
631
632 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
633 }
634
635 return NULL;
636 }
637
638 if (TREE_CODE (rhs1) != SSA_NAME)
639 return NULL;
640
641 code = gimple_assign_rhs_code (stmt);
642 rhs_class = gimple_assign_rhs_class (stmt);
643 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
644
645 if (rhs_class == GIMPLE_BINARY_RHS)
646 rhs2 = gimple_assign_rhs2 (stmt);
647
648 /* Handle unary rhs and binary rhs with integer constants as second
649 operand. */
650
651 if (rhs_class == GIMPLE_UNARY_RHS
652 || (rhs_class == GIMPLE_BINARY_RHS
653 && TREE_CODE (rhs2) == INTEGER_CST))
654 {
655 if (code != BIT_AND_EXPR
656 && code != LSHIFT_EXPR
657 && code != RSHIFT_EXPR
658 && code != LROTATE_EXPR
659 && code != RROTATE_EXPR
660 && !CONVERT_EXPR_CODE_P (code))
661 return NULL;
662
663 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
664
665 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
666 we have to initialize the symbolic number. */
667 if (!source_stmt1)
668 {
669 if (gimple_assign_load_p (stmt)
670 || !init_symbolic_number (n, rhs1))
671 return NULL;
672 source_stmt1 = stmt;
673 }
674
675 switch (code)
676 {
677 case BIT_AND_EXPR:
678 {
679 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
680 uint64_t val = int_cst_value (rhs2), mask = 0;
681 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
682
683 /* Only constants masking full bytes are allowed. */
684 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
685 if ((val & tmp) != 0 && (val & tmp) != tmp)
686 return NULL;
687 else if (val & tmp)
688 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
689
690 n->n &= mask;
691 }
692 break;
693 case LSHIFT_EXPR:
694 case RSHIFT_EXPR:
695 case LROTATE_EXPR:
696 case RROTATE_EXPR:
697 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
698 return NULL;
699 break;
700 CASE_CONVERT:
701 {
702 int i, type_size, old_type_size;
703 tree type;
704
705 type = gimple_expr_type (stmt);
706 type_size = TYPE_PRECISION (type);
707 if (type_size % BITS_PER_UNIT != 0)
708 return NULL;
709 type_size /= BITS_PER_UNIT;
710 if (type_size > 64 / BITS_PER_MARKER)
711 return NULL;
712
713 /* Sign extension: result is dependent on the value. */
714 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
715 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
716 && HEAD_MARKER (n->n, old_type_size))
717 for (i = 0; i < type_size - old_type_size; i++)
718 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
719 << ((type_size - 1 - i) * BITS_PER_MARKER);
720
721 if (type_size < 64 / BITS_PER_MARKER)
722 {
723 /* If STMT casts to a smaller type mask out the bits not
724 belonging to the target type. */
725 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
726 }
727 n->type = type;
728 if (!n->base_addr)
729 n->range = type_size;
730 }
731 break;
732 default:
733 return NULL;
734 };
735 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
736 }
737
738 /* Handle binary rhs. */
739
740 if (rhs_class == GIMPLE_BINARY_RHS)
741 {
742 struct symbolic_number n1, n2;
743 gimple *source_stmt, *source_stmt2;
744
745 if (code != BIT_IOR_EXPR)
746 return NULL;
747
748 if (TREE_CODE (rhs2) != SSA_NAME)
749 return NULL;
750
751 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
752
753 switch (code)
754 {
755 case BIT_IOR_EXPR:
756 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
757
758 if (!source_stmt1)
759 return NULL;
760
761 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
762
763 if (!source_stmt2)
764 return NULL;
765
766 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
767 return NULL;
768
769 if (n1.vuse != n2.vuse)
770 return NULL;
771
772 source_stmt
773 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
774
775 if (!source_stmt)
776 return NULL;
777
778 if (!verify_symbolic_number_p (n, stmt))
779 return NULL;
780
781 break;
782 default:
783 return NULL;
784 }
785 return source_stmt;
786 }
787 return NULL;
788}
789
790/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
791 *CMPXCHG, *CMPNOP and adjust *N. */
792
793void
794find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
795 uint64_t *cmpnop)
796{
797 unsigned rsize;
798 uint64_t tmpn, mask;
799
800 /* The number which the find_bswap_or_nop_1 result should match in order
801 to have a full byte swap. The number is shifted to the right
802 according to the size of the symbolic number before using it. */
803 *cmpxchg = CMPXCHG;
804 *cmpnop = CMPNOP;
805
806 /* Find real size of result (highest non-zero byte). */
807 if (n->base_addr)
808 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
809 else
810 rsize = n->range;
811
812 /* Zero out the bits corresponding to untouched bytes in original gimple
813 expression. */
814 if (n->range < (int) sizeof (int64_t))
815 {
816 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
817 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
818 *cmpnop &= mask;
819 }
820
821 /* Zero out the bits corresponding to unused bytes in the result of the
822 gimple expression. */
823 if (rsize < n->range)
824 {
825 if (BYTES_BIG_ENDIAN)
826 {
827 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
828 *cmpxchg &= mask;
829 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
830 }
831 else
832 {
833 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
834 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
835 *cmpnop &= mask;
836 }
837 n->range = rsize;
838 }
839
840 n->range *= BITS_PER_UNIT;
841}
842
843/* Check if STMT completes a bswap implementation or a read in a given
844 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
845 accordingly. It also sets N to represent the kind of operations
846 performed: size of the resulting expression and whether it works on
847 a memory source, and if so alias-set and vuse. Finally, the
848 function returns a stmt whose rhs's first tree is the source
849 expression. */
850
851gimple *
852find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
853{
854 tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
855 if (!tree_fits_uhwi_p (type_size))
856 return NULL;
857
858 /* The last parameter determines the depth search limit. It usually
859 correlates directly to the number n of bytes to be touched. We
860 increase that number by 2 * (log2(n) + 1) here in order to also
861 cover signed -> unsigned conversions of the src operand as can be seen
862 in libgcc, and for initial shift/and operation of the src operand. */
863 int limit = tree_to_uhwi (type_size);
864 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
865 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
866
867 if (!ins_stmt)
868 {
869 if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
870 || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
871 return NULL;
872 unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
873 if (sz != 16 && sz != 32 && sz != 64)
874 return NULL;
875 tree rhs = gimple_assign_rhs1 (stmt);
876 if (CONSTRUCTOR_NELTS (rhs) == 0)
877 return NULL;
878 tree eltype = TREE_TYPE (TREE_TYPE (rhs));
879 unsigned HOST_WIDE_INT eltsz
880 = int_size_in_bytes (eltype) * BITS_PER_UNIT;
881 if (TYPE_PRECISION (eltype) != eltsz)
882 return NULL;
883 constructor_elt *elt;
884 unsigned int i;
885 tree type = build_nonstandard_integer_type (sz, 1);
886 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
887 {
888 if (TREE_CODE (elt->value) != SSA_NAME
889 || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
890 return NULL;
891 struct symbolic_number n1;
892 gimple *source_stmt
893 = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
894 limit - 1);
895
896 if (!source_stmt)
897 return NULL;
898
899 n1.type = type;
900 if (!n1.base_addr)
901 n1.range = sz / BITS_PER_UNIT;
902
903 if (i == 0)
904 {
905 ins_stmt = source_stmt;
906 *n = n1;
907 }
908 else
909 {
910 if (n->vuse != n1.vuse)
911 return NULL;
912
913 struct symbolic_number n0 = *n;
914
915 if (!BYTES_BIG_ENDIAN)
916 {
917 if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
918 return NULL;
919 }
920 else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
921 return NULL;
922 ins_stmt
923 = perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n);
924
925 if (!ins_stmt)
926 return NULL;
927 }
928 }
929 }
930
931 uint64_t cmpxchg, cmpnop;
932 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
933
934 /* A complete byte swap should make the symbolic number to start with
935 the largest digit in the highest order byte. Unchanged symbolic
936 number indicates a read with same endianness as target architecture. */
937 if (n->n == cmpnop)
938 *bswap = false;
939 else if (n->n == cmpxchg)
940 *bswap = true;
941 else
942 return NULL;
943
944 /* Useless bit manipulation performed by code. */
945 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
946 return NULL;
947
948 return ins_stmt;
949}
950
951const pass_data pass_data_optimize_bswap =
952{
953 GIMPLE_PASS, /* type */
954 "bswap", /* name */
955 OPTGROUP_NONE, /* optinfo_flags */
956 TV_NONE, /* tv_id */
957 PROP_ssa, /* properties_required */
958 0, /* properties_provided */
959 0, /* properties_destroyed */
960 0, /* todo_flags_start */
961 0, /* todo_flags_finish */
962};
963
964class pass_optimize_bswap : public gimple_opt_pass
965{
966public:
967 pass_optimize_bswap (gcc::context *ctxt)
968 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
969 {}
970
971 /* opt_pass methods: */
972 virtual bool gate (function *)
973 {
974 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
975 }
976
977 virtual unsigned int execute (function *);
978
979}; // class pass_optimize_bswap
980
981/* Helper function for bswap_replace. Build VIEW_CONVERT_EXPR from
982 VAL to TYPE. If VAL has different type size, emit a NOP_EXPR cast
983 first. */
984
985static tree
986bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val)
987{
988 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val)));
989 if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
990 {
991 HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
992 tree itype = build_nonstandard_integer_type (prec, 1);
993 gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
994 gsi_insert_before (gsi, g, GSI_SAME_STMT);
995 val = gimple_assign_lhs (g);
996 }
997 return build1 (VIEW_CONVERT_EXPR, type, val);
998}
999
1000/* Perform the bswap optimization: replace the expression computed in the rhs
1001 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
1002 bswap, load or load + bswap expression.
1003 Which of these alternatives replaces the rhs is given by N->base_addr (non
1004 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
1005 load to perform are also given in N while the builtin bswap invocation is
1006 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of
1007 the load statements involved to construct the rhs in gsi_stmt (GSI) and
1008 N->range gives the size of the rhs expression for maintaining some
1009 statistics.
1010
1011 Note that if the replacement involves a load and if gsi_stmt (GSI) is
1012 non-NULL, that stmt is moved just after INS_STMT to do the load with the
1013 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
1014
1015tree
1016bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
1017 tree bswap_type, tree load_type, struct symbolic_number *n,
1018 bool bswap)
1019{
1020 tree src, tmp, tgt = NULL_TREE;
1021 gimple *bswap_stmt;
1022 tree_code conv_code = NOP_EXPR;
1023
1024 gimple *cur_stmt = gsi_stmt (gsi);
1025 src = n->src;
1026 if (cur_stmt)
1027 {
1028 tgt = gimple_assign_lhs (cur_stmt);
1029 if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
1030 && tgt
1031 && VECTOR_TYPE_P (TREE_TYPE (tgt)))
1032 conv_code = VIEW_CONVERT_EXPR;
1033 }
1034
1035 /* Need to load the value from memory first. */
1036 if (n->base_addr)
1037 {
1038 gimple_stmt_iterator gsi_ins = gsi;
1039 if (ins_stmt)
1040 gsi_ins = gsi_for_stmt (ins_stmt);
1041 tree addr_expr, addr_tmp, val_expr, val_tmp;
1042 tree load_offset_ptr, aligned_load_type;
1043 gimple *load_stmt;
1044 unsigned align = get_object_alignment (src);
1045 poly_int64 load_offset = 0;
1046
1047 if (cur_stmt)
1048 {
1049 basic_block ins_bb = gimple_bb (ins_stmt);
1050 basic_block cur_bb = gimple_bb (cur_stmt);
1051 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
1052 return NULL_TREE;
1053
1054 /* Move cur_stmt just before one of the load of the original
1055 to ensure it has the same VUSE. See PR61517 for what could
1056 go wrong. */
1057 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
1058 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
1059 gsi_move_before (&gsi, &gsi_ins);
1060 gsi = gsi_for_stmt (cur_stmt);
1061 }
1062 else
1063 gsi = gsi_ins;
1064
1065 /* Compute address to load from and cast according to the size
1066 of the load. */
1067 addr_expr = build_fold_addr_expr (src);
1068 if (is_gimple_mem_ref_addr (addr_expr))
1069 addr_tmp = unshare_expr (addr_expr);
1070 else
1071 {
1072 addr_tmp = unshare_expr (n->base_addr);
1073 if (!is_gimple_mem_ref_addr (addr_tmp))
1074 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
1075 is_gimple_mem_ref_addr,
1076 NULL_TREE, true,
1077 GSI_SAME_STMT);
1078 load_offset = n->bytepos;
1079 if (n->offset)
1080 {
1081 tree off
1082 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
1083 true, NULL_TREE, true,
1084 GSI_SAME_STMT);
1085 gimple *stmt
1086 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
1087 POINTER_PLUS_EXPR, addr_tmp, off);
1088 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1089 addr_tmp = gimple_assign_lhs (stmt);
1090 }
1091 }
1092
1093 /* Perform the load. */
1094 aligned_load_type = load_type;
1095 if (align < TYPE_ALIGN (load_type))
1096 aligned_load_type = build_aligned_type (load_type, align);
1097 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1098 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1099 load_offset_ptr);
1100
1101 if (!bswap)
1102 {
1103 if (n->range == 16)
1104 nop_stats.found_16bit++;
1105 else if (n->range == 32)
1106 nop_stats.found_32bit++;
1107 else
1108 {
1109 gcc_assert (n->range == 64);
1110 nop_stats.found_64bit++;
1111 }
1112
1113 /* Convert the result of load if necessary. */
1114 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1115 {
1116 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1117 "load_dst");
1118 load_stmt = gimple_build_assign (val_tmp, val_expr);
1119 gimple_set_vuse (load_stmt, n->vuse);
1120 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1121 if (conv_code == VIEW_CONVERT_EXPR)
1122 val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp);
1123 gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
1124 update_stmt (cur_stmt);
1125 }
1126 else if (cur_stmt)
1127 {
1128 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1129 gimple_set_vuse (cur_stmt, n->vuse);
1130 update_stmt (cur_stmt);
1131 }
1132 else
1133 {
1134 tgt = make_ssa_name (load_type);
1135 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1136 gimple_set_vuse (cur_stmt, n->vuse);
1137 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1138 }
1139
1140 if (dump_file)
1141 {
1142 fprintf (dump_file,
1143 "%d bit load in target endianness found at: ",
1144 (int) n->range);
1145 print_gimple_stmt (dump_file, cur_stmt, 0);
1146 }
1147 return tgt;
1148 }
1149 else
1150 {
1151 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1152 load_stmt = gimple_build_assign (val_tmp, val_expr);
1153 gimple_set_vuse (load_stmt, n->vuse);
1154 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1155 }
1156 src = val_tmp;
1157 }
1158 else if (!bswap)
1159 {
1160 gimple *g = NULL;
1161 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1162 {
1163 if (!is_gimple_val (src))
1164 return NULL_TREE;
1165 if (conv_code == VIEW_CONVERT_EXPR)
1166 src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src);
1167 g = gimple_build_assign (tgt, conv_code, src);
1168 }
1169 else if (cur_stmt)
1170 g = gimple_build_assign (tgt, src);
1171 else
1172 tgt = src;
1173 if (n->range == 16)
1174 nop_stats.found_16bit++;
1175 else if (n->range == 32)
1176 nop_stats.found_32bit++;
1177 else
1178 {
1179 gcc_assert (n->range == 64);
1180 nop_stats.found_64bit++;
1181 }
1182 if (dump_file)
1183 {
1184 fprintf (dump_file,
1185 "%d bit reshuffle in target endianness found at: ",
1186 (int) n->range);
1187 if (cur_stmt)
1188 print_gimple_stmt (dump_file, cur_stmt, 0);
1189 else
1190 {
1191 print_generic_expr (dump_file, tgt, TDF_NONE);
1192 fprintf (dump_file, "\n");
1193 }
1194 }
1195 if (cur_stmt)
1196 gsi_replace (&gsi, g, true);
1197 return tgt;
1198 }
1199 else if (TREE_CODE (src) == BIT_FIELD_REF)
1200 src = TREE_OPERAND (src, 0);
1201
1202 if (n->range == 16)
1203 bswap_stats.found_16bit++;
1204 else if (n->range == 32)
1205 bswap_stats.found_32bit++;
1206 else
1207 {
1208 gcc_assert (n->range == 64);
1209 bswap_stats.found_64bit++;
1210 }
1211
1212 tmp = src;
1213
1214 /* Convert the src expression if necessary. */
1215 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1216 {
1217 gimple *convert_stmt;
1218
1219 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1220 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1221 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1222 }
1223
1224 /* Canonical form for a 16-bit bswap is a rotate expression. Only 16-bit values
1225 are handled this way, as a rotation of a 2N-bit value by N bits is generally not
1226 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
1227 gives 0x03040102 while a bswap for that value is 0x04030201. */
1228 if (bswap && n->range == 16)
1229 {
1230 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1231 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1232 bswap_stmt = gimple_build_assign (NULL, src);
1233 }
1234 else
1235 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1236
1237 if (tgt == NULL_TREE)
1238 tgt = make_ssa_name (bswap_type);
1239 tmp = tgt;
1240
1241 /* Convert the result if necessary. */
1242 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1243 {
1244 gimple *convert_stmt;
1245
1246 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1247 tree atmp = tmp;
1248 if (conv_code == VIEW_CONVERT_EXPR)
1249 atmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), tmp);
1250 convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
1251 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1252 }
1253
1254 gimple_set_lhs (bswap_stmt, tmp);
1255
1256 if (dump_file)
1257 {
1258 fprintf (dump_file, "%d bit bswap implementation found at: ",
1259 (int) n->range);
1260 if (cur_stmt)
1261 print_gimple_stmt (dump_file, cur_stmt, 0);
1262 else
1263 {
1264 print_generic_expr (dump_file, tgt, TDF_NONE);
1265 fprintf (dump_file, "\n");
1266 }
1267 }
1268
1269 if (cur_stmt)
1270 {
1271 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1272 gsi_remove (&gsi, true);
1273 }
1274 else
1275 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1276 return tgt;
1277}
1278
1279/* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
1280 using bswap optimizations. CDI_DOMINATORS need to be
1281 computed on entry. Return true if it has been optimized and
1282 TODO_update_ssa is needed. */
1283
1284static bool
1285maybe_optimize_vector_constructor (gimple *cur_stmt)
1286{
1287 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1288 struct symbolic_number n;
1289 bool bswap;
1290
1291 gcc_assert (is_gimple_assign (cur_stmt)
1292 && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);
1293
1294 tree rhs = gimple_assign_rhs1 (cur_stmt);
1295 if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
1296 || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
1297 || gimple_assign_lhs (cur_stmt) == NULL_TREE)
1298 return false;
1299
1300 HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
1301 switch (sz)
1302 {
1303 case 16:
1304 load_type = bswap_type = uint16_type_node;
1305 break;
1306 case 32:
1307 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1308 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
1309 {
1310 load_type = uint32_type_node;
1311 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1312 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1313 }
1314 else
1315 return false;
1316 break;
1317 case 64:
1318 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1319 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1320 || (word_mode == SImode
1321 && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1322 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
1323 {
1324 load_type = uint64_type_node;
1325 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1326 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1327 }
1328 else
1329 return false;
1330 break;
1331 default:
1332 return false;
1333 }
1334
1335 gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1336 if (!ins_stmt || n.range != (unsigned HOST_WIDE_INT) sz)
1337 return false;
1338
1339 if (bswap && !fndecl && n.range != 16)
1340 return false;
1341
1342 memset (&nop_stats, 0, sizeof (nop_stats));
1343 memset (&bswap_stats, 0, sizeof (bswap_stats));
1344 return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1345 bswap_type, load_type, &n, bswap) != NULL_TREE;
1346}
1347
1348/* Find manual byte swap implementations as well as loads in a given
1349 endianness. Byte swaps are turned into a bswap builtin invocation,
1350 while endian loads are converted to a bswap builtin invocation or a
1351 simple load according to the target endianness. */
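/* For instance (an illustrative example only, not taken from the sources),
   a hand-written byte swap such as:

     uint32_t
     swap32 (uint32_t x)
     {
       return (x >> 24)
              | ((x >> 8) & 0x0000ff00)
              | ((x << 8) & 0x00ff0000)
              | (x << 24);
     }

   is rewritten into a call to __builtin_bswap32, provided the target
   implements the bswap optab for SImode (see the bswap32_p check in
   pass_optimize_bswap::execute below).  */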
1352
1353unsigned int
1354pass_optimize_bswap::execute (function *fun)
1355{
1356 basic_block bb;
1357 bool bswap32_p, bswap64_p;
1358 bool changed = false;
1359 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1360
1361 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1362 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1363 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1364 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1365 || (bswap32_p && word_mode == SImode)));
1366
1367 /* Determine the argument type of the builtins. The code later on
1368 assumes that the return and argument type are the same. */
1369 if (bswap32_p)
1370 {
1371 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1372 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1373 }
1374
1375 if (bswap64_p)
1376 {
1377 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1378 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1379 }
1380
1381 memset (&nop_stats, 0, sizeof (nop_stats));
1382 memset (&bswap_stats, 0, sizeof (bswap_stats));
1383 calculate_dominance_info (CDI_DOMINATORS);
1384
1385 FOR_EACH_BB_FN (bb, fun)
1386 {
1387 gimple_stmt_iterator gsi;
1388
1389 /* We do a reverse scan for bswap patterns to make sure we get the
1390 widest match. As bswap pattern matching doesn't handle previously
1391 inserted smaller bswap replacements as sub-patterns, the wider
1392 variant wouldn't be detected. */
1393 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1394 {
1395 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1396 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1397 enum tree_code code;
1398 struct symbolic_number n;
1399 bool bswap;
1400
1401 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1402 might be moved to a different basic block by bswap_replace and gsi
1403 must not point to it if that's the case. Moving the gsi_prev
1404 there makes sure that gsi points to the statement previous to
1405 cur_stmt while still making sure that all statements are
1406 considered in this basic block. */
1407 gsi_prev (&gsi);
1408
1409 if (!is_gimple_assign (cur_stmt))
1410 continue;
1411
1412 code = gimple_assign_rhs_code (cur_stmt);
1413 switch (code)
1414 {
1415 case LROTATE_EXPR:
1416 case RROTATE_EXPR:
1417 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1418 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1419 % BITS_PER_UNIT)
1420 continue;
1421 /* Fall through. */
1422 case BIT_IOR_EXPR:
1423 break;
1424 case CONSTRUCTOR:
1425 {
1426 tree rhs = gimple_assign_rhs1 (cur_stmt);
1427 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
1428 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
1429 break;
1430 }
1431 continue;
1432 default:
1433 continue;
1434 }
1435
1436 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1437
1438 if (!ins_stmt)
1439 continue;
1440
1441 switch (n.range)
1442 {
1443 case 16:
1444 /* Already in canonical form, nothing to do. */
1445 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1446 continue;
1447 load_type = bswap_type = uint16_type_node;
1448 break;
1449 case 32:
1450 load_type = uint32_type_node;
1451 if (bswap32_p)
1452 {
1453 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1454 bswap_type = bswap32_type;
1455 }
1456 break;
1457 case 64:
1458 load_type = uint64_type_node;
1459 if (bswap64_p)
1460 {
1461 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1462 bswap_type = bswap64_type;
1463 }
1464 break;
1465 default:
1466 continue;
1467 }
1468
1469 if (bswap && !fndecl && n.range != 16)
1470 continue;
1471
1472 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1473 bswap_type, load_type, &n, bswap))
1474 changed = true;
1475 }
1476 }
1477
1478 statistics_counter_event (fun, "16-bit nop implementations found",
1479 nop_stats.found_16bit);
1480 statistics_counter_event (fun, "32-bit nop implementations found",
1481 nop_stats.found_32bit);
1482 statistics_counter_event (fun, "64-bit nop implementations found",
1483 nop_stats.found_64bit);
1484 statistics_counter_event (fun, "16-bit bswap implementations found",
1485 bswap_stats.found_16bit);
1486 statistics_counter_event (fun, "32-bit bswap implementations found",
1487 bswap_stats.found_32bit);
1488 statistics_counter_event (fun, "64-bit bswap implementations found",
1489 bswap_stats.found_64bit);
1490
1491 return (changed ? TODO_update_ssa : 0);
1492}
1493
1494} // anon namespace
1495
1496gimple_opt_pass *
1497make_pass_optimize_bswap (gcc::context *ctxt)
1498{
1499 return new pass_optimize_bswap (ctxt);
1500}
1501
1502namespace {
1503
1504/* Struct recording one operand for the store, which is either a constant,
1505 then VAL represents the constant and all the other fields are zero, or
1506 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1507 and the other fields also reflect the memory load, or an SSA name, then
1508 VAL represents the SSA name and all the other fields are zero. */
1509
1510class store_operand_info
1511{
1512public:
1513 tree val;
1514 tree base_addr;
1515 poly_uint64 bitsize;
1516 poly_uint64 bitpos;
1517 poly_uint64 bitregion_start;
1518 poly_uint64 bitregion_end;
1519 gimple *stmt;
1520 bool bit_not_p;
1521 store_operand_info ();
1522};
1523
1524store_operand_info::store_operand_info ()
1525 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1526 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1527{
1528}
1529
1530/* Struct recording the information about a single store of an immediate
1531 to memory. These are created in the first phase and coalesced into
1532 merged_store_group objects in the second phase. */
1533
1534class store_immediate_info
1535{
1536public:
1537 unsigned HOST_WIDE_INT bitsize;
1538 unsigned HOST_WIDE_INT bitpos;
1539 unsigned HOST_WIDE_INT bitregion_start;
1540 /* This is one past the last bit of the bit region. */
1541 unsigned HOST_WIDE_INT bitregion_end;
1542 gimple *stmt;
1543 unsigned int order;
1544 /* INTEGER_CST for constant store, STRING_CST for string store,
1545 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1546 BIT_INSERT_EXPR for bit insertion.
1547 LROTATE_EXPR if it can be only bswap optimized and
1548 ops are not really meaningful.
1549 NOP_EXPR if bswap optimization detected identity, ops
1550 are not meaningful. */
1551 enum tree_code rhs_code;
1552 /* Two fields for bswap optimization purposes. */
1553 struct symbolic_number n;
1554 gimple *ins_stmt;
1555 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1556 bool bit_not_p;
1557 /* True if ops have been swapped and thus ops[1] represents
1558 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1559 bool ops_swapped_p;
1560 /* The index number of the landing pad, or 0 if there is none. */
1561 int lp_nr;
1562 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1563 just the first one. */
1564 store_operand_info ops[2];
1565 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1566 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1567 gimple *, unsigned int, enum tree_code,
1568 struct symbolic_number &, gimple *, bool, int,
1569 const store_operand_info &,
1570 const store_operand_info &);
1571};
1572
1573store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1574 unsigned HOST_WIDE_INT bp,
1575 unsigned HOST_WIDE_INT brs,
1576 unsigned HOST_WIDE_INT bre,
1577 gimple *st,
1578 unsigned int ord,
1579 enum tree_code rhscode,
1580 struct symbolic_number &nr,
1581 gimple *ins_stmtp,
1582 bool bitnotp,
1583 int nr2,
1584 const store_operand_info &op0r,
1585 const store_operand_info &op1r)
1586 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1587 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1588 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
1589 lp_nr (nr2)
1590#if __cplusplus >= 201103L
1591 , ops { op0r, op1r }
1592{
1593}
1594#else
1595{
1596 ops[0] = op0r;
1597 ops[1] = op1r;
1598}
1599#endif
1600
1601/* Struct representing a group of stores to contiguous memory locations.
1602 These are produced by the second phase (coalescing) and consumed in the
1603 third phase that outputs the widened stores. */
1604
1605class merged_store_group
1606{
1607public:
1608 unsigned HOST_WIDE_INT start;
1609 unsigned HOST_WIDE_INT width;
1610 unsigned HOST_WIDE_INT bitregion_start;
1611 unsigned HOST_WIDE_INT bitregion_end;
1612 /* The size of the allocated memory for val and mask. */
1613 unsigned HOST_WIDE_INT buf_size;
1614 unsigned HOST_WIDE_INT align_base;
1615 poly_uint64 load_align_base[2];
1616
1617 unsigned int align;
1618 unsigned int load_align[2];
1619 unsigned int first_order;
1620 unsigned int last_order;
1621 bool bit_insertion;
1622 bool string_concatenation;
1623 bool only_constants;
1624 bool consecutive;
1625 unsigned int first_nonmergeable_order;
1626 int lp_nr;
1627
1628 auto_vec<store_immediate_info *> stores;
1629 /* We record the first and last original statements in the sequence because
1630 we'll need their vuse/vdef and replacement position. It's easier to keep
1631 track of them separately as 'stores' is reordered by apply_stores. */
1632 gimple *last_stmt;
1633 gimple *first_stmt;
1634 unsigned char *val;
1635 unsigned char *mask;
1636
1637 merged_store_group (store_immediate_info *);
1638 ~merged_store_group ();
1639 bool can_be_merged_into (store_immediate_info *);
1640 void merge_into (store_immediate_info *);
1641 void merge_overlapping (store_immediate_info *);
1642 bool apply_stores ();
1643private:
1644 void do_merge (store_immediate_info *);
1645};
1646
1647/* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1648
1649static void
1650dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1651{
1652 if (!fd)
1653 return;
1654
1655 for (unsigned int i = 0; i < len; i++)
1656 fprintf (fd, "%02x ", ptr[i]);
1657 fprintf (fd, "\n");
1658}
1659
1660/* Clear out LEN bits starting from bit START in the byte array
1661 PTR. This clears the bits to the *right* from START.
1662 START must be within [0, BITS_PER_UNIT) and counts starting from
1663 the least significant bit. */
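/* For example (illustrative only): clear_bit_region_be (ptr, 5, 3) on a
   byte whose value is 0xff clears bits 5, 4 and 3, leaving 0xc7.  */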
1664
1665static void
1666clear_bit_region_be (unsigned char *ptr, unsigned int start,
1667 unsigned int len)
1668{
1669 if (len == 0)
1670 return;
1671 /* Clear len bits to the right of start. */
1672 else if (len <= start + 1)
1673 {
1674 unsigned char mask = (~(~0U << len));
1675 mask = mask << (start + 1U - len);
1676 ptr[0] &= ~mask;
1677 }
1678 else if (start != BITS_PER_UNIT - 1)
1679 {
1680 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1681 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1682 len - (start % BITS_PER_UNIT) - 1);
1683 }
1684 else if (start == BITS_PER_UNIT - 1
1685 && len > BITS_PER_UNIT)
1686 {
1687 unsigned int nbytes = len / BITS_PER_UNIT;
a62b3dc5 1688 memset (ptr, 0, nbytes);
1689 if (len % BITS_PER_UNIT != 0)
1690 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1691 len % BITS_PER_UNIT);
1692 }
1693 else
1694 gcc_unreachable ();
1695}
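/* Editor's note: illustrative example, not part of the original source.
   Assuming BITS_PER_UNIT == 8 and bit 0 being the least significant bit of
   a byte:

     unsigned char buf[2] = { 0xff, 0xff };
     clear_bit_region_be (buf, 5, 3);    // clears bits 5, 4 and 3 of buf[0]
     // now buf[0] == 0xc7 and buf[1] == 0xff

     unsigned char buf2[2] = { 0xff, 0xff };
     clear_bit_region_be (buf2, 7, 12);  // one whole byte plus 4 more bits
     // now buf2[0] == 0x00 and buf2[1] == 0x0f
*/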
1696
1697/* In the byte array PTR clear the bit region that starts at bit
1698 START and is LEN bits wide.
1699 For regions spanning multiple bytes do this recursively until we reach
1700 zero LEN or a region contained within a single byte. */
1701
1702static void
1703clear_bit_region (unsigned char *ptr, unsigned int start,
1704 unsigned int len)
1705{
1706 /* Degenerate base case. */
1707 if (len == 0)
1708 return;
1709 else if (start >= BITS_PER_UNIT)
1710 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1711 /* Second base case. */
1712 else if ((start + len) <= BITS_PER_UNIT)
1713 {
46a61395 1714 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1715 mask >>= BITS_PER_UNIT - (start + len);
1716
1717 ptr[0] &= ~mask;
1718
1719 return;
1720 }
1721 /* Clear most significant bits in a byte and proceed with the next byte. */
1722 else if (start != 0)
1723 {
1724 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1f069ef5 1725 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1726 }
1727 /* Whole bytes need to be cleared. */
1728 else if (start == 0 && len > BITS_PER_UNIT)
1729 {
1730 unsigned int nbytes = len / BITS_PER_UNIT;
1731 /* We could recurse on each byte but we clear whole bytes, so a simple
1732 memset will do. */
46a61395 1733 memset (ptr, '\0', nbytes);
1734 /* Clear the remaining sub-byte region if there is one. */
1735 if (len % BITS_PER_UNIT != 0)
1736 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1737 }
1738 else
1739 gcc_unreachable ();
1740}
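/* Editor's note: illustrative example, not part of the original source.
   Here START counts from the least significant bit of buf[0] upwards, so
   with BITS_PER_UNIT == 8:

     unsigned char buf[2] = { 0xff, 0xff };
     clear_bit_region (buf, 2, 3);      // clears bits 2..4 of buf[0]
     // now buf[0] == 0xe3 and buf[1] == 0xff

     unsigned char buf2[2] = { 0xff, 0xff };
     clear_bit_region (buf2, 6, 4);     // spans the byte boundary
     // now buf2[0] == 0x3f and buf2[1] == 0xfc
*/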
1741
1742/* Write BITLEN bits of EXPR to the byte array PTR at
1743 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1744 Return true if the operation succeeded. */
1745
1746static bool
1747encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
46a61395 1748 unsigned int total_bytes)
1749{
1750 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1751 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1752 || (bitpos % BITS_PER_UNIT)
f4b31647 1753 || !int_mode_for_size (bitlen, 0).exists ());
1754 bool empty_ctor_p
1755 = (TREE_CODE (expr) == CONSTRUCTOR
1756 && CONSTRUCTOR_NELTS (expr) == 0
1757 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1758 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1759
1760 if (!sub_byte_op_p)
1761 {
1762 if (first_byte >= total_bytes)
1763 return false;
1764 total_bytes -= first_byte;
1765 if (empty_ctor_p)
1766 {
1767 unsigned HOST_WIDE_INT rhs_bytes
1768 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1769 if (rhs_bytes > total_bytes)
1770 return false;
1771 memset (ptr + first_byte, '\0', rhs_bytes);
1772 return true;
1773 }
1774 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1775 }
1776
1777 /* LITTLE-ENDIAN
1778 We are writing a non byte-sized quantity or at a position that is not
1779 at a byte boundary.
1780 |--------|--------|--------| ptr + first_byte
1781 ^ ^
1782 xxx xxxxxxxx xxx< bp>
1783 |______EXPR____|
1784
46a61395 1785 First native_encode_expr EXPR into a temporary buffer and shift each
1786 byte in the buffer by 'bp' (carrying the bits over as necessary).
1787 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1788 <------bitlen---->< bp>
1789 Then we clear the destination bits:
1790 |---00000|00000000|000-----| ptr + first_byte
1791 <-------bitlen--->< bp>
1792
1793 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1794 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1795
1796 BIG-ENDIAN
1797 We are writing a non byte-sized quantity or at a position that is not
1798 at a byte boundary.
1799 ptr + first_byte |--------|--------|--------|
1800 ^ ^
1801 <bp >xxx xxxxxxxx xxx
1802 |_____EXPR_____|
1803
46a61395 1804 First native_encode_expr EXPR into a temporary buffer and shift each
1805 byte in the buffer to the right (carrying the bits over as necessary).
1806 We shift by as much as needed to align the most significant bit of EXPR
1807 with bitpos:
1808 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1809 <---bitlen----> <bp ><-----bitlen----->
1810 Then we clear the destination bits:
1811 ptr + first_byte |-----000||00000000||00000---|
1812 <bp ><-------bitlen----->
1813
1814 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1815 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1816 The awkwardness comes from the fact that bitpos is counted from the
1817 most significant bit of a byte. */
1818
1819 /* We must be dealing with fixed-size data at this point, since the
1820 total size is also fixed. */
1821 unsigned int byte_size;
1822 if (empty_ctor_p)
1823 {
1824 unsigned HOST_WIDE_INT rhs_bytes
1825 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1826 if (rhs_bytes > total_bytes)
1827 return false;
1828 byte_size = rhs_bytes;
1829 }
1830 else
1831 {
1832 fixed_size_mode mode
1833 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
1834 byte_size
1835 = mode == BLKmode
1836 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
1837 : GET_MODE_SIZE (mode);
3afd514b 1838 }
f663d9ad 1839 /* Allocate an extra byte so that we have space to shift into. */
3afd514b 1840 byte_size++;
f663d9ad 1841 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
46a61395 1842 memset (tmpbuf, '\0', byte_size);
f663d9ad 1843 /* The store detection code should only have allowed constants that are
1844 accepted by native_encode_expr or empty ctors. */
1845 if (!empty_ctor_p
1846 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
1847 gcc_unreachable ();
1848
1849 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1850 bytes to write. This means it can write more than
1851 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1852 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1853 bitlen and zero out the bits that are not relevant as well (that may
1854 contain a sign bit due to sign-extension). */
1855 unsigned int padding
1856 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
1857 /* On big-endian the padding is at the 'front' so just skip the initial
1858 bytes. */
1859 if (BYTES_BIG_ENDIAN)
1860 tmpbuf += padding;
1861
1862 byte_size -= padding;
1863
1864 if (bitlen % BITS_PER_UNIT != 0)
f663d9ad 1865 {
4b2c06f4 1866 if (BYTES_BIG_ENDIAN)
1867 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1868 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1869 else
1870 clear_bit_region (tmpbuf, bitlen,
1871 byte_size * BITS_PER_UNIT - bitlen);
f663d9ad 1872 }
1873 /* Left shifting relies on the last byte being clear if bitlen is
1874 a multiple of BITS_PER_UNIT, which might not be clear if
1875 there are padding bytes. */
1876 else if (!BYTES_BIG_ENDIAN)
1877 tmpbuf[byte_size - 1] = '\0';
1878
1879 /* Clear the bit region in PTR where the bits from TMPBUF will be
46a61395 1880 inserted into. */
1881 if (BYTES_BIG_ENDIAN)
1882 clear_bit_region_be (ptr + first_byte,
1883 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1884 else
1885 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1886
1887 int shift_amnt;
1888 int bitlen_mod = bitlen % BITS_PER_UNIT;
1889 int bitpos_mod = bitpos % BITS_PER_UNIT;
1890
1891 bool skip_byte = false;
1892 if (BYTES_BIG_ENDIAN)
1893 {
1894 /* BITPOS and BITLEN are exactly aligned and no shifting
1895 is necessary. */
1896 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1897 || (bitpos_mod == 0 && bitlen_mod == 0))
1898 shift_amnt = 0;
1899 /* |. . . . . . . .|
1900 <bp > <blen >.
1901 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1902 of the value until it aligns with 'bp' in the next byte over. */
1903 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1904 {
1905 shift_amnt = bitlen_mod + bitpos_mod;
1906 skip_byte = bitlen_mod != 0;
1907 }
1908 /* |. . . . . . . .|
1909 <----bp--->
1910 <---blen---->.
1911 Shift the value right within the same byte so it aligns with 'bp'. */
1912 else
1913 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1914 }
1915 else
1916 shift_amnt = bitpos % BITS_PER_UNIT;
1917
1918 /* Create the shifted version of EXPR. */
1919 if (!BYTES_BIG_ENDIAN)
46a61395 1920 {
8aba425f 1921 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
1922 if (shift_amnt == 0)
1923 byte_size--;
1924 }
1925 else
1926 {
1927 gcc_assert (BYTES_BIG_ENDIAN);
1928 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1929 /* If shifting right forced us to move into the next byte skip the now
1930 empty byte. */
1931 if (skip_byte)
1932 {
1933 tmpbuf++;
1934 byte_size--;
1935 }
1936 }
1937
1938 /* Insert the bits from TMPBUF. */
1939 for (unsigned int i = 0; i < byte_size; i++)
1940 ptr[first_byte + i] |= tmpbuf[i];
1941
1942 return true;
1943}
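/* Editor's note: illustrative walk-through, not part of the original source.
   On a little-endian target, encoding the 8-bit INTEGER_CST 0xab at BITPOS 4
   into a zeroed 2-byte buffer PTR proceeds roughly as:
     native_encode_expr           ->  tmpbuf = { 0xab, 0x00 }
     shift_bytes_in_array_left 4  ->  tmpbuf = { 0xb0, 0x0a }
     clear bits 4..11 of PTR, then OR tmpbuf into it
   so afterwards ptr[0] == 0xb0 and ptr[1] == 0x0a, i.e. the constant now
   sits at bit offset 4 of the destination region.  */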
1944
1945/* Sorting function for store_immediate_info objects.
1946 Sorts them by bitposition. */
1947
1948static int
1949sort_by_bitpos (const void *x, const void *y)
1950{
1951 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1952 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1953
109cca3b 1954 if ((*tmp)->bitpos < (*tmp2)->bitpos)
1955 return -1;
1956 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1957 return 1;
109cca3b 1958 else
1959 /* If they are the same let's use the order which is guaranteed to
1960 be different. */
1961 return (*tmp)->order - (*tmp2)->order;
1962}
1963
1964/* Sorting function for store_immediate_info objects.
1965 Sorts them by the order field. */
1966
1967static int
1968sort_by_order (const void *x, const void *y)
1969{
1970 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1971 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1972
1973 if ((*tmp)->order < (*tmp2)->order)
1974 return -1;
1975 else if ((*tmp)->order > (*tmp2)->order)
1976 return 1;
1977
1978 gcc_unreachable ();
1979}
1980
1981/* Initialize a merged_store_group object from a store_immediate_info
1982 object. */
1983
1984merged_store_group::merged_store_group (store_immediate_info *info)
1985{
1986 start = info->bitpos;
1987 width = info->bitsize;
1988 bitregion_start = info->bitregion_start;
1989 bitregion_end = info->bitregion_end;
1990 /* VAL has memory allocated for it in apply_stores once the group
1991 width has been finalized. */
1992 val = NULL;
a62b3dc5 1993 mask = NULL;
1994 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1995 string_concatenation = info->rhs_code == STRING_CST;
18e0c3d1 1996 only_constants = info->rhs_code == INTEGER_CST;
1b3c9813 1997 consecutive = true;
18e0c3d1 1998 first_nonmergeable_order = ~0U;
629387a6 1999 lp_nr = info->lp_nr;
2000 unsigned HOST_WIDE_INT align_bitpos = 0;
2001 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2002 &align, &align_bitpos);
2003 align_base = start - align_bitpos;
2004 for (int i = 0; i < 2; ++i)
2005 {
2006 store_operand_info &op = info->ops[i];
2007 if (op.base_addr == NULL_TREE)
2008 {
2009 load_align[i] = 0;
2010 load_align_base[i] = 0;
2011 }
2012 else
2013 {
2014 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
2015 load_align_base[i] = op.bitpos - align_bitpos;
2016 }
2017 }
2018 stores.create (1);
2019 stores.safe_push (info);
2020 last_stmt = info->stmt;
2021 last_order = info->order;
2022 first_stmt = last_stmt;
2023 first_order = last_order;
2024 buf_size = 0;
2025}
2026
2027merged_store_group::~merged_store_group ()
2028{
2029 if (val)
2030 XDELETEVEC (val);
2031}
2032
2033/* Return true if the store described by INFO can be merged into the group. */
2034
2035bool
2036merged_store_group::can_be_merged_into (store_immediate_info *info)
2037{
2038 /* Do not merge bswap patterns. */
2039 if (info->rhs_code == LROTATE_EXPR)
2040 return false;
2041
2042 if (info->lp_nr != lp_nr)
2043 return false;
2044
2045 /* The canonical case. */
2046 if (info->rhs_code == stores[0]->rhs_code)
2047 return true;
2048
e362a897 2049 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
7f5a3982 2050 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
e362a897 2051 return !string_concatenation;
2052
2053 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
e362a897 2054 return !string_concatenation;
7f5a3982 2055
2056 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
2057 only for small regions since this can generate a lot of instructions. */
2058 if (info->rhs_code == MEM_REF
2059 && (stores[0]->rhs_code == INTEGER_CST
2060 || stores[0]->rhs_code == BIT_INSERT_EXPR)
2061 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2062 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2063 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897 2064 return !string_concatenation;
2065
2066 if (stores[0]->rhs_code == MEM_REF
2067 && (info->rhs_code == INTEGER_CST
2068 || info->rhs_code == BIT_INSERT_EXPR)
2069 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2070 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2071 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
2072 return !string_concatenation;
2073
2074 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
2075 if (info->rhs_code == STRING_CST
2076 && stores[0]->rhs_code == INTEGER_CST
2077 && stores[0]->bitsize == CHAR_BIT)
2078 return !bit_insertion;
2079
2080 if (stores[0]->rhs_code == STRING_CST
2081 && info->rhs_code == INTEGER_CST
2082 && info->bitsize == CHAR_BIT)
2083 return !bit_insertion;
2084
2085 return false;
2086}
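/* Editor's note: illustrative summary, not part of the original source.
   Assuming a group whose first store is the INTEGER_CST store 'x.a = 1':
     - another INTEGER_CST store always passes the canonical-case check;
     - a BIT_INSERT_EXPR store passes as long as no string concatenation has
       been recorded for the group;
     - a MEM_REF store (e.g. 'x.b = y.b') passes only if both stores cover
       the same small bit region;
     - a STRING_CST store passes only if the existing store is a single
       character and no bit insertion has been recorded.  */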
2087
2088/* Helper method for merge_into and merge_overlapping to do
2089 the common part. */
7f5a3982 2090
f663d9ad 2091void
a62b3dc5 2092merged_store_group::do_merge (store_immediate_info *info)
f663d9ad 2093{
2094 bitregion_start = MIN (bitregion_start, info->bitregion_start);
2095 bitregion_end = MAX (bitregion_end, info->bitregion_end);
2096
2097 unsigned int this_align;
2098 unsigned HOST_WIDE_INT align_bitpos = 0;
2099 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2100 &this_align, &align_bitpos);
2101 if (this_align > align)
2102 {
2103 align = this_align;
2104 align_base = info->bitpos - align_bitpos;
2105 }
2106 for (int i = 0; i < 2; ++i)
2107 {
2108 store_operand_info &op = info->ops[i];
2109 if (!op.base_addr)
2110 continue;
2111
2112 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
2113 if (this_align > load_align[i])
2114 {
2115 load_align[i] = this_align;
2116 load_align_base[i] = op.bitpos - align_bitpos;
2117 }
2118 }
f663d9ad 2119
2120 gimple *stmt = info->stmt;
2121 stores.safe_push (info);
2122 if (info->order > last_order)
2123 {
2124 last_order = info->order;
2125 last_stmt = stmt;
2126 }
2127 else if (info->order < first_order)
2128 {
2129 first_order = info->order;
2130 first_stmt = stmt;
2131 }
e362a897 2132
2133 if (info->bitpos != start + width)
2134 consecutive = false;
2135
2136 /* We need to use extraction if there is any bit-field. */
2137 if (info->rhs_code == BIT_INSERT_EXPR)
2138 {
2139 bit_insertion = true;
2140 gcc_assert (!string_concatenation);
2141 }
2142
1b3c9813 2143 /* We want to use concatenation if there is any string. */
2144 if (info->rhs_code == STRING_CST)
2145 {
2146 string_concatenation = true;
2147 gcc_assert (!bit_insertion);
2148 }
2149
2150 /* But we cannot use it if we don't have consecutive stores. */
2151 if (!consecutive)
2152 string_concatenation = false;
2153
2154 if (info->rhs_code != INTEGER_CST)
2155 only_constants = false;
2156}
2157
2158/* Merge a store recorded by INFO into this merged store.
2159 The store is not overlapping with the existing recorded
2160 stores. */
2161
2162void
2163merged_store_group::merge_into (store_immediate_info *info)
2164{
2165 do_merge (info);
2166
2167 /* Make sure we're inserting in the position we think we're inserting. */
2168 gcc_assert (info->bitpos >= start + width
2169 && info->bitregion_start <= bitregion_end);
2170
c5679c37 2171 width = info->bitpos + info->bitsize - start;
2172}
2173
2174/* Merge a store described by INFO into this merged store.
2175 INFO overlaps in some way with the current store (i.e. it's not contiguous
2176 which is handled by merged_store_group::merge_into). */
2177
2178void
2179merged_store_group::merge_overlapping (store_immediate_info *info)
2180{
2181 do_merge (info);
2182
f663d9ad 2183 /* If the store extends the size of the group, extend the width. */
a62b3dc5 2184 if (info->bitpos + info->bitsize > start + width)
c5679c37 2185 width = info->bitpos + info->bitsize - start;
2186}
2187
2188/* Go through all the recorded stores in this group in program order and
2189 apply their values to the VAL byte array to create the final merged
2190 value. Return true if the operation succeeded. */
2191
2192bool
2193merged_store_group::apply_stores ()
2194{
2195 store_immediate_info *info;
2196 unsigned int i;
2197
2198 /* Make sure we have more than one store in the group, otherwise we cannot
2199 merge anything. */
2200 if (bitregion_start % BITS_PER_UNIT != 0
2201 || bitregion_end % BITS_PER_UNIT != 0
2202 || stores.length () == 1)
2203 return false;
2204
2205 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2206
2207 /* Really do string concatenation for large strings only. */
2208 if (buf_size <= MOVE_MAX)
2209 string_concatenation = false;
2210
c94c3532 2211 /* Create a power-of-2-sized buffer for native_encode_expr. */
2212 if (!string_concatenation)
2213 buf_size = 1 << ceil_log2 (buf_size);
2214
2215 val = XNEWVEC (unsigned char, 2 * buf_size);
2216 mask = val + buf_size;
2217 memset (val, 0, buf_size);
2218 memset (mask, ~0U, buf_size);
f663d9ad 2219
2220 stores.qsort (sort_by_order);
2221
2222 FOR_EACH_VEC_ELT (stores, i, info)
2223 {
a62b3dc5 2224 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
c94c3532 2225 tree cst;
2226 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2227 cst = info->ops[0].val;
2228 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2229 cst = info->ops[1].val;
2230 else
2231 cst = NULL_TREE;
245f6de1 2232 bool ret = true;
2233 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2234 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2235 buf_size);
2236 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2237 if (BYTES_BIG_ENDIAN)
2238 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2239 - (pos_in_buffer % BITS_PER_UNIT)),
2240 info->bitsize);
2241 else
2242 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
245f6de1 2243 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2244 {
2245 if (ret)
2246 {
c94c3532 2247 fputs ("After writing ", dump_file);
4af78ef8 2248 print_generic_expr (dump_file, cst, TDF_NONE);
f663d9ad 2249 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2250 " at position %d\n", info->bitsize, pos_in_buffer);
2251 fputs (" the merged value contains ", dump_file);
f663d9ad 2252 dump_char_array (dump_file, val, buf_size);
2253 fputs (" the merged mask contains ", dump_file);
2254 dump_char_array (dump_file, mask, buf_size);
2255 if (bit_insertion)
2256 fputs (" bit insertion is required\n", dump_file);
2257 if (string_concatenation)
2258 fputs (" string concatenation is required\n", dump_file);
2259 }
2260 else
2261 fprintf (dump_file, "Failed to merge stores\n");
4b84d9b8 2262 }
2263 if (!ret)
2264 return false;
2265 }
4b84d9b8 2266 stores.qsort (sort_by_bitpos);
2267 return true;
2268}
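/* Editor's note: illustrative example, not part of the original source.
   Assuming a group whose bit region spans 4 bytes and which recorded two
   constant stores, 8 bits of value 0x11 at bit position 0 and 16 bits of
   value 0x2233 at bit position 16 (as can happen with bit-field stores
   sharing one bit region), apply_stores fills, on a little-endian target,
     val  = { 0x11, 0x00, 0x33, 0x22 }
     mask = { 0x00, 0xff, 0x00, 0x00 }
   where a set bit in MASK marks a position that no recorded store wrote and
   which the output phase must keep from the original memory contents.  */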
2269
2270/* Structure describing the store chain. */
2271
6c1dae73 2272class imm_store_chain_info
f663d9ad 2273{
6c1dae73 2274public:
2275 /* Doubly-linked list that imposes an order on chain processing.
2276 PNXP (prev's next pointer) points to the head of a list, or to
2277 the next field in the previous chain in the list.
2278 See pass_store_merging::m_stores_head for more rationale. */
2279 imm_store_chain_info *next, **pnxp;
b5926e23 2280 tree base_addr;
a62b3dc5 2281 auto_vec<store_immediate_info *> m_store_info;
2282 auto_vec<merged_store_group *> m_merged_store_groups;
2283
2284 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2285 : next (inspt), pnxp (&inspt), base_addr (b_a)
2286 {
2287 inspt = this;
2288 if (next)
2289 {
2290 gcc_checking_assert (pnxp == next->pnxp);
2291 next->pnxp = &next;
2292 }
2293 }
2294 ~imm_store_chain_info ()
2295 {
2296 *pnxp = next;
2297 if (next)
2298 {
2299 gcc_checking_assert (&next == next->pnxp);
2300 next->pnxp = pnxp;
2301 }
2302 }
b5926e23 2303 bool terminate_and_process_chain ();
2304 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2305 unsigned int);
f663d9ad 2306 bool coalesce_immediate_stores ();
2307 bool output_merged_store (merged_store_group *);
2308 bool output_merged_stores ();
2309};
2310
2311const pass_data pass_data_tree_store_merging = {
2312 GIMPLE_PASS, /* type */
2313 "store-merging", /* name */
2314 OPTGROUP_NONE, /* optinfo_flags */
2315 TV_GIMPLE_STORE_MERGING, /* tv_id */
2316 PROP_ssa, /* properties_required */
2317 0, /* properties_provided */
2318 0, /* properties_destroyed */
2319 0, /* todo_flags_start */
2320 TODO_update_ssa, /* todo_flags_finish */
2321};
2322
2323class pass_store_merging : public gimple_opt_pass
2324{
2325public:
2326 pass_store_merging (gcc::context *ctxt)
faec5f24 2327 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
2328 {
2329 }
2330
2331 /* Pass not supported for PDP-endian, nor for insane hosts or
2332 target character sizes where native_{encode,interpret}_expr
a62b3dc5 2333 doesn't work properly. */
2334 virtual bool
2335 gate (function *)
2336 {
a62b3dc5 2337 return flag_store_merging
c94c3532 2338 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2339 && CHAR_BIT == 8
2340 && BITS_PER_UNIT == 8;
2341 }
2342
2343 virtual unsigned int execute (function *);
2344
2345private:
99b1c316 2346 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
f663d9ad 2347
2348 /* Form a doubly-linked stack of the elements of m_stores, so that
2349 we can iterate over them in a predictable way. Using this order
2350 avoids extraneous differences in the compiler output just because
2351 of tree pointer variations (e.g. different chains end up in
2352 different positions of m_stores, so they are handled in different
2353 orders, so they allocate or release SSA names in different
2354 orders, and when they get reused, subsequent passes end up
2355 getting different SSA names, which may ultimately change
2356 decisions when going out of SSA). */
2357 imm_store_chain_info *m_stores_head;
2358
2359 bool process_store (gimple *);
2360 bool terminate_and_process_chain (imm_store_chain_info *);
383ac8dc 2361 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
629387a6 2362 bool terminate_and_process_all_chains ();
2363}; // class pass_store_merging
2364
2365/* Terminate and process all recorded chains. Return true if any changes
2366 were made. */
2367
2368bool
2369pass_store_merging::terminate_and_process_all_chains ()
2370{
f663d9ad 2371 bool ret = false;
50b6d676 2372 while (m_stores_head)
629387a6 2373 ret |= terminate_and_process_chain (m_stores_head);
b119c055 2374 gcc_assert (m_stores.is_empty ());
2375 return ret;
2376}
2377
2378/* Terminate all chains that are affected by the statement STMT.
2379 CHAIN_INFO is the chain we should ignore from the checks if
629387a6 2380 non-NULL. Return true if any changes were made. */
2381
2382bool
20770eb8 2383pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
b5926e23 2384 **chain_info,
2385 gimple *stmt)
2386{
2387 bool ret = false;
2388
2389 /* If the statement doesn't touch memory it can't alias. */
2390 if (!gimple_vuse (stmt))
2391 return false;
2392
9e875fd8 2393 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2394 ao_ref store_lhs_ref;
2395 ao_ref_init (&store_lhs_ref, store_lhs);
383ac8dc 2396 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
f663d9ad 2397 {
2398 next = cur->next;
2399
2400 /* We already checked all the stores in chain_info and terminated the
2401 chain if necessary. Skip it here. */
2402 if (chain_info && *chain_info == cur)
2403 continue;
2404
2405 store_immediate_info *info;
2406 unsigned int i;
383ac8dc 2407 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
f663d9ad 2408 {
9e875fd8 2409 tree lhs = gimple_assign_lhs (info->stmt);
2410 ao_ref lhs_ref;
2411 ao_ref_init (&lhs_ref, lhs);
2412 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2413 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2414 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2415 &lhs_ref, false)))
f663d9ad 2416 {
245f6de1 2417 if (dump_file && (dump_flags & TDF_DETAILS))
f663d9ad 2418 {
2419 fprintf (dump_file, "stmt causes chain termination:\n");
2420 print_gimple_stmt (dump_file, stmt, 0);
f663d9ad 2421 }
629387a6 2422 ret |= terminate_and_process_chain (cur);
245f6de1 2423 break;
2424 }
2425 }
2426 }
2427
2428 return ret;
2429}
2430
2431/* Helper function. Terminate the chain recorded in CHAIN_INFO. Return
2432 true if the merging and output was successful. The m_stores
2433 entry is removed after the processing in any case. */
2434
2435bool
629387a6 2436pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
f663d9ad 2437{
2438 bool ret = chain_info->terminate_and_process_chain ();
2439 m_stores.remove (chain_info->base_addr);
2440 delete chain_info;
2441 return ret;
2442}
2443
245f6de1 2444/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2445 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2446 be able to sink load of REF across stores between FIRST and LAST, up
2447 to right before LAST. */
2448
2449bool
2450stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2451{
2452 ao_ref r;
2453 ao_ref_init (&r, ref);
2454 unsigned int count = 0;
2455 tree vop = gimple_vdef (last);
2456 gimple *stmt;
2457
2458 /* Return true conservatively if the basic blocks are different. */
2459 if (gimple_bb (first) != gimple_bb (last))
2460 return true;
2461
2462 do
2463 {
2464 stmt = SSA_NAME_DEF_STMT (vop);
2465 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2466 return true;
2467 if (gimple_store_p (stmt)
2468 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2469 return true;
2470 /* Avoid quadratic compile time by bounding the number of checks
2471 we perform. */
2472 if (++count > MAX_STORE_ALIAS_CHECKS)
2473 return true;
2474 vop = gimple_vuse (stmt);
2475 }
2476 while (stmt != first);
629387a6 2477
2478 return false;
2479}
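/* Editor's note: illustrative example, not part of the original source.
   For the sequence
     s.a = 1;   // FIRST
     s.b = 2;
     t.c = 3;   // LAST
   the loop walks the virtual use-def chain backwards from LAST towards
   FIRST, asking for each store whether it may clobber or be anti-dependent
   on REF: a REF of an unrelated 'u.d' survives the walk (false is
   returned), while a REF of 's.b' is reported as possibly clobbered (true
   is returned).  */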
2480
2481/* Return true if INFO->ops[IDX] is mergeable with the
2482 corresponding loads already in MERGED_STORE group.
2483 BASE_ADDR is the base address of the whole store group. */
2484
2485bool
2486compatible_load_p (merged_store_group *merged_store,
2487 store_immediate_info *info,
2488 tree base_addr, int idx)
2489{
2490 store_immediate_info *infof = merged_store->stores[0];
2491 if (!info->ops[idx].base_addr
2492 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2493 info->bitpos - infof->bitpos)
2494 || !operand_equal_p (info->ops[idx].base_addr,
2495 infof->ops[idx].base_addr, 0))
2496 return false;
2497
2498 store_immediate_info *infol = merged_store->stores.last ();
2499 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2500 /* In this case all vuses should be the same, e.g.
2501 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2502 or
2503 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2504 and we can emit the coalesced load next to any of those loads. */
2505 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2506 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2507 return true;
2508
2509 /* Otherwise, at least for now require that the load has the same
2510 vuse as the store. See following examples. */
2511 if (gimple_vuse (info->stmt) != load_vuse)
2512 return false;
2513
2514 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2515 || (infof != infol
2516 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2517 return false;
2518
2519 /* If the load is from the same location as the store, already
2520 the construction of the immediate chain info guarantees no intervening
2521 stores, so no further checks are needed. Example:
2522 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
8a91d545 2523 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2524 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2525 return true;
2526
2527 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2528 of the stores in the group, or any other stores in between those.
2529 Previous calls to compatible_load_p ensured that for all the
2530 merged_store->stores IDX loads, no stmts starting with
2531 merged_store->first_stmt and ending right before merged_store->last_stmt
2532 clobbers those loads. */
2533 gimple *first = merged_store->first_stmt;
2534 gimple *last = merged_store->last_stmt;
2535 unsigned int i;
2536 store_immediate_info *infoc;
2537 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2538 comes before the so far first load, we'll be changing
2539 merged_store->first_stmt. In that case we need to give up if
2540 any of the earlier processed loads clobber with the stmts in the new
2541 range. */
2542 if (info->order < merged_store->first_order)
2543 {
2544 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2545 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2546 return false;
2547 first = info->stmt;
2548 }
2549 /* Similarly, we could change merged_store->last_stmt, so ensure
2550 in that case no stmts in the new range clobber any of the earlier
2551 processed loads. */
2552 else if (info->order > merged_store->last_order)
2553 {
2554 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2555 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2556 return false;
2557 last = info->stmt;
2558 }
2559 /* And finally, we'd be adding a new load to the set, ensure it isn't
2560 clobbered in the new range. */
2561 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2562 return false;
2563
2564 /* Otherwise, we are looking for:
2565 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2566 or
2567 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2568 return true;
2569}
2570
2571/* Add all refs loaded to compute VAL to REFS vector. */
2572
2573void
2574gather_bswap_load_refs (vec<tree> *refs, tree val)
2575{
2576 if (TREE_CODE (val) != SSA_NAME)
2577 return;
2578
2579 gimple *stmt = SSA_NAME_DEF_STMT (val);
2580 if (!is_gimple_assign (stmt))
2581 return;
2582
2583 if (gimple_assign_load_p (stmt))
2584 {
2585 refs->safe_push (gimple_assign_rhs1 (stmt));
2586 return;
2587 }
2588
2589 switch (gimple_assign_rhs_class (stmt))
2590 {
2591 case GIMPLE_BINARY_RHS:
2592 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2593 /* FALLTHRU */
2594 case GIMPLE_UNARY_RHS:
2595 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2596 break;
2597 default:
2598 gcc_unreachable ();
2599 }
2600}
2601
2602/* Check if there are any stores in M_STORE_INFO after index I
2603 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2604 a potential group ending with END that have their order
2605 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2606 all the stores already merged and the one under consideration
2607 have rhs_code of INTEGER_CST. Return true if there are no such stores.
2608 Consider:
2609 MEM[(long long int *)p_28] = 0;
2610 MEM[(long long int *)p_28 + 8B] = 0;
2611 MEM[(long long int *)p_28 + 16B] = 0;
2612 MEM[(long long int *)p_28 + 24B] = 0;
2613 _129 = (int) _130;
2614 MEM[(int *)p_28 + 8B] = _129;
2615 MEM[(int *)p_28].a = -1;
2616 We already have
2617 MEM[(long long int *)p_28] = 0;
2618 MEM[(int *)p_28].a = -1;
2619 stmts in the current group and need to consider if it is safe to
2620 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2621 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2622 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2623 into the group and merging of those 3 stores is successful, merged
2624 stmts will be emitted at the latest store from that group, i.e.
2625 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2626 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2627 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2628 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2629 into the group. That way it will be its own store group and will
4d213bf6 2630 not be touched. If ALL_INTEGER_CST_P and there are overlapping
c5679c37 2631 INTEGER_CST stores, those are mergeable using merge_overlapping,
2632 so don't return false for those.
2633
2634 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2635 (exclusive), whether they don't overlap the bitrange START to END
2636 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2637 prevent merging in cases like:
2638 MEM <char[12]> [&b + 8B] = {};
2639 MEM[(short *) &b] = 5;
2640 _5 = *x_4(D);
2641 MEM <long long unsigned int> [&b + 2B] = _5;
2642 MEM[(char *)&b + 16B] = 88;
2643 MEM[(int *)&b + 20B] = 1;
2644 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2645 be merged with it, because the = _5 store overlaps these and is in between
2646 them in sort_by_order ordering. If it was merged, the merged store would
2647 go after the = _5 store and thus change behavior. */
2648
2649static bool
2650check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2651 bool all_integer_cst_p, unsigned int first_order,
2652 unsigned int last_order, unsigned HOST_WIDE_INT start,
2653 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2654 unsigned end_earlier)
2655{
2656 unsigned int len = m_store_info.length ();
2657 for (unsigned int j = first_earlier; j < end_earlier; j++)
2658 {
2659 store_immediate_info *info = m_store_info[j];
2660 if (info->order > first_order
2661 && info->order < last_order
2662 && info->bitpos + info->bitsize > start)
2663 return false;
2664 }
2665 for (++i; i < len; ++i)
2666 {
2667 store_immediate_info *info = m_store_info[i];
2668 if (info->bitpos >= end)
2669 break;
2670 if (info->order < last_order
4d213bf6 2671 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
2672 return false;
2673 }
2674 return true;
2675}
2676
2677/* Return true if m_store_info[first] and at least one following store
2678 form a group which stores a try_size-bit value that is byte swapped
2679 from a memory load or some value, or is the identity of some value.
2680 This uses the bswap pass APIs. */
2681
2682bool
2683imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2684 unsigned int first,
2685 unsigned int try_size,
2686 unsigned int first_earlier)
2687{
2688 unsigned int len = m_store_info.length (), last = first;
2689 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2690 if (width >= try_size)
2691 return false;
2692 for (unsigned int i = first + 1; i < len; ++i)
2693 {
2694 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
cb76fcd7 2695 || m_store_info[i]->lp_nr != merged_store->lp_nr
2696 || m_store_info[i]->ins_stmt == NULL)
2697 return false;
2698 width += m_store_info[i]->bitsize;
2699 if (width >= try_size)
2700 {
2701 last = i;
2702 break;
2703 }
2704 }
2705 if (width != try_size)
2706 return false;
2707
2708 bool allow_unaligned
028d4092 2709 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
2710 /* Punt if the combined store would not be aligned and we need alignment. */
2711 if (!allow_unaligned)
2712 {
2713 unsigned int align = merged_store->align;
2714 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2715 for (unsigned int i = first + 1; i <= last; ++i)
2716 {
2717 unsigned int this_align;
2718 unsigned HOST_WIDE_INT align_bitpos = 0;
2719 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2720 &this_align, &align_bitpos);
2721 if (this_align > align)
2722 {
2723 align = this_align;
2724 align_base = m_store_info[i]->bitpos - align_bitpos;
2725 }
2726 }
2727 unsigned HOST_WIDE_INT align_bitpos
2728 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2729 if (align_bitpos)
2730 align = least_bit_hwi (align_bitpos);
2731 if (align < try_size)
2732 return false;
2733 }
2734
2735 tree type;
2736 switch (try_size)
2737 {
2738 case 16: type = uint16_type_node; break;
2739 case 32: type = uint32_type_node; break;
2740 case 64: type = uint64_type_node; break;
2741 default: gcc_unreachable ();
2742 }
2743 struct symbolic_number n;
2744 gimple *ins_stmt = NULL;
2745 int vuse_store = -1;
2746 unsigned int first_order = merged_store->first_order;
2747 unsigned int last_order = merged_store->last_order;
2748 gimple *first_stmt = merged_store->first_stmt;
2749 gimple *last_stmt = merged_store->last_stmt;
c5679c37 2750 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2751 store_immediate_info *infof = m_store_info[first];
2752
2753 for (unsigned int i = first; i <= last; ++i)
2754 {
2755 store_immediate_info *info = m_store_info[i];
2756 struct symbolic_number this_n = info->n;
2757 this_n.type = type;
2758 if (!this_n.base_addr)
2759 this_n.range = try_size / BITS_PER_UNIT;
2760 else
2761 /* Update vuse in case it has changed by output_merged_stores. */
2762 this_n.vuse = gimple_vuse (info->ins_stmt);
2763 unsigned int bitpos = info->bitpos - infof->bitpos;
2764 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2765 BYTES_BIG_ENDIAN
2766 ? try_size - info->bitsize - bitpos
2767 : bitpos))
2768 return false;
aa11164a 2769 if (this_n.base_addr && vuse_store)
2770 {
2771 unsigned int j;
2772 for (j = first; j <= last; ++j)
2773 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2774 break;
2775 if (j > last)
2776 {
2777 if (vuse_store == 1)
2778 return false;
2779 vuse_store = 0;
2780 }
2781 }
2782 if (i == first)
2783 {
2784 n = this_n;
2785 ins_stmt = info->ins_stmt;
2786 }
2787 else
2788 {
c5679c37 2789 if (n.base_addr && n.vuse != this_n.vuse)
4b84d9b8 2790 {
2791 if (vuse_store == 0)
2792 return false;
2793 vuse_store = 1;
4b84d9b8 2794 }
2795 if (info->order > last_order)
2796 {
2797 last_order = info->order;
2798 last_stmt = info->stmt;
2799 }
2800 else if (info->order < first_order)
2801 {
2802 first_order = info->order;
2803 first_stmt = info->stmt;
2804 }
2805 end = MAX (end, info->bitpos + info->bitsize);
2806
2807 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2808 &this_n, &n);
2809 if (ins_stmt == NULL)
2810 return false;
2811 }
2812 }
2813
2814 uint64_t cmpxchg, cmpnop;
2815 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2816
2817 /* A complete byte swap should make the symbolic number to start with
2818 the largest digit in the highest order byte. Unchanged symbolic
2819 number indicates a read with same endianness as target architecture. */
2820 if (n.n != cmpnop && n.n != cmpxchg)
2821 return false;
2822
2823 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2824 return false;
2825
2826 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2827 merged_store->start, end, first_earlier, first))
2828 return false;
2829
2830 /* Don't handle memory copy this way if normal non-bswap processing
2831 would handle it too. */
2832 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2833 {
2834 unsigned int i;
2835 for (i = first; i <= last; ++i)
2836 if (m_store_info[i]->rhs_code != MEM_REF)
2837 break;
2838 if (i == last + 1)
2839 return false;
2840 }
2841
2842 if (n.n == cmpxchg)
2843 switch (try_size)
2844 {
2845 case 16:
2846 /* Will emit LROTATE_EXPR. */
2847 break;
2848 case 32:
2849 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2850 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2851 break;
2852 return false;
2853 case 64:
2854 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2855 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2856 break;
2857 return false;
2858 default:
2859 gcc_unreachable ();
2860 }
2861
2862 if (!allow_unaligned && n.base_addr)
2863 {
2864 unsigned int align = get_object_alignment (n.src);
2865 if (align < try_size)
2866 return false;
2867 }
2868
2869 /* If each load has vuse of the corresponding store, need to verify
2870 the loads can be sunk right before the last store. */
2871 if (vuse_store == 1)
2872 {
2873 auto_vec<tree, 64> refs;
2874 for (unsigned int i = first; i <= last; ++i)
2875 gather_bswap_load_refs (&refs,
2876 gimple_assign_rhs1 (m_store_info[i]->stmt));
2877
2878 unsigned int i;
2879 tree ref;
2880 FOR_EACH_VEC_ELT (refs, i, ref)
2881 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2882 return false;
2883 n.vuse = NULL_TREE;
2884 }
2885
2886 infof->n = n;
2887 infof->ins_stmt = ins_stmt;
2888 for (unsigned int i = first; i <= last; ++i)
2889 {
2890 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2891 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2892 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2893 if (i != first)
2894 merged_store->merge_into (m_store_info[i]);
2895 }
2896
2897 return true;
2898}
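/* Editor's note: illustrative summary, not part of the original source.
   After find_bswap_or_nop_finalize, N.N encodes which source byte each
   stored byte comes from.  For try_size == 32 this means:
     n.n == cmpnop   the narrow stores write the source value unchanged and
                     the group is handled as a plain NOP_EXPR group (unless
                     ordinary processing would already handle the copy);
     n.n == cmpxchg  the stores write the value byte-swapped and the group
                     is output via __builtin_bswap32 (16-bit groups use a
                     rotate instead).  */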
2899
2900/* Go through the candidate stores recorded in m_store_info and merge them
2901 into merged_store_group objects recorded into m_merged_store_groups
2902 representing the widened stores. Return true if coalescing was successful
2903 and the number of widened stores is fewer than the original number
2904 of stores. */
2905
2906bool
2907imm_store_chain_info::coalesce_immediate_stores ()
2908{
2909 /* Anything less can't be processed. */
2910 if (m_store_info.length () < 2)
2911 return false;
2912
2913 if (dump_file && (dump_flags & TDF_DETAILS))
c94c3532 2914 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
2915 m_store_info.length ());
2916
2917 store_immediate_info *info;
4b84d9b8 2918 unsigned int i, ignore = 0;
2919 unsigned int first_earlier = 0;
2920 unsigned int end_earlier = 0;
2921
2922 /* Order the stores by the bitposition they write to. */
2923 m_store_info.qsort (sort_by_bitpos);
2924
2925 info = m_store_info[0];
2926 merged_store_group *merged_store = new merged_store_group (info);
2927 if (dump_file && (dump_flags & TDF_DETAILS))
2928 fputs ("New store group\n", dump_file);
2929
2930 FOR_EACH_VEC_ELT (m_store_info, i, info)
2931 {
2932 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2933
4b84d9b8 2934 if (i <= ignore)
c94c3532 2935 goto done;
f663d9ad 2936
2937 while (first_earlier < end_earlier
2938 && (m_store_info[first_earlier]->bitpos
2939 + m_store_info[first_earlier]->bitsize
2940 <= merged_store->start))
2941 first_earlier++;
2942
2943 /* First try to handle group of stores like:
2944 p[0] = data >> 24;
2945 p[1] = data >> 16;
2946 p[2] = data >> 8;
2947 p[3] = data;
2948 using the bswap framework. */
2949 if (info->bitpos == merged_store->start + merged_store->width
2950 && merged_store->stores.length () == 1
2951 && merged_store->stores[0]->ins_stmt != NULL
cb76fcd7 2952 && info->lp_nr == merged_store->lp_nr
2953 && info->ins_stmt != NULL)
2954 {
2955 unsigned int try_size;
2956 for (try_size = 64; try_size >= 16; try_size >>= 1)
2957 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2958 first_earlier))
2959 break;
2960
2961 if (try_size >= 16)
2962 {
2963 ignore = i + merged_store->stores.length () - 1;
2964 m_merged_store_groups.safe_push (merged_store);
2965 if (ignore < m_store_info.length ())
2966 {
2967 merged_store = new merged_store_group (m_store_info[ignore]);
2968 end_earlier = ignore;
2969 }
2970 else
2971 merged_store = NULL;
c94c3532 2972 goto done;
2973 }
2974 }
2975
2976 new_bitregion_start
2977 = MIN (merged_store->bitregion_start, info->bitregion_start);
2978 new_bitregion_end
2979 = MAX (merged_store->bitregion_end, info->bitregion_end);
2980
2981 if (info->order >= merged_store->first_nonmergeable_order
2982 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
028d4092 2983 > (unsigned) param_store_merging_max_size))
2984 ;
2985
2986 /* |---store 1---|
2987 |---store 2---|
4b84d9b8 2988 Overlapping stores. */
18e0c3d1 2989 else if (IN_RANGE (info->bitpos, merged_store->start,
2990 merged_store->start + merged_store->width - 1)
2991 /* |---store 1---||---store 2---|
2992 Handle also the consecutive INTEGER_CST stores case here,
2993 as we have here the code to deal with overlaps. */
2994 || (info->bitregion_start <= merged_store->bitregion_end
2995 && info->rhs_code == INTEGER_CST
2996 && merged_store->only_constants
2997 && merged_store->can_be_merged_into (info)))
f663d9ad 2998 {
245f6de1 2999 /* Only allow overlapping stores of constants. */
3000 if (info->rhs_code == INTEGER_CST
3001 && merged_store->only_constants
3002 && info->lp_nr == merged_store->lp_nr)
245f6de1 3003 {
3004 unsigned int first_order
3005 = MIN (merged_store->first_order, info->order);
3006 unsigned int last_order
3007 = MAX (merged_store->last_order, info->order);
3008 unsigned HOST_WIDE_INT end
3009 = MAX (merged_store->start + merged_store->width,
3010 info->bitpos + info->bitsize);
3011 if (check_no_overlap (m_store_info, i, true, first_order,
3012 last_order, merged_store->start, end,
3013 first_earlier, end_earlier))
3014 {
3015 /* check_no_overlap call above made sure there are no
3016 overlapping stores with non-INTEGER_CST rhs_code
3017 in between the first and last of the stores we've
3018 just merged. If there are any INTEGER_CST rhs_code
3019 stores in between, we need to merge_overlapping them
3020 even if in the sort_by_bitpos order there are other
3021 overlapping stores in between. Keep those stores as is.
3022 Example:
3023 MEM[(int *)p_28] = 0;
3024 MEM[(char *)p_28 + 3B] = 1;
3025 MEM[(char *)p_28 + 1B] = 2;
3026 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
3027 We can't merge the zero store with the store of two and
3028 not merge anything else, because the store of one is
3029 in the original order in between those two, but in
3030 sort_by_bitpos order it comes after the last store that
3031 we can't merge with them. We can merge the first 3 stores
3032 and keep the last store as is though. */
3033 unsigned int len = m_store_info.length ();
3034 unsigned int try_order = last_order;
3035 unsigned int first_nonmergeable_order;
3036 unsigned int k;
3037 bool last_iter = false;
3038 int attempts = 0;
3039 do
6cd4c66e 3040 {
18e0c3d1 3041 unsigned int max_order = 0;
bd909071 3042 unsigned int min_order = first_order;
3043 unsigned first_nonmergeable_int_order = ~0U;
3044 unsigned HOST_WIDE_INT this_end = end;
3045 k = i;
3046 first_nonmergeable_order = ~0U;
3047 for (unsigned int j = i + 1; j < len; ++j)
6cd4c66e 3048 {
3049 store_immediate_info *info2 = m_store_info[j];
3050 if (info2->bitpos >= this_end)
3051 break;
3052 if (info2->order < try_order)
6cd4c66e 3053 {
3054 if (info2->rhs_code != INTEGER_CST
3055 || info2->lp_nr != merged_store->lp_nr)
3056 {
3057 /* Normally check_no_overlap makes sure this
3058 doesn't happen, but if end grows below,
3059 then we need to process more stores than
3060 check_no_overlap verified. Example:
3061 MEM[(int *)p_5] = 0;
3062 MEM[(short *)p_5 + 3B] = 1;
3063 MEM[(char *)p_5 + 4B] = _9;
3064 MEM[(char *)p_5 + 2B] = 2; */
3065 k = 0;
3066 break;
3067 }
3068 k = j;
bd909071 3069 min_order = MIN (min_order, info2->order);
3070 this_end = MAX (this_end,
3071 info2->bitpos + info2->bitsize);
6cd4c66e 3072 }
18e0c3d1 3073 else if (info2->rhs_code == INTEGER_CST
4119cd69 3074 && info2->lp_nr == merged_store->lp_nr
3075 && !last_iter)
3076 {
3077 max_order = MAX (max_order, info2->order + 1);
3078 first_nonmergeable_int_order
3079 = MIN (first_nonmergeable_int_order,
3080 info2->order);
3081 }
3082 else
3083 first_nonmergeable_order
3084 = MIN (first_nonmergeable_order, info2->order);
6cd4c66e 3085 }
3086 if (k > i
3087 && !check_no_overlap (m_store_info, len - 1, true,
3088 min_order, try_order,
3089 merged_store->start, this_end,
3090 first_earlier, end_earlier))
3091 k = 0;
3092 if (k == 0)
3093 {
3094 if (last_order == try_order)
3095 break;
3096 /* If this failed, but only because we grew
3097 try_order, retry with the last working one,
3098 so that we merge at least something. */
3099 try_order = last_order;
3100 last_iter = true;
3101 continue;
3102 }
3103 last_order = try_order;
3104 /* Retry with a larger try_order to see if we could
3105 merge some further INTEGER_CST stores. */
3106 if (max_order
3107 && (first_nonmergeable_int_order
3108 < first_nonmergeable_order))
3109 {
3110 try_order = MIN (max_order,
3111 first_nonmergeable_order);
3112 try_order
3113 = MIN (try_order,
3114 merged_store->first_nonmergeable_order);
3115 if (try_order > last_order && ++attempts < 16)
3116 continue;
3117 }
3118 first_nonmergeable_order
3119 = MIN (first_nonmergeable_order,
3120 first_nonmergeable_int_order);
3121 end = this_end;
3122 break;
6cd4c66e 3123 }
18e0c3d1 3124 while (1);
3125
3126 if (k != 0)
3127 {
3128 merged_store->merge_overlapping (info);
3129
3130 merged_store->first_nonmergeable_order
3131 = MIN (merged_store->first_nonmergeable_order,
3132 first_nonmergeable_order);
3133
3134 for (unsigned int j = i + 1; j <= k; j++)
3135 {
3136 store_immediate_info *info2 = m_store_info[j];
3137 gcc_assert (info2->bitpos < end);
3138 if (info2->order < last_order)
3139 {
3140 gcc_assert (info2->rhs_code == INTEGER_CST);
3141 if (info != info2)
3142 merged_store->merge_overlapping (info2);
3143 }
3144 /* Other stores are kept and not merged in any
3145 way. */
3146 }
3147 ignore = k;
3148 goto done;
3149 }
3150 }
245f6de1 3151 }
f663d9ad 3152 }
3153 /* |---store 1---||---store 2---|
3154 This store is consecutive to the previous one.
3155 Merge it into the current store group. There can be gaps in between
3156 the stores, but there can't be gaps in between bitregions. */
c94c3532 3157 else if (info->bitregion_start <= merged_store->bitregion_end
7f5a3982 3158 && merged_store->can_be_merged_into (info))
f663d9ad 3159 {
3160 store_immediate_info *infof = merged_store->stores[0];
3161
3162 /* All the rhs_code ops that take 2 operands are commutative,
3163 swap the operands if it could make the operands compatible. */
3164 if (infof->ops[0].base_addr
3165 && infof->ops[1].base_addr
3166 && info->ops[0].base_addr
3167 && info->ops[1].base_addr
3168 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
3169 info->bitpos - infof->bitpos)
3170 && operand_equal_p (info->ops[1].base_addr,
3171 infof->ops[0].base_addr, 0))
3172 {
3173 std::swap (info->ops[0], info->ops[1]);
3174 info->ops_swapped_p = true;
3175 }
4d213bf6 3176 if (check_no_overlap (m_store_info, i, false,
bd909071 3177 MIN (merged_store->first_order, info->order),
a7fe6482 3178 MAX (merged_store->last_order, info->order),
bd909071 3179 merged_store->start,
a7fe6482 3180 MAX (merged_store->start + merged_store->width,
3181 info->bitpos + info->bitsize),
3182 first_earlier, end_earlier))
245f6de1 3183 {
3184 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3185 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3186 {
3187 info->rhs_code = BIT_INSERT_EXPR;
3188 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3189 info->ops[0].base_addr = NULL_TREE;
3190 }
3191 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3192 {
3193 store_immediate_info *infoj;
3194 unsigned int j;
3195 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
3196 {
3197 infoj->rhs_code = BIT_INSERT_EXPR;
3198 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3199 infoj->ops[0].base_addr = NULL_TREE;
3200 }
e362a897 3201 merged_store->bit_insertion = true;
3202 }
3203 if ((infof->ops[0].base_addr
3204 ? compatible_load_p (merged_store, info, base_addr, 0)
3205 : !info->ops[0].base_addr)
3206 && (infof->ops[1].base_addr
3207 ? compatible_load_p (merged_store, info, base_addr, 1)
3208 : !info->ops[1].base_addr))
3209 {
3210 merged_store->merge_into (info);
3211 goto done;
3212 }
3213 }
3214 }
f663d9ad 3215
3216 /* |---store 1---| <gap> |---store 2---|.
3217 Gap between stores or the rhs not compatible. Start a new group. */
f663d9ad 3218
3219 /* Try to apply all the stores recorded for the group to determine
3220 the bitpattern they write and discard it if that fails.
3221 This will also reject single-store groups. */
c94c3532 3222 if (merged_store->apply_stores ())
245f6de1 3223 m_merged_store_groups.safe_push (merged_store);
3224 else
3225 delete merged_store;
f663d9ad 3226
245f6de1 3227 merged_store = new merged_store_group (info);
bd909071 3228 end_earlier = i;
3229 if (dump_file && (dump_flags & TDF_DETAILS))
3230 fputs ("New store group\n", dump_file);
3231
3232 done:
3233 if (dump_file && (dump_flags & TDF_DETAILS))
3234 {
3235 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3236 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3237 i, info->bitsize, info->bitpos);
3238 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3239 fputc ('\n', dump_file);
3240 }
3241 }
3242
a62b3dc5 3243 /* Record or discard the last store group. */
3244 if (merged_store)
3245 {
c94c3532 3246 if (merged_store->apply_stores ())
4b84d9b8 3247 m_merged_store_groups.safe_push (merged_store);
3248 else
3249 delete merged_store;
4b84d9b8 3250 }
3251
3252 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
c94c3532 3253
3254 bool success
3255 = !m_merged_store_groups.is_empty ()
3256 && m_merged_store_groups.length () < m_store_info.length ();
3257
3258 if (success && dump_file)
c94c3532 3259 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
a62b3dc5 3260 m_merged_store_groups.length ());
3261
3262 return success;
3263}
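/* Editor's note: illustrative example, not part of the original source.
   For the chain 'p[0] = 1; p[1] = 2; p[2] = 3; p[3] = 4;' the four
   store_immediate_info records are sorted by bit position, all land in one
   merged_store_group, and apply_stores computes the combined little-endian
   value 0x04030201, which the output phase can then emit as a single
   4-byte store when alignment allows.  */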
3264
3265/* Return the type to use for the merged stores or loads described by STMTS.
3266 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3267 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3268 of the MEM_REFs if any. */
3269
3270static tree
245f6de1
JJ
3271get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3272 unsigned short *cliquep, unsigned short *basep)
f663d9ad
KT
3273{
3274 gimple *stmt;
3275 unsigned int i;
245f6de1
JJ
3276 tree type = NULL_TREE;
3277 tree ret = NULL_TREE;
3278 *cliquep = 0;
3279 *basep = 0;
f663d9ad
KT
3280
3281 FOR_EACH_VEC_ELT (stmts, i, stmt)
3282 {
245f6de1
JJ
3283 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3284 : gimple_assign_lhs (stmt);
3285 tree type1 = reference_alias_ptr_type (ref);
3286 tree base = get_base_address (ref);
f663d9ad 3287
245f6de1
JJ
3288 if (i == 0)
3289 {
3290 if (TREE_CODE (base) == MEM_REF)
3291 {
3292 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3293 *basep = MR_DEPENDENCE_BASE (base);
3294 }
3295 ret = type = type1;
3296 continue;
3297 }
f663d9ad 3298 if (!alias_ptr_types_compatible_p (type, type1))
245f6de1
JJ
3299 ret = ptr_type_node;
3300 if (TREE_CODE (base) != MEM_REF
3301 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3302 || *basep != MR_DEPENDENCE_BASE (base))
3303 {
3304 *cliquep = 0;
3305 *basep = 0;
3306 }
f663d9ad 3307 }
245f6de1 3308 return ret;
f663d9ad
KT
3309}
3310
3311/* Return the location_t information we can find among the statements
3312 in STMTS. */
3313
3314static location_t
245f6de1 3315get_location_for_stmts (vec<gimple *> &stmts)
f663d9ad
KT
3316{
3317 gimple *stmt;
3318 unsigned int i;
3319
3320 FOR_EACH_VEC_ELT (stmts, i, stmt)
3321 if (gimple_has_location (stmt))
3322 return gimple_location (stmt);
3323
3324 return UNKNOWN_LOCATION;
3325}
3326
3327/* Used to describe a store resulting from splitting a wide store into smaller
3328 regularly-sized stores in split_group. */
3329
6c1dae73 3330class split_store
f663d9ad 3331{
6c1dae73 3332public:
f663d9ad
KT
3333 unsigned HOST_WIDE_INT bytepos;
3334 unsigned HOST_WIDE_INT size;
3335 unsigned HOST_WIDE_INT align;
245f6de1 3336 auto_vec<store_immediate_info *> orig_stores;
a62b3dc5
JJ
3337 /* True if there is a single orig stmt covering the whole split store. */
3338 bool orig;
f663d9ad
KT
3339 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3340 unsigned HOST_WIDE_INT);
3341};
3342
3343/* Simple constructor. */
3344
3345split_store::split_store (unsigned HOST_WIDE_INT bp,
3346 unsigned HOST_WIDE_INT sz,
3347 unsigned HOST_WIDE_INT al)
a62b3dc5 3348 : bytepos (bp), size (sz), align (al), orig (false)
f663d9ad 3349{
245f6de1 3350 orig_stores.create (0);
f663d9ad
KT
3351}
3352
245f6de1
JJ
3353/* Record all stores in GROUP that write to the region starting at BITPOS and
3354 of size BITSIZE. Record infos for such statements in STORES if
3355 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
5384a802
JJ
3356 if there is exactly one original store in the range (in that case ignore
3357 clobber stmts, unless there are only clobber stmts). */
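/* For example, querying the byte range [4, 8) of a group containing single
   byte stores at offsets 4, 5, 6 and 7 pushes all four infos into STORES
   but returns NULL, because more than one original store is involved.  */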
f663d9ad 3358
a62b3dc5 3359static store_immediate_info *
99b1c316 3360find_constituent_stores (class merged_store_group *group,
245f6de1
JJ
3361 vec<store_immediate_info *> *stores,
3362 unsigned int *first,
3363 unsigned HOST_WIDE_INT bitpos,
3364 unsigned HOST_WIDE_INT bitsize)
f663d9ad 3365{
a62b3dc5 3366 store_immediate_info *info, *ret = NULL;
f663d9ad 3367 unsigned int i;
a62b3dc5
JJ
3368 bool second = false;
3369 bool update_first = true;
f663d9ad 3370 unsigned HOST_WIDE_INT end = bitpos + bitsize;
a62b3dc5 3371 for (i = *first; group->stores.iterate (i, &info); ++i)
f663d9ad
KT
3372 {
3373 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3374 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
a62b3dc5
JJ
3375 if (stmt_end <= bitpos)
3376 {
3377 /* BITPOS passed to this function never decreases from within the
3378 same split_group call, so optimize and don't scan info records
3379 which are known to end before or at BITPOS next time.
3380 Only do it if all stores before this one also pass this. */
3381 if (update_first)
3382 *first = i + 1;
3383 continue;
3384 }
3385 else
3386 update_first = false;
3387
f663d9ad 3388 /* The stores in GROUP are ordered by bitposition so if we're past
a62b3dc5
JJ
3389 the region for this group return early. */
3390 if (stmt_start >= end)
3391 return ret;
3392
5384a802
JJ
3393 if (gimple_clobber_p (info->stmt))
3394 {
3395 if (stores)
3396 stores->safe_push (info);
3397 if (ret == NULL)
3398 ret = info;
3399 continue;
3400 }
245f6de1 3401 if (stores)
a62b3dc5 3402 {
245f6de1 3403 stores->safe_push (info);
5384a802 3404 if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3405 {
3406 ret = NULL;
3407 second = true;
3408 }
3409 }
5384a802 3410 else if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3411 return NULL;
3412 if (!second)
3413 ret = info;
f663d9ad 3414 }
a62b3dc5 3415 return ret;
f663d9ad
KT
3416}
3417
d7a9512e
JJ
3418/* Return how many SSA_NAMEs used to compute the value to store in the INFO
3419 store have multiple uses. If any SSA_NAME has multiple uses, also
3420 count statements needed to compute it. */
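/* Illustrative example (hypothetical GIMPLE): for a store *p = _2 with
   _2 = _1 ^ 12 and _1 = *q, if _1 has uses besides the BIT_XOR_EXPR,
   the load computing it must stay around even after merging, so one
   extra statement is counted for it here.  */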
3421
3422static unsigned
3423count_multiple_uses (store_immediate_info *info)
3424{
3425 gimple *stmt = info->stmt;
3426 unsigned ret = 0;
3427 switch (info->rhs_code)
3428 {
3429 case INTEGER_CST:
e362a897 3430 case STRING_CST:
d7a9512e
JJ
3431 return 0;
3432 case BIT_AND_EXPR:
3433 case BIT_IOR_EXPR:
3434 case BIT_XOR_EXPR:
d60edaba
JJ
3435 if (info->bit_not_p)
3436 {
3437 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3438 ret = 1; /* Fall through below to return
3439 the BIT_NOT_EXPR stmt and then
3440 BIT_{AND,IOR,XOR}_EXPR and anything it
3441 uses. */
3442 else
3443 /* After this, stmt is the BIT_NOT_EXPR. */
3444 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3445 }
d7a9512e
JJ
3446 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3447 {
3448 ret += 1 + info->ops[0].bit_not_p;
3449 if (info->ops[1].base_addr)
3450 ret += 1 + info->ops[1].bit_not_p;
3451 return ret + 1;
3452 }
3453 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3454 /* stmt is now the BIT_*_EXPR. */
3455 if (!has_single_use (gimple_assign_rhs1 (stmt)))
127ef369
JJ
3456 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3457 else if (info->ops[info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3458 {
3459 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3460 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3461 ++ret;
3462 }
3463 if (info->ops[1].base_addr == NULL_TREE)
127ef369
JJ
3464 {
3465 gcc_checking_assert (!info->ops_swapped_p);
3466 return ret;
3467 }
d7a9512e 3468 if (!has_single_use (gimple_assign_rhs2 (stmt)))
127ef369
JJ
3469 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3470 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3471 {
3472 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3473 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3474 ++ret;
3475 }
3476 return ret;
3477 case MEM_REF:
3478 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3479 return 1 + info->ops[0].bit_not_p;
3480 else if (info->ops[0].bit_not_p)
3481 {
3482 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3483 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3484 return 1;
3485 }
3486 return 0;
c94c3532
EB
3487 case BIT_INSERT_EXPR:
3488 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
d7a9512e
JJ
3489 default:
3490 gcc_unreachable ();
3491 }
3492}
3493
f663d9ad 3494/* Split a merged store described by GROUP by populating the SPLIT_STORES
a62b3dc5
JJ
3495 vector (if non-NULL) with split_store structs describing the byte offset
3496 (from the base), the bit size and alignment of each store as well as the
3497 original statements involved in each such split group.
f663d9ad
KT
3498 This is to separate the splitting strategy from the statement
3499 building/emission/linking done in output_merged_store.
a62b3dc5 3500 Return number of new stores.
245f6de1
JJ
3501 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3502 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3afd514b
JJ
3503 BZERO_FIRST may be true only when the first store covers the whole group
3504 and clears it; if BZERO_FIRST is true, keep that first store in the set
3505 unmodified and emit further stores for the overrides only.
a62b3dc5
JJ
3506 If SPLIT_STORES is NULL, it is just a dry run to count number of
3507 new stores. */
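/* Rough example: a fully constant 7-byte region with sufficient alignment
   is typically split into a 4-byte, a 2-byte and a 1-byte store, i.e.
   three split_store entries at byte offsets 0, 4 and 6, assuming
   MAX_STORE_BITSIZE is at least 32.  */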
f663d9ad 3508
a62b3dc5 3509static unsigned int
245f6de1 3510split_group (merged_store_group *group, bool allow_unaligned_store,
3afd514b 3511 bool allow_unaligned_load, bool bzero_first,
99b1c316 3512 vec<split_store *> *split_stores,
d7a9512e
JJ
3513 unsigned *total_orig,
3514 unsigned *total_new)
f663d9ad 3515{
a62b3dc5
JJ
3516 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3517 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
f663d9ad 3518 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
a62b3dc5
JJ
3519 unsigned HOST_WIDE_INT group_align = group->align;
3520 unsigned HOST_WIDE_INT align_base = group->align_base;
245f6de1 3521 unsigned HOST_WIDE_INT group_load_align = group_align;
d7a9512e 3522 bool any_orig = false;
f663d9ad 3523
f663d9ad
KT
3524 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3525
e362a897
EB
3526 /* For bswap framework using sets of stores, all the checking has been done
3527 earlier in try_coalesce_bswap and the result always needs to be emitted
3528 as a single store. Likewise for string concatenation. */
4b84d9b8 3529 if (group->stores[0]->rhs_code == LROTATE_EXPR
e362a897
EB
3530 || group->stores[0]->rhs_code == NOP_EXPR
3531 || group->string_concatenation)
4b84d9b8 3532 {
3afd514b 3533 gcc_assert (!bzero_first);
4b84d9b8
JJ
3534 if (total_orig)
3535 {
3536 /* Avoid the old/new stmt count heuristics. It should
3537 always be beneficial. */
3538 total_new[0] = 1;
3539 total_orig[0] = 2;
3540 }
3541
3542 if (split_stores)
3543 {
3544 unsigned HOST_WIDE_INT align_bitpos
3545 = (group->start - align_base) & (group_align - 1);
3546 unsigned HOST_WIDE_INT align = group_align;
3547 if (align_bitpos)
3548 align = least_bit_hwi (align_bitpos);
3549 bytepos = group->start / BITS_PER_UNIT;
99b1c316 3550 split_store *store
4b84d9b8
JJ
3551 = new split_store (bytepos, group->width, align);
3552 unsigned int first = 0;
3553 find_constituent_stores (group, &store->orig_stores,
3554 &first, group->start, group->width);
3555 split_stores->safe_push (store);
3556 }
3557
3558 return 1;
3559 }
3560
a62b3dc5 3561 unsigned int ret = 0, first = 0;
f663d9ad 3562 unsigned HOST_WIDE_INT try_pos = bytepos;
f663d9ad 3563
d7a9512e
JJ
3564 if (total_orig)
3565 {
3566 unsigned int i;
3567 store_immediate_info *info = group->stores[0];
3568
3569 total_new[0] = 0;
3570 total_orig[0] = 1; /* The orig store. */
3571 info = group->stores[0];
3572 if (info->ops[0].base_addr)
a6fbd154 3573 total_orig[0]++;
d7a9512e 3574 if (info->ops[1].base_addr)
a6fbd154 3575 total_orig[0]++;
d7a9512e
JJ
3576 switch (info->rhs_code)
3577 {
3578 case BIT_AND_EXPR:
3579 case BIT_IOR_EXPR:
3580 case BIT_XOR_EXPR:
3581 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3582 break;
3583 default:
3584 break;
3585 }
3586 total_orig[0] *= group->stores.length ();
3587
3588 FOR_EACH_VEC_ELT (group->stores, i, info)
a6fbd154
JJ
3589 {
3590 total_new[0] += count_multiple_uses (info);
3591 total_orig[0] += (info->bit_not_p
3592 + info->ops[0].bit_not_p
3593 + info->ops[1].bit_not_p);
3594 }
d7a9512e
JJ
3595 }
3596
245f6de1
JJ
3597 if (!allow_unaligned_load)
3598 for (int i = 0; i < 2; ++i)
3599 if (group->load_align[i])
3600 group_load_align = MIN (group_load_align, group->load_align[i]);
3601
3afd514b
JJ
3602 if (bzero_first)
3603 {
5384a802
JJ
3604 store_immediate_info *gstore;
3605 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3606 if (!gimple_clobber_p (gstore->stmt))
3607 break;
3608 ++first;
3afd514b
JJ
3609 ret = 1;
3610 if (split_stores)
3611 {
99b1c316 3612 split_store *store
5384a802
JJ
3613 = new split_store (bytepos, gstore->bitsize, align_base);
3614 store->orig_stores.safe_push (gstore);
3afd514b
JJ
3615 store->orig = true;
3616 any_orig = true;
3617 split_stores->safe_push (store);
3618 }
3619 }
3620
f663d9ad
KT
3621 while (size > 0)
3622 {
245f6de1 3623 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3afd514b
JJ
3624 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3625 || (bzero_first && group->val[try_pos - bytepos] == 0)))
a62b3dc5
JJ
3626 {
3627 /* Skip padding bytes. */
3628 ++try_pos;
3629 size -= BITS_PER_UNIT;
3630 continue;
3631 }
3632
f663d9ad 3633 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
a62b3dc5
JJ
3634 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3635 unsigned HOST_WIDE_INT align_bitpos
3636 = (try_bitpos - align_base) & (group_align - 1);
3637 unsigned HOST_WIDE_INT align = group_align;
5384a802 3638 bool found_orig = false;
a62b3dc5
JJ
3639 if (align_bitpos)
3640 align = least_bit_hwi (align_bitpos);
245f6de1 3641 if (!allow_unaligned_store)
a62b3dc5 3642 try_size = MIN (try_size, align);
245f6de1
JJ
3643 if (!allow_unaligned_load)
3644 {
3645 /* If we can't do or don't want to do unaligned stores
3646 as well as loads, we need to take the loads into account
3647 as well. */
3648 unsigned HOST_WIDE_INT load_align = group_load_align;
3649 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3650 if (align_bitpos)
3651 load_align = least_bit_hwi (align_bitpos);
3652 for (int i = 0; i < 2; ++i)
3653 if (group->load_align[i])
3654 {
8a91d545
RS
3655 align_bitpos
3656 = known_alignment (try_bitpos
3657 - group->stores[0]->bitpos
3658 + group->stores[0]->ops[i].bitpos
3659 - group->load_align_base[i]);
3660 if (align_bitpos & (group_load_align - 1))
245f6de1
JJ
3661 {
3662 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3663 load_align = MIN (load_align, a);
3664 }
3665 }
3666 try_size = MIN (try_size, load_align);
3667 }
a62b3dc5 3668 store_immediate_info *info
245f6de1 3669 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
5384a802 3670 if (info && !gimple_clobber_p (info->stmt))
a62b3dc5
JJ
3671 {
3672 /* If there is just one original statement for the range, see if
3673 we can just reuse the original store which could be even larger
3674 than try_size. */
3675 unsigned HOST_WIDE_INT stmt_end
3676 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
245f6de1
JJ
3677 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3678 stmt_end - try_bitpos);
a62b3dc5
JJ
3679 if (info && info->bitpos >= try_bitpos)
3680 {
5384a802
JJ
3681 store_immediate_info *info2 = NULL;
3682 unsigned int first_copy = first;
3683 if (info->bitpos > try_bitpos
3684 && stmt_end - try_bitpos <= try_size)
3685 {
3686 info2 = find_constituent_stores (group, NULL, &first_copy,
3687 try_bitpos,
3688 info->bitpos - try_bitpos);
3689 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3690 }
3691 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3692 {
3693 info2 = find_constituent_stores (group, NULL, &first_copy,
3694 stmt_end,
3695 (try_bitpos + try_size)
3696 - stmt_end);
3697 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3698 }
3699 if (info2 == NULL)
3700 {
3701 try_size = stmt_end - try_bitpos;
3702 found_orig = true;
3703 goto found;
3704 }
a62b3dc5
JJ
3705 }
3706 }
f663d9ad 3707
a62b3dc5
JJ
3708 /* Approximate store bitsize for the case when there are no padding
3709 bits. */
3710 while (try_size > size)
3711 try_size /= 2;
3712 /* Now look for whole padding bytes at the end of that bitsize. */
3713 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3714 if (group->mask[try_pos - bytepos + nonmasked - 1]
3afd514b
JJ
3715 != (unsigned char) ~0U
3716 && (!bzero_first
3717 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
a62b3dc5 3718 break;
5384a802 3719 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
a62b3dc5
JJ
3720 {
3721 /* If entire try_size range is padding, skip it. */
3722 try_pos += try_size / BITS_PER_UNIT;
3723 size -= try_size;
3724 continue;
3725 }
3726 /* Otherwise try to decrease try_size if second half, last 3 quarters
3727 etc. are padding. */
3728 nonmasked *= BITS_PER_UNIT;
3729 while (nonmasked <= try_size / 2)
3730 try_size /= 2;
245f6de1 3731 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
a62b3dc5
JJ
3732 {
3733 /* Now look for whole padding bytes at the start of that bitsize. */
3734 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3735 for (masked = 0; masked < try_bytesize; ++masked)
3afd514b
JJ
3736 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3737 && (!bzero_first
3738 || group->val[try_pos - bytepos + masked] != 0))
a62b3dc5
JJ
3739 break;
3740 masked *= BITS_PER_UNIT;
3741 gcc_assert (masked < try_size);
3742 if (masked >= try_size / 2)
3743 {
3744 while (masked >= try_size / 2)
3745 {
3746 try_size /= 2;
3747 try_pos += try_size / BITS_PER_UNIT;
3748 size -= try_size;
3749 masked -= try_size;
3750 }
3751 /* Need to recompute the alignment, so just retry at the new
3752 position. */
3753 continue;
3754 }
3755 }
3756
3757 found:
3758 ++ret;
f663d9ad 3759
a62b3dc5
JJ
3760 if (split_stores)
3761 {
99b1c316 3762 split_store *store
a62b3dc5 3763 = new split_store (try_pos, try_size, align);
245f6de1
JJ
3764 info = find_constituent_stores (group, &store->orig_stores,
3765 &first, try_bitpos, try_size);
a62b3dc5 3766 if (info
5384a802 3767 && !gimple_clobber_p (info->stmt)
a62b3dc5 3768 && info->bitpos >= try_bitpos
5384a802
JJ
3769 && info->bitpos + info->bitsize <= try_bitpos + try_size
3770 && (store->orig_stores.length () == 1
3771 || found_orig
3772 || (info->bitpos == try_bitpos
3773 && (info->bitpos + info->bitsize
3774 == try_bitpos + try_size))))
d7a9512e
JJ
3775 {
3776 store->orig = true;
3777 any_orig = true;
3778 }
a62b3dc5
JJ
3779 split_stores->safe_push (store);
3780 }
3781
3782 try_pos += try_size / BITS_PER_UNIT;
f663d9ad 3783 size -= try_size;
f663d9ad 3784 }
a62b3dc5 3785
d7a9512e
JJ
3786 if (total_orig)
3787 {
a6fbd154 3788 unsigned int i;
99b1c316 3789 split_store *store;
d7a9512e
JJ
3790 /* If we are reusing some original stores and any of the
3791 original SSA_NAMEs had multiple uses, we need to subtract
3792 those now before we add the new ones. */
3793 if (total_new[0] && any_orig)
3794 {
d7a9512e
JJ
3795 FOR_EACH_VEC_ELT (*split_stores, i, store)
3796 if (store->orig)
3797 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3798 }
3799 total_new[0] += ret; /* The new store. */
3800 store_immediate_info *info = group->stores[0];
3801 if (info->ops[0].base_addr)
a6fbd154 3802 total_new[0] += ret;
d7a9512e 3803 if (info->ops[1].base_addr)
a6fbd154 3804 total_new[0] += ret;
d7a9512e
JJ
3805 switch (info->rhs_code)
3806 {
3807 case BIT_AND_EXPR:
3808 case BIT_IOR_EXPR:
3809 case BIT_XOR_EXPR:
3810 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3811 break;
3812 default:
3813 break;
3814 }
a6fbd154
JJ
3815 FOR_EACH_VEC_ELT (*split_stores, i, store)
3816 {
3817 unsigned int j;
3818 bool bit_not_p[3] = { false, false, false };
3819 /* If all orig_stores have certain bit_not_p set, then
3820 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3821 If some orig_stores have certain bit_not_p set, then
3822 we'd use a BIT_XOR_EXPR with a mask and need to account for
3823 it. */
3824 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3825 {
3826 if (info->ops[0].bit_not_p)
3827 bit_not_p[0] = true;
3828 if (info->ops[1].bit_not_p)
3829 bit_not_p[1] = true;
3830 if (info->bit_not_p)
3831 bit_not_p[2] = true;
3832 }
3833 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3834 }
3835
d7a9512e
JJ
3836 }
3837
a62b3dc5 3838 return ret;
f663d9ad
KT
3839}
3840
a6fbd154
JJ
3841/* Return the operation through which the operand IDX (if < 2) or
3842 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3843 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
3844 the bits should be xored with mask. */
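/* For example, if only some of the constituent stores have bit_not_p set,
   a plain BIT_NOT_EXPR would invert too many bits, so BIT_XOR_EXPR is
   returned together with a MASK that is all-ones only in the bit ranges
   written by the inverted stores.  */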
3845
3846static enum tree_code
3847invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3848{
3849 unsigned int i;
3850 store_immediate_info *info;
3851 unsigned int cnt = 0;
e215422f 3852 bool any_paddings = false;
a6fbd154
JJ
3853 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3854 {
3855 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3856 if (bit_not_p)
e215422f
JJ
3857 {
3858 ++cnt;
3859 tree lhs = gimple_assign_lhs (info->stmt);
3860 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3861 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3862 any_paddings = true;
3863 }
a6fbd154
JJ
3864 }
3865 mask = NULL_TREE;
3866 if (cnt == 0)
3867 return NOP_EXPR;
e215422f 3868 if (cnt == split_store->orig_stores.length () && !any_paddings)
a6fbd154
JJ
3869 return BIT_NOT_EXPR;
3870
3871 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3872 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3873 unsigned char *buf
3874 = XALLOCAVEC (unsigned char, buf_size);
3875 memset (buf, ~0U, buf_size);
3876 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3877 {
3878 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3879 if (!bit_not_p)
3880 continue;
3881 /* Clear regions with bit_not_p and invert afterwards, rather than
3882 clear regions with !bit_not_p, so that gaps in between stores aren't
3883 set in the mask. */
3884 unsigned HOST_WIDE_INT bitsize = info->bitsize;
e215422f 3885 unsigned HOST_WIDE_INT prec = bitsize;
a6fbd154 3886 unsigned int pos_in_buffer = 0;
e215422f
JJ
3887 if (any_paddings)
3888 {
3889 tree lhs = gimple_assign_lhs (info->stmt);
3890 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3891 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3892 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3893 }
a6fbd154
JJ
3894 if (info->bitpos < try_bitpos)
3895 {
3896 gcc_assert (info->bitpos + bitsize > try_bitpos);
e215422f
JJ
3897 if (!BYTES_BIG_ENDIAN)
3898 {
3899 if (prec <= try_bitpos - info->bitpos)
3900 continue;
3901 prec -= try_bitpos - info->bitpos;
3902 }
3903 bitsize -= try_bitpos - info->bitpos;
3904 if (BYTES_BIG_ENDIAN && prec > bitsize)
3905 prec = bitsize;
a6fbd154
JJ
3906 }
3907 else
3908 pos_in_buffer = info->bitpos - try_bitpos;
e215422f
JJ
3909 if (prec < bitsize)
3910 {
3911 /* If this is a bool inversion, invert just the least significant
3912 prec bits rather than all bits of it. */
3913 if (BYTES_BIG_ENDIAN)
3914 {
3915 pos_in_buffer += bitsize - prec;
3916 if (pos_in_buffer >= split_store->size)
3917 continue;
3918 }
3919 bitsize = prec;
3920 }
a6fbd154
JJ
3921 if (pos_in_buffer + bitsize > split_store->size)
3922 bitsize = split_store->size - pos_in_buffer;
3923 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3924 if (BYTES_BIG_ENDIAN)
3925 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3926 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3927 else
3928 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3929 }
3930 for (unsigned int i = 0; i < buf_size; ++i)
3931 buf[i] = ~buf[i];
3932 mask = native_interpret_expr (int_type, buf, buf_size);
3933 return BIT_XOR_EXPR;
3934}
3935
f663d9ad
KT
3936/* Given a merged store group GROUP, output the widened version of it.
3937 The store chain is against the base object BASE.
3938 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3939 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3940 Make sure that the number of statements output is less than the number of
3941 original statements. If a better sequence is possible, emit it and
3942 return true. */
3943
3944bool
b5926e23 3945imm_store_chain_info::output_merged_store (merged_store_group *group)
f663d9ad 3946{
e362a897 3947 const unsigned HOST_WIDE_INT start_byte_pos
a62b3dc5 3948 = group->bitregion_start / BITS_PER_UNIT;
f663d9ad
KT
3949 unsigned int orig_num_stmts = group->stores.length ();
3950 if (orig_num_stmts < 2)
3951 return false;
3952
245f6de1 3953 bool allow_unaligned_store
028d4092 3954 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
245f6de1 3955 bool allow_unaligned_load = allow_unaligned_store;
3afd514b 3956 bool bzero_first = false;
5384a802
JJ
3957 store_immediate_info *store;
3958 unsigned int num_clobber_stmts = 0;
3959 if (group->stores[0]->rhs_code == INTEGER_CST)
3960 {
e362a897 3961 unsigned int i;
5384a802
JJ
3962 FOR_EACH_VEC_ELT (group->stores, i, store)
3963 if (gimple_clobber_p (store->stmt))
3964 num_clobber_stmts++;
3965 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3966 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3967 && group->start == store->bitpos
3968 && group->width == store->bitsize
3969 && (group->start % BITS_PER_UNIT) == 0
3970 && (group->width % BITS_PER_UNIT) == 0)
3971 {
3972 bzero_first = true;
3973 break;
3974 }
3975 else
3976 break;
3977 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3978 if (gimple_clobber_p (store->stmt))
3979 num_clobber_stmts++;
3980 if (num_clobber_stmts == orig_num_stmts)
3981 return false;
3982 orig_num_stmts -= num_clobber_stmts;
3983 }
3afd514b 3984 if (allow_unaligned_store || bzero_first)
a62b3dc5
JJ
3985 {
3986 /* If unaligned stores are allowed, see how many stores we'd emit
3987 for unaligned and how many stores we'd emit for aligned stores.
3afd514b
JJ
3988 Only use unaligned stores if that results in fewer stores than
3989 aligned ones. Similarly, if the first store clears the whole region,
3990 prefer expanding everything together rather than emitting the clear
3991 first followed by the merged further stores. */
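/* The four dry-run passes below cover the combinations of the two flags:
   bit 0 of PASS enables unaligned stores, bit 1 enables the bzero-first
   strategy, and the combination producing the fewest stores wins.  */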
21f65995 3992 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
3afd514b
JJ
3993 int pass_min = 0;
3994 for (int pass = 0; pass < 4; ++pass)
3995 {
3996 if (!allow_unaligned_store && (pass & 1) != 0)
3997 continue;
3998 if (!bzero_first && (pass & 2) != 0)
3999 continue;
4000 cnt[pass] = split_group (group, (pass & 1) != 0,
4001 allow_unaligned_load, (pass & 2) != 0,
4002 NULL, NULL, NULL);
4003 if (cnt[pass] < cnt[pass_min])
4004 pass_min = pass;
4005 }
4006 if ((pass_min & 1) == 0)
245f6de1 4007 allow_unaligned_store = false;
3afd514b
JJ
4008 if ((pass_min & 2) == 0)
4009 bzero_first = false;
a62b3dc5 4010 }
e362a897
EB
4011
4012 auto_vec<class split_store *, 32> split_stores;
4013 split_store *split_store;
4014 unsigned total_orig, total_new, i;
3afd514b 4015 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
d7a9512e 4016 &split_stores, &total_orig, &total_new);
a62b3dc5 4017
5384a802
JJ
4018 /* Determine if there is a clobber covering the whole group at the start,
4019 followed by proposed split stores that cover the whole group. In that
4020 case, prefer the transformation even if
4021 split_stores.length () == orig_num_stmts. */
4022 bool clobber_first = false;
4023 if (num_clobber_stmts
4024 && gimple_clobber_p (group->stores[0]->stmt)
4025 && group->start == group->stores[0]->bitpos
4026 && group->width == group->stores[0]->bitsize
4027 && (group->start % BITS_PER_UNIT) == 0
4028 && (group->width % BITS_PER_UNIT) == 0)
4029 {
4030 clobber_first = true;
4031 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
4032 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4033 if (split_store->bytepos != pos)
4034 {
4035 clobber_first = false;
4036 break;
4037 }
4038 else
4039 pos += split_store->size / BITS_PER_UNIT;
4040 if (pos != (group->start + group->width) / BITS_PER_UNIT)
4041 clobber_first = false;
4042 }
4043
4044 if (split_stores.length () >= orig_num_stmts + clobber_first)
a62b3dc5 4045 {
5384a802 4046
a62b3dc5
JJ
4047 /* We didn't manage to reduce the number of statements. Bail out. */
4048 if (dump_file && (dump_flags & TDF_DETAILS))
d7a9512e
JJ
4049 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4050 " Not profitable to emit new sequence.\n",
4051 orig_num_stmts);
dd172744
RB
4052 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4053 delete split_store;
a62b3dc5
JJ
4054 return false;
4055 }
d7a9512e
JJ
4056 if (total_orig <= total_new)
4057 {
4058 /* If number of estimated new statements is above estimated original
4059 statements, bail out too. */
4060 if (dump_file && (dump_flags & TDF_DETAILS))
4061 fprintf (dump_file, "Estimated number of original stmts (%u)"
4062 " not larger than estimated number of new"
4063 " stmts (%u).\n",
4064 total_orig, total_new);
dd172744
RB
4065 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4066 delete split_store;
4b84d9b8 4067 return false;
d7a9512e 4068 }
5384a802
JJ
4069 if (group->stores[0]->rhs_code == INTEGER_CST)
4070 {
4071 bool all_orig = true;
4072 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4073 if (!split_store->orig)
4074 {
4075 all_orig = false;
4076 break;
4077 }
4078 if (all_orig)
4079 {
4080 unsigned int cnt = split_stores.length ();
4081 store_immediate_info *store;
4082 FOR_EACH_VEC_ELT (group->stores, i, store)
4083 if (gimple_clobber_p (store->stmt))
4084 ++cnt;
4085 /* Punt if we wouldn't make any real changes, i.e. keep all
4086 orig stmts + all clobbers. */
4087 if (cnt == group->stores.length ())
4088 {
4089 if (dump_file && (dump_flags & TDF_DETAILS))
4090 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4091 " Not profitable to emit new sequence.\n",
4092 orig_num_stmts);
4093 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4094 delete split_store;
4095 return false;
4096 }
4097 }
4098 }
f663d9ad
KT
4099
4100 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
4101 gimple_seq seq = NULL;
f663d9ad
KT
4102 tree last_vdef, new_vuse;
4103 last_vdef = gimple_vdef (group->last_stmt);
4104 new_vuse = gimple_vuse (group->last_stmt);
4b84d9b8
JJ
4105 tree bswap_res = NULL_TREE;
4106
5384a802
JJ
4107 /* Clobbers are not removed. */
4108 if (gimple_clobber_p (group->last_stmt))
4109 {
4110 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4111 gimple_set_vdef (group->last_stmt, new_vuse);
4112 }
4113
4b84d9b8
JJ
4114 if (group->stores[0]->rhs_code == LROTATE_EXPR
4115 || group->stores[0]->rhs_code == NOP_EXPR)
4116 {
4117 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
4118 gimple *ins_stmt = group->stores[0]->ins_stmt;
4119 struct symbolic_number *n = &group->stores[0]->n;
4120 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
4121
4122 switch (n->range)
4123 {
4124 case 16:
4125 load_type = bswap_type = uint16_type_node;
4126 break;
4127 case 32:
4128 load_type = uint32_type_node;
4129 if (bswap)
4130 {
4131 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4132 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4133 }
4134 break;
4135 case 64:
4136 load_type = uint64_type_node;
4137 if (bswap)
4138 {
4139 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4140 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4141 }
4142 break;
4143 default:
4144 gcc_unreachable ();
4145 }
4146
4147 /* If each load has the vuse of the corresponding store,
4148 we've already checked the aliasing in try_coalesce_bswap and
4149 we want to sink the new load into seq, so we need to use new_vuse
4150 on the load. */
30fa8e9c 4151 if (n->base_addr)
4b84d9b8 4152 {
30fa8e9c
JJ
4153 if (n->vuse == NULL)
4154 {
4155 n->vuse = new_vuse;
4156 ins_stmt = NULL;
4157 }
4158 else
4159 /* Update vuse in case it has changed by output_merged_stores. */
4160 n->vuse = gimple_vuse (ins_stmt);
4b84d9b8
JJ
4161 }
4162 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4163 bswap_type, load_type, n, bswap);
4164 gcc_assert (bswap_res);
4165 }
f663d9ad
KT
4166
4167 gimple *stmt = NULL;
245f6de1 4168 auto_vec<gimple *, 32> orig_stmts;
4b84d9b8
JJ
4169 gimple_seq this_seq;
4170 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
aa55dc0c 4171 is_gimple_mem_ref_addr, NULL_TREE);
4b84d9b8 4172 gimple_seq_add_seq_without_update (&seq, this_seq);
245f6de1
JJ
4173
4174 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4175 gimple_seq load_seq[2] = { NULL, NULL };
4176 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4177 for (int j = 0; j < 2; ++j)
4178 {
4179 store_operand_info &op = group->stores[0]->ops[j];
4180 if (op.base_addr == NULL_TREE)
4181 continue;
4182
4183 store_immediate_info *infol = group->stores.last ();
4184 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4185 {
97031af7
JJ
4186 /* We can't pick the location randomly; while we've verified
4187 all the loads have the same vuse, they can still be in different
4188 basic blocks and we need to pick the one from the last bb:
4189 int x = q[0];
4190 if (x == N) return;
4191 int y = q[1];
4192 p[0] = x;
4193 p[1] = y;
4194 otherwise if we put the wider load at the q[0] load, we might
4195 segfault if q[1] is not mapped. */
4196 basic_block bb = gimple_bb (op.stmt);
4197 gimple *ostmt = op.stmt;
4198 store_immediate_info *info;
4199 FOR_EACH_VEC_ELT (group->stores, i, info)
4200 {
4201 gimple *tstmt = info->ops[j].stmt;
4202 basic_block tbb = gimple_bb (tstmt);
4203 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4204 {
4205 ostmt = tstmt;
4206 bb = tbb;
4207 }
4208 }
4209 load_gsi[j] = gsi_for_stmt (ostmt);
245f6de1
JJ
4210 load_addr[j]
4211 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4212 &load_seq[j], is_gimple_mem_ref_addr,
4213 NULL_TREE);
4214 }
4215 else if (operand_equal_p (base_addr, op.base_addr, 0))
4216 load_addr[j] = addr;
4217 else
3e2927a1 4218 {
3e2927a1
JJ
4219 load_addr[j]
4220 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4221 &this_seq, is_gimple_mem_ref_addr,
4222 NULL_TREE);
4223 gimple_seq_add_seq_without_update (&seq, this_seq);
4224 }
245f6de1
JJ
4225 }
4226
f663d9ad
KT
4227 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4228 {
e362a897
EB
4229 const unsigned HOST_WIDE_INT try_size = split_store->size;
4230 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4231 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4232 const unsigned HOST_WIDE_INT try_align = split_store->align;
4233 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
a62b3dc5
JJ
4234 tree dest, src;
4235 location_t loc;
e362a897 4236
a62b3dc5
JJ
4237 if (split_store->orig)
4238 {
5384a802
JJ
4239 /* If there is just a single non-clobber constituent store
4240 which covers the whole area, just reuse the lhs and rhs. */
4241 gimple *orig_stmt = NULL;
4242 store_immediate_info *store;
4243 unsigned int j;
4244 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4245 if (!gimple_clobber_p (store->stmt))
4246 {
4247 orig_stmt = store->stmt;
4248 break;
4249 }
245f6de1
JJ
4250 dest = gimple_assign_lhs (orig_stmt);
4251 src = gimple_assign_rhs1 (orig_stmt);
4252 loc = gimple_location (orig_stmt);
a62b3dc5
JJ
4253 }
4254 else
4255 {
245f6de1
JJ
4256 store_immediate_info *info;
4257 unsigned short clique, base;
4258 unsigned int k;
4259 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4260 orig_stmts.safe_push (info->stmt);
a62b3dc5 4261 tree offset_type
245f6de1 4262 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
e362a897 4263 tree dest_type;
245f6de1
JJ
4264 loc = get_location_for_stmts (orig_stmts);
4265 orig_stmts.truncate (0);
a62b3dc5 4266
e362a897
EB
4267 if (group->string_concatenation)
4268 dest_type
4269 = build_array_type_nelts (char_type_node,
4270 try_size / BITS_PER_UNIT);
4271 else
4272 {
4273 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4274 dest_type = build_aligned_type (dest_type, try_align);
4275 }
4276 dest = fold_build2 (MEM_REF, dest_type, addr,
a62b3dc5 4277 build_int_cst (offset_type, try_pos));
245f6de1
JJ
4278 if (TREE_CODE (dest) == MEM_REF)
4279 {
4280 MR_DEPENDENCE_CLIQUE (dest) = clique;
4281 MR_DEPENDENCE_BASE (dest) = base;
4282 }
4283
c94c3532 4284 tree mask;
e362a897 4285 if (bswap_res || group->string_concatenation)
c94c3532
EB
4286 mask = integer_zero_node;
4287 else
e362a897
EB
4288 mask = native_interpret_expr (dest_type,
4289 group->mask + try_offset,
4b84d9b8 4290 group->buf_size);
245f6de1
JJ
4291
4292 tree ops[2];
4293 for (int j = 0;
4294 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4295 ++j)
4296 {
4297 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4b84d9b8
JJ
4298 if (bswap_res)
4299 ops[j] = bswap_res;
e362a897
EB
4300 else if (group->string_concatenation)
4301 {
4302 ops[j] = build_string (try_size / BITS_PER_UNIT,
4303 (const char *) group->val + try_offset);
4304 TREE_TYPE (ops[j]) = dest_type;
4305 }
4b84d9b8 4306 else if (op.base_addr)
245f6de1
JJ
4307 {
4308 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4309 orig_stmts.safe_push (info->ops[j].stmt);
4310
4311 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4312 &clique, &base);
4313 location_t load_loc = get_location_for_stmts (orig_stmts);
4314 orig_stmts.truncate (0);
4315
4316 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4317 unsigned HOST_WIDE_INT align_bitpos
c94c3532 4318 = known_alignment (try_bitpos
8a91d545
RS
4319 - split_store->orig_stores[0]->bitpos
4320 + op.bitpos);
4321 if (align_bitpos & (load_align - 1))
245f6de1
JJ
4322 load_align = least_bit_hwi (align_bitpos);
4323
4324 tree load_int_type
4325 = build_nonstandard_integer_type (try_size, UNSIGNED);
4326 load_int_type
4327 = build_aligned_type (load_int_type, load_align);
4328
8a91d545 4329 poly_uint64 load_pos
c94c3532 4330 = exact_div (try_bitpos
8a91d545
RS
4331 - split_store->orig_stores[0]->bitpos
4332 + op.bitpos,
4333 BITS_PER_UNIT);
245f6de1
JJ
4334 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4335 build_int_cst (offset_type, load_pos));
4336 if (TREE_CODE (ops[j]) == MEM_REF)
4337 {
4338 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4339 MR_DEPENDENCE_BASE (ops[j]) = base;
4340 }
4341 if (!integer_zerop (mask))
4342 /* The load might load some bits (that will be masked off
4343 later on) uninitialized, avoid -W*uninitialized
4344 warnings in that case. */
4345 TREE_NO_WARNING (ops[j]) = 1;
4346
e362a897 4347 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
245f6de1
JJ
4348 gimple_set_location (stmt, load_loc);
4349 if (gsi_bb (load_gsi[j]))
4350 {
4351 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4352 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4353 }
4354 else
4355 {
4356 gimple_set_vuse (stmt, new_vuse);
4357 gimple_seq_add_stmt_without_update (&seq, stmt);
4358 }
4359 ops[j] = gimple_assign_lhs (stmt);
a6fbd154
JJ
4360 tree xor_mask;
4361 enum tree_code inv_op
e362a897 4362 = invert_op (split_store, j, dest_type, xor_mask);
a6fbd154 4363 if (inv_op != NOP_EXPR)
383ac8dc 4364 {
e362a897 4365 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4366 inv_op, ops[j], xor_mask);
383ac8dc
JJ
4367 gimple_set_location (stmt, load_loc);
4368 ops[j] = gimple_assign_lhs (stmt);
4369
4370 if (gsi_bb (load_gsi[j]))
4371 gimple_seq_add_stmt_without_update (&load_seq[j],
4372 stmt);
4373 else
4374 gimple_seq_add_stmt_without_update (&seq, stmt);
4375 }
245f6de1
JJ
4376 }
4377 else
e362a897
EB
4378 ops[j] = native_interpret_expr (dest_type,
4379 group->val + try_offset,
245f6de1
JJ
4380 group->buf_size);
4381 }
4382
4383 switch (split_store->orig_stores[0]->rhs_code)
4384 {
4385 case BIT_AND_EXPR:
4386 case BIT_IOR_EXPR:
4387 case BIT_XOR_EXPR:
4388 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4389 {
4390 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4391 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4392 }
4393 location_t bit_loc;
4394 bit_loc = get_location_for_stmts (orig_stmts);
4395 orig_stmts.truncate (0);
4396
4397 stmt
e362a897 4398 = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4399 split_store->orig_stores[0]->rhs_code,
4400 ops[0], ops[1]);
4401 gimple_set_location (stmt, bit_loc);
4402 /* If there is just one load and there is a separate
4403 load_seq[0], emit the bitwise op right after it. */
4404 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4405 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4406 /* Otherwise, if at least one load is in seq, we need to
4407 emit the bitwise op right before the store. If there
4408 are two loads and are emitted somewhere else, it would
4409 be better to emit the bitwise op as early as possible;
4410 we don't track where that would be possible right now
4411 though. */
4412 else
4413 gimple_seq_add_stmt_without_update (&seq, stmt);
4414 src = gimple_assign_lhs (stmt);
a6fbd154
JJ
4415 tree xor_mask;
4416 enum tree_code inv_op;
e362a897 4417 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
a6fbd154 4418 if (inv_op != NOP_EXPR)
d60edaba 4419 {
e362a897 4420 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4421 inv_op, src, xor_mask);
d60edaba
JJ
4422 gimple_set_location (stmt, bit_loc);
4423 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4424 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4425 else
4426 gimple_seq_add_stmt_without_update (&seq, stmt);
4427 src = gimple_assign_lhs (stmt);
4428 }
245f6de1 4429 break;
4b84d9b8
JJ
4430 case LROTATE_EXPR:
4431 case NOP_EXPR:
4432 src = ops[0];
4433 if (!is_gimple_val (src))
4434 {
4435 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4436 src);
4437 gimple_seq_add_stmt_without_update (&seq, stmt);
4438 src = gimple_assign_lhs (stmt);
4439 }
e362a897 4440 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4b84d9b8 4441 {
e362a897 4442 stmt = gimple_build_assign (make_ssa_name (dest_type),
4b84d9b8
JJ
4443 NOP_EXPR, src);
4444 gimple_seq_add_stmt_without_update (&seq, stmt);
4445 src = gimple_assign_lhs (stmt);
4446 }
e362a897 4447 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
be52ac73
JJ
4448 if (inv_op != NOP_EXPR)
4449 {
e362a897 4450 stmt = gimple_build_assign (make_ssa_name (dest_type),
be52ac73
JJ
4451 inv_op, src, xor_mask);
4452 gimple_set_location (stmt, loc);
4453 gimple_seq_add_stmt_without_update (&seq, stmt);
4454 src = gimple_assign_lhs (stmt);
4455 }
4b84d9b8 4456 break;
245f6de1
JJ
4457 default:
4458 src = ops[0];
4459 break;
4460 }
4461
c94c3532
EB
4462 /* If bit insertion is required, we use the source as an accumulator
4463 into which the successive bit-field values are manually inserted.
4464 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
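/* Illustrative sketch for a little-endian target: inserting a 3-bit value
   _1 at bit offset 4 of a 16-bit accumulator src roughly expands to
   _2 = _1 & 7; _3 = (uint16_t) _2; _4 = _3 << 4; src = _4 | src;
   with any step that is a no-op omitted.  */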
4465 if (group->bit_insertion)
4466 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4467 if (info->rhs_code == BIT_INSERT_EXPR
4468 && info->bitpos < try_bitpos + try_size
4469 && info->bitpos + info->bitsize > try_bitpos)
4470 {
4471 /* Mask, truncate, convert to final type, shift and ior into
4472 the accumulator. Note that every step can be a no-op. */
4473 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4474 const HOST_WIDE_INT end_gap
4475 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4476 tree tem = info->ops[0].val;
ed01d707
EB
4477 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4478 {
4479 const unsigned HOST_WIDE_INT size
4480 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4481 tree integer_type
4482 = build_nonstandard_integer_type (size, UNSIGNED);
4483 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4484 integer_type, tem);
4485 }
c14add82
EB
4486 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4487 {
4488 tree bitfield_type
4489 = build_nonstandard_integer_type (info->bitsize,
4490 UNSIGNED);
4491 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4492 }
4493 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
c94c3532
EB
4494 {
4495 const unsigned HOST_WIDE_INT imask
4496 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4497 tem = gimple_build (&seq, loc,
4498 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4499 build_int_cst (TREE_TYPE (tem),
4500 imask));
4501 }
4502 const HOST_WIDE_INT shift
4503 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4504 if (shift < 0)
4505 tem = gimple_build (&seq, loc,
4506 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4507 build_int_cst (NULL_TREE, -shift));
e362a897 4508 tem = gimple_convert (&seq, loc, dest_type, tem);
c94c3532
EB
4509 if (shift > 0)
4510 tem = gimple_build (&seq, loc,
e362a897 4511 LSHIFT_EXPR, dest_type, tem,
c94c3532
EB
4512 build_int_cst (NULL_TREE, shift));
4513 src = gimple_build (&seq, loc,
e362a897 4514 BIT_IOR_EXPR, dest_type, tem, src);
c94c3532
EB
4515 }
4516
a62b3dc5
JJ
4517 if (!integer_zerop (mask))
4518 {
e362a897 4519 tree tem = make_ssa_name (dest_type);
a62b3dc5
JJ
4520 tree load_src = unshare_expr (dest);
4521 /* The load might load some or all bits uninitialized,
4522 avoid -W*uninitialized warnings in that case.
4523 As an optimization, if all the bits were provably
4524 uninitialized (no stores at all yet, or the previous
4525 store was a CLOBBER), we could optimize away the load and
4526 replace it e.g. with 0. */
4527 TREE_NO_WARNING (load_src) = 1;
4528 stmt = gimple_build_assign (tem, load_src);
4529 gimple_set_location (stmt, loc);
4530 gimple_set_vuse (stmt, new_vuse);
4531 gimple_seq_add_stmt_without_update (&seq, stmt);
4532
4533 /* FIXME: If there is a single chunk of zero bits in mask,
4534 perhaps use BIT_INSERT_EXPR instead? */
e362a897 4535 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4536 BIT_AND_EXPR, tem, mask);
4537 gimple_set_location (stmt, loc);
4538 gimple_seq_add_stmt_without_update (&seq, stmt);
4539 tem = gimple_assign_lhs (stmt);
4540
245f6de1 4541 if (TREE_CODE (src) == INTEGER_CST)
e362a897 4542 src = wide_int_to_tree (dest_type,
245f6de1
JJ
4543 wi::bit_and_not (wi::to_wide (src),
4544 wi::to_wide (mask)));
4545 else
4546 {
4547 tree nmask
e362a897 4548 = wide_int_to_tree (dest_type,
245f6de1 4549 wi::bit_not (wi::to_wide (mask)));
e362a897 4550 stmt = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4551 BIT_AND_EXPR, src, nmask);
4552 gimple_set_location (stmt, loc);
4553 gimple_seq_add_stmt_without_update (&seq, stmt);
4554 src = gimple_assign_lhs (stmt);
4555 }
e362a897 4556 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4557 BIT_IOR_EXPR, tem, src);
4558 gimple_set_location (stmt, loc);
4559 gimple_seq_add_stmt_without_update (&seq, stmt);
4560 src = gimple_assign_lhs (stmt);
4561 }
4562 }
f663d9ad
KT
4563
4564 stmt = gimple_build_assign (dest, src);
4565 gimple_set_location (stmt, loc);
4566 gimple_set_vuse (stmt, new_vuse);
4567 gimple_seq_add_stmt_without_update (&seq, stmt);
4568
629387a6
EB
4569 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4570 add_stmt_to_eh_lp (stmt, group->lp_nr);
4571
f663d9ad
KT
4572 tree new_vdef;
4573 if (i < split_stores.length () - 1)
a62b3dc5 4574 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
f663d9ad
KT
4575 else
4576 new_vdef = last_vdef;
4577
4578 gimple_set_vdef (stmt, new_vdef);
4579 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4580 new_vuse = new_vdef;
4581 }
4582
4583 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4584 delete split_store;
4585
f663d9ad
KT
4586 gcc_assert (seq);
4587 if (dump_file)
4588 {
4589 fprintf (dump_file,
c94c3532 4590 "New sequence of %u stores to replace old one of %u stores\n",
a62b3dc5 4591 split_stores.length (), orig_num_stmts);
f663d9ad
KT
4592 if (dump_flags & TDF_DETAILS)
4593 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4594 }
629387a6 4595
5384a802
JJ
4596 if (gimple_clobber_p (group->last_stmt))
4597 update_stmt (group->last_stmt);
4598
629387a6
EB
4599 if (group->lp_nr > 0)
4600 {
4601 /* We're going to insert a sequence of (potentially) throwing stores
4602 into an active EH region. This means that we're going to create
4603 new basic blocks with EH edges pointing to the post landing pad
4604 and, therefore, to have to update its PHI nodes, if any. For the
4605 virtual PHI node, we're going to use the VDEFs created above, but
4606 for the other nodes, we need to record the original reaching defs. */
4607 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4608 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4609 basic_block last_bb = gimple_bb (group->last_stmt);
4610 edge last_edge = find_edge (last_bb, lp_bb);
4611 auto_vec<tree, 16> last_defs;
4612 gphi_iterator gpi;
4613 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4614 {
4615 gphi *phi = gpi.phi ();
4616 tree last_def;
4617 if (virtual_operand_p (gimple_phi_result (phi)))
4618 last_def = NULL_TREE;
4619 else
4620 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4621 last_defs.safe_push (last_def);
4622 }
4623
4624 /* Do the insertion. Then, if new basic blocks have been created in the
4625 process, rewind the chain of VDEFs created above to walk the new basic
4626 blocks and update the corresponding arguments of the PHI nodes. */
4627 update_modified_stmts (seq);
4628 if (gimple_find_sub_bbs (seq, &last_gsi))
4629 while (last_vdef != gimple_vuse (group->last_stmt))
4630 {
4631 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4632 if (stmt_could_throw_p (cfun, stmt))
4633 {
4634 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4635 unsigned int i;
4636 for (gpi = gsi_start_phis (lp_bb), i = 0;
4637 !gsi_end_p (gpi);
4638 gsi_next (&gpi), i++)
4639 {
4640 gphi *phi = gpi.phi ();
4641 tree new_def;
4642 if (virtual_operand_p (gimple_phi_result (phi)))
4643 new_def = last_vdef;
4644 else
4645 new_def = last_defs[i];
4646 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4647 }
4648 }
4649 last_vdef = gimple_vuse (stmt);
4650 }
4651 }
4652 else
4653 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4654
245f6de1
JJ
4655 for (int j = 0; j < 2; ++j)
4656 if (load_seq[j])
4657 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
f663d9ad
KT
4658
4659 return true;
4660}
4661
4662/* Process the merged_store_group objects created in the coalescing phase.
4663 The stores are all against the base object BASE.
4664 Try to output the widened stores and delete the original statements if
4665 successful. Return true iff any changes were made. */
4666
4667bool
b5926e23 4668imm_store_chain_info::output_merged_stores ()
f663d9ad
KT
4669{
4670 unsigned int i;
4671 merged_store_group *merged_store;
4672 bool ret = false;
4673 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4674 {
a95b474a
ML
4675 if (dbg_cnt (store_merging)
4676 && output_merged_store (merged_store))
f663d9ad
KT
4677 {
4678 unsigned int j;
4679 store_immediate_info *store;
4680 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4681 {
4682 gimple *stmt = store->stmt;
4683 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5384a802
JJ
4684 /* Don't remove clobbers, they are still useful even if
4685 everything is overwritten afterwards. */
4686 if (gimple_clobber_p (stmt))
4687 continue;
f663d9ad 4688 gsi_remove (&gsi, true);
629387a6
EB
4689 if (store->lp_nr)
4690 remove_stmt_from_eh_lp (stmt);
f663d9ad
KT
4691 if (stmt != merged_store->last_stmt)
4692 {
4693 unlink_stmt_vdef (stmt);
4694 release_defs (stmt);
4695 }
4696 }
4697 ret = true;
4698 }
4699 }
4700 if (ret && dump_file)
4701 fprintf (dump_file, "Merging successful!\n");
4702
4703 return ret;
4704}
4705
4706/* Coalesce the store_immediate_info objects recorded against the base object
4707 BASE in the first phase and output them.
4708 Delete the allocated structures.
4709 Return true if any changes were made. */
4710
4711bool
b5926e23 4712imm_store_chain_info::terminate_and_process_chain ()
f663d9ad
KT
4713{
4714 /* Process store chain. */
4715 bool ret = false;
4716 if (m_store_info.length () > 1)
4717 {
4718 ret = coalesce_immediate_stores ();
4719 if (ret)
b5926e23 4720 ret = output_merged_stores ();
f663d9ad
KT
4721 }
4722
4723 /* Delete all the entries we allocated ourselves. */
4724 store_immediate_info *info;
4725 unsigned int i;
4726 FOR_EACH_VEC_ELT (m_store_info, i, info)
4727 delete info;
4728
4729 merged_store_group *merged_info;
4730 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4731 delete merged_info;
4732
4733 return ret;
4734}
4735
4736/* Return true iff LHS is a destination potentially interesting for
4737 store merging. In practice these are the codes that get_inner_reference
4738 can process. */
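/* For example, a.b.c, a[3], MEM_REF [p + 4B] and plain DECLs qualify,
   whereas e.g. a TARGET_MEM_REF destination is rejected.  */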
4739
4740static bool
4741lhs_valid_for_store_merging_p (tree lhs)
4742{
629387a6 4743 if (DECL_P (lhs))
f663d9ad
KT
4744 return true;
4745
629387a6
EB
4746 switch (TREE_CODE (lhs))
4747 {
4748 case ARRAY_REF:
4749 case ARRAY_RANGE_REF:
4750 case BIT_FIELD_REF:
4751 case COMPONENT_REF:
4752 case MEM_REF:
e362a897 4753 case VIEW_CONVERT_EXPR:
629387a6
EB
4754 return true;
4755 default:
4756 return false;
4757 }
4758
4759 gcc_unreachable ();
f663d9ad
KT
4760}
4761
4762/* Return true if the tree RHS is a constant we want to consider
4763 during store merging. In practice accept all codes that
4764 native_encode_expr accepts. */
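/* For example, INTEGER_CSTs and REAL_CSTs of fixed-size modes are accepted,
   and so is an empty CONSTRUCTOR ({}) of known size, which represents
   clearing the whole object.  */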
4765
4766static bool
4767rhs_valid_for_store_merging_p (tree rhs)
4768{
cf098191 4769 unsigned HOST_WIDE_INT size;
3afd514b 4770 if (TREE_CODE (rhs) == CONSTRUCTOR
3afd514b
JJ
4771 && CONSTRUCTOR_NELTS (rhs) == 0
4772 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4773 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4774 return true;
cf098191
RS
4775 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4776 && native_encode_expr (rhs, NULL, size) != 0);
f663d9ad
KT
4777}
4778
629387a6
EB
4779/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4780 and return true on success or false on failure. */
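/* For instance, BYTE_OFF == 2 advances the bit position (and any recorded
   bit region bounds) by 16 bits; failure means the adjusted bit position
   would be negative or does not fit the host wide integer it is kept in.  */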
4781
4782static bool
4783adjust_bit_pos (poly_offset_int byte_off,
4784 poly_int64 *pbitpos,
4785 poly_uint64 *pbitregion_start,
4786 poly_uint64 *pbitregion_end)
4787{
4788 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4789 bit_off += *pbitpos;
4790
4791 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4792 {
4793 if (maybe_ne (*pbitregion_end, 0U))
4794 {
4795 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4796 bit_off += *pbitregion_start;
4797 if (bit_off.to_uhwi (pbitregion_start))
4798 {
4799 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4800 bit_off += *pbitregion_end;
4801 if (!bit_off.to_uhwi (pbitregion_end))
4802 *pbitregion_end = 0;
4803 }
4804 else
4805 *pbitregion_end = 0;
4806 }
4807 return true;
4808 }
4809 else
4810 return false;
4811}
4812
245f6de1
JJ
4813/* If MEM is a memory reference usable for store merging (either as
4814 store destination or for loads), return the non-NULL base_addr
4815 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4816 Otherwise return NULL; *PBITPOS should still be valid even in that
4817 case. */
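/* For example, a 4-byte store to MEM_REF [p + 4B] comes back with base_addr
   p and *PBITPOS == 32, the byte offset having been folded into the bit
   position, so it can share a chain with a store to MEM_REF [p].  */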
4818
4819static tree
8a91d545
RS
4820mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4821 poly_uint64 *pbitpos,
4822 poly_uint64 *pbitregion_start,
4823 poly_uint64 *pbitregion_end)
245f6de1 4824{
8a91d545
RS
4825 poly_int64 bitsize, bitpos;
4826 poly_uint64 bitregion_start = 0, bitregion_end = 0;
245f6de1
JJ
4827 machine_mode mode;
4828 int unsignedp = 0, reversep = 0, volatilep = 0;
4829 tree offset;
4830 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4831 &unsignedp, &reversep, &volatilep);
4832 *pbitsize = bitsize;
8a91d545 4833 if (known_eq (bitsize, 0))
245f6de1
JJ
4834 return NULL_TREE;
4835
4836 if (TREE_CODE (mem) == COMPONENT_REF
4837 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4838 {
4839 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
8a91d545
RS
4840 if (maybe_ne (bitregion_end, 0U))
4841 bitregion_end += 1;
245f6de1
JJ
4842 }
4843
4844 if (reversep)
4845 return NULL_TREE;
4846
4847 /* We do not want to rewrite TARGET_MEM_REFs. */
4848 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4849 return NULL_TREE;
4850 /* In some cases get_inner_reference may return a
4851 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4852 canonicalize the base_addr to MEM_REF [ptr] and take
4853 byteoffset into account in the bitpos. This occurs in
4854 PR 23684 and this way we can catch more chains. */
4855 else if (TREE_CODE (base_addr) == MEM_REF)
4856 {
629387a6
EB
4857 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4858 &bitregion_start, &bitregion_end))
245f6de1
JJ
4859 return NULL_TREE;
4860 base_addr = TREE_OPERAND (base_addr, 0);
4861 }
4862 /* get_inner_reference returns the base object, get at its
4863 address now. */
4864 else
4865 {
8a91d545 4866 if (maybe_lt (bitpos, 0))
245f6de1
JJ
4867 return NULL_TREE;
4868 base_addr = build_fold_addr_expr (base_addr);
4869 }
4870
629387a6 4871 if (offset)
245f6de1
JJ
4872 {
4873 /* If the access is variable offset then a base decl has to be
4874 address-taken to be able to emit pointer-based stores to it.
4875 ??? We might be able to get away with re-using the original
4876 base up to the first variable part and then wrapping that inside
4877 a BIT_FIELD_REF. */
4878 tree base = get_base_address (base_addr);
629387a6 4879 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
245f6de1
JJ
4880 return NULL_TREE;
4881
629387a6
EB
4882 /* Similarly to above for the base, remove constant from the offset. */
4883 if (TREE_CODE (offset) == PLUS_EXPR
4884 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4885 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4886 &bitpos, &bitregion_start, &bitregion_end))
4887 offset = TREE_OPERAND (offset, 0);
4888
245f6de1
JJ
4889 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4890 base_addr, offset);
4891 }
4892
629387a6
EB
4893 if (known_eq (bitregion_end, 0U))
4894 {
4895 bitregion_start = round_down_to_byte_boundary (bitpos);
4896 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4897 }
4898
245f6de1
JJ
4899 *pbitsize = bitsize;
4900 *pbitpos = bitpos;
4901 *pbitregion_start = bitregion_start;
4902 *pbitregion_end = bitregion_end;
4903 return base_addr;
4904}
4905
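When the reference is not a bit-field, mem_valid_for_store_merging falls back to the byte-aligned envelope of the accessed bits, as computed just above. A small self-contained sketch of that rounding follows; it is an editor's illustration with assumed helper names, not the GCC helpers themselves.

#include <stdio.h>

static unsigned long
round_down_to_byte (unsigned long bits)
{
  return bits & ~7UL;                 /* clear the sub-byte part */
}

static unsigned long
round_up_to_byte (unsigned long bits)
{
  return (bits + 7UL) & ~7UL;         /* bump to the next byte boundary */
}

int
main (void)
{
  unsigned long bitpos = 19, bitsize = 6;   /* a 6-bit access at bit 19 */
  unsigned long region_start = round_down_to_byte (bitpos);           /* 16 */
  unsigned long region_end = round_up_to_byte (bitpos + bitsize);     /* 32 */
  printf ("region = [%lu, %lu)\n", region_start, region_end);
  return 0;
}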
4906/* Return true if STMT is a load that can be used for store merging.
4907 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4908 BITREGION_END are properties of the corresponding store. */
4909
4910static bool
4911handled_load (gimple *stmt, store_operand_info *op,
8a91d545
RS
4912 poly_uint64 bitsize, poly_uint64 bitpos,
4913 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
245f6de1 4914{
383ac8dc 4915 if (!is_gimple_assign (stmt))
245f6de1 4916 return false;
383ac8dc
JJ
4917 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4918 {
4919 tree rhs1 = gimple_assign_rhs1 (stmt);
4920 if (TREE_CODE (rhs1) == SSA_NAME
383ac8dc
JJ
4921 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4922 bitregion_start, bitregion_end))
4923 {
d60edaba
JJ
4924 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4925 been optimized earlier, but if allowed here, would confuse the
4926 multiple uses counting. */
4927 if (op->bit_not_p)
4928 return false;
383ac8dc
JJ
4929 op->bit_not_p = !op->bit_not_p;
4930 return true;
4931 }
4932 return false;
4933 }
4934 if (gimple_vuse (stmt)
4935 && gimple_assign_load_p (stmt)
36bbc05d 4936 && !stmt_can_throw_internal (cfun, stmt)
245f6de1
JJ
4937 && !gimple_has_volatile_ops (stmt))
4938 {
4939 tree mem = gimple_assign_rhs1 (stmt);
4940 op->base_addr
4941 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4942 &op->bitregion_start,
4943 &op->bitregion_end);
4944 if (op->base_addr != NULL_TREE
8a91d545
RS
4945 && known_eq (op->bitsize, bitsize)
4946 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4947 && known_ge (op->bitpos - op->bitregion_start,
4948 bitpos - bitregion_start)
4949 && known_ge (op->bitregion_end - op->bitpos,
4950 bitregion_end - bitpos))
245f6de1
JJ
4951 {
4952 op->stmt = stmt;
4953 op->val = mem;
383ac8dc 4954 op->bit_not_p = false;
245f6de1
JJ
4955 return true;
4956 }
4957 }
4958 return false;
4959}
4960
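handled_load accepts a feeding load only when it has the same width as the store, sits a whole number of bytes away from it, and its bit region leaves at least as much room on either side as the store's region does. The sketch below mirrors those checks with plain integers; it is an editor's illustration, whereas the real code uses poly_uint64 values and the known_*/multiple_p predicates.

#include <stdbool.h>
#include <stdio.h>

static bool
load_compatible_with_store (long ld_bitsize, long ld_bitpos,
                            long ld_rstart, long ld_rend,
                            long st_bitsize, long st_bitpos,
                            long st_rstart, long st_rend)
{
  return ld_bitsize == st_bitsize                           /* same width */
         && (ld_bitpos - st_bitpos) % 8 == 0                /* byte-aligned distance */
         && ld_bitpos - ld_rstart >= st_bitpos - st_rstart  /* slack below */
         && ld_rend - ld_bitpos >= st_rend - st_bitpos;     /* slack above */
}

int
main (void)
{
  /* A 16-bit load at bit 32 of region [24, 56) feeding a 16-bit store
     at bit 8 of region [0, 32): accepted, prints 1.  */
  printf ("%d\n", load_compatible_with_store (16, 32, 24, 56, 16, 8, 0, 32));
  /* Same load feeding a store whose region reaches bit 64: rejected,
     the load's region leaves too little slack above, prints 0.  */
  printf ("%d\n", load_compatible_with_store (16, 32, 24, 56, 16, 8, 0, 64));
  return 0;
}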
629387a6
EB
4961/* Return the index number of the landing pad for STMT, if any. */
4962
4963static int
4964lp_nr_for_store (gimple *stmt)
4965{
4966 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4967 return 0;
4968
4969 if (!stmt_could_throw_p (cfun, stmt))
4970 return 0;
4971
4972 return lookup_stmt_eh_lp (stmt);
4973}
4974
245f6de1 4975/* Record the store STMT for store merging optimization if it can be
629387a6 4976 optimized. Return true if any changes were made. */
245f6de1 4977
629387a6 4978bool
245f6de1
JJ
4979pass_store_merging::process_store (gimple *stmt)
4980{
4981 tree lhs = gimple_assign_lhs (stmt);
4982 tree rhs = gimple_assign_rhs1 (stmt);
2c832ffe
SSF
4983 poly_uint64 bitsize, bitpos = 0;
4984 poly_uint64 bitregion_start = 0, bitregion_end = 0;
245f6de1
JJ
4985 tree base_addr
4986 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4987 &bitregion_start, &bitregion_end);
8a91d545 4988 if (known_eq (bitsize, 0U))
629387a6 4989 return false;
245f6de1
JJ
4990
4991 bool invalid = (base_addr == NULL_TREE
8a91d545
RS
4992 || (maybe_gt (bitsize,
4993 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
3afd514b
JJ
4994 && TREE_CODE (rhs) != INTEGER_CST
4995 && (TREE_CODE (rhs) != CONSTRUCTOR
4996 || CONSTRUCTOR_NELTS (rhs) != 0)));
245f6de1 4997 enum tree_code rhs_code = ERROR_MARK;
d60edaba 4998 bool bit_not_p = false;
4b84d9b8
JJ
4999 struct symbolic_number n;
5000 gimple *ins_stmt = NULL;
245f6de1
JJ
5001 store_operand_info ops[2];
5002 if (invalid)
5003 ;
e362a897
EB
5004 else if (TREE_CODE (rhs) == STRING_CST)
5005 {
5006 rhs_code = STRING_CST;
5007 ops[0].val = rhs;
5008 }
245f6de1
JJ
5009 else if (rhs_valid_for_store_merging_p (rhs))
5010 {
5011 rhs_code = INTEGER_CST;
5012 ops[0].val = rhs;
5013 }
e362a897 5014 else if (TREE_CODE (rhs) == SSA_NAME)
245f6de1
JJ
5015 {
5016 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
5017 if (!is_gimple_assign (def_stmt))
5018 invalid = true;
5019 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
5020 bitregion_start, bitregion_end))
5021 rhs_code = MEM_REF;
d60edaba
JJ
5022 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
5023 {
5024 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5025 if (TREE_CODE (rhs1) == SSA_NAME
5026 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
5027 {
5028 bit_not_p = true;
5029 def_stmt = SSA_NAME_DEF_STMT (rhs1);
5030 }
5031 }
c94c3532 5032
d60edaba 5033 if (rhs_code == ERROR_MARK && !invalid)
245f6de1
JJ
5034 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
5035 {
5036 case BIT_AND_EXPR:
5037 case BIT_IOR_EXPR:
5038 case BIT_XOR_EXPR:
5039 tree rhs1, rhs2;
5040 rhs1 = gimple_assign_rhs1 (def_stmt);
5041 rhs2 = gimple_assign_rhs2 (def_stmt);
5042 invalid = true;
d7a9512e 5043 if (TREE_CODE (rhs1) != SSA_NAME)
245f6de1
JJ
5044 break;
5045 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
5046 if (!is_gimple_assign (def_stmt1)
5047 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
5048 bitregion_start, bitregion_end))
5049 break;
5050 if (rhs_valid_for_store_merging_p (rhs2))
5051 ops[1].val = rhs2;
d7a9512e 5052 else if (TREE_CODE (rhs2) != SSA_NAME)
245f6de1
JJ
5053 break;
5054 else
5055 {
5056 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
5057 if (!is_gimple_assign (def_stmt2))
5058 break;
5059 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
5060 bitregion_start, bitregion_end))
5061 break;
5062 }
5063 invalid = false;
5064 break;
5065 default:
5066 invalid = true;
5067 break;
5068 }
c94c3532 5069
8a91d545
RS
5070 unsigned HOST_WIDE_INT const_bitsize;
5071 if (bitsize.is_constant (&const_bitsize)
c94c3532 5072 && (const_bitsize % BITS_PER_UNIT) == 0
8a91d545 5073 && const_bitsize <= 64
c94c3532 5074 && multiple_p (bitpos, BITS_PER_UNIT))
4b84d9b8
JJ
5075 {
5076 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
5077 if (ins_stmt)
5078 {
5079 uint64_t nn = n.n;
5080 for (unsigned HOST_WIDE_INT i = 0;
8a91d545
RS
5081 i < const_bitsize;
5082 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4b84d9b8
JJ
5083 if ((nn & MARKER_MASK) == 0
5084 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
5085 {
5086 ins_stmt = NULL;
5087 break;
5088 }
5089 if (ins_stmt)
5090 {
5091 if (invalid)
5092 {
5093 rhs_code = LROTATE_EXPR;
5094 ops[0].base_addr = NULL_TREE;
5095 ops[1].base_addr = NULL_TREE;
5096 }
5097 invalid = false;
5098 }
5099 }
5100 }
c94c3532
EB
5101
5102 if (invalid
5103 && bitsize.is_constant (&const_bitsize)
5104 && ((const_bitsize % BITS_PER_UNIT) != 0
5105 || !multiple_p (bitpos, BITS_PER_UNIT))
ed01d707 5106 && const_bitsize <= MAX_FIXED_MODE_SIZE)
c94c3532 5107 {
c14add82 5108 /* Bypass a conversion to the bit-field type. */
31a5d8c5
EB
5109 if (!bit_not_p
5110 && is_gimple_assign (def_stmt)
5111 && CONVERT_EXPR_CODE_P (rhs_code))
c94c3532
EB
5112 {
5113 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5114 if (TREE_CODE (rhs1) == SSA_NAME
c14add82 5115 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
c94c3532
EB
5116 rhs = rhs1;
5117 }
5118 rhs_code = BIT_INSERT_EXPR;
31a5d8c5 5119 bit_not_p = false;
c94c3532
EB
5120 ops[0].val = rhs;
5121 ops[0].base_addr = NULL_TREE;
5122 ops[1].base_addr = NULL_TREE;
5123 invalid = false;
5124 }
245f6de1 5125 }
e362a897
EB
5126 else
5127 invalid = true;
245f6de1 5128
8a91d545
RS
5129 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
5130 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
5131 if (invalid
5132 || !bitsize.is_constant (&const_bitsize)
5133 || !bitpos.is_constant (&const_bitpos)
5134 || !bitregion_start.is_constant (&const_bitregion_start)
5135 || !bitregion_end.is_constant (&const_bitregion_end))
629387a6 5136 return terminate_all_aliasing_chains (NULL, stmt);
245f6de1 5137
4b84d9b8
JJ
5138 if (!ins_stmt)
5139 memset (&n, 0, sizeof (n));
5140
99b1c316 5141 class imm_store_chain_info **chain_info = NULL;
629387a6 5142 bool ret = false;
383ac8dc
JJ
5143 if (base_addr)
5144 chain_info = m_stores.get (base_addr);
5145
245f6de1
JJ
5146 store_immediate_info *info;
5147 if (chain_info)
5148 {
5149 unsigned int ord = (*chain_info)->m_store_info.length ();
8a91d545
RS
5150 info = new store_immediate_info (const_bitsize, const_bitpos,
5151 const_bitregion_start,
5152 const_bitregion_end,
5153 stmt, ord, rhs_code, n, ins_stmt,
629387a6
EB
5154 bit_not_p, lp_nr_for_store (stmt),
5155 ops[0], ops[1]);
245f6de1
JJ
5156 if (dump_file && (dump_flags & TDF_DETAILS))
5157 {
5158 fprintf (dump_file, "Recording immediate store from stmt:\n");
5159 print_gimple_stmt (dump_file, stmt, 0);
5160 }
5161 (*chain_info)->m_store_info.safe_push (info);
629387a6 5162 ret |= terminate_all_aliasing_chains (chain_info, stmt);
245f6de1
JJ
5163 /* If we reach the limit of stores to merge in a chain, terminate and
5164 process the chain now. */
5165 if ((*chain_info)->m_store_info.length ()
028d4092 5166 == (unsigned int) param_max_stores_to_merge)
245f6de1
JJ
5167 {
5168 if (dump_file && (dump_flags & TDF_DETAILS))
5169 fprintf (dump_file,
5170 "Reached maximum number of statements to merge:\n");
629387a6 5171 ret |= terminate_and_process_chain (*chain_info);
245f6de1 5172 }
629387a6 5173 return ret;
245f6de1
JJ
5174 }
5175
5176 /* Does the store alias any existing chain? */
629387a6 5177 ret |= terminate_all_aliasing_chains (NULL, stmt);
245f6de1 5178 /* Start a new chain. */
99b1c316 5179 class imm_store_chain_info *new_chain
245f6de1 5180 = new imm_store_chain_info (m_stores_head, base_addr);
8a91d545
RS
5181 info = new store_immediate_info (const_bitsize, const_bitpos,
5182 const_bitregion_start,
5183 const_bitregion_end,
5184 stmt, 0, rhs_code, n, ins_stmt,
629387a6
EB
5185 bit_not_p, lp_nr_for_store (stmt),
5186 ops[0], ops[1]);
245f6de1
JJ
5187 new_chain->m_store_info.safe_push (info);
5188 m_stores.put (base_addr, new_chain);
5189 if (dump_file && (dump_flags & TDF_DETAILS))
5190 {
5191 fprintf (dump_file, "Starting new chain with statement:\n");
5192 print_gimple_stmt (dump_file, stmt, 0);
5193 fprintf (dump_file, "The base object is:\n");
5194 print_generic_expr (dump_file, base_addr);
5195 fprintf (dump_file, "\n");
5196 }
629387a6
EB
5197 return ret;
5198}
5199
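Within process_store, the bswap/rotate candidate found by find_bswap_or_nop_1 is kept only if every destination byte of the symbolic number has a known source byte. The sketch below reproduces that per-byte marker scan in isolation; it is an editor's illustration, and the 8-bit marker width and 0xff mask/unknown values are assumptions standing in for BITS_PER_MARKER, MARKER_MASK and MARKER_BYTE_UNKNOWN, which are defined earlier in this file.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ILL_BITS_PER_MARKER 8      /* assumed marker width */
#define ILL_MARKER_MASK 0xff       /* assumed marker mask */
#define ILL_MARKER_BYTE_UNKNOWN 0xff

static bool
all_bytes_have_known_source (uint64_t n, unsigned bitsize)
{
  /* One marker per destination byte; a zero or "unknown" marker means
     that byte cannot be rematerialized from the detected source.  */
  for (unsigned i = 0; i < bitsize; i += 8, n >>= ILL_BITS_PER_MARKER)
    {
      unsigned marker = n & ILL_MARKER_MASK;
      if (marker == 0 || marker == ILL_MARKER_BYTE_UNKNOWN)
        return false;
    }
  return true;
}

int
main (void)
{
  printf ("%d\n", all_bytes_have_known_source (0x01020304ULL, 32));  /* 1 */
  printf ("%d\n", all_bytes_have_known_source (0x01020004ULL, 32));  /* 0 */
  return 0;
}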
5200/* Return true if STMT is a store valid for store merging. */
5201
5202static bool
5203store_valid_for_store_merging_p (gimple *stmt)
5204{
5205 return gimple_assign_single_p (stmt)
5206 && gimple_vdef (stmt)
5207 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
5384a802 5208 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
629387a6
EB
5209}
5210
5211enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
5212
5213/* Return the status of basic block BB wrt store merging. */
5214
5215static enum basic_block_status
5216get_status_for_store_merging (basic_block bb)
5217{
5218 unsigned int num_statements = 0;
a7553ad6 5219 unsigned int num_constructors = 0;
629387a6
EB
5220 gimple_stmt_iterator gsi;
5221 edge e;
5222
5223 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5224 {
5225 gimple *stmt = gsi_stmt (gsi);
5226
5227 if (is_gimple_debug (stmt))
5228 continue;
5229
5230 if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
5231 break;
a7553ad6
JJ
5232
5233 if (is_gimple_assign (stmt)
5234 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
5235 {
5236 tree rhs = gimple_assign_rhs1 (stmt);
5237 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
5238 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
5239 && gimple_assign_lhs (stmt) != NULL_TREE)
5240 {
5241 HOST_WIDE_INT sz
5242 = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
5243 if (sz == 16 || sz == 32 || sz == 64)
5244 {
5245 num_constructors = 1;
5246 break;
5247 }
5248 }
5249 }
629387a6
EB
5250 }
5251
a7553ad6 5252 if (num_statements == 0 && num_constructors == 0)
629387a6
EB
5253 return BB_INVALID;
5254
5255 if (cfun->can_throw_non_call_exceptions && cfun->eh
5256 && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
5257 && (e = find_fallthru_edge (bb->succs))
5258 && e->dest == bb->next_bb)
5259 return BB_EXTENDED_VALID;
5260
a7553ad6 5261 return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
245f6de1
JJ
5262}
5263
f663d9ad 5264/* Entry point for the pass. Go over each basic block recording chains of
245f6de1
JJ
5265 immediate stores. Upon encountering a terminating statement (as defined
5266 by stmt_terminates_chain_p), process the recorded stores and emit the widened
5267 variants. */
f663d9ad
KT
5268
5269unsigned int
5270pass_store_merging::execute (function *fun)
5271{
5272 basic_block bb;
5273 hash_set<gimple *> orig_stmts;
629387a6
EB
5274 bool changed = false, open_chains = false;
5275
5276 /* If the function can throw and catch non-call exceptions, we'll be trying
5277 to merge stores across different basic blocks so we need to first unsplit
5278 the EH edges in order to streamline the CFG of the function. */
5279 if (cfun->can_throw_non_call_exceptions && cfun->eh)
5280 unsplit_eh_edges ();
f663d9ad 5281
4b84d9b8
JJ
5282 calculate_dominance_info (CDI_DOMINATORS);
5283
f663d9ad
KT
5284 FOR_EACH_BB_FN (bb, fun)
5285 {
629387a6 5286 const basic_block_status bb_status = get_status_for_store_merging (bb);
f663d9ad 5287 gimple_stmt_iterator gsi;
f663d9ad 5288
629387a6
EB
5289 if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
5290 {
5291 changed |= terminate_and_process_all_chains ();
5292 open_chains = false;
f663d9ad
KT
5293 }
5294
629387a6 5295 if (bb_status == BB_INVALID)
f663d9ad
KT
5296 continue;
5297
5298 if (dump_file && (dump_flags & TDF_DETAILS))
5299 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
5300
a7553ad6 5301 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
f663d9ad
KT
5302 {
5303 gimple *stmt = gsi_stmt (gsi);
a7553ad6 5304 gsi_next (&gsi);
f663d9ad 5305
50b6d676
AO
5306 if (is_gimple_debug (stmt))
5307 continue;
5308
5384a802 5309 if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
f663d9ad
KT
5310 {
5311 /* Terminate all chains. */
5312 if (dump_file && (dump_flags & TDF_DETAILS))
5313 fprintf (dump_file, "Volatile access terminates "
5314 "all chains\n");
629387a6
EB
5315 changed |= terminate_and_process_all_chains ();
5316 open_chains = false;
f663d9ad
KT
5317 continue;
5318 }
5319
a7553ad6
JJ
5320 if (is_gimple_assign (stmt)
5321 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
5322 && maybe_optimize_vector_constructor (stmt))
5323 continue;
5324
629387a6
EB
5325 if (store_valid_for_store_merging_p (stmt))
5326 changed |= process_store (stmt);
245f6de1 5327 else
629387a6
EB
5328 changed |= terminate_all_aliasing_chains (NULL, stmt);
5329 }
5330
5331 if (bb_status == BB_EXTENDED_VALID)
5332 open_chains = true;
5333 else
5334 {
5335 changed |= terminate_and_process_all_chains ();
5336 open_chains = false;
f663d9ad 5337 }
f663d9ad 5338 }
629387a6
EB
5339
5340 if (open_chains)
5341 changed |= terminate_and_process_all_chains ();
5342
5343 /* If the function can throw and catch non-call exceptions and something
5344 changed during the pass, then the CFG has (very likely) changed too. */
5345 if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5346 {
5347 free_dominance_info (CDI_DOMINATORS);
5348 return TODO_cleanup_cfg;
5349 }
5350
f663d9ad
KT
5351 return 0;
5352}
5353
5354} // anon namespace
5355
5356/* Construct and return a store merging pass object. */
5357
5358gimple_opt_pass *
5359make_pass_store_merging (gcc::context *ctxt)
5360{
5361 return new pass_store_merging (ctxt);
5362}
c22d8787
KT
5363
5364#if CHECKING_P
5365
5366namespace selftest {
5367
5368/* Selftests for store merging helpers. */
5369
5370/* Assert that all elements of the byte arrays X and Y, both of length N
5371 are equal. */
5372
5373static void
5374verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5375{
5376 for (unsigned int i = 0; i < n; i++)
5377 {
5378 if (x[i] != y[i])
5379 {
5380 fprintf (stderr, "Arrays do not match. X:\n");
5381 dump_char_array (stderr, x, n);
5382 fprintf (stderr, "Y:\n");
5383 dump_char_array (stderr, y, n);
5384 }
5385 ASSERT_EQ (x[i], y[i]);
5386 }
5387}
5388
8aba425f 5389/* Test that shift_bytes_in_array_left carries bits between
c22d8787
KT
5390 bytes correctly. */
5391
5392static void
8aba425f 5393verify_shift_bytes_in_array_left (void)
c22d8787
KT
5394{
5395 /* byte 1 | byte 0
5396 00011111 | 11100000. */
5397 unsigned char orig[2] = { 0xe0, 0x1f };
5398 unsigned char in[2];
5399 memcpy (in, orig, sizeof orig);
5400
5401 unsigned char expected[2] = { 0x80, 0x7f };
8aba425f 5402 shift_bytes_in_array_left (in, sizeof (in), 2);
c22d8787
KT
5403 verify_array_eq (in, expected, sizeof (in));
5404
5405 memcpy (in, orig, sizeof orig);
5406 memcpy (expected, orig, sizeof orig);
5407 /* Check that shifting by zero doesn't change anything. */
8aba425f 5408 shift_bytes_in_array_left (in, sizeof (in), 0);
c22d8787
KT
5409 verify_array_eq (in, expected, sizeof (in));
5410
5411}
5412
5413/* Test that shift_bytes_in_array_right carries bits between
5414 bytes correctly. */
5415
5416static void
5417verify_shift_bytes_in_array_right (void)
5418{
5419 /* byte 1 | byte 0
5420 11100000 | 00011111. */
5421 unsigned char orig[2] = { 0x1f, 0xe0};
5422 unsigned char in[2];
5423 memcpy (in, orig, sizeof orig);
5424 unsigned char expected[2] = { 0x07, 0xf8};
5425 shift_bytes_in_array_right (in, sizeof (in), 2);
5426 verify_array_eq (in, expected, sizeof (in));
5427
5428 memcpy (in, orig, sizeof orig);
5429 memcpy (expected, orig, sizeof orig);
5430 /* Check that shifting by zero doesn't change anything. */
5431 shift_bytes_in_array_right (in, sizeof (in), 0);
5432 verify_array_eq (in, expected, sizeof (in));
5433}
5434
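A minimal model of the two shift helpers, consistent with the expected values in the selftests above: the _left variant treats ptr[0] as the least significant byte, the _right variant treats ptr[0] as the most significant byte. This is an editor's illustration, not the GCC implementation, and AMNT is assumed to be less than BITS_PER_UNIT.

#include <stdio.h>

static void
model_shift_bytes_left (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  unsigned char carry = 0;
  for (unsigned int i = 0; i < sz; i++)
    {
      /* Bits shifted out of this byte carry into the next one.  */
      unsigned char next = amnt ? (unsigned char) (ptr[i] >> (8 - amnt)) : 0;
      ptr[i] = (unsigned char) ((ptr[i] << amnt) | carry);
      carry = next;
    }
}

static void
model_shift_bytes_right (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  unsigned char carry = 0;
  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned char next = amnt ? (unsigned char) (ptr[i] << (8 - amnt)) : 0;
      ptr[i] = (unsigned char) ((ptr[i] >> amnt) | carry);
      carry = next;
    }
}

int
main (void)
{
  unsigned char a[2] = { 0xe0, 0x1f }, b[2] = { 0x1f, 0xe0 };
  model_shift_bytes_left (a, 2, 2);    /* -> { 0x80, 0x7f } */
  model_shift_bytes_right (b, 2, 2);   /* -> { 0x07, 0xf8 } */
  printf ("%02x %02x / %02x %02x\n", a[0], a[1], b[0], b[1]);
  return 0;
}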
5435/* Test that clear_bit_region clears exactly the bits asked for and
5436 nothing more. */
5437
5438static void
5439verify_clear_bit_region (void)
5440{
5441 /* Start with all bits set and test clearing various patterns in them. */
5442 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5443 unsigned char in[3];
5444 unsigned char expected[3];
5445 memcpy (in, orig, sizeof in);
5446
5447 /* Check zeroing out all the bits. */
5448 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5449 expected[0] = expected[1] = expected[2] = 0;
5450 verify_array_eq (in, expected, sizeof in);
5451
5452 memcpy (in, orig, sizeof in);
5453 /* Leave the first and last bits intact. */
5454 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5455 expected[0] = 0x1;
5456 expected[1] = 0;
5457 expected[2] = 0x80;
5458 verify_array_eq (in, expected, sizeof in);
5459}
5460
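A minimal model of clear_bit_region as the selftest above exercises it, with bit 0 being the least significant bit of byte 0. This is an editor's illustration, not the GCC implementation.

#include <stdio.h>

static void
model_clear_bit_region (unsigned char *ptr, unsigned int start, unsigned int len)
{
  /* Clear LEN bits starting at bit START, LSB-first within each byte.  */
  for (unsigned int i = start; i < start + len; i++)
    ptr[i / 8] = (unsigned char) (ptr[i / 8] & ~(1u << (i % 8)));
}

int
main (void)
{
  unsigned char in[3] = { 0xff, 0xff, 0xff };
  model_clear_bit_region (in, 1, 3 * 8 - 2);          /* keep first and last bits */
  printf ("%02x %02x %02x\n", in[0], in[1], in[2]);   /* 01 00 80 */
  return 0;
}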
5384a802 5461/* Test that clear_bit_region_be clears exactly the bits asked for and
c22d8787
KT
5462 nothing more. */
5463
5464static void
5465verify_clear_bit_region_be (void)
5466{
5467 /* Start with all bits set and test clearing various patterns in them. */
5468 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5469 unsigned char in[3];
5470 unsigned char expected[3];
5471 memcpy (in, orig, sizeof in);
5472
5473 /* Check zeroing out all the bits. */
5474 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5475 expected[0] = expected[1] = expected[2] = 0;
5476 verify_array_eq (in, expected, sizeof in);
5477
5478 memcpy (in, orig, sizeof in);
5479 /* Leave the first and last bits intact. */
5480 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5481 expected[0] = 0x80;
5482 expected[1] = 0;
5483 expected[2] = 0x1;
5484 verify_array_eq (in, expected, sizeof in);
5485}
5486
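A minimal model of clear_bit_region_be matching the selftest above: START names a bit of byte 0 with BITS_PER_UNIT - 1 as its most significant bit, and the cleared region runs toward less significant bits and on into the following bytes. This is an editor's illustration, not the GCC implementation, and START is assumed to be less than BITS_PER_UNIT.

#include <stdio.h>

static void
model_clear_bit_region_be (unsigned char *ptr, unsigned int start,
                           unsigned int len)
{
  /* Convert START (MSB-first within byte 0) to a global MSB-first index
     and clear LEN consecutive bits from there.  */
  for (unsigned int j = 7 - start; j < (7 - start) + len; j++)
    ptr[j / 8] = (unsigned char) (ptr[j / 8] & ~(1u << (7 - j % 8)));
}

int
main (void)
{
  unsigned char in[3] = { 0xff, 0xff, 0xff };
  model_clear_bit_region_be (in, 6, 3 * 8 - 2);       /* keep first and last bits */
  printf ("%02x %02x %02x\n", in[0], in[1], in[2]);   /* 80 00 01 */
  return 0;
}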
5487
5488/* Run all of the selftests within this file. */
5489
5490void
5491store_merging_c_tests (void)
5492{
8aba425f 5493 verify_shift_bytes_in_array_left ();
c22d8787
KT
5494 verify_shift_bytes_in_array_right ();
5495 verify_clear_bit_region ();
5496 verify_clear_bit_region_be ();
5497}
5498
5499} // namespace selftest
5500#endif /* CHECKING_P. */