/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap, this can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap, this can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in store_operand_info struct.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
   objects are not adjacent, a new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
  p |LE|BE|
  ---------
  0 |34|12|
  1 |12|34|
  2 |78|56|
  3 |56|78|
  4 |ab|ab|
  5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //  val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */

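/* As a concrete illustration, here is the kind of C fragment the pass is
   meant to handle (the struct and function names below are only for
   illustration and do not appear elsewhere in this file):

     struct rgba { unsigned char r, g, b, a; };

     void
     set_opaque_black (struct rgba *p)
     {
       p->r = 0;     // [p     ] := 0
       p->g = 0;     // [p + 1B] := 0
       p->b = 0;     // [p + 2B] := 0
       p->a = 255;   // [p + 3B] := 255
     }

   On a little-endian target that supports the wider access, phases 1)-3)
   above may rewrite the four byte stores into a single 32-bit store of the
   constant 0xff000000.  */
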
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "except.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "dbgcnt.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
	   type of the current expression.  For instance, for an array char a[],
	   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4
	   while (short) a[0] | ((short) a[0] << 1) would still have a size of 2
	   but this time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)

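/* As an illustration of the markers above, consider a hand-written 32-bit
   byte swap of the sort the bswap pass looks for (illustrative code only):

     unsigned int
     my_bswap32 (unsigned int x)
     {
       return ((x & 0x000000ff) << 24)
	      | ((x & 0x0000ff00) << 8)
	      | ((x & 0x00ff0000) >> 8)
	      | ((x & 0xff000000) >> 24);
     }

   Starting from the symbolic number 0x04030201 for X, the masks and shifts
   permute the byte markers so that the final bitwise OR yields 0x01020304.
   That matches CMPXCHG for a 4-byte value, so the whole expression can be
   replaced by a __builtin_bswap32 of the source.  */
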
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
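
/* For example, for a 4-byte symbolic number initialized to 0x04030201,
   do_shift_rotate (LSHIFT_EXPR, n, 8) turns N->n into 0x03020100: the byte
   markers move up by one position and the low marker becomes 0, meaning the
   least significant byte of the result is known to be zero.  */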

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
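
/* For instance, for a 32-bit SSA name the symbolic number is initialized to
   CMPNOP masked down to four markers, i.e. 0x04030201: the lowest marker (1)
   tracks the lsb of SRC, the highest marker (4) tracks its msb, and N->range
   is 4.  */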

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory area
   accessed and the offset of the access from that base are recorded in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on
   two symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}

/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
	  || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
	return NULL;

      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by 2 * (log2(n) + 1) here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    return NULL;

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}
886const pass_data pass_data_optimize_bswap =
887{
888 GIMPLE_PASS, /* type */
889 "bswap", /* name */
890 OPTGROUP_NONE, /* optinfo_flags */
891 TV_NONE, /* tv_id */
892 PROP_ssa, /* properties_required */
893 0, /* properties_provided */
894 0, /* properties_destroyed */
895 0, /* todo_flags_start */
896 0, /* todo_flags_finish */
897};
898
899class pass_optimize_bswap : public gimple_opt_pass
900{
901public:
902 pass_optimize_bswap (gcc::context *ctxt)
903 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
904 {}
905
906 /* opt_pass methods: */
907 virtual bool gate (function *)
908 {
909 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
910 }
911
912 virtual unsigned int execute (function *);
913
914}; // class pass_optimize_bswap
915
/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and set-alias of the
   load to perform are also given in N while the builtin bswap invoke is given
   in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of the
   load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the load of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  g = gimple_build_assign (tgt, NOP_EXPR, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, TDF_NONE);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, TDF_NONE);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to bswap builtin invocation or
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     there makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

namespace {

/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */

class store_operand_info
{
public:
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}

/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

class store_immediate_info
{
public:
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant store, STRING_CST for string store,
     MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
     BIT_INSERT_EXPR for bit insertion.
     LROTATE_EXPR if it can be only bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* The index number of the landing pad, or 0 if there is none.  */
  int lp_nr;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			gimple *, unsigned int, enum tree_code,
			struct symbolic_number &, gimple *, bool, int,
			const store_operand_info &,
			const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
					    unsigned HOST_WIDE_INT bp,
					    unsigned HOST_WIDE_INT brs,
					    unsigned HOST_WIDE_INT bre,
					    gimple *st,
					    unsigned int ord,
					    enum tree_code rhscode,
					    struct symbolic_number &nr,
					    gimple *ins_stmtp,
					    bool bitnotp,
					    int nr2,
					    const store_operand_info &op0r,
					    const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
    lp_nr (nr2)
#if __cplusplus >= 201103L
  , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif

/* Struct representing a group of stores to contiguous memory locations.
   These are produced by the second phase (coalescing) and consumed in the
   third phase that outputs the widened stores.  */

class merged_store_group
{
public:
  unsigned HOST_WIDE_INT start;
  unsigned HOST_WIDE_INT width;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  /* The size of the allocated memory for val and mask.  */
  unsigned HOST_WIDE_INT buf_size;
  unsigned HOST_WIDE_INT align_base;
  poly_uint64 load_align_base[2];

  unsigned int align;
  unsigned int load_align[2];
  unsigned int first_order;
  unsigned int last_order;
  bool bit_insertion;
  bool string_concatenation;
  bool only_constants;
  unsigned int first_nonmergeable_order;
  int lp_nr;

  auto_vec<store_immediate_info *> stores;
  /* We record the first and last original statements in the sequence because
     we'll need their vuse/vdef and replacement position.  It's easier to keep
     track of them separately as 'stores' is reordered by apply_stores.  */
  gimple *last_stmt;
  gimple *first_stmt;
  unsigned char *val;
  unsigned char *mask;

  merged_store_group (store_immediate_info *);
  ~merged_store_group ();
  bool can_be_merged_into (store_immediate_info *);
  void merge_into (store_immediate_info *);
  void merge_overlapping (store_immediate_info *);
  bool apply_stores ();
private:
  void do_merge (store_immediate_info *);
};

/* Debug helper.  Dump LEN elements of byte array PTR to FD in hex.  */

static void
dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
{
  if (!fd)
    return;

  for (unsigned int i = 0; i < len; i++)
    fprintf (fd, "%02x ", ptr[i]);
  fprintf (fd, "\n");
}

/* Clear out LEN bits starting from bit START in the byte array
   PTR.  This clears the bits to the *right* from START.
   START must be within [0, BITS_PER_UNIT) and counts starting from
   the least significant bit.  */

static void
clear_bit_region_be (unsigned char *ptr, unsigned int start,
		     unsigned int len)
{
  if (len == 0)
    return;
  /* Clear len bits to the right of start.  */
  else if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len));
      mask = mask << (start + 1U - len);
      ptr[0] &= ~mask;
    }
  else if (start != BITS_PER_UNIT - 1)
    {
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
			   len - (start % BITS_PER_UNIT) - 1);
    }
  else if (start == BITS_PER_UNIT - 1
	   && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
			     len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}

/* In the byte array PTR clear the bit region starting at bit
   START that is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */

static void
clear_bit_region (unsigned char *ptr, unsigned int start,
		  unsigned int len)
{
  /* Degenerate base case.  */
  if (len == 0)
    return;
  else if (start >= BITS_PER_UNIT)
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
  /* Second base case.  */
  else if ((start + len) <= BITS_PER_UNIT)
    {
      unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
      mask >>= BITS_PER_UNIT - (start + len);

      ptr[0] &= ~mask;

      return;
    }
  /* Clear most significant bits in a byte and proceed with the next byte.  */
  else if (start != 0)
    {
      clear_bit_region (ptr, start, BITS_PER_UNIT - start);
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
    }
  /* Whole bytes need to be cleared.  */
  else if (start == 0 && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
      memset (ptr, '\0', nbytes);
      /* Clear the remaining sub-byte region if there is one.  */
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
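
/* As a worked example with BITS_PER_UNIT == 8: clear_bit_region (ptr, 6, 5)
   clears bits 6-7 of ptr[0] (mask 0xc0) and bits 0-2 of ptr[1] (mask 0x07),
   while clear_bit_region_be (ptr, 5, 3) clears bits 3-5 of ptr[0]
   (mask 0x38).  */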

/* Write BITLEN bits of EXPR to the byte array PTR at
   bit position BITPOS.  PTR should contain TOTAL_BYTES elements.
   Return true if the operation succeeded.  */

static bool
encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
		       unsigned int total_bytes)
{
  unsigned int first_byte = bitpos / BITS_PER_UNIT;
  bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
			|| (bitpos % BITS_PER_UNIT)
			|| !int_mode_for_size (bitlen, 0).exists ());
  bool empty_ctor_p
    = (TREE_CODE (expr) == CONSTRUCTOR
       && CONSTRUCTOR_NELTS (expr) == 0
       && TYPE_SIZE_UNIT (TREE_TYPE (expr))
       && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));

  if (!sub_byte_op_p)
    {
      if (first_byte >= total_bytes)
	return false;
      total_bytes -= first_byte;
      if (empty_ctor_p)
	{
	  unsigned HOST_WIDE_INT rhs_bytes
	    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
	  if (rhs_bytes > total_bytes)
	    return false;
	  memset (ptr + first_byte, '\0', rhs_bytes);
	  return true;
	}
      return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
    }

  /* LITTLE-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
       |--------|--------|--------| ptr + first_byte
	      ^              ^
	      xxx xxxxxxxx xxx< bp>
	      |______EXPR____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer by 'bp' (carrying the bits over as necessary).
     |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
					  <------bitlen---->< bp>
     Then we clear the destination bits:
     |---00000|00000000|000-----| ptr + first_byte
     <-------bitlen--->< bp>

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.

     BIG-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
     ptr + first_byte |--------|--------|--------|
			   ^              ^
			   <bp >xxx xxxxxxxx xxx
				|_____EXPR_____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer to the right by (carrying the bits over as necessary).
     We shift by as much as needed to align the most significant bit of EXPR
     with bitpos:
     |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
      <---bitlen---->           <bp ><-----bitlen----->
     Then we clear the destination bits:
     ptr + first_byte |-----000||00000000||00000---|
		      <bp ><-------bitlen----->

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
     The awkwardness comes from the fact that bitpos is counted from the
     most significant bit of a byte.  */

  /* We must be dealing with fixed-size data at this point, since the
     total size is also fixed.  */
  unsigned int byte_size;
  if (empty_ctor_p)
    {
      unsigned HOST_WIDE_INT rhs_bytes
	= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
      if (rhs_bytes > total_bytes)
	return false;
      byte_size = rhs_bytes;
    }
  else
    {
      fixed_size_mode mode
	= as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
      byte_size
	= mode == BLKmode
	  ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
	  : GET_MODE_SIZE (mode);
    }
  /* Allocate an extra byte so that we have space to shift into.  */
  byte_size++;
  unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
  memset (tmpbuf, '\0', byte_size);
  /* The store detection code should only have allowed constants that are
     accepted by native_encode_expr or empty ctors.  */
  if (!empty_ctor_p
      && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
    gcc_unreachable ();

  /* The native_encode_expr machinery uses TYPE_MODE to determine how many
     bytes to write.  This means it can write more than
     ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
     write 8 bytes for a bitlen of 40).  Skip the bytes that are not within
     bitlen and zero out the bits that are not relevant as well (that may
     contain a sign bit due to sign-extension).  */
  unsigned int padding
    = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    tmpbuf += padding;

  byte_size -= padding;

  if (bitlen % BITS_PER_UNIT != 0)
    {
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
			     BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
      else
	clear_bit_region (tmpbuf, bitlen,
			  byte_size * BITS_PER_UNIT - bitlen);
    }
  /* Left shifting relies on the last byte being clear if bitlen is
     a multiple of BITS_PER_UNIT, which might not be clear if
     there are padding bytes.  */
  else if (!BYTES_BIG_ENDIAN)
    tmpbuf[byte_size - 1] = '\0';

  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted into.  */
  if (BYTES_BIG_ENDIAN)
    clear_bit_region_be (ptr + first_byte,
			 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
  else
    clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);

  int shift_amnt;
  int bitlen_mod = bitlen % BITS_PER_UNIT;
  int bitpos_mod = bitpos % BITS_PER_UNIT;

  bool skip_byte = false;
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS and BITLEN are exactly aligned and no shifting
	 is necessary.  */
      if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
	  || (bitpos_mod == 0 && bitlen_mod == 0))
	shift_amnt = 0;
      /* |. . . . . . . .|
	  <bp >   <blen >.
	 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
	 of the value until it aligns with 'bp' in the next byte over.  */
      else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
	{
	  shift_amnt = bitlen_mod + bitpos_mod;
	  skip_byte = bitlen_mod != 0;
	}
      /* |. . . . . . . .|
	  <----bp--->
	   <---blen---->.
	 Shift the value right within the same byte so it aligns with 'bp'.  */
      else
	shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
    }
  else
    shift_amnt = bitpos % BITS_PER_UNIT;

  /* Create the shifted version of EXPR.  */
  if (!BYTES_BIG_ENDIAN)
    {
      shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
      if (shift_amnt == 0)
	byte_size--;
    }
  else
    {
      gcc_assert (BYTES_BIG_ENDIAN);
      shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
      /* If shifting right forced us to move into the next byte skip the now
	 empty byte.  */
      if (skip_byte)
	{
	  tmpbuf++;
	  byte_size--;
	}
    }
1762 /* Insert the bits from TMPBUF. */
1763 for (unsigned int i = 0; i < byte_size; i++)
1764 ptr[first_byte + i] |= tmpbuf[i];
1765
1766 return true;
1767}
1768
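/* A minimal standalone sketch (not part of this pass) of the little-endian
   placement documented above: a BITLEN-bit value is written at bit offset
   BITPOS of a byte buffer by clearing the destination bits and OR-ing in
   the shifted source bits.  write_bits_le is a hypothetical helper used
   only for illustration; it assumes 8-bit bytes.

   #include <stdio.h>
   #include <string.h>

   static void
   write_bits_le (unsigned char *buf, unsigned long long val,
                  unsigned int bitpos, unsigned int bitlen)
   {
     for (unsigned int i = 0; i < bitlen; i++)
       {
         unsigned int pos = bitpos + i;
         buf[pos / 8] &= ~(1u << (pos % 8));             // Clear destination bit.
         buf[pos / 8] |= ((val >> i) & 1) << (pos % 8);  // OR in the source bit.
       }
   }

   int
   main (void)
   {
     unsigned char buf[4];
     memset (buf, 0, sizeof buf);
     write_bits_le (buf, 0xABC, 4, 12);   // 12-bit constant at bit offset 4.
     printf ("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
     // Prints "c0 ab 00 00": the value occupies bits 4..15 of the buffer.
     return 0;
   }  */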
1769/* Sorting function for store_immediate_info objects.
1770 Sorts them by bitposition. */
1771
1772static int
1773sort_by_bitpos (const void *x, const void *y)
1774{
1775 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1776 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1777
109cca3b 1778 if ((*tmp)->bitpos < (*tmp2)->bitpos)
f663d9ad
KT
1779 return -1;
1780 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1781 return 1;
109cca3b 1782 else
0f0027d1
KT
1783      /* If the bit positions are the same, fall back on the order, which is
1784	 guaranteed to be different.  */
1785 return (*tmp)->order - (*tmp2)->order;
f663d9ad
KT
1786}
1787
1788/* Sorting function for store_immediate_info objects.
1789 Sorts them by the order field. */
1790
1791static int
1792sort_by_order (const void *x, const void *y)
1793{
1794 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1795 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1796
1797 if ((*tmp)->order < (*tmp2)->order)
1798 return -1;
1799 else if ((*tmp)->order > (*tmp2)->order)
1800 return 1;
1801
1802 gcc_unreachable ();
1803}
1804
1805/* Initialize a merged_store_group object from a store_immediate_info
1806 object. */
1807
1808merged_store_group::merged_store_group (store_immediate_info *info)
1809{
1810 start = info->bitpos;
1811 width = info->bitsize;
a62b3dc5
JJ
1812 bitregion_start = info->bitregion_start;
1813 bitregion_end = info->bitregion_end;
f663d9ad
KT
1814 /* VAL has memory allocated for it in apply_stores once the group
1815 width has been finalized. */
1816 val = NULL;
a62b3dc5 1817 mask = NULL;
e362a897
EB
1818 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1819 string_concatenation = info->rhs_code == STRING_CST;
18e0c3d1
JJ
1820 only_constants = info->rhs_code == INTEGER_CST;
1821 first_nonmergeable_order = ~0U;
629387a6 1822 lp_nr = info->lp_nr;
a62b3dc5
JJ
1823 unsigned HOST_WIDE_INT align_bitpos = 0;
1824 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1825 &align, &align_bitpos);
1826 align_base = start - align_bitpos;
245f6de1
JJ
1827 for (int i = 0; i < 2; ++i)
1828 {
1829 store_operand_info &op = info->ops[i];
1830 if (op.base_addr == NULL_TREE)
1831 {
1832 load_align[i] = 0;
1833 load_align_base[i] = 0;
1834 }
1835 else
1836 {
1837 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1838 load_align_base[i] = op.bitpos - align_bitpos;
1839 }
1840 }
f663d9ad
KT
1841 stores.create (1);
1842 stores.safe_push (info);
1843 last_stmt = info->stmt;
1844 last_order = info->order;
1845 first_stmt = last_stmt;
1846 first_order = last_order;
1847 buf_size = 0;
1848}
1849
1850merged_store_group::~merged_store_group ()
1851{
1852 if (val)
1853 XDELETEVEC (val);
1854}
1855
7f5a3982
EB
1856/* Return true if the store described by INFO can be merged into the group. */
1857
1858bool
1859merged_store_group::can_be_merged_into (store_immediate_info *info)
1860{
1861 /* Do not merge bswap patterns. */
1862 if (info->rhs_code == LROTATE_EXPR)
1863 return false;
1864
629387a6
EB
1865 if (info->lp_nr != lp_nr)
1866 return false;
1867
7f5a3982
EB
1868 /* The canonical case. */
1869 if (info->rhs_code == stores[0]->rhs_code)
1870 return true;
1871
e362a897 1872 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
7f5a3982 1873 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
e362a897 1874 return !string_concatenation;
7f5a3982
EB
1875
1876 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
e362a897 1877 return !string_concatenation;
7f5a3982 1878
ed01d707
EB
1879 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
1880 only for small regions since this can generate a lot of instructions. */
7f5a3982
EB
1881 if (info->rhs_code == MEM_REF
1882 && (stores[0]->rhs_code == INTEGER_CST
1883 || stores[0]->rhs_code == BIT_INSERT_EXPR)
1884 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 1885 && info->bitregion_end == stores[0]->bitregion_end
2815558a 1886 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897 1887 return !string_concatenation;
7f5a3982
EB
1888
1889 if (stores[0]->rhs_code == MEM_REF
1890 && (info->rhs_code == INTEGER_CST
1891 || info->rhs_code == BIT_INSERT_EXPR)
1892 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 1893 && info->bitregion_end == stores[0]->bitregion_end
2815558a 1894 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897
EB
1895 return !string_concatenation;
1896
1897 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
1898 if (info->rhs_code == STRING_CST
1899 && stores[0]->rhs_code == INTEGER_CST
1900 && stores[0]->bitsize == CHAR_BIT)
1901 return !bit_insertion;
1902
1903 if (stores[0]->rhs_code == STRING_CST
1904 && info->rhs_code == INTEGER_CST
1905 && info->bitsize == CHAR_BIT)
1906 return !bit_insertion;
7f5a3982
EB
1907
1908 return false;
1909}
1910
a62b3dc5
JJ
1911/* Helper method for merge_into and merge_overlapping to do
1912 the common part. */
7f5a3982 1913
f663d9ad 1914void
a62b3dc5 1915merged_store_group::do_merge (store_immediate_info *info)
f663d9ad 1916{
a62b3dc5
JJ
1917 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1918 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1919
1920 unsigned int this_align;
1921 unsigned HOST_WIDE_INT align_bitpos = 0;
1922 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1923 &this_align, &align_bitpos);
1924 if (this_align > align)
1925 {
1926 align = this_align;
1927 align_base = info->bitpos - align_bitpos;
1928 }
245f6de1
JJ
1929 for (int i = 0; i < 2; ++i)
1930 {
1931 store_operand_info &op = info->ops[i];
1932 if (!op.base_addr)
1933 continue;
1934
1935 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1936 if (this_align > load_align[i])
1937 {
1938 load_align[i] = this_align;
1939 load_align_base[i] = op.bitpos - align_bitpos;
1940 }
1941 }
f663d9ad 1942
f663d9ad
KT
1943 gimple *stmt = info->stmt;
1944 stores.safe_push (info);
1945 if (info->order > last_order)
1946 {
1947 last_order = info->order;
1948 last_stmt = stmt;
1949 }
1950 else if (info->order < first_order)
1951 {
1952 first_order = info->order;
1953 first_stmt = stmt;
1954 }
e362a897
EB
1955
1956 /* We need to use extraction if there is any bit-field. */
1957 if (info->rhs_code == BIT_INSERT_EXPR)
1958 {
1959 bit_insertion = true;
1960 gcc_assert (!string_concatenation);
1961 }
1962
1963 /* We need to use concatenation if there is any string. */
1964 if (info->rhs_code == STRING_CST)
1965 {
1966 string_concatenation = true;
1967 gcc_assert (!bit_insertion);
1968 }
1969
18e0c3d1
JJ
1970 if (info->rhs_code != INTEGER_CST)
1971 only_constants = false;
f663d9ad
KT
1972}
1973
a62b3dc5
JJ
1974/* Merge a store recorded by INFO into this merged store.
1975 The store is not overlapping with the existing recorded
1976 stores. */
1977
1978void
1979merged_store_group::merge_into (store_immediate_info *info)
1980{
a62b3dc5
JJ
1981 /* Make sure we're inserting in the position we think we're inserting. */
1982 gcc_assert (info->bitpos >= start + width
1983 && info->bitregion_start <= bitregion_end);
1984
c5679c37 1985 width = info->bitpos + info->bitsize - start;
a62b3dc5
JJ
1986 do_merge (info);
1987}
1988
f663d9ad
KT
1989/* Merge a store described by INFO into this merged store.
1990 INFO overlaps in some way with the current store (i.e. it's not contiguous
1991 which is handled by merged_store_group::merge_into). */
1992
1993void
1994merged_store_group::merge_overlapping (store_immediate_info *info)
1995{
f663d9ad 1996 /* If the store extends the size of the group, extend the width. */
a62b3dc5 1997 if (info->bitpos + info->bitsize > start + width)
c5679c37 1998 width = info->bitpos + info->bitsize - start;
f663d9ad 1999
a62b3dc5 2000 do_merge (info);
f663d9ad
KT
2001}
2002
2003/* Go through all the recorded stores in this group in program order and
2004 apply their values to the VAL byte array to create the final merged
2005 value. Return true if the operation succeeded. */
2006
2007bool
2008merged_store_group::apply_stores ()
2009{
e362a897
EB
2010 store_immediate_info *info;
2011 unsigned int i;
2012
a62b3dc5
JJ
2013 /* Make sure we have more than one store in the group, otherwise we cannot
2014 merge anything. */
2015 if (bitregion_start % BITS_PER_UNIT != 0
2016 || bitregion_end % BITS_PER_UNIT != 0
f663d9ad
KT
2017 || stores.length () == 1)
2018 return false;
2019
e362a897
EB
2020 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2021
2022 /* Really do string concatenation for large strings only. */
2023 if (buf_size <= MOVE_MAX)
2024 string_concatenation = false;
2025
c94c3532 2026 /* Create a power-of-2-sized buffer for native_encode_expr. */
e362a897
EB
2027 if (!string_concatenation)
2028 buf_size = 1 << ceil_log2 (buf_size);
2029
a62b3dc5
JJ
2030 val = XNEWVEC (unsigned char, 2 * buf_size);
2031 mask = val + buf_size;
2032 memset (val, 0, buf_size);
2033 memset (mask, ~0U, buf_size);
f663d9ad 2034
e362a897
EB
2035 stores.qsort (sort_by_order);
2036
f663d9ad
KT
2037 FOR_EACH_VEC_ELT (stores, i, info)
2038 {
a62b3dc5 2039 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
c94c3532 2040 tree cst;
245f6de1
JJ
2041 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2042 cst = info->ops[0].val;
2043 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2044 cst = info->ops[1].val;
c94c3532
EB
2045 else
2046 cst = NULL_TREE;
245f6de1 2047 bool ret = true;
e362a897
EB
2048 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2049 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2050 buf_size);
c94c3532
EB
2051 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2052 if (BYTES_BIG_ENDIAN)
2053 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2054 - (pos_in_buffer % BITS_PER_UNIT)),
2055 info->bitsize);
2056 else
2057 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
245f6de1 2058 if (cst && dump_file && (dump_flags & TDF_DETAILS))
f663d9ad
KT
2059 {
2060 if (ret)
2061 {
c94c3532 2062 fputs ("After writing ", dump_file);
4af78ef8 2063 print_generic_expr (dump_file, cst, TDF_NONE);
f663d9ad 2064 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
c94c3532
EB
2065 " at position %d\n", info->bitsize, pos_in_buffer);
2066 fputs (" the merged value contains ", dump_file);
f663d9ad 2067 dump_char_array (dump_file, val, buf_size);
c94c3532
EB
2068 fputs (" the merged mask contains ", dump_file);
2069 dump_char_array (dump_file, mask, buf_size);
2070 if (bit_insertion)
2071 fputs (" bit insertion is required\n", dump_file);
e362a897
EB
2072 if (string_concatenation)
2073 fputs (" string concatenation is required\n", dump_file);
f663d9ad
KT
2074 }
2075 else
2076 fprintf (dump_file, "Failed to merge stores\n");
4b84d9b8 2077 }
f663d9ad
KT
2078 if (!ret)
2079 return false;
2080 }
4b84d9b8 2081 stores.qsort (sort_by_bitpos);
f663d9ad
KT
2082 return true;
2083}
2084
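/* A minimal sketch (outside the pass) of the VAL/MASK pair that
   apply_stores builds above, assuming a 4-byte bitregion with two
   recorded 1-byte constant stores at byte offsets 0 and 2: VAL
   accumulates the encoded constants while MASK keeps all-ones bits
   exactly where no recorded store writes, so later code can tell which
   bits of the region the group leaves untouched.

   #include <stdio.h>
   #include <string.h>

   int
   main (void)
   {
     unsigned char val[4], mask[4];
     memset (val, 0, sizeof val);        // Merged value starts as zeros.
     memset (mask, 0xff, sizeof mask);   // All bits initially "not stored".

     val[0] = 0x12; mask[0] = 0;         // Constant store of 0x12 at byte 0.
     val[2] = 0x34; mask[2] = 0;         // Constant store of 0x34 at byte 2.

     for (int i = 0; i < 4; i++)
       printf ("byte %d: val %02x mask %02x\n", i, val[i], mask[i]);
     return 0;
   }  */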
2085/* Structure describing the store chain. */
2086
6c1dae73 2087class imm_store_chain_info
f663d9ad 2088{
6c1dae73 2089public:
50b6d676
AO
2090 /* Doubly-linked list that imposes an order on chain processing.
2091 PNXP (prev's next pointer) points to the head of a list, or to
2092 the next field in the previous chain in the list.
2093 See pass_store_merging::m_stores_head for more rationale. */
2094 imm_store_chain_info *next, **pnxp;
b5926e23 2095 tree base_addr;
a62b3dc5 2096 auto_vec<store_immediate_info *> m_store_info;
f663d9ad
KT
2097 auto_vec<merged_store_group *> m_merged_store_groups;
2098
50b6d676
AO
2099 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2100 : next (inspt), pnxp (&inspt), base_addr (b_a)
2101 {
2102 inspt = this;
2103 if (next)
2104 {
2105 gcc_checking_assert (pnxp == next->pnxp);
2106 next->pnxp = &next;
2107 }
2108 }
2109 ~imm_store_chain_info ()
2110 {
2111 *pnxp = next;
2112 if (next)
2113 {
2114 gcc_checking_assert (&next == next->pnxp);
2115 next->pnxp = pnxp;
2116 }
2117 }
b5926e23 2118 bool terminate_and_process_chain ();
bd909071
JJ
2119 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2120 unsigned int);
f663d9ad 2121 bool coalesce_immediate_stores ();
b5926e23
RB
2122 bool output_merged_store (merged_store_group *);
2123 bool output_merged_stores ();
f663d9ad
KT
2124};
2125
2126const pass_data pass_data_tree_store_merging = {
2127 GIMPLE_PASS, /* type */
2128 "store-merging", /* name */
2129 OPTGROUP_NONE, /* optinfo_flags */
2130 TV_GIMPLE_STORE_MERGING, /* tv_id */
2131 PROP_ssa, /* properties_required */
2132 0, /* properties_provided */
2133 0, /* properties_destroyed */
2134 0, /* todo_flags_start */
2135 TODO_update_ssa, /* todo_flags_finish */
2136};
2137
2138class pass_store_merging : public gimple_opt_pass
2139{
2140public:
2141 pass_store_merging (gcc::context *ctxt)
faec5f24 2142 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
f663d9ad
KT
2143 {
2144 }
2145
c94c3532
EB
2146 /* Pass not supported for PDP-endian, nor for insane hosts or
2147 target character sizes where native_{encode,interpret}_expr
a62b3dc5 2148 doesn't work properly. */
f663d9ad
KT
2149 virtual bool
2150 gate (function *)
2151 {
a62b3dc5 2152 return flag_store_merging
c94c3532 2153 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
a62b3dc5
JJ
2154 && CHAR_BIT == 8
2155 && BITS_PER_UNIT == 8;
f663d9ad
KT
2156 }
2157
2158 virtual unsigned int execute (function *);
2159
2160private:
99b1c316 2161 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
f663d9ad 2162
50b6d676
AO
2163 /* Form a doubly-linked stack of the elements of m_stores, so that
2164 we can iterate over them in a predictable way. Using this order
2165 avoids extraneous differences in the compiler output just because
2166 of tree pointer variations (e.g. different chains end up in
2167 different positions of m_stores, so they are handled in different
2168 orders, so they allocate or release SSA names in different
2169 orders, and when they get reused, subsequent passes end up
2170 getting different SSA names, which may ultimately change
2171 decisions when going out of SSA). */
2172 imm_store_chain_info *m_stores_head;
2173
629387a6
EB
2174 bool process_store (gimple *);
2175 bool terminate_and_process_chain (imm_store_chain_info *);
383ac8dc 2176 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
629387a6 2177 bool terminate_and_process_all_chains ();
f663d9ad
KT
2178}; // class pass_store_merging
2179
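/* Usage note (a sketch, not an authoritative reference): the gate above
   requires -fstore-merging, which the usual optimization levels such as
   -O2 enable, and the diagnostics printed under dump_file in this file
   can be inspected by requesting the pass dump, e.g.

     gcc -O2 -fdump-tree-store-merging-details foo.c

   where "store-merging" matches the pass name in pass_data above and
   foo.c is a placeholder input file.  */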
2180/* Terminate and process all recorded chains. Return true if any changes
2181 were made. */
2182
2183bool
2184pass_store_merging::terminate_and_process_all_chains ()
2185{
f663d9ad 2186 bool ret = false;
50b6d676 2187 while (m_stores_head)
629387a6 2188 ret |= terminate_and_process_chain (m_stores_head);
b119c055 2189 gcc_assert (m_stores.is_empty ());
f663d9ad
KT
2190 return ret;
2191}
2192
383ac8dc
JJ
2193/* Terminate all chains that are affected by the statement STMT.
2194 CHAIN_INFO is the chain we should ignore from the checks if
629387a6 2195 non-NULL. Return true if any changes were made. */
f663d9ad
KT
2196
2197bool
20770eb8 2198pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
b5926e23 2199 **chain_info,
f663d9ad
KT
2200 gimple *stmt)
2201{
2202 bool ret = false;
2203
2204 /* If the statement doesn't touch memory it can't alias. */
2205 if (!gimple_vuse (stmt))
2206 return false;
2207
9e875fd8 2208 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
6b412bf6
RB
2209 ao_ref store_lhs_ref;
2210 ao_ref_init (&store_lhs_ref, store_lhs);
383ac8dc 2211 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
f663d9ad 2212 {
383ac8dc
JJ
2213 next = cur->next;
2214
2215 /* We already checked all the stores in chain_info and terminated the
2216 chain if necessary. Skip it here. */
2217 if (chain_info && *chain_info == cur)
2218 continue;
2219
245f6de1
JJ
2220 store_immediate_info *info;
2221 unsigned int i;
383ac8dc 2222 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
f663d9ad 2223 {
9e875fd8 2224 tree lhs = gimple_assign_lhs (info->stmt);
6b412bf6
RB
2225 ao_ref lhs_ref;
2226 ao_ref_init (&lhs_ref, lhs);
2227 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2228 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2229 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2230 &lhs_ref, false)))
f663d9ad 2231 {
245f6de1 2232 if (dump_file && (dump_flags & TDF_DETAILS))
f663d9ad 2233 {
245f6de1
JJ
2234 fprintf (dump_file, "stmt causes chain termination:\n");
2235 print_gimple_stmt (dump_file, stmt, 0);
f663d9ad 2236 }
629387a6 2237 ret |= terminate_and_process_chain (cur);
245f6de1 2238 break;
f663d9ad
KT
2239 }
2240 }
2241 }
2242
f663d9ad
KT
2243 return ret;
2244}
2245
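/* A hedged source-level illustration of chain termination (a sketch with
   made-up names, not a testsuite case): the call below may read or clobber
   *p, so the chain recording the first two stores has to be terminated and
   processed before it, and the last two stores start a new chain.

   void use (int *);

   void
   f (int *p)
   {
     p[0] = 1;
     p[1] = 2;
     use (p);    // Aliasing statement: terminates the recorded chain.
     p[2] = 3;
     p[3] = 4;
   }  */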
2246/* Helper function.  Terminate the recorded chain described by CHAIN_INFO.
2247   Return true if the merging and output was successful.  The m_stores
2248 entry is removed after the processing in any case. */
2249
2250bool
629387a6 2251pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
f663d9ad 2252{
b5926e23
RB
2253 bool ret = chain_info->terminate_and_process_chain ();
2254 m_stores.remove (chain_info->base_addr);
2255 delete chain_info;
f663d9ad
KT
2256 return ret;
2257}
2258
245f6de1 2259/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
629387a6
EB
2260 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2261 be able to sink load of REF across stores between FIRST and LAST, up
2262 to right before LAST. */
245f6de1
JJ
2263
2264bool
2265stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2266{
2267 ao_ref r;
2268 ao_ref_init (&r, ref);
2269 unsigned int count = 0;
2270 tree vop = gimple_vdef (last);
2271 gimple *stmt;
2272
629387a6
EB
2273 /* Return true conservatively if the basic blocks are different. */
2274 if (gimple_bb (first) != gimple_bb (last))
2275 return true;
2276
245f6de1
JJ
2277 do
2278 {
2279 stmt = SSA_NAME_DEF_STMT (vop);
2280 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2281 return true;
4b84d9b8
JJ
2282 if (gimple_store_p (stmt)
2283 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2284 return true;
245f6de1
JJ
2285 /* Avoid quadratic compile time by bounding the number of checks
2286 we perform. */
2287 if (++count > MAX_STORE_ALIAS_CHECKS)
2288 return true;
2289 vop = gimple_vuse (stmt);
2290 }
2291 while (stmt != first);
629387a6 2292
245f6de1
JJ
2293 return false;
2294}
2295
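/* A hedged illustration of the property checked above (a sketch with
   made-up names; whether a merge actually happens also depends on the
   other checks in this file): widening the two copies below into a single
   load plus a single store effectively sinks the load of q->a past the
   store to t->a, which is only valid when no store in between may clobber
   what is loaded; the __restrict qualifiers help the compiler see that
   the stores through t cannot clobber *q.

   struct pair { int a, b; };

   void
   copy (struct pair *__restrict t, const struct pair *__restrict q)
   {
     t->a = q->a;
     t->b = q->b;
   }  */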
2296/* Return true if INFO->ops[IDX] is mergeable with the
2297 corresponding loads already in MERGED_STORE group.
2298 BASE_ADDR is the base address of the whole store group. */
2299
2300bool
2301compatible_load_p (merged_store_group *merged_store,
2302 store_immediate_info *info,
2303 tree base_addr, int idx)
2304{
2305 store_immediate_info *infof = merged_store->stores[0];
2306 if (!info->ops[idx].base_addr
8a91d545
RS
2307 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2308 info->bitpos - infof->bitpos)
245f6de1
JJ
2309 || !operand_equal_p (info->ops[idx].base_addr,
2310 infof->ops[idx].base_addr, 0))
2311 return false;
2312
2313 store_immediate_info *infol = merged_store->stores.last ();
2314 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2315 /* In this case all vuses should be the same, e.g.
2316 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2317 or
2318 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2319 and we can emit the coalesced load next to any of those loads. */
2320 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2321 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2322 return true;
2323
2324 /* Otherwise, at least for now require that the load has the same
2325 vuse as the store. See following examples. */
2326 if (gimple_vuse (info->stmt) != load_vuse)
2327 return false;
2328
2329 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2330 || (infof != infol
2331 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2332 return false;
2333
2334 /* If the load is from the same location as the store, already
2335 the construction of the immediate chain info guarantees no intervening
2336 stores, so no further checks are needed. Example:
2337 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
8a91d545 2338 if (known_eq (info->ops[idx].bitpos, info->bitpos)
245f6de1
JJ
2339 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2340 return true;
2341
2342 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2343 of the stores in the group, or any other stores in between those.
2344 Previous calls to compatible_load_p ensured that for all the
2345 merged_store->stores IDX loads, no stmts starting with
2346 merged_store->first_stmt and ending right before merged_store->last_stmt
2347 clobbers those loads. */
2348 gimple *first = merged_store->first_stmt;
2349 gimple *last = merged_store->last_stmt;
2350 unsigned int i;
2351 store_immediate_info *infoc;
2352 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2353 comes before the so far first load, we'll be changing
2354 merged_store->first_stmt. In that case we need to give up if
2355 any of the earlier processed loads clobber with the stmts in the new
2356 range. */
2357 if (info->order < merged_store->first_order)
2358 {
2359 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2360 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2361 return false;
2362 first = info->stmt;
2363 }
2364 /* Similarly, we could change merged_store->last_stmt, so ensure
2365 in that case no stmts in the new range clobber any of the earlier
2366 processed loads. */
2367 else if (info->order > merged_store->last_order)
2368 {
2369 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2370 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2371 return false;
2372 last = info->stmt;
2373 }
2374 /* And finally, we'd be adding a new load to the set, ensure it isn't
2375 clobbered in the new range. */
2376 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2377 return false;
2378
2379 /* Otherwise, we are looking for:
2380 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2381 or
2382 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2383 return true;
2384}
2385
4b84d9b8
JJ
2386/* Add all refs loaded to compute VAL to REFS vector. */
2387
2388void
2389gather_bswap_load_refs (vec<tree> *refs, tree val)
2390{
2391 if (TREE_CODE (val) != SSA_NAME)
2392 return;
2393
2394 gimple *stmt = SSA_NAME_DEF_STMT (val);
2395 if (!is_gimple_assign (stmt))
2396 return;
2397
2398 if (gimple_assign_load_p (stmt))
2399 {
2400 refs->safe_push (gimple_assign_rhs1 (stmt));
2401 return;
2402 }
2403
2404 switch (gimple_assign_rhs_class (stmt))
2405 {
2406 case GIMPLE_BINARY_RHS:
2407 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2408 /* FALLTHRU */
2409 case GIMPLE_UNARY_RHS:
2410 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2411 break;
2412 default:
2413 gcc_unreachable ();
2414 }
2415}
2416
c5679c37
JJ
2417/* Check if there are any stores in M_STORE_INFO after index I
2418 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2419 a potential group ending with END that have their order
4d213bf6
JJ
2420 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2421 all the stores already merged and the one under consideration
2422 have rhs_code of INTEGER_CST. Return true if there are no such stores.
c5679c37
JJ
2423 Consider:
2424 MEM[(long long int *)p_28] = 0;
2425 MEM[(long long int *)p_28 + 8B] = 0;
2426 MEM[(long long int *)p_28 + 16B] = 0;
2427 MEM[(long long int *)p_28 + 24B] = 0;
2428 _129 = (int) _130;
2429 MEM[(int *)p_28 + 8B] = _129;
2430 MEM[(int *)p_28].a = -1;
2431 We already have
2432 MEM[(long long int *)p_28] = 0;
2433 MEM[(int *)p_28].a = -1;
2434 stmts in the current group and need to consider if it is safe to
2435 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2436 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2437 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2438 into the group and merging of those 3 stores is successful, merged
2439 stmts will be emitted at the latest store from that group, i.e.
2440 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2441 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2442 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2443 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2444 into the group. That way it will be its own store group and will
4d213bf6 2445 not be touched. If ALL_INTEGER_CST_P and there are overlapping
c5679c37 2446 INTEGER_CST stores, those are mergeable using merge_overlapping,
bd909071
JJ
2447 so don't return false for those.
2448
2449 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2450 (exclusive), whether they don't overlap the bitrange START to END
2451 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2452 prevent merging in cases like:
2453 MEM <char[12]> [&b + 8B] = {};
2454 MEM[(short *) &b] = 5;
2455 _5 = *x_4(D);
2456 MEM <long long unsigned int> [&b + 2B] = _5;
2457 MEM[(char *)&b + 16B] = 88;
2458 MEM[(int *)&b + 20B] = 1;
2459 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2460 be merged with it, because the = _5 store overlaps these and is in between
2461 them in sort_by_order ordering. If it was merged, the merged store would
2462 go after the = _5 store and thus change behavior. */
c5679c37
JJ
2463
2464static bool
2465check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
bd909071
JJ
2466 bool all_integer_cst_p, unsigned int first_order,
2467 unsigned int last_order, unsigned HOST_WIDE_INT start,
2468 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2469 unsigned end_earlier)
c5679c37
JJ
2470{
2471 unsigned int len = m_store_info.length ();
bd909071
JJ
2472 for (unsigned int j = first_earlier; j < end_earlier; j++)
2473 {
2474 store_immediate_info *info = m_store_info[j];
2475 if (info->order > first_order
2476 && info->order < last_order
2477 && info->bitpos + info->bitsize > start)
2478 return false;
2479 }
c5679c37
JJ
2480 for (++i; i < len; ++i)
2481 {
2482 store_immediate_info *info = m_store_info[i];
2483 if (info->bitpos >= end)
2484 break;
2485 if (info->order < last_order
4d213bf6 2486 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
c5679c37
JJ
2487 return false;
2488 }
2489 return true;
2490}
2491
4b84d9b8
JJ
2492/* Return true if m_store_info[first] and at least one following store
2493   form a group that stores a TRY_SIZE-bit value which is byte swapped
2494   from a memory load or some value, or is the identity of some value.
2495 This uses the bswap pass APIs. */
2496
2497bool
2498imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2499 unsigned int first,
bd909071
JJ
2500 unsigned int try_size,
2501 unsigned int first_earlier)
4b84d9b8
JJ
2502{
2503 unsigned int len = m_store_info.length (), last = first;
2504 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2505 if (width >= try_size)
2506 return false;
2507 for (unsigned int i = first + 1; i < len; ++i)
2508 {
2509 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
cb76fcd7 2510 || m_store_info[i]->lp_nr != merged_store->lp_nr
4b84d9b8
JJ
2511 || m_store_info[i]->ins_stmt == NULL)
2512 return false;
2513 width += m_store_info[i]->bitsize;
2514 if (width >= try_size)
2515 {
2516 last = i;
2517 break;
2518 }
2519 }
2520 if (width != try_size)
2521 return false;
2522
2523 bool allow_unaligned
028d4092 2524 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
4b84d9b8
JJ
2525 /* Punt if the combined store would not be aligned and we need alignment. */
2526 if (!allow_unaligned)
2527 {
2528 unsigned int align = merged_store->align;
2529 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2530 for (unsigned int i = first + 1; i <= last; ++i)
2531 {
2532 unsigned int this_align;
2533 unsigned HOST_WIDE_INT align_bitpos = 0;
2534 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2535 &this_align, &align_bitpos);
2536 if (this_align > align)
2537 {
2538 align = this_align;
2539 align_base = m_store_info[i]->bitpos - align_bitpos;
2540 }
2541 }
2542 unsigned HOST_WIDE_INT align_bitpos
2543 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2544 if (align_bitpos)
2545 align = least_bit_hwi (align_bitpos);
2546 if (align < try_size)
2547 return false;
2548 }
2549
2550 tree type;
2551 switch (try_size)
2552 {
2553 case 16: type = uint16_type_node; break;
2554 case 32: type = uint32_type_node; break;
2555 case 64: type = uint64_type_node; break;
2556 default: gcc_unreachable ();
2557 }
2558 struct symbolic_number n;
2559 gimple *ins_stmt = NULL;
2560 int vuse_store = -1;
2561 unsigned int first_order = merged_store->first_order;
2562 unsigned int last_order = merged_store->last_order;
2563 gimple *first_stmt = merged_store->first_stmt;
2564 gimple *last_stmt = merged_store->last_stmt;
c5679c37 2565 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
4b84d9b8
JJ
2566 store_immediate_info *infof = m_store_info[first];
2567
2568 for (unsigned int i = first; i <= last; ++i)
2569 {
2570 store_immediate_info *info = m_store_info[i];
2571 struct symbolic_number this_n = info->n;
2572 this_n.type = type;
2573 if (!this_n.base_addr)
2574 this_n.range = try_size / BITS_PER_UNIT;
30fa8e9c
JJ
2575 else
2576 /* Update vuse in case it has changed by output_merged_stores. */
2577 this_n.vuse = gimple_vuse (info->ins_stmt);
4b84d9b8
JJ
2578 unsigned int bitpos = info->bitpos - infof->bitpos;
2579 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2580 BYTES_BIG_ENDIAN
2581 ? try_size - info->bitsize - bitpos
2582 : bitpos))
2583 return false;
aa11164a 2584 if (this_n.base_addr && vuse_store)
4b84d9b8
JJ
2585 {
2586 unsigned int j;
2587 for (j = first; j <= last; ++j)
2588 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2589 break;
2590 if (j > last)
2591 {
2592 if (vuse_store == 1)
2593 return false;
2594 vuse_store = 0;
2595 }
2596 }
2597 if (i == first)
2598 {
2599 n = this_n;
2600 ins_stmt = info->ins_stmt;
2601 }
2602 else
2603 {
c5679c37 2604 if (n.base_addr && n.vuse != this_n.vuse)
4b84d9b8 2605 {
c5679c37
JJ
2606 if (vuse_store == 0)
2607 return false;
2608 vuse_store = 1;
4b84d9b8 2609 }
c5679c37
JJ
2610 if (info->order > last_order)
2611 {
2612 last_order = info->order;
2613 last_stmt = info->stmt;
2614 }
2615 else if (info->order < first_order)
2616 {
2617 first_order = info->order;
2618 first_stmt = info->stmt;
2619 }
2620 end = MAX (end, info->bitpos + info->bitsize);
4b84d9b8
JJ
2621
2622 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2623 &this_n, &n);
2624 if (ins_stmt == NULL)
2625 return false;
2626 }
2627 }
2628
2629 uint64_t cmpxchg, cmpnop;
2630 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2631
2632  /* A complete byte swap should make the symbolic number start with
2633     the largest digit in the highest order byte.  An unchanged symbolic
2634     number indicates a read with the same endianness as the target
2635     architecture.  */
2635 if (n.n != cmpnop && n.n != cmpxchg)
2636 return false;
2637
2638 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2639 return false;
2640
bd909071
JJ
2641 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2642 merged_store->start, end, first_earlier, first))
c5679c37
JJ
2643 return false;
2644
4b84d9b8
JJ
2645 /* Don't handle memory copy this way if normal non-bswap processing
2646 would handle it too. */
2647 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2648 {
2649 unsigned int i;
2650 for (i = first; i <= last; ++i)
2651 if (m_store_info[i]->rhs_code != MEM_REF)
2652 break;
2653 if (i == last + 1)
2654 return false;
2655 }
2656
2657 if (n.n == cmpxchg)
2658 switch (try_size)
2659 {
2660 case 16:
2661 /* Will emit LROTATE_EXPR. */
2662 break;
2663 case 32:
2664 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2665 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2666 break;
2667 return false;
2668 case 64:
2669 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2670 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2671 break;
2672 return false;
2673 default:
2674 gcc_unreachable ();
2675 }
2676
2677 if (!allow_unaligned && n.base_addr)
2678 {
2679 unsigned int align = get_object_alignment (n.src);
2680 if (align < try_size)
2681 return false;
2682 }
2683
2684 /* If each load has vuse of the corresponding store, need to verify
2685 the loads can be sunk right before the last store. */
2686 if (vuse_store == 1)
2687 {
2688 auto_vec<tree, 64> refs;
2689 for (unsigned int i = first; i <= last; ++i)
2690 gather_bswap_load_refs (&refs,
2691 gimple_assign_rhs1 (m_store_info[i]->stmt));
2692
2693 unsigned int i;
2694 tree ref;
2695 FOR_EACH_VEC_ELT (refs, i, ref)
2696 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2697 return false;
2698 n.vuse = NULL_TREE;
2699 }
2700
2701 infof->n = n;
2702 infof->ins_stmt = ins_stmt;
2703 for (unsigned int i = first; i <= last; ++i)
2704 {
2705 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2706 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2707 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2708 if (i != first)
2709 merged_store->merge_into (m_store_info[i]);
2710 }
2711
2712 return true;
2713}
2714
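/* A hedged example of the kind of group try_coalesce_bswap recognizes
   (a sketch with made-up names, not a testsuite case): the four byte
   stores below copy *q into *p with the bytes reversed, so on a target
   with a 32-bit bswap optab the whole group can be emitted as one load,
   one byte swap and one 32-bit store, subject to the alignment and
   aliasing checks above (the __restrict qualifiers help the latter).

   void
   swap_copy (unsigned char *__restrict p, const unsigned char *__restrict q)
   {
     p[0] = q[3];
     p[1] = q[2];
     p[2] = q[1];
     p[3] = q[0];
   }  */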
f663d9ad
KT
2715/* Go through the candidate stores recorded in m_store_info and merge them
2716 into merged_store_group objects recorded into m_merged_store_groups
2717 representing the widened stores. Return true if coalescing was successful
2718 and the number of widened stores is fewer than the original number
2719 of stores. */
2720
2721bool
2722imm_store_chain_info::coalesce_immediate_stores ()
2723{
2724 /* Anything less can't be processed. */
2725 if (m_store_info.length () < 2)
2726 return false;
2727
2728 if (dump_file && (dump_flags & TDF_DETAILS))
c94c3532 2729 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
f663d9ad
KT
2730 m_store_info.length ());
2731
2732 store_immediate_info *info;
4b84d9b8 2733 unsigned int i, ignore = 0;
bd909071
JJ
2734 unsigned int first_earlier = 0;
2735 unsigned int end_earlier = 0;
f663d9ad
KT
2736
2737 /* Order the stores by the bitposition they write to. */
2738 m_store_info.qsort (sort_by_bitpos);
2739
2740 info = m_store_info[0];
2741 merged_store_group *merged_store = new merged_store_group (info);
c94c3532
EB
2742 if (dump_file && (dump_flags & TDF_DETAILS))
2743 fputs ("New store group\n", dump_file);
f663d9ad
KT
2744
2745 FOR_EACH_VEC_ELT (m_store_info, i, info)
2746 {
3afd514b
JJ
2747 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2748
4b84d9b8 2749 if (i <= ignore)
c94c3532 2750 goto done;
f663d9ad 2751
bd909071
JJ
2752 while (first_earlier < end_earlier
2753 && (m_store_info[first_earlier]->bitpos
2754 + m_store_info[first_earlier]->bitsize
2755 <= merged_store->start))
2756 first_earlier++;
2757
4b84d9b8
JJ
2758 /* First try to handle group of stores like:
2759 p[0] = data >> 24;
2760 p[1] = data >> 16;
2761 p[2] = data >> 8;
2762 p[3] = data;
2763 using the bswap framework. */
2764 if (info->bitpos == merged_store->start + merged_store->width
2765 && merged_store->stores.length () == 1
2766 && merged_store->stores[0]->ins_stmt != NULL
cb76fcd7 2767 && info->lp_nr == merged_store->lp_nr
4b84d9b8
JJ
2768 && info->ins_stmt != NULL)
2769 {
2770 unsigned int try_size;
2771 for (try_size = 64; try_size >= 16; try_size >>= 1)
bd909071
JJ
2772 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2773 first_earlier))
4b84d9b8
JJ
2774 break;
2775
2776 if (try_size >= 16)
2777 {
2778 ignore = i + merged_store->stores.length () - 1;
2779 m_merged_store_groups.safe_push (merged_store);
2780 if (ignore < m_store_info.length ())
bd909071
JJ
2781 {
2782 merged_store = new merged_store_group (m_store_info[ignore]);
2783 end_earlier = ignore;
2784 }
4b84d9b8
JJ
2785 else
2786 merged_store = NULL;
c94c3532 2787 goto done;
4b84d9b8
JJ
2788 }
2789 }
2790
3afd514b
JJ
2791 new_bitregion_start
2792 = MIN (merged_store->bitregion_start, info->bitregion_start);
2793 new_bitregion_end
2794 = MAX (merged_store->bitregion_end, info->bitregion_end);
2795
2796 if (info->order >= merged_store->first_nonmergeable_order
2797 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
028d4092 2798 > (unsigned) param_store_merging_max_size))
18e0c3d1
JJ
2799 ;
2800
f663d9ad
KT
2801 /* |---store 1---|
2802 |---store 2---|
4b84d9b8 2803 Overlapping stores. */
18e0c3d1 2804 else if (IN_RANGE (info->bitpos, merged_store->start,
4d213bf6
JJ
2805 merged_store->start + merged_store->width - 1)
2806 /* |---store 1---||---store 2---|
2807 Handle also the consecutive INTEGER_CST stores case here,
2808 as we have here the code to deal with overlaps. */
2809 || (info->bitregion_start <= merged_store->bitregion_end
2810 && info->rhs_code == INTEGER_CST
2811 && merged_store->only_constants
2812 && merged_store->can_be_merged_into (info)))
f663d9ad 2813 {
245f6de1 2814 /* Only allow overlapping stores of constants. */
629387a6
EB
2815 if (info->rhs_code == INTEGER_CST
2816 && merged_store->only_constants
2817 && info->lp_nr == merged_store->lp_nr)
245f6de1 2818 {
bd909071
JJ
2819 unsigned int first_order
2820 = MIN (merged_store->first_order, info->order);
6cd4c66e
JJ
2821 unsigned int last_order
2822 = MAX (merged_store->last_order, info->order);
2823 unsigned HOST_WIDE_INT end
2824 = MAX (merged_store->start + merged_store->width,
2825 info->bitpos + info->bitsize);
bd909071
JJ
2826 if (check_no_overlap (m_store_info, i, true, first_order,
2827 last_order, merged_store->start, end,
2828 first_earlier, end_earlier))
6cd4c66e
JJ
2829 {
2830 /* check_no_overlap call above made sure there are no
2831 overlapping stores with non-INTEGER_CST rhs_code
2832 in between the first and last of the stores we've
2833 just merged. If there are any INTEGER_CST rhs_code
2834 stores in between, we need to merge_overlapping them
2835 even if in the sort_by_bitpos order there are other
2836 overlapping stores in between. Keep those stores as is.
2837 Example:
2838 MEM[(int *)p_28] = 0;
2839 MEM[(char *)p_28 + 3B] = 1;
2840 MEM[(char *)p_28 + 1B] = 2;
2841 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2842 We can't merge the zero store with the store of two and
2843 not merge anything else, because the store of one is
2844 in the original order in between those two, but in
2845			 sort_by_bitpos order it comes after the last store that
2846 we can't merge with them. We can merge the first 3 stores
2847 and keep the last store as is though. */
18e0c3d1
JJ
2848 unsigned int len = m_store_info.length ();
2849 unsigned int try_order = last_order;
2850 unsigned int first_nonmergeable_order;
2851 unsigned int k;
2852 bool last_iter = false;
2853 int attempts = 0;
2854 do
6cd4c66e 2855 {
18e0c3d1 2856 unsigned int max_order = 0;
bd909071 2857 unsigned int min_order = first_order;
18e0c3d1
JJ
2858 unsigned first_nonmergeable_int_order = ~0U;
2859 unsigned HOST_WIDE_INT this_end = end;
2860 k = i;
2861 first_nonmergeable_order = ~0U;
2862 for (unsigned int j = i + 1; j < len; ++j)
6cd4c66e 2863 {
18e0c3d1
JJ
2864 store_immediate_info *info2 = m_store_info[j];
2865 if (info2->bitpos >= this_end)
2866 break;
2867 if (info2->order < try_order)
6cd4c66e 2868 {
4119cd69
JJ
2869 if (info2->rhs_code != INTEGER_CST
2870 || info2->lp_nr != merged_store->lp_nr)
18e0c3d1
JJ
2871 {
2872 /* Normally check_no_overlap makes sure this
2873 doesn't happen, but if end grows below,
2874 then we need to process more stores than
2875 check_no_overlap verified. Example:
2876 MEM[(int *)p_5] = 0;
2877 MEM[(short *)p_5 + 3B] = 1;
2878 MEM[(char *)p_5 + 4B] = _9;
2879 MEM[(char *)p_5 + 2B] = 2; */
2880 k = 0;
2881 break;
2882 }
2883 k = j;
bd909071 2884 min_order = MIN (min_order, info2->order);
18e0c3d1
JJ
2885 this_end = MAX (this_end,
2886 info2->bitpos + info2->bitsize);
6cd4c66e 2887 }
18e0c3d1 2888 else if (info2->rhs_code == INTEGER_CST
4119cd69 2889 && info2->lp_nr == merged_store->lp_nr
18e0c3d1
JJ
2890 && !last_iter)
2891 {
2892 max_order = MAX (max_order, info2->order + 1);
2893 first_nonmergeable_int_order
2894 = MIN (first_nonmergeable_int_order,
2895 info2->order);
2896 }
2897 else
2898 first_nonmergeable_order
2899 = MIN (first_nonmergeable_order, info2->order);
6cd4c66e 2900 }
bd909071
JJ
2901 if (k > i
2902 && !check_no_overlap (m_store_info, len - 1, true,
2903 min_order, try_order,
2904 merged_store->start, this_end,
2905 first_earlier, end_earlier))
2906 k = 0;
18e0c3d1
JJ
2907 if (k == 0)
2908 {
2909 if (last_order == try_order)
2910 break;
2911 /* If this failed, but only because we grew
2912 try_order, retry with the last working one,
2913 so that we merge at least something. */
2914 try_order = last_order;
2915 last_iter = true;
2916 continue;
2917 }
2918 last_order = try_order;
2919 /* Retry with a larger try_order to see if we could
2920 merge some further INTEGER_CST stores. */
2921 if (max_order
2922 && (first_nonmergeable_int_order
2923 < first_nonmergeable_order))
2924 {
2925 try_order = MIN (max_order,
2926 first_nonmergeable_order);
2927 try_order
2928 = MIN (try_order,
2929 merged_store->first_nonmergeable_order);
2930 if (try_order > last_order && ++attempts < 16)
2931 continue;
2932 }
2933 first_nonmergeable_order
2934 = MIN (first_nonmergeable_order,
2935 first_nonmergeable_int_order);
2936 end = this_end;
2937 break;
6cd4c66e 2938 }
18e0c3d1 2939 while (1);
6cd4c66e
JJ
2940
2941 if (k != 0)
2942 {
2943 merged_store->merge_overlapping (info);
2944
18e0c3d1
JJ
2945 merged_store->first_nonmergeable_order
2946 = MIN (merged_store->first_nonmergeable_order,
2947 first_nonmergeable_order);
2948
6cd4c66e
JJ
2949 for (unsigned int j = i + 1; j <= k; j++)
2950 {
2951 store_immediate_info *info2 = m_store_info[j];
2952 gcc_assert (info2->bitpos < end);
2953 if (info2->order < last_order)
2954 {
2955 gcc_assert (info2->rhs_code == INTEGER_CST);
18e0c3d1
JJ
2956 if (info != info2)
2957 merged_store->merge_overlapping (info2);
6cd4c66e
JJ
2958 }
2959 /* Other stores are kept and not merged in any
2960 way. */
2961 }
2962 ignore = k;
2963 goto done;
2964 }
2965 }
245f6de1 2966 }
f663d9ad 2967 }
245f6de1
JJ
2968 /* |---store 1---||---store 2---|
2969 This store is consecutive to the previous one.
2970 Merge it into the current store group. There can be gaps in between
2971 the stores, but there can't be gaps in between bitregions. */
c94c3532 2972 else if (info->bitregion_start <= merged_store->bitregion_end
7f5a3982 2973 && merged_store->can_be_merged_into (info))
f663d9ad 2974 {
245f6de1
JJ
2975 store_immediate_info *infof = merged_store->stores[0];
2976
2977 /* All the rhs_code ops that take 2 operands are commutative,
2978 swap the operands if it could make the operands compatible. */
2979 if (infof->ops[0].base_addr
2980 && infof->ops[1].base_addr
2981 && info->ops[0].base_addr
2982 && info->ops[1].base_addr
8a91d545
RS
2983 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2984 info->bitpos - infof->bitpos)
245f6de1
JJ
2985 && operand_equal_p (info->ops[1].base_addr,
2986 infof->ops[0].base_addr, 0))
127ef369
JJ
2987 {
2988 std::swap (info->ops[0], info->ops[1]);
2989 info->ops_swapped_p = true;
2990 }
4d213bf6 2991 if (check_no_overlap (m_store_info, i, false,
bd909071 2992 MIN (merged_store->first_order, info->order),
a7fe6482 2993 MAX (merged_store->last_order, info->order),
bd909071 2994 merged_store->start,
a7fe6482 2995 MAX (merged_store->start + merged_store->width,
bd909071
JJ
2996 info->bitpos + info->bitsize),
2997 first_earlier, end_earlier))
245f6de1 2998 {
7f5a3982
EB
2999 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3000 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3001 {
3002 info->rhs_code = BIT_INSERT_EXPR;
3003 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3004 info->ops[0].base_addr = NULL_TREE;
3005 }
3006 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3007 {
3008 store_immediate_info *infoj;
3009 unsigned int j;
3010 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
3011 {
3012 infoj->rhs_code = BIT_INSERT_EXPR;
3013 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3014 infoj->ops[0].base_addr = NULL_TREE;
3015 }
e362a897 3016 merged_store->bit_insertion = true;
7f5a3982
EB
3017 }
3018 if ((infof->ops[0].base_addr
3019 ? compatible_load_p (merged_store, info, base_addr, 0)
3020 : !info->ops[0].base_addr)
3021 && (infof->ops[1].base_addr
3022 ? compatible_load_p (merged_store, info, base_addr, 1)
3023 : !info->ops[1].base_addr))
3024 {
3025 merged_store->merge_into (info);
3026 goto done;
3027 }
245f6de1
JJ
3028 }
3029 }
f663d9ad 3030
245f6de1
JJ
3031 /* |---store 1---| <gap> |---store 2---|.
3032 Gap between stores or the rhs not compatible. Start a new group. */
f663d9ad 3033
245f6de1
JJ
3034 /* Try to apply all the stores recorded for the group to determine
3035 the bitpattern they write and discard it if that fails.
3036 This will also reject single-store groups. */
c94c3532 3037 if (merged_store->apply_stores ())
245f6de1 3038 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3039 else
3040 delete merged_store;
f663d9ad 3041
245f6de1 3042 merged_store = new merged_store_group (info);
bd909071 3043 end_earlier = i;
c94c3532
EB
3044 if (dump_file && (dump_flags & TDF_DETAILS))
3045 fputs ("New store group\n", dump_file);
3046
3047 done:
3048 if (dump_file && (dump_flags & TDF_DETAILS))
3049 {
3050 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3051 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3052 i, info->bitsize, info->bitpos);
3053 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3054 fputc ('\n', dump_file);
3055 }
f663d9ad
KT
3056 }
3057
a62b3dc5 3058 /* Record or discard the last store group. */
4b84d9b8
JJ
3059 if (merged_store)
3060 {
c94c3532 3061 if (merged_store->apply_stores ())
4b84d9b8 3062 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3063 else
3064 delete merged_store;
4b84d9b8 3065 }
f663d9ad
KT
3066
3067 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
c94c3532 3068
f663d9ad
KT
3069 bool success
3070 = !m_merged_store_groups.is_empty ()
3071 && m_merged_store_groups.length () < m_store_info.length ();
3072
3073 if (success && dump_file)
c94c3532 3074 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
a62b3dc5 3075 m_merged_store_groups.length ());
f663d9ad
KT
3076
3077 return success;
3078}
3079
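/* A hedged example of the overlap handling above (a sketch with made-up
   names): both assignments below store constants into overlapping bytes
   of the same base, and since overlapping stores are only merged when
   all of them are INTEGER_CST, they can still form one group and be
   emitted as a single 4-byte store, with the later byte store applied on
   top of the earlier zeroing store when the group value is computed.

   struct u { int x; };

   void
   h (struct u *p)
   {
     p->x = 0;                    // 4-byte constant store.
     *(unsigned char *) p = 1;    // Overlapping 1-byte constant store.
   }  */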
245f6de1
JJ
3080/* Return the type to use for the merged stores or loads described by STMTS.
3081 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3082 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3083 of the MEM_REFs if any. */
f663d9ad
KT
3084
3085static tree
245f6de1
JJ
3086get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3087 unsigned short *cliquep, unsigned short *basep)
f663d9ad
KT
3088{
3089 gimple *stmt;
3090 unsigned int i;
245f6de1
JJ
3091 tree type = NULL_TREE;
3092 tree ret = NULL_TREE;
3093 *cliquep = 0;
3094 *basep = 0;
f663d9ad
KT
3095
3096 FOR_EACH_VEC_ELT (stmts, i, stmt)
3097 {
245f6de1
JJ
3098 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3099 : gimple_assign_lhs (stmt);
3100 tree type1 = reference_alias_ptr_type (ref);
3101 tree base = get_base_address (ref);
f663d9ad 3102
245f6de1
JJ
3103 if (i == 0)
3104 {
3105 if (TREE_CODE (base) == MEM_REF)
3106 {
3107 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3108 *basep = MR_DEPENDENCE_BASE (base);
3109 }
3110 ret = type = type1;
3111 continue;
3112 }
f663d9ad 3113 if (!alias_ptr_types_compatible_p (type, type1))
245f6de1
JJ
3114 ret = ptr_type_node;
3115 if (TREE_CODE (base) != MEM_REF
3116 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3117 || *basep != MR_DEPENDENCE_BASE (base))
3118 {
3119 *cliquep = 0;
3120 *basep = 0;
3121 }
f663d9ad 3122 }
245f6de1 3123 return ret;
f663d9ad
KT
3124}
3125
3126/* Return the location_t information we can find among the statements
3127 in STMTS. */
3128
3129static location_t
245f6de1 3130get_location_for_stmts (vec<gimple *> &stmts)
f663d9ad
KT
3131{
3132 gimple *stmt;
3133 unsigned int i;
3134
3135 FOR_EACH_VEC_ELT (stmts, i, stmt)
3136 if (gimple_has_location (stmt))
3137 return gimple_location (stmt);
3138
3139 return UNKNOWN_LOCATION;
3140}
3141
3142/* Used to describe a store resulting from splitting a wide store into smaller
3143 regularly-sized stores in split_group. */
3144
6c1dae73 3145class split_store
f663d9ad 3146{
6c1dae73 3147public:
f663d9ad
KT
3148 unsigned HOST_WIDE_INT bytepos;
3149 unsigned HOST_WIDE_INT size;
3150 unsigned HOST_WIDE_INT align;
245f6de1 3151 auto_vec<store_immediate_info *> orig_stores;
a62b3dc5
JJ
3152 /* True if there is a single orig stmt covering the whole split store. */
3153 bool orig;
f663d9ad
KT
3154 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3155 unsigned HOST_WIDE_INT);
3156};
3157
3158/* Simple constructor. */
3159
3160split_store::split_store (unsigned HOST_WIDE_INT bp,
3161 unsigned HOST_WIDE_INT sz,
3162 unsigned HOST_WIDE_INT al)
a62b3dc5 3163 : bytepos (bp), size (sz), align (al), orig (false)
f663d9ad 3164{
245f6de1 3165 orig_stores.create (0);
f663d9ad
KT
3166}
3167
245f6de1
JJ
3168/* Record all stores in GROUP that write to the region that starts at BITPOS
3169   and is of size BITSIZE.  Record infos for such statements in STORES if
3170 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
5384a802
JJ
3171 if there is exactly one original store in the range (in that case ignore
3172 clobber stmts, unless there are only clobber stmts). */
f663d9ad 3173
a62b3dc5 3174static store_immediate_info *
99b1c316 3175find_constituent_stores (class merged_store_group *group,
245f6de1
JJ
3176 vec<store_immediate_info *> *stores,
3177 unsigned int *first,
3178 unsigned HOST_WIDE_INT bitpos,
3179 unsigned HOST_WIDE_INT bitsize)
f663d9ad 3180{
a62b3dc5 3181 store_immediate_info *info, *ret = NULL;
f663d9ad 3182 unsigned int i;
a62b3dc5
JJ
3183 bool second = false;
3184 bool update_first = true;
f663d9ad 3185 unsigned HOST_WIDE_INT end = bitpos + bitsize;
a62b3dc5 3186 for (i = *first; group->stores.iterate (i, &info); ++i)
f663d9ad
KT
3187 {
3188 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3189 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
a62b3dc5
JJ
3190 if (stmt_end <= bitpos)
3191 {
3192 /* BITPOS passed to this function never decreases from within the
3193 same split_group call, so optimize and don't scan info records
3194 which are known to end before or at BITPOS next time.
3195 Only do it if all stores before this one also pass this. */
3196 if (update_first)
3197 *first = i + 1;
3198 continue;
3199 }
3200 else
3201 update_first = false;
3202
f663d9ad 3203 /* The stores in GROUP are ordered by bitposition so if we're past
a62b3dc5
JJ
3204 the region for this group return early. */
3205 if (stmt_start >= end)
3206 return ret;
3207
5384a802
JJ
3208 if (gimple_clobber_p (info->stmt))
3209 {
3210 if (stores)
3211 stores->safe_push (info);
3212 if (ret == NULL)
3213 ret = info;
3214 continue;
3215 }
245f6de1 3216 if (stores)
a62b3dc5 3217 {
245f6de1 3218 stores->safe_push (info);
5384a802 3219 if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3220 {
3221 ret = NULL;
3222 second = true;
3223 }
3224 }
5384a802 3225 else if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3226 return NULL;
3227 if (!second)
3228 ret = info;
f663d9ad 3229 }
a62b3dc5 3230 return ret;
f663d9ad
KT
3231}
3232
d7a9512e
JJ
3233/* Return how many SSA_NAMEs used to compute the value stored by the INFO
3234   store have multiple uses.  If any SSA_NAME has multiple uses, also
3235 count statements needed to compute it. */
3236
3237static unsigned
3238count_multiple_uses (store_immediate_info *info)
3239{
3240 gimple *stmt = info->stmt;
3241 unsigned ret = 0;
3242 switch (info->rhs_code)
3243 {
3244 case INTEGER_CST:
e362a897 3245 case STRING_CST:
d7a9512e
JJ
3246 return 0;
3247 case BIT_AND_EXPR:
3248 case BIT_IOR_EXPR:
3249 case BIT_XOR_EXPR:
d60edaba
JJ
3250 if (info->bit_not_p)
3251 {
3252 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3253 ret = 1; /* Fall through below to return
3254 the BIT_NOT_EXPR stmt and then
3255 BIT_{AND,IOR,XOR}_EXPR and anything it
3256 uses. */
3257 else
3258 /* stmt is after this the BIT_NOT_EXPR. */
3259 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3260 }
d7a9512e
JJ
3261 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3262 {
3263 ret += 1 + info->ops[0].bit_not_p;
3264 if (info->ops[1].base_addr)
3265 ret += 1 + info->ops[1].bit_not_p;
3266 return ret + 1;
3267 }
3268 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3269 /* stmt is now the BIT_*_EXPR. */
3270 if (!has_single_use (gimple_assign_rhs1 (stmt)))
127ef369
JJ
3271 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3272 else if (info->ops[info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3273 {
3274 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3275 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3276 ++ret;
3277 }
3278 if (info->ops[1].base_addr == NULL_TREE)
127ef369
JJ
3279 {
3280 gcc_checking_assert (!info->ops_swapped_p);
3281 return ret;
3282 }
d7a9512e 3283 if (!has_single_use (gimple_assign_rhs2 (stmt)))
127ef369
JJ
3284 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3285 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3286 {
3287 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3288 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3289 ++ret;
3290 }
3291 return ret;
3292 case MEM_REF:
3293 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3294 return 1 + info->ops[0].bit_not_p;
3295 else if (info->ops[0].bit_not_p)
3296 {
3297 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3298 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3299 return 1;
3300 }
3301 return 0;
c94c3532
EB
3302 case BIT_INSERT_EXPR:
3303 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
d7a9512e
JJ
3304 default:
3305 gcc_unreachable ();
3306 }
3307}
3308
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return the number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   BZERO_FIRST may be true only when the first store covers the whole group
   and clears it; if BZERO_FIRST is true, keep that first store in the set
   unmodified and emit further stores for the overrides only.
   If SPLIT_STORES is NULL, it is just a dry run to count the number of
   new stores.  */

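/* For example (illustrative only), a group covering bytes [0, 8) of constant
   data is typically described by a single 8-byte split_store when
   ALLOW_UNALIGNED_STORE is true or the region is 8-byte aligned, but only by
   two 4-byte split_stores (or smaller pieces) when merely 4-byte alignment
   is known and unaligned stores are disallowed.  */
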
static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
             bool allow_unaligned_load, bool bzero_first,
             vec<split_store *> *split_stores,
             unsigned *total_orig,
             unsigned *total_new)
{
a62b3dc5
JJ
3331 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3332 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
f663d9ad 3333 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
a62b3dc5
JJ
3334 unsigned HOST_WIDE_INT group_align = group->align;
3335 unsigned HOST_WIDE_INT align_base = group->align_base;
245f6de1 3336 unsigned HOST_WIDE_INT group_load_align = group_align;
d7a9512e 3337 bool any_orig = false;
f663d9ad 3338
f663d9ad
KT
3339 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3340
  /* For the bswap framework using sets of stores, all the checking has been
     done earlier in try_coalesce_bswap and the result always needs to be
     emitted as a single store.  Likewise for string concatenation.  */
  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR
      || group->string_concatenation)
    {
3afd514b 3348 gcc_assert (!bzero_first);
4b84d9b8
JJ
3349 if (total_orig)
3350 {
          /* Avoid the old/new stmt count heuristics.  It should always
             be beneficial.  */
3353 total_new[0] = 1;
3354 total_orig[0] = 2;
3355 }
3356
3357 if (split_stores)
3358 {
3359 unsigned HOST_WIDE_INT align_bitpos
3360 = (group->start - align_base) & (group_align - 1);
3361 unsigned HOST_WIDE_INT align = group_align;
3362 if (align_bitpos)
3363 align = least_bit_hwi (align_bitpos);
3364 bytepos = group->start / BITS_PER_UNIT;
99b1c316 3365 split_store *store
4b84d9b8
JJ
3366 = new split_store (bytepos, group->width, align);
3367 unsigned int first = 0;
3368 find_constituent_stores (group, &store->orig_stores,
3369 &first, group->start, group->width);
3370 split_stores->safe_push (store);
3371 }
3372
3373 return 1;
3374 }
3375
a62b3dc5 3376 unsigned int ret = 0, first = 0;
f663d9ad 3377 unsigned HOST_WIDE_INT try_pos = bytepos;
f663d9ad 3378
d7a9512e
JJ
3379 if (total_orig)
3380 {
3381 unsigned int i;
3382 store_immediate_info *info = group->stores[0];
3383
3384 total_new[0] = 0;
3385 total_orig[0] = 1; /* The orig store. */
3386 info = group->stores[0];
3387 if (info->ops[0].base_addr)
a6fbd154 3388 total_orig[0]++;
d7a9512e 3389 if (info->ops[1].base_addr)
a6fbd154 3390 total_orig[0]++;
d7a9512e
JJ
3391 switch (info->rhs_code)
3392 {
3393 case BIT_AND_EXPR:
3394 case BIT_IOR_EXPR:
3395 case BIT_XOR_EXPR:
3396 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3397 break;
3398 default:
3399 break;
3400 }
3401 total_orig[0] *= group->stores.length ();
3402
3403 FOR_EACH_VEC_ELT (group->stores, i, info)
a6fbd154
JJ
3404 {
3405 total_new[0] += count_multiple_uses (info);
3406 total_orig[0] += (info->bit_not_p
3407 + info->ops[0].bit_not_p
3408 + info->ops[1].bit_not_p);
3409 }
d7a9512e
JJ
3410 }
3411
245f6de1
JJ
3412 if (!allow_unaligned_load)
3413 for (int i = 0; i < 2; ++i)
3414 if (group->load_align[i])
3415 group_load_align = MIN (group_load_align, group->load_align[i]);
3416
3afd514b
JJ
3417 if (bzero_first)
3418 {
5384a802
JJ
3419 store_immediate_info *gstore;
3420 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3421 if (!gimple_clobber_p (gstore->stmt))
3422 break;
3423 ++first;
3afd514b
JJ
3424 ret = 1;
3425 if (split_stores)
3426 {
99b1c316 3427 split_store *store
5384a802
JJ
3428 = new split_store (bytepos, gstore->bitsize, align_base);
3429 store->orig_stores.safe_push (gstore);
3afd514b
JJ
3430 store->orig = true;
3431 any_orig = true;
3432 split_stores->safe_push (store);
3433 }
3434 }
3435
f663d9ad
KT
3436 while (size > 0)
3437 {
245f6de1 3438 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3afd514b
JJ
3439 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3440 || (bzero_first && group->val[try_pos - bytepos] == 0)))
a62b3dc5
JJ
3441 {
3442 /* Skip padding bytes. */
3443 ++try_pos;
3444 size -= BITS_PER_UNIT;
3445 continue;
3446 }
3447
f663d9ad 3448 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
a62b3dc5
JJ
3449 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3450 unsigned HOST_WIDE_INT align_bitpos
3451 = (try_bitpos - align_base) & (group_align - 1);
3452 unsigned HOST_WIDE_INT align = group_align;
5384a802 3453 bool found_orig = false;
a62b3dc5
JJ
3454 if (align_bitpos)
3455 align = least_bit_hwi (align_bitpos);
245f6de1 3456 if (!allow_unaligned_store)
a62b3dc5 3457 try_size = MIN (try_size, align);
245f6de1
JJ
3458 if (!allow_unaligned_load)
3459 {
3460 /* If we can't do or don't want to do unaligned stores
3461 as well as loads, we need to take the loads into account
3462 as well. */
3463 unsigned HOST_WIDE_INT load_align = group_load_align;
3464 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3465 if (align_bitpos)
3466 load_align = least_bit_hwi (align_bitpos);
3467 for (int i = 0; i < 2; ++i)
3468 if (group->load_align[i])
3469 {
8a91d545
RS
3470 align_bitpos
3471 = known_alignment (try_bitpos
3472 - group->stores[0]->bitpos
3473 + group->stores[0]->ops[i].bitpos
3474 - group->load_align_base[i]);
3475 if (align_bitpos & (group_load_align - 1))
245f6de1
JJ
3476 {
3477 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3478 load_align = MIN (load_align, a);
3479 }
3480 }
3481 try_size = MIN (try_size, load_align);
3482 }
a62b3dc5 3483 store_immediate_info *info
245f6de1 3484 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
5384a802 3485 if (info && !gimple_clobber_p (info->stmt))
a62b3dc5
JJ
3486 {
3487 /* If there is just one original statement for the range, see if
3488 we can just reuse the original store which could be even larger
3489 than try_size. */
3490 unsigned HOST_WIDE_INT stmt_end
3491 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
245f6de1
JJ
3492 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3493 stmt_end - try_bitpos);
a62b3dc5
JJ
3494 if (info && info->bitpos >= try_bitpos)
3495 {
5384a802
JJ
3496 store_immediate_info *info2 = NULL;
3497 unsigned int first_copy = first;
3498 if (info->bitpos > try_bitpos
3499 && stmt_end - try_bitpos <= try_size)
3500 {
3501 info2 = find_constituent_stores (group, NULL, &first_copy,
3502 try_bitpos,
3503 info->bitpos - try_bitpos);
3504 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3505 }
3506 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3507 {
3508 info2 = find_constituent_stores (group, NULL, &first_copy,
3509 stmt_end,
3510 (try_bitpos + try_size)
3511 - stmt_end);
3512 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3513 }
3514 if (info2 == NULL)
3515 {
3516 try_size = stmt_end - try_bitpos;
3517 found_orig = true;
3518 goto found;
3519 }
a62b3dc5
JJ
3520 }
3521 }
f663d9ad 3522
a62b3dc5
JJ
3523 /* Approximate store bitsize for the case when there are no padding
3524 bits. */
3525 while (try_size > size)
3526 try_size /= 2;
3527 /* Now look for whole padding bytes at the end of that bitsize. */
3528 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3529 if (group->mask[try_pos - bytepos + nonmasked - 1]
3afd514b
JJ
3530 != (unsigned char) ~0U
3531 && (!bzero_first
3532 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
a62b3dc5 3533 break;
5384a802 3534 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
a62b3dc5
JJ
3535 {
3536 /* If entire try_size range is padding, skip it. */
3537 try_pos += try_size / BITS_PER_UNIT;
3538 size -= try_size;
3539 continue;
3540 }
3541 /* Otherwise try to decrease try_size if second half, last 3 quarters
3542 etc. are padding. */
3543 nonmasked *= BITS_PER_UNIT;
3544 while (nonmasked <= try_size / 2)
3545 try_size /= 2;
245f6de1 3546 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
a62b3dc5
JJ
3547 {
3548 /* Now look for whole padding bytes at the start of that bitsize. */
3549 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3550 for (masked = 0; masked < try_bytesize; ++masked)
3afd514b
JJ
3551 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3552 && (!bzero_first
3553 || group->val[try_pos - bytepos + masked] != 0))
a62b3dc5
JJ
3554 break;
3555 masked *= BITS_PER_UNIT;
3556 gcc_assert (masked < try_size);
3557 if (masked >= try_size / 2)
3558 {
3559 while (masked >= try_size / 2)
3560 {
3561 try_size /= 2;
3562 try_pos += try_size / BITS_PER_UNIT;
3563 size -= try_size;
3564 masked -= try_size;
3565 }
3566 /* Need to recompute the alignment, so just retry at the new
3567 position. */
3568 continue;
3569 }
3570 }
3571
3572 found:
3573 ++ret;
f663d9ad 3574
a62b3dc5
JJ
3575 if (split_stores)
3576 {
99b1c316 3577 split_store *store
a62b3dc5 3578 = new split_store (try_pos, try_size, align);
245f6de1
JJ
3579 info = find_constituent_stores (group, &store->orig_stores,
3580 &first, try_bitpos, try_size);
a62b3dc5 3581 if (info
5384a802 3582 && !gimple_clobber_p (info->stmt)
a62b3dc5 3583 && info->bitpos >= try_bitpos
5384a802
JJ
3584 && info->bitpos + info->bitsize <= try_bitpos + try_size
3585 && (store->orig_stores.length () == 1
3586 || found_orig
3587 || (info->bitpos == try_bitpos
3588 && (info->bitpos + info->bitsize
3589 == try_bitpos + try_size))))
d7a9512e
JJ
3590 {
3591 store->orig = true;
3592 any_orig = true;
3593 }
a62b3dc5
JJ
3594 split_stores->safe_push (store);
3595 }
3596
3597 try_pos += try_size / BITS_PER_UNIT;
f663d9ad 3598 size -= try_size;
f663d9ad 3599 }
a62b3dc5 3600
d7a9512e
JJ
3601 if (total_orig)
3602 {
a6fbd154 3603 unsigned int i;
99b1c316 3604 split_store *store;
d7a9512e
JJ
3605 /* If we are reusing some original stores and any of the
3606 original SSA_NAMEs had multiple uses, we need to subtract
3607 those now before we add the new ones. */
3608 if (total_new[0] && any_orig)
3609 {
d7a9512e
JJ
3610 FOR_EACH_VEC_ELT (*split_stores, i, store)
3611 if (store->orig)
3612 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3613 }
3614 total_new[0] += ret; /* The new store. */
3615 store_immediate_info *info = group->stores[0];
3616 if (info->ops[0].base_addr)
a6fbd154 3617 total_new[0] += ret;
d7a9512e 3618 if (info->ops[1].base_addr)
a6fbd154 3619 total_new[0] += ret;
d7a9512e
JJ
3620 switch (info->rhs_code)
3621 {
3622 case BIT_AND_EXPR:
3623 case BIT_IOR_EXPR:
3624 case BIT_XOR_EXPR:
3625 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3626 break;
3627 default:
3628 break;
3629 }
a6fbd154
JJ
3630 FOR_EACH_VEC_ELT (*split_stores, i, store)
3631 {
3632 unsigned int j;
3633 bool bit_not_p[3] = { false, false, false };
3634 /* If all orig_stores have certain bit_not_p set, then
3635 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3636 If some orig_stores have certain bit_not_p set, then
3637 we'd use a BIT_XOR_EXPR with a mask and need to account for
3638 it. */
3639 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3640 {
3641 if (info->ops[0].bit_not_p)
3642 bit_not_p[0] = true;
3643 if (info->ops[1].bit_not_p)
3644 bit_not_p[1] = true;
3645 if (info->bit_not_p)
3646 bit_not_p[2] = true;
3647 }
3648 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3649 }
3650
d7a9512e
JJ
3651 }
3652
a62b3dc5 3653 return ret;
f663d9ad
KT
3654}
3655
/* Return the operation through which the operand IDX (if < 2) or
   the result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
   the bits should be XORed with MASK.  */

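/* Illustrative example: if only the original stores covering the low two
   bytes of a 4-byte split store have the relevant bit_not_p flag set, the
   function returns BIT_XOR_EXPR and sets MASK to 0xffff (for a little-endian
   INT_TYPE), so that only those bytes end up inverted.  */
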
static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
3664 unsigned int i;
3665 store_immediate_info *info;
3666 unsigned int cnt = 0;
e215422f 3667 bool any_paddings = false;
a6fbd154
JJ
3668 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3669 {
3670 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3671 if (bit_not_p)
e215422f
JJ
3672 {
3673 ++cnt;
3674 tree lhs = gimple_assign_lhs (info->stmt);
3675 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3676 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3677 any_paddings = true;
3678 }
a6fbd154
JJ
3679 }
3680 mask = NULL_TREE;
3681 if (cnt == 0)
3682 return NOP_EXPR;
e215422f 3683 if (cnt == split_store->orig_stores.length () && !any_paddings)
a6fbd154
JJ
3684 return BIT_NOT_EXPR;
3685
3686 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3687 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3688 unsigned char *buf
3689 = XALLOCAVEC (unsigned char, buf_size);
3690 memset (buf, ~0U, buf_size);
3691 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3692 {
3693 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3694 if (!bit_not_p)
3695 continue;
3696 /* Clear regions with bit_not_p and invert afterwards, rather than
3697 clear regions with !bit_not_p, so that gaps in between stores aren't
3698 set in the mask. */
3699 unsigned HOST_WIDE_INT bitsize = info->bitsize;
e215422f 3700 unsigned HOST_WIDE_INT prec = bitsize;
a6fbd154 3701 unsigned int pos_in_buffer = 0;
e215422f
JJ
3702 if (any_paddings)
3703 {
3704 tree lhs = gimple_assign_lhs (info->stmt);
3705 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3706 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3707 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3708 }
a6fbd154
JJ
3709 if (info->bitpos < try_bitpos)
3710 {
3711 gcc_assert (info->bitpos + bitsize > try_bitpos);
e215422f
JJ
3712 if (!BYTES_BIG_ENDIAN)
3713 {
3714 if (prec <= try_bitpos - info->bitpos)
3715 continue;
3716 prec -= try_bitpos - info->bitpos;
3717 }
3718 bitsize -= try_bitpos - info->bitpos;
3719 if (BYTES_BIG_ENDIAN && prec > bitsize)
3720 prec = bitsize;
a6fbd154
JJ
3721 }
3722 else
3723 pos_in_buffer = info->bitpos - try_bitpos;
e215422f
JJ
3724 if (prec < bitsize)
3725 {
3726 /* If this is a bool inversion, invert just the least significant
3727 prec bits rather than all bits of it. */
3728 if (BYTES_BIG_ENDIAN)
3729 {
3730 pos_in_buffer += bitsize - prec;
3731 if (pos_in_buffer >= split_store->size)
3732 continue;
3733 }
3734 bitsize = prec;
3735 }
a6fbd154
JJ
3736 if (pos_in_buffer + bitsize > split_store->size)
3737 bitsize = split_store->size - pos_in_buffer;
3738 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3739 if (BYTES_BIG_ENDIAN)
3740 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3741 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3742 else
3743 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3744 }
3745 for (unsigned int i = 0; i < buf_size; ++i)
3746 buf[i] = ~buf[i];
3747 mask = native_interpret_expr (int_type, buf, buf_size);
3748 return BIT_XOR_EXPR;
3749}
3750
/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
e362a897 3762 const unsigned HOST_WIDE_INT start_byte_pos
a62b3dc5 3763 = group->bitregion_start / BITS_PER_UNIT;
f663d9ad
KT
3764 unsigned int orig_num_stmts = group->stores.length ();
3765 if (orig_num_stmts < 2)
3766 return false;
3767
245f6de1 3768 bool allow_unaligned_store
028d4092 3769 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
245f6de1 3770 bool allow_unaligned_load = allow_unaligned_store;
3afd514b 3771 bool bzero_first = false;
5384a802
JJ
3772 store_immediate_info *store;
3773 unsigned int num_clobber_stmts = 0;
3774 if (group->stores[0]->rhs_code == INTEGER_CST)
3775 {
e362a897 3776 unsigned int i;
5384a802
JJ
3777 FOR_EACH_VEC_ELT (group->stores, i, store)
3778 if (gimple_clobber_p (store->stmt))
3779 num_clobber_stmts++;
3780 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3781 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3782 && group->start == store->bitpos
3783 && group->width == store->bitsize
3784 && (group->start % BITS_PER_UNIT) == 0
3785 && (group->width % BITS_PER_UNIT) == 0)
3786 {
3787 bzero_first = true;
3788 break;
3789 }
3790 else
3791 break;
3792 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3793 if (gimple_clobber_p (store->stmt))
3794 num_clobber_stmts++;
3795 if (num_clobber_stmts == orig_num_stmts)
3796 return false;
3797 orig_num_stmts -= num_clobber_stmts;
3798 }
3afd514b 3799 if (allow_unaligned_store || bzero_first)
a62b3dc5
JJ
3800 {
      /* If unaligned stores are allowed, see how many stores we'd emit
         for unaligned and how many stores we'd emit for aligned stores.
         Only use unaligned stores if it allows fewer stores than aligned.
         Similarly, if the whole region is cleared first, prefer expanding
         it together with the merged stores rather than emitting the clear
         first followed by further merged stores.  */
21f65995 3807 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
3afd514b
JJ
3808 int pass_min = 0;
3809 for (int pass = 0; pass < 4; ++pass)
3810 {
3811 if (!allow_unaligned_store && (pass & 1) != 0)
3812 continue;
3813 if (!bzero_first && (pass & 2) != 0)
3814 continue;
3815 cnt[pass] = split_group (group, (pass & 1) != 0,
3816 allow_unaligned_load, (pass & 2) != 0,
3817 NULL, NULL, NULL);
3818 if (cnt[pass] < cnt[pass_min])
3819 pass_min = pass;
3820 }
3821 if ((pass_min & 1) == 0)
245f6de1 3822 allow_unaligned_store = false;
3afd514b
JJ
3823 if ((pass_min & 2) == 0)
3824 bzero_first = false;
a62b3dc5 3825 }
e362a897
EB
3826
3827 auto_vec<class split_store *, 32> split_stores;
3828 split_store *split_store;
3829 unsigned total_orig, total_new, i;
3afd514b 3830 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
d7a9512e 3831 &split_stores, &total_orig, &total_new);
a62b3dc5 3832
5384a802
JJ
3833 /* Determine if there is a clobber covering the whole group at the start,
3834 followed by proposed split stores that cover the whole group. In that
3835 case, prefer the transformation even if
3836 split_stores.length () == orig_num_stmts. */
3837 bool clobber_first = false;
3838 if (num_clobber_stmts
3839 && gimple_clobber_p (group->stores[0]->stmt)
3840 && group->start == group->stores[0]->bitpos
3841 && group->width == group->stores[0]->bitsize
3842 && (group->start % BITS_PER_UNIT) == 0
3843 && (group->width % BITS_PER_UNIT) == 0)
3844 {
3845 clobber_first = true;
3846 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
3847 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3848 if (split_store->bytepos != pos)
3849 {
3850 clobber_first = false;
3851 break;
3852 }
3853 else
3854 pos += split_store->size / BITS_PER_UNIT;
3855 if (pos != (group->start + group->width) / BITS_PER_UNIT)
3856 clobber_first = false;
3857 }
3858
3859 if (split_stores.length () >= orig_num_stmts + clobber_first)
a62b3dc5 3860 {
5384a802 3861
a62b3dc5
JJ
3862 /* We didn't manage to reduce the number of statements. Bail out. */
3863 if (dump_file && (dump_flags & TDF_DETAILS))
d7a9512e
JJ
3864 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3865 " Not profitable to emit new sequence.\n",
3866 orig_num_stmts);
dd172744
RB
3867 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3868 delete split_store;
a62b3dc5
JJ
3869 return false;
3870 }
d7a9512e
JJ
3871 if (total_orig <= total_new)
3872 {
3873 /* If number of estimated new statements is above estimated original
3874 statements, bail out too. */
3875 if (dump_file && (dump_flags & TDF_DETAILS))
3876 fprintf (dump_file, "Estimated number of original stmts (%u)"
3877 " not larger than estimated number of new"
3878 " stmts (%u).\n",
3879 total_orig, total_new);
dd172744
RB
3880 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3881 delete split_store;
4b84d9b8 3882 return false;
d7a9512e 3883 }
5384a802
JJ
3884 if (group->stores[0]->rhs_code == INTEGER_CST)
3885 {
3886 bool all_orig = true;
3887 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3888 if (!split_store->orig)
3889 {
3890 all_orig = false;
3891 break;
3892 }
3893 if (all_orig)
3894 {
3895 unsigned int cnt = split_stores.length ();
3896 store_immediate_info *store;
3897 FOR_EACH_VEC_ELT (group->stores, i, store)
3898 if (gimple_clobber_p (store->stmt))
3899 ++cnt;
3900 /* Punt if we wouldn't make any real changes, i.e. keep all
3901 orig stmts + all clobbers. */
3902 if (cnt == group->stores.length ())
3903 {
3904 if (dump_file && (dump_flags & TDF_DETAILS))
3905 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3906 " Not profitable to emit new sequence.\n",
3907 orig_num_stmts);
3908 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3909 delete split_store;
3910 return false;
3911 }
3912 }
3913 }
f663d9ad
KT
3914
3915 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
3916 gimple_seq seq = NULL;
f663d9ad
KT
3917 tree last_vdef, new_vuse;
3918 last_vdef = gimple_vdef (group->last_stmt);
3919 new_vuse = gimple_vuse (group->last_stmt);
4b84d9b8
JJ
3920 tree bswap_res = NULL_TREE;
3921
5384a802
JJ
3922 /* Clobbers are not removed. */
3923 if (gimple_clobber_p (group->last_stmt))
3924 {
3925 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
3926 gimple_set_vdef (group->last_stmt, new_vuse);
3927 }
3928
4b84d9b8
JJ
3929 if (group->stores[0]->rhs_code == LROTATE_EXPR
3930 || group->stores[0]->rhs_code == NOP_EXPR)
3931 {
3932 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3933 gimple *ins_stmt = group->stores[0]->ins_stmt;
3934 struct symbolic_number *n = &group->stores[0]->n;
3935 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3936
3937 switch (n->range)
3938 {
3939 case 16:
3940 load_type = bswap_type = uint16_type_node;
3941 break;
3942 case 32:
3943 load_type = uint32_type_node;
3944 if (bswap)
3945 {
3946 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3947 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3948 }
3949 break;
3950 case 64:
3951 load_type = uint64_type_node;
3952 if (bswap)
3953 {
3954 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3955 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3956 }
3957 break;
3958 default:
3959 gcc_unreachable ();
3960 }
3961
      /* If the loads each have the vuse of the corresponding store,
         we've checked the aliasing already in try_coalesce_bswap and
         we want to sink the needed load into SEQ, so we need to use
         NEW_VUSE on the load.  */
30fa8e9c 3966 if (n->base_addr)
4b84d9b8 3967 {
30fa8e9c
JJ
3968 if (n->vuse == NULL)
3969 {
3970 n->vuse = new_vuse;
3971 ins_stmt = NULL;
3972 }
3973 else
            /* Update vuse in case it has been changed by
               output_merged_stores.  */
3975 n->vuse = gimple_vuse (ins_stmt);
4b84d9b8
JJ
3976 }
3977 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3978 bswap_type, load_type, n, bswap);
3979 gcc_assert (bswap_res);
3980 }
f663d9ad
KT
3981
3982 gimple *stmt = NULL;
245f6de1 3983 auto_vec<gimple *, 32> orig_stmts;
4b84d9b8
JJ
3984 gimple_seq this_seq;
3985 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
aa55dc0c 3986 is_gimple_mem_ref_addr, NULL_TREE);
4b84d9b8 3987 gimple_seq_add_seq_without_update (&seq, this_seq);
245f6de1
JJ
3988
3989 tree load_addr[2] = { NULL_TREE, NULL_TREE };
3990 gimple_seq load_seq[2] = { NULL, NULL };
3991 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
3992 for (int j = 0; j < 2; ++j)
3993 {
3994 store_operand_info &op = group->stores[0]->ops[j];
3995 if (op.base_addr == NULL_TREE)
3996 continue;
3997
3998 store_immediate_info *infol = group->stores.last ();
3999 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4000 {
97031af7
JJ
4001 /* We can't pick the location randomly; while we've verified
4002 all the loads have the same vuse, they can be still in different
4003 basic blocks and we need to pick the one from the last bb:
4004 int x = q[0];
4005 if (x == N) return;
4006 int y = q[1];
4007 p[0] = x;
4008 p[1] = y;
4009 otherwise if we put the wider load at the q[0] load, we might
4010 segfault if q[1] is not mapped. */
4011 basic_block bb = gimple_bb (op.stmt);
4012 gimple *ostmt = op.stmt;
4013 store_immediate_info *info;
4014 FOR_EACH_VEC_ELT (group->stores, i, info)
4015 {
4016 gimple *tstmt = info->ops[j].stmt;
4017 basic_block tbb = gimple_bb (tstmt);
4018 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4019 {
4020 ostmt = tstmt;
4021 bb = tbb;
4022 }
4023 }
4024 load_gsi[j] = gsi_for_stmt (ostmt);
245f6de1
JJ
4025 load_addr[j]
4026 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4027 &load_seq[j], is_gimple_mem_ref_addr,
4028 NULL_TREE);
4029 }
4030 else if (operand_equal_p (base_addr, op.base_addr, 0))
4031 load_addr[j] = addr;
4032 else
3e2927a1 4033 {
3e2927a1
JJ
4034 load_addr[j]
4035 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4036 &this_seq, is_gimple_mem_ref_addr,
4037 NULL_TREE);
4038 gimple_seq_add_seq_without_update (&seq, this_seq);
4039 }
245f6de1
JJ
4040 }
4041
f663d9ad
KT
4042 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4043 {
e362a897
EB
4044 const unsigned HOST_WIDE_INT try_size = split_store->size;
4045 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4046 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4047 const unsigned HOST_WIDE_INT try_align = split_store->align;
4048 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
a62b3dc5
JJ
4049 tree dest, src;
4050 location_t loc;
e362a897 4051
a62b3dc5
JJ
4052 if (split_store->orig)
4053 {
5384a802
JJ
4054 /* If there is just a single non-clobber constituent store
4055 which covers the whole area, just reuse the lhs and rhs. */
4056 gimple *orig_stmt = NULL;
4057 store_immediate_info *store;
4058 unsigned int j;
4059 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4060 if (!gimple_clobber_p (store->stmt))
4061 {
4062 orig_stmt = store->stmt;
4063 break;
4064 }
245f6de1
JJ
4065 dest = gimple_assign_lhs (orig_stmt);
4066 src = gimple_assign_rhs1 (orig_stmt);
4067 loc = gimple_location (orig_stmt);
a62b3dc5
JJ
4068 }
4069 else
4070 {
245f6de1
JJ
4071 store_immediate_info *info;
4072 unsigned short clique, base;
4073 unsigned int k;
4074 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4075 orig_stmts.safe_push (info->stmt);
a62b3dc5 4076 tree offset_type
245f6de1 4077 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
e362a897 4078 tree dest_type;
245f6de1
JJ
4079 loc = get_location_for_stmts (orig_stmts);
4080 orig_stmts.truncate (0);
a62b3dc5 4081
e362a897
EB
4082 if (group->string_concatenation)
4083 dest_type
4084 = build_array_type_nelts (char_type_node,
4085 try_size / BITS_PER_UNIT);
4086 else
4087 {
4088 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4089 dest_type = build_aligned_type (dest_type, try_align);
4090 }
4091 dest = fold_build2 (MEM_REF, dest_type, addr,
a62b3dc5 4092 build_int_cst (offset_type, try_pos));
245f6de1
JJ
4093 if (TREE_CODE (dest) == MEM_REF)
4094 {
4095 MR_DEPENDENCE_CLIQUE (dest) = clique;
4096 MR_DEPENDENCE_BASE (dest) = base;
4097 }
4098
c94c3532 4099 tree mask;
e362a897 4100 if (bswap_res || group->string_concatenation)
c94c3532
EB
4101 mask = integer_zero_node;
4102 else
e362a897
EB
4103 mask = native_interpret_expr (dest_type,
4104 group->mask + try_offset,
4b84d9b8 4105 group->buf_size);
245f6de1
JJ
4106
4107 tree ops[2];
4108 for (int j = 0;
4109 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4110 ++j)
4111 {
4112 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4b84d9b8
JJ
4113 if (bswap_res)
4114 ops[j] = bswap_res;
e362a897
EB
4115 else if (group->string_concatenation)
4116 {
4117 ops[j] = build_string (try_size / BITS_PER_UNIT,
4118 (const char *) group->val + try_offset);
4119 TREE_TYPE (ops[j]) = dest_type;
4120 }
4b84d9b8 4121 else if (op.base_addr)
245f6de1
JJ
4122 {
4123 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4124 orig_stmts.safe_push (info->ops[j].stmt);
4125
4126 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4127 &clique, &base);
4128 location_t load_loc = get_location_for_stmts (orig_stmts);
4129 orig_stmts.truncate (0);
4130
4131 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4132 unsigned HOST_WIDE_INT align_bitpos
c94c3532 4133 = known_alignment (try_bitpos
8a91d545
RS
4134 - split_store->orig_stores[0]->bitpos
4135 + op.bitpos);
4136 if (align_bitpos & (load_align - 1))
245f6de1
JJ
4137 load_align = least_bit_hwi (align_bitpos);
4138
4139 tree load_int_type
4140 = build_nonstandard_integer_type (try_size, UNSIGNED);
4141 load_int_type
4142 = build_aligned_type (load_int_type, load_align);
4143
8a91d545 4144 poly_uint64 load_pos
c94c3532 4145 = exact_div (try_bitpos
8a91d545
RS
4146 - split_store->orig_stores[0]->bitpos
4147 + op.bitpos,
4148 BITS_PER_UNIT);
245f6de1
JJ
4149 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4150 build_int_cst (offset_type, load_pos));
4151 if (TREE_CODE (ops[j]) == MEM_REF)
4152 {
4153 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4154 MR_DEPENDENCE_BASE (ops[j]) = base;
4155 }
4156 if (!integer_zerop (mask))
4157 /* The load might load some bits (that will be masked off
4158 later on) uninitialized, avoid -W*uninitialized
4159 warnings in that case. */
4160 TREE_NO_WARNING (ops[j]) = 1;
4161
e362a897 4162 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
245f6de1
JJ
4163 gimple_set_location (stmt, load_loc);
4164 if (gsi_bb (load_gsi[j]))
4165 {
4166 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4167 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4168 }
4169 else
4170 {
4171 gimple_set_vuse (stmt, new_vuse);
4172 gimple_seq_add_stmt_without_update (&seq, stmt);
4173 }
4174 ops[j] = gimple_assign_lhs (stmt);
a6fbd154
JJ
4175 tree xor_mask;
4176 enum tree_code inv_op
e362a897 4177 = invert_op (split_store, j, dest_type, xor_mask);
a6fbd154 4178 if (inv_op != NOP_EXPR)
383ac8dc 4179 {
e362a897 4180 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4181 inv_op, ops[j], xor_mask);
383ac8dc
JJ
4182 gimple_set_location (stmt, load_loc);
4183 ops[j] = gimple_assign_lhs (stmt);
4184
4185 if (gsi_bb (load_gsi[j]))
4186 gimple_seq_add_stmt_without_update (&load_seq[j],
4187 stmt);
4188 else
4189 gimple_seq_add_stmt_without_update (&seq, stmt);
4190 }
245f6de1
JJ
4191 }
4192 else
e362a897
EB
4193 ops[j] = native_interpret_expr (dest_type,
4194 group->val + try_offset,
245f6de1
JJ
4195 group->buf_size);
4196 }
4197
4198 switch (split_store->orig_stores[0]->rhs_code)
4199 {
4200 case BIT_AND_EXPR:
4201 case BIT_IOR_EXPR:
4202 case BIT_XOR_EXPR:
4203 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4204 {
4205 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4206 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4207 }
4208 location_t bit_loc;
4209 bit_loc = get_location_for_stmts (orig_stmts);
4210 orig_stmts.truncate (0);
4211
4212 stmt
e362a897 4213 = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4214 split_store->orig_stores[0]->rhs_code,
4215 ops[0], ops[1]);
4216 gimple_set_location (stmt, bit_loc);
4217 /* If there is just one load and there is a separate
4218 load_seq[0], emit the bitwise op right after it. */
4219 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4220 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4221 /* Otherwise, if at least one load is in seq, we need to
4222 emit the bitwise op right before the store. If there
4223 are two loads and are emitted somewhere else, it would
4224 be better to emit the bitwise op as early as possible;
4225 we don't track where that would be possible right now
4226 though. */
4227 else
4228 gimple_seq_add_stmt_without_update (&seq, stmt);
4229 src = gimple_assign_lhs (stmt);
a6fbd154
JJ
4230 tree xor_mask;
4231 enum tree_code inv_op;
e362a897 4232 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
a6fbd154 4233 if (inv_op != NOP_EXPR)
d60edaba 4234 {
e362a897 4235 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4236 inv_op, src, xor_mask);
d60edaba
JJ
4237 gimple_set_location (stmt, bit_loc);
4238 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4239 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4240 else
4241 gimple_seq_add_stmt_without_update (&seq, stmt);
4242 src = gimple_assign_lhs (stmt);
4243 }
245f6de1 4244 break;
4b84d9b8
JJ
4245 case LROTATE_EXPR:
4246 case NOP_EXPR:
4247 src = ops[0];
4248 if (!is_gimple_val (src))
4249 {
4250 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4251 src);
4252 gimple_seq_add_stmt_without_update (&seq, stmt);
4253 src = gimple_assign_lhs (stmt);
4254 }
e362a897 4255 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4b84d9b8 4256 {
e362a897 4257 stmt = gimple_build_assign (make_ssa_name (dest_type),
4b84d9b8
JJ
4258 NOP_EXPR, src);
4259 gimple_seq_add_stmt_without_update (&seq, stmt);
4260 src = gimple_assign_lhs (stmt);
4261 }
e362a897 4262 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
be52ac73
JJ
4263 if (inv_op != NOP_EXPR)
4264 {
e362a897 4265 stmt = gimple_build_assign (make_ssa_name (dest_type),
be52ac73
JJ
4266 inv_op, src, xor_mask);
4267 gimple_set_location (stmt, loc);
4268 gimple_seq_add_stmt_without_update (&seq, stmt);
4269 src = gimple_assign_lhs (stmt);
4270 }
4b84d9b8 4271 break;
245f6de1
JJ
4272 default:
4273 src = ops[0];
4274 break;
4275 }
4276
c94c3532
EB
4277 /* If bit insertion is required, we use the source as an accumulator
4278 into which the successive bit-field values are manually inserted.
4279 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
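          /* Illustrative sketch: for a 3-bit field at bit offset 2 of a
             byte-sized split store on a little-endian target, the value is
             ANDed with 0x7 (assuming its type is wider than the field),
             shifted left by 2 and IORed into SRC, leaving the other bits of
             the accumulator untouched.  */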
4280 if (group->bit_insertion)
4281 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4282 if (info->rhs_code == BIT_INSERT_EXPR
4283 && info->bitpos < try_bitpos + try_size
4284 && info->bitpos + info->bitsize > try_bitpos)
4285 {
4286 /* Mask, truncate, convert to final type, shift and ior into
4287 the accumulator. Note that every step can be a no-op. */
4288 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4289 const HOST_WIDE_INT end_gap
4290 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4291 tree tem = info->ops[0].val;
ed01d707
EB
4292 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4293 {
4294 const unsigned HOST_WIDE_INT size
4295 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4296 tree integer_type
4297 = build_nonstandard_integer_type (size, UNSIGNED);
4298 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4299 integer_type, tem);
4300 }
c14add82
EB
4301 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4302 {
4303 tree bitfield_type
4304 = build_nonstandard_integer_type (info->bitsize,
4305 UNSIGNED);
4306 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4307 }
4308 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
c94c3532
EB
4309 {
4310 const unsigned HOST_WIDE_INT imask
4311 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4312 tem = gimple_build (&seq, loc,
4313 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4314 build_int_cst (TREE_TYPE (tem),
4315 imask));
4316 }
4317 const HOST_WIDE_INT shift
4318 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4319 if (shift < 0)
4320 tem = gimple_build (&seq, loc,
4321 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4322 build_int_cst (NULL_TREE, -shift));
e362a897 4323 tem = gimple_convert (&seq, loc, dest_type, tem);
c94c3532
EB
4324 if (shift > 0)
4325 tem = gimple_build (&seq, loc,
e362a897 4326 LSHIFT_EXPR, dest_type, tem,
c94c3532
EB
4327 build_int_cst (NULL_TREE, shift));
4328 src = gimple_build (&seq, loc,
e362a897 4329 BIT_IOR_EXPR, dest_type, tem, src);
c94c3532
EB
4330 }
4331
a62b3dc5
JJ
4332 if (!integer_zerop (mask))
4333 {
e362a897 4334 tree tem = make_ssa_name (dest_type);
a62b3dc5
JJ
4335 tree load_src = unshare_expr (dest);
              /* The load might load some or all bits uninitialized;
                 avoid -W*uninitialized warnings in that case.
                 As an optimization, if all the bits are provably
                 uninitialized (no stores at all yet or the previous store
                 was a CLOBBER), it would be nice to optimize away the load
                 and replace it e.g. with 0.  */
4342 TREE_NO_WARNING (load_src) = 1;
4343 stmt = gimple_build_assign (tem, load_src);
4344 gimple_set_location (stmt, loc);
4345 gimple_set_vuse (stmt, new_vuse);
4346 gimple_seq_add_stmt_without_update (&seq, stmt);
4347
4348 /* FIXME: If there is a single chunk of zero bits in mask,
4349 perhaps use BIT_INSERT_EXPR instead? */
e362a897 4350 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4351 BIT_AND_EXPR, tem, mask);
4352 gimple_set_location (stmt, loc);
4353 gimple_seq_add_stmt_without_update (&seq, stmt);
4354 tem = gimple_assign_lhs (stmt);
4355
245f6de1 4356 if (TREE_CODE (src) == INTEGER_CST)
e362a897 4357 src = wide_int_to_tree (dest_type,
245f6de1
JJ
4358 wi::bit_and_not (wi::to_wide (src),
4359 wi::to_wide (mask)));
4360 else
4361 {
4362 tree nmask
e362a897 4363 = wide_int_to_tree (dest_type,
245f6de1 4364 wi::bit_not (wi::to_wide (mask)));
e362a897 4365 stmt = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4366 BIT_AND_EXPR, src, nmask);
4367 gimple_set_location (stmt, loc);
4368 gimple_seq_add_stmt_without_update (&seq, stmt);
4369 src = gimple_assign_lhs (stmt);
4370 }
e362a897 4371 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4372 BIT_IOR_EXPR, tem, src);
4373 gimple_set_location (stmt, loc);
4374 gimple_seq_add_stmt_without_update (&seq, stmt);
4375 src = gimple_assign_lhs (stmt);
4376 }
4377 }
f663d9ad
KT
4378
4379 stmt = gimple_build_assign (dest, src);
4380 gimple_set_location (stmt, loc);
4381 gimple_set_vuse (stmt, new_vuse);
4382 gimple_seq_add_stmt_without_update (&seq, stmt);
4383
629387a6
EB
4384 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4385 add_stmt_to_eh_lp (stmt, group->lp_nr);
4386
f663d9ad
KT
4387 tree new_vdef;
4388 if (i < split_stores.length () - 1)
a62b3dc5 4389 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
f663d9ad
KT
4390 else
4391 new_vdef = last_vdef;
4392
4393 gimple_set_vdef (stmt, new_vdef);
4394 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4395 new_vuse = new_vdef;
4396 }
4397
4398 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4399 delete split_store;
4400
f663d9ad
KT
4401 gcc_assert (seq);
4402 if (dump_file)
4403 {
4404 fprintf (dump_file,
c94c3532 4405 "New sequence of %u stores to replace old one of %u stores\n",
a62b3dc5 4406 split_stores.length (), orig_num_stmts);
f663d9ad
KT
4407 if (dump_flags & TDF_DETAILS)
4408 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4409 }
629387a6 4410
5384a802
JJ
4411 if (gimple_clobber_p (group->last_stmt))
4412 update_stmt (group->last_stmt);
4413
629387a6
EB
4414 if (group->lp_nr > 0)
4415 {
4416 /* We're going to insert a sequence of (potentially) throwing stores
4417 into an active EH region. This means that we're going to create
4418 new basic blocks with EH edges pointing to the post landing pad
4419 and, therefore, to have to update its PHI nodes, if any. For the
4420 virtual PHI node, we're going to use the VDEFs created above, but
4421 for the other nodes, we need to record the original reaching defs. */
4422 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4423 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4424 basic_block last_bb = gimple_bb (group->last_stmt);
4425 edge last_edge = find_edge (last_bb, lp_bb);
4426 auto_vec<tree, 16> last_defs;
4427 gphi_iterator gpi;
4428 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4429 {
4430 gphi *phi = gpi.phi ();
4431 tree last_def;
4432 if (virtual_operand_p (gimple_phi_result (phi)))
4433 last_def = NULL_TREE;
4434 else
4435 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4436 last_defs.safe_push (last_def);
4437 }
4438
      /* Do the insertion.  Then, if new basic blocks have been created in the
         process, rewind the chain of VDEFs created above to walk the new
         basic blocks and update the corresponding arguments of the PHI
         nodes.  */
4442 update_modified_stmts (seq);
4443 if (gimple_find_sub_bbs (seq, &last_gsi))
4444 while (last_vdef != gimple_vuse (group->last_stmt))
4445 {
4446 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4447 if (stmt_could_throw_p (cfun, stmt))
4448 {
4449 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4450 unsigned int i;
4451 for (gpi = gsi_start_phis (lp_bb), i = 0;
4452 !gsi_end_p (gpi);
4453 gsi_next (&gpi), i++)
4454 {
4455 gphi *phi = gpi.phi ();
4456 tree new_def;
4457 if (virtual_operand_p (gimple_phi_result (phi)))
4458 new_def = last_vdef;
4459 else
4460 new_def = last_defs[i];
4461 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4462 }
4463 }
4464 last_vdef = gimple_vuse (stmt);
4465 }
4466 }
4467 else
4468 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4469
245f6de1
JJ
4470 for (int j = 0; j < 2; ++j)
4471 if (load_seq[j])
4472 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
f663d9ad
KT
4473
4474 return true;
4475}
4476
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
4485 unsigned int i;
4486 merged_store_group *merged_store;
4487 bool ret = false;
4488 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4489 {
a95b474a
ML
4490 if (dbg_cnt (store_merging)
4491 && output_merged_store (merged_store))
f663d9ad
KT
4492 {
4493 unsigned int j;
4494 store_immediate_info *store;
4495 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4496 {
4497 gimple *stmt = store->stmt;
4498 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5384a802
JJ
4499 /* Don't remove clobbers, they are still useful even if
4500 everything is overwritten afterwards. */
4501 if (gimple_clobber_p (stmt))
4502 continue;
f663d9ad 4503 gsi_remove (&gsi, true);
629387a6
EB
4504 if (store->lp_nr)
4505 remove_stmt_from_eh_lp (stmt);
f663d9ad
KT
4506 if (stmt != merged_store->last_stmt)
4507 {
4508 unlink_stmt_vdef (stmt);
4509 release_defs (stmt);
4510 }
4511 }
4512 ret = true;
4513 }
4514 }
4515 if (ret && dump_file)
4516 fprintf (dump_file, "Merging successful!\n");
4517
4518 return ret;
4519}
4520
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
4529 /* Process store chain. */
4530 bool ret = false;
4531 if (m_store_info.length () > 1)
4532 {
4533 ret = coalesce_immediate_stores ();
4534 if (ret)
b5926e23 4535 ret = output_merged_stores ();
f663d9ad
KT
4536 }
4537
4538 /* Delete all the entries we allocated ourselves. */
4539 store_immediate_info *info;
4540 unsigned int i;
4541 FOR_EACH_VEC_ELT (m_store_info, i, info)
4542 delete info;
4543
4544 merged_store_group *merged_info;
4545 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4546 delete merged_info;
4547
4548 return ret;
4549}
4550
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

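/* For example, destinations such as s.f (COMPONENT_REF), a[i] (ARRAY_REF),
   *p (MEM_REF) and plain DECLs are accepted; other codes, e.g.
   REALPART_EXPR, are rejected (illustrative).  */
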
static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  if (DECL_P (lhs))
f663d9ad
KT
4559 return true;
4560
629387a6
EB
4561 switch (TREE_CODE (lhs))
4562 {
4563 case ARRAY_REF:
4564 case ARRAY_RANGE_REF:
4565 case BIT_FIELD_REF:
4566 case COMPONENT_REF:
4567 case MEM_REF:
e362a897 4568 case VIEW_CONVERT_EXPR:
629387a6
EB
4569 return true;
4570 default:
4571 return false;
4572 }
4573
4574 gcc_unreachable ();
f663d9ad
KT
4575}
4576
/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

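/* E.g. INTEGER_CSTs, REAL_CSTs and other constants that native_encode_expr
   can encode qualify, as does an empty CONSTRUCTOR ({}) of a type whose size
   fits in an unsigned HOST_WIDE_INT (used for whole-object clears).  */
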
static bool
rhs_valid_for_store_merging_p (tree rhs)
{
cf098191 4584 unsigned HOST_WIDE_INT size;
3afd514b 4585 if (TREE_CODE (rhs) == CONSTRUCTOR
3afd514b
JJ
4586 && CONSTRUCTOR_NELTS (rhs) == 0
4587 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4588 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4589 return true;
cf098191
RS
4590 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4591 && native_encode_expr (rhs, NULL, size) != 0);
f663d9ad
KT
4592}
4593
/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
   and return true on success or false on failure.  */

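/* For instance, with BYTE_OFF 2 and *PBITPOS 5, *PBITPOS becomes
   2 * BITS_PER_UNIT + 5 = 21 on the usual 8-bit-byte targets; failure is
   reported when the adjusted bit position is negative or does not fit the
   output type.  */
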
static bool
adjust_bit_pos (poly_offset_int byte_off,
                poly_int64 *pbitpos,
                poly_uint64 *pbitregion_start,
                poly_uint64 *pbitregion_end)
{
4603 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4604 bit_off += *pbitpos;
4605
4606 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4607 {
4608 if (maybe_ne (*pbitregion_end, 0U))
4609 {
4610 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4611 bit_off += *pbitregion_start;
4612 if (bit_off.to_uhwi (pbitregion_start))
4613 {
4614 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4615 bit_off += *pbitregion_end;
4616 if (!bit_off.to_uhwi (pbitregion_end))
4617 *pbitregion_end = 0;
4618 }
4619 else
4620 *pbitregion_end = 0;
4621 }
4622 return true;
4623 }
4624 else
4625 return false;
4626}
4627
/* If MEM is a memory reference usable for store merging (either as
   a store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL; *PBITPOS should still be valid even in that
   case.  */

static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
                             poly_uint64 *pbitpos,
                             poly_uint64 *pbitregion_start,
                             poly_uint64 *pbitregion_end)
{
8a91d545
RS
4640 poly_int64 bitsize, bitpos;
4641 poly_uint64 bitregion_start = 0, bitregion_end = 0;
245f6de1
JJ
4642 machine_mode mode;
4643 int unsignedp = 0, reversep = 0, volatilep = 0;
4644 tree offset;
4645 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4646 &unsignedp, &reversep, &volatilep);
4647 *pbitsize = bitsize;
8a91d545 4648 if (known_eq (bitsize, 0))
245f6de1
JJ
4649 return NULL_TREE;
4650
4651 if (TREE_CODE (mem) == COMPONENT_REF
4652 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4653 {
4654 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
8a91d545
RS
4655 if (maybe_ne (bitregion_end, 0U))
4656 bitregion_end += 1;
245f6de1
JJ
4657 }
4658
4659 if (reversep)
4660 return NULL_TREE;
4661
4662 /* We do not want to rewrite TARGET_MEM_REFs. */
4663 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4664 return NULL_TREE;
4665 /* In some cases get_inner_reference may return a
4666 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4667 canonicalize the base_addr to MEM_REF [ptr] and take
4668 byteoffset into account in the bitpos. This occurs in
4669 PR 23684 and this way we can catch more chains. */
4670 else if (TREE_CODE (base_addr) == MEM_REF)
4671 {
629387a6
EB
4672 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4673 &bitregion_start, &bitregion_end))
245f6de1
JJ
4674 return NULL_TREE;
4675 base_addr = TREE_OPERAND (base_addr, 0);
4676 }
4677 /* get_inner_reference returns the base object, get at its
4678 address now. */
4679 else
4680 {
8a91d545 4681 if (maybe_lt (bitpos, 0))
245f6de1
JJ
4682 return NULL_TREE;
4683 base_addr = build_fold_addr_expr (base_addr);
4684 }
4685
629387a6 4686 if (offset)
245f6de1
JJ
4687 {
4688 /* If the access is variable offset then a base decl has to be
4689 address-taken to be able to emit pointer-based stores to it.
4690 ??? We might be able to get away with re-using the original
4691 base up to the first variable part and then wrapping that inside
4692 a BIT_FIELD_REF. */
4693 tree base = get_base_address (base_addr);
629387a6 4694 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
245f6de1
JJ
4695 return NULL_TREE;
4696
629387a6
EB
4697 /* Similarly to above for the base, remove constant from the offset. */
4698 if (TREE_CODE (offset) == PLUS_EXPR
4699 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4700 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4701 &bitpos, &bitregion_start, &bitregion_end))
4702 offset = TREE_OPERAND (offset, 0);
4703
245f6de1
JJ
4704 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4705 base_addr, offset);
4706 }
4707
629387a6
EB
4708 if (known_eq (bitregion_end, 0U))
4709 {
4710 bitregion_start = round_down_to_byte_boundary (bitpos);
4711 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4712 }
4713
245f6de1
JJ
4714 *pbitsize = bitsize;
4715 *pbitpos = bitpos;
4716 *pbitregion_start = bitregion_start;
4717 *pbitregion_end = bitregion_end;
4718 return base_addr;
4719}
4720
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
              poly_uint64 bitsize, poly_uint64 bitpos,
              poly_uint64 bitregion_start, poly_uint64 bitregion_end)
245f6de1 4729{
383ac8dc 4730 if (!is_gimple_assign (stmt))
245f6de1 4731 return false;
383ac8dc
JJ
4732 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4733 {
4734 tree rhs1 = gimple_assign_rhs1 (stmt);
4735 if (TREE_CODE (rhs1) == SSA_NAME
383ac8dc
JJ
4736 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4737 bitregion_start, bitregion_end))
4738 {
d60edaba
JJ
4739 /* Don't allow _1 = load; _2 = ~1; _3 = ~_2; which should have
4740 been optimized earlier, but if allowed here, would confuse the
4741 multiple uses counting. */
4742 if (op->bit_not_p)
4743 return false;
383ac8dc
JJ
4744 op->bit_not_p = !op->bit_not_p;
4745 return true;
4746 }
4747 return false;
4748 }
4749 if (gimple_vuse (stmt)
4750 && gimple_assign_load_p (stmt)
36bbc05d 4751 && !stmt_can_throw_internal (cfun, stmt)
245f6de1
JJ
4752 && !gimple_has_volatile_ops (stmt))
4753 {
4754 tree mem = gimple_assign_rhs1 (stmt);
4755 op->base_addr
4756 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4757 &op->bitregion_start,
4758 &op->bitregion_end);
4759 if (op->base_addr != NULL_TREE
8a91d545
RS
4760 && known_eq (op->bitsize, bitsize)
4761 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4762 && known_ge (op->bitpos - op->bitregion_start,
4763 bitpos - bitregion_start)
4764 && known_ge (op->bitregion_end - op->bitpos,
4765 bitregion_end - bitpos))
245f6de1
JJ
4766 {
4767 op->stmt = stmt;
4768 op->val = mem;
383ac8dc 4769 op->bit_not_p = false;
245f6de1
JJ
4770 return true;
4771 }
4772 }
4773 return false;
4774}
4775
/* Return the index number of the landing pad for STMT, if any.  */

static int
lp_nr_for_store (gimple *stmt)
{
  if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
    return 0;

  if (!stmt_could_throw_p (cfun, stmt))
    return 0;

  return lookup_stmt_eh_lp (stmt);
}

/* Record the store STMT for store merging optimization if it can be
   optimized.  Return true if any changes were made.  */

bool
pass_store_merging::process_store (gimple *stmt)
{
4796 tree lhs = gimple_assign_lhs (stmt);
4797 tree rhs = gimple_assign_rhs1 (stmt);
2c832ffe
SSF
4798 poly_uint64 bitsize, bitpos = 0;
4799 poly_uint64 bitregion_start = 0, bitregion_end = 0;
245f6de1
JJ
4800 tree base_addr
4801 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4802 &bitregion_start, &bitregion_end);
8a91d545 4803 if (known_eq (bitsize, 0U))
629387a6 4804 return false;
245f6de1
JJ
4805
4806 bool invalid = (base_addr == NULL_TREE
8a91d545
RS
4807 || (maybe_gt (bitsize,
4808 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
3afd514b
JJ
4809 && TREE_CODE (rhs) != INTEGER_CST
4810 && (TREE_CODE (rhs) != CONSTRUCTOR
4811 || CONSTRUCTOR_NELTS (rhs) != 0)));
245f6de1 4812 enum tree_code rhs_code = ERROR_MARK;
d60edaba 4813 bool bit_not_p = false;
4b84d9b8
JJ
4814 struct symbolic_number n;
4815 gimple *ins_stmt = NULL;
245f6de1
JJ
4816 store_operand_info ops[2];
4817 if (invalid)
4818 ;
e362a897
EB
4819 else if (TREE_CODE (rhs) == STRING_CST)
4820 {
4821 rhs_code = STRING_CST;
4822 ops[0].val = rhs;
4823 }
245f6de1
JJ
4824 else if (rhs_valid_for_store_merging_p (rhs))
4825 {
4826 rhs_code = INTEGER_CST;
4827 ops[0].val = rhs;
4828 }
e362a897 4829 else if (TREE_CODE (rhs) == SSA_NAME)
245f6de1
JJ
4830 {
4831 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4832 if (!is_gimple_assign (def_stmt))
4833 invalid = true;
4834 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4835 bitregion_start, bitregion_end))
4836 rhs_code = MEM_REF;
d60edaba
JJ
4837 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4838 {
4839 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4840 if (TREE_CODE (rhs1) == SSA_NAME
4841 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4842 {
4843 bit_not_p = true;
4844 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4845 }
4846 }
c94c3532 4847
d60edaba 4848 if (rhs_code == ERROR_MARK && !invalid)
245f6de1
JJ
4849 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4850 {
4851 case BIT_AND_EXPR:
4852 case BIT_IOR_EXPR:
4853 case BIT_XOR_EXPR:
4854 tree rhs1, rhs2;
4855 rhs1 = gimple_assign_rhs1 (def_stmt);
4856 rhs2 = gimple_assign_rhs2 (def_stmt);
4857 invalid = true;
d7a9512e 4858 if (TREE_CODE (rhs1) != SSA_NAME)
245f6de1
JJ
4859 break;
4860 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4861 if (!is_gimple_assign (def_stmt1)
4862 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4863 bitregion_start, bitregion_end))
4864 break;
4865 if (rhs_valid_for_store_merging_p (rhs2))
4866 ops[1].val = rhs2;
d7a9512e 4867 else if (TREE_CODE (rhs2) != SSA_NAME)
245f6de1
JJ
4868 break;
4869 else
4870 {
4871 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4872 if (!is_gimple_assign (def_stmt2))
4873 break;
4874 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4875 bitregion_start, bitregion_end))
4876 break;
4877 }
4878 invalid = false;
4879 break;
4880 default:
4881 invalid = true;
4882 break;
4883 }
c94c3532 4884
      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
          && (const_bitsize % BITS_PER_UNIT) == 0
          && const_bitsize <= 64
          && multiple_p (bitpos, BITS_PER_UNIT))
        {
          ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
          if (ins_stmt)
            {
              uint64_t nn = n.n;
              for (unsigned HOST_WIDE_INT i = 0;
                   i < const_bitsize;
                   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
                if ((nn & MARKER_MASK) == 0
                    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
                  {
                    ins_stmt = NULL;
                    break;
                  }
              if (ins_stmt)
                {
                  if (invalid)
                    {
                      rhs_code = LROTATE_EXPR;
                      ops[0].base_addr = NULL_TREE;
                      ops[1].base_addr = NULL_TREE;
                    }
                  invalid = false;
                }
            }
        }

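      /* A store that is not byte-sized or not byte-aligned (i.e. a
         bit-field store) can still be recorded, as a BIT_INSERT_EXPR,
         as long as it fits in a fixed-size mode.  */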
      if (invalid
          && bitsize.is_constant (&const_bitsize)
          && ((const_bitsize % BITS_PER_UNIT) != 0
              || !multiple_p (bitpos, BITS_PER_UNIT))
          && const_bitsize <= MAX_FIXED_MODE_SIZE)
        {
          /* Bypass a conversion to the bit-field type.  */
          if (!bit_not_p
              && is_gimple_assign (def_stmt)
              && CONVERT_EXPR_CODE_P (rhs_code))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              if (TREE_CODE (rhs1) == SSA_NAME
                  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
                rhs = rhs1;
            }
          rhs_code = BIT_INSERT_EXPR;
          bit_not_p = false;
          ops[0].val = rhs;
          ops[0].base_addr = NULL_TREE;
          ops[1].base_addr = NULL_TREE;
          invalid = false;
        }
    }
  else
    invalid = true;

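  /* The size, position and bit region must be compile-time constants;
     otherwise the store cannot be recorded and we only terminate any
     chains it might alias.  */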
  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    return terminate_all_aliasing_chains (NULL, stmt);

  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  class imm_store_chain_info **chain_info = NULL;
  bool ret = false;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
                                       const_bitregion_start,
                                       const_bitregion_end,
                                       stmt, ord, rhs_code, n, ins_stmt,
                                       bit_not_p, lp_nr_for_store (stmt),
                                       ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Recording immediate store from stmt:\n");
          print_gimple_stmt (dump_file, stmt, 0);
        }
      (*chain_info)->m_store_info.safe_push (info);
      ret |= terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain, terminate and
         process the chain now.  */
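      /* The limit comes from the max-stores-to-merge --param.  */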
      if ((*chain_info)->m_store_info.length ()
          == (unsigned int) param_max_stores_to_merge)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Reached maximum number of statements to merge:\n");
          ret |= terminate_and_process_chain (*chain_info);
        }
      return ret;
    }

  /* Does the store alias any existing chain?  */
  ret |= terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  class imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
                                   const_bitregion_start,
                                   const_bitregion_end,
                                   stmt, 0, rhs_code, n, ins_stmt,
                                   bit_not_p, lp_nr_for_store (stmt),
                                   ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
  return ret;
}

/* Return true if STMT is a store valid for store merging.  */

static bool
store_valid_for_store_merging_p (gimple *stmt)
{
  return gimple_assign_single_p (stmt)
         && gimple_vdef (stmt)
         && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
         && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
}

enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
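/* BB_EXTENDED_VALID additionally means the block ends in a mergeable
   store and falls through to the next block, so open chains may be
   carried into that successor instead of being flushed here.  */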

/* Return the status of basic block BB wrt store merging.  */

static enum basic_block_status
get_status_for_store_merging (basic_block bb)
{
  unsigned int num_statements = 0;
  gimple_stmt_iterator gsi;
  edge e;

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
        continue;

      if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
        break;
    }

  if (num_statements == 0)
    return BB_INVALID;

  if (cfun->can_throw_non_call_exceptions && cfun->eh
      && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
      && (e = find_fallthru_edge (bb->succs))
      && e->dest == bb->next_bb)
    return BB_EXTENDED_VALID;

  return num_statements >= 2 ? BB_VALID : BB_INVALID;
}

/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;
  bool changed = false, open_chains = false;

  /* If the function can throw and catch non-call exceptions, we'll be trying
     to merge stores across different basic blocks so we need to first unsplit
     the EH edges in order to streamline the CFG of the function.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh)
    unsplit_eh_edges ();

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      const basic_block_status bb_status = get_status_for_store_merging (bb);
      gimple_stmt_iterator gsi;

      if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
        {
          changed |= terminate_and_process_all_chains ();
          open_chains = false;
        }

      if (bb_status == BB_INVALID)
        continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          if (is_gimple_debug (stmt))
            continue;

          if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
            {
              /* Terminate all chains.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Volatile access terminates "
                                    "all chains\n");
              changed |= terminate_and_process_all_chains ();
              open_chains = false;
              continue;
            }

          if (store_valid_for_store_merging_p (stmt))
            changed |= process_store (stmt);
          else
            changed |= terminate_all_aliasing_chains (NULL, stmt);
        }

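      /* Only BB_EXTENDED_VALID blocks may carry their open chains into
         the fall-through successor; for every other block flush the
         chains now.  */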
      if (bb_status == BB_EXTENDED_VALID)
        open_chains = true;
      else
        {
          changed |= terminate_and_process_all_chains ();
          open_chains = false;
        }
    }

  if (open_chains)
    changed |= terminate_and_process_all_chains ();

  /* If the function can throw and catch non-call exceptions and something
     changed during the pass, then the CFG has (very likely) changed too.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      return TODO_cleanup_cfg;
    }

  return 0;
}

} // anon namespace

/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N,
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
        {
          fprintf (stderr, "Arrays do not match. X:\n");
          dump_char_array (stderr, x, n);
          fprintf (stderr, "Y:\n");
          dump_char_array (stderr, y, n);
        }
      ASSERT_EQ (x[i], y[i]);
    }
}

/* Test that shift_bytes_in_array_left carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_left (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

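  /* in[0] is the least significant byte here, so the array holds the
     16-bit value 0x1fe0; shifting left by 2 bits yields 0x7f80,
     i.e. bytes { 0x80, 0x7f }.  */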
  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array_left (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}

/* Test that shift_bytes_in_array_right carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
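  /* Here in[0] is the most significant byte, so the array holds the
     16-bit value 0x1fe0; shifting right by 2 bits yields 0x07f8,
     i.e. bytes { 0x07, 0xf8 }.  */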
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}

/* Test that clear_bit_region clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}

/* Test that clear_bit_region_be clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}

/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array_left ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */