/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap, this can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap, this can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in the store_operand_info struct.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
   objects are not adjacent, a new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to them.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //  val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "except.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "dbgcnt.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)

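/* For a 4-byte symbolic number, the masked CMPNOP value is 0x04030201:
   byte 0 of the result comes from source byte 1 (the lsb), byte 1 from
   source byte 2, and so on, i.e. the identity permutation.  The
   corresponding masked CMPXCHG value is 0x01020304, the reversed
   permutation produced by a 32-bit byte swap.  */
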
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
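
/* For example, left-shifting a 4-byte symbolic number holding the identity
   markers 0x04030201 by 8 bits yields 0x03020100 after masking to the type
   size: the least significant result byte is now known to be zero and the
   remaining bytes come from source bytes 1, 2 and 3.  */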

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (src)) && !POINTER_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to the byte size of
     rhs1 and the lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
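
/* For instance, after init_symbolic_number for a 32-bit unsigned source
   the markers are 0x04030201 and the range is 4, describing an expression
   that so far just reads the source value unchanged.  */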

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}
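
/* For example, when merging the symbolic numbers of the two loads in
   a[0] | (a[1] << 8) on a little-endian target, N1 holds markers 0x01
   (byte 0 of the array) and N2 holds markers 0x0100 (byte 1, shifted up
   by one byte); the resulting N holds markers 0x0201 with a range of 2,
   i.e. a 2-byte load in target byte order.  */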

/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand, and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
	  || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
	return NULL;

      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}
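
/* For instance, for a 2-byte symbolic number (n->range == 2) the values
   computed above are *cmpnop == 0x0201 and *cmpxchg == 0x0102: matching
   the former means the expression reads the two bytes in target order,
   matching the latter means it swaps them.  */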

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
  if (!tree_fits_uhwi_p (type_size))
    return NULL;

  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by 2 * (log2(n) + 1) here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = tree_to_uhwi (type_size);
  limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    {
      if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
	  || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return NULL;
      unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
      if (sz != 16 && sz != 32 && sz != 64)
	return NULL;
      tree rhs = gimple_assign_rhs1 (stmt);
      if (CONSTRUCTOR_NELTS (rhs) == 0)
	return NULL;
      tree eltype = TREE_TYPE (TREE_TYPE (rhs));
      unsigned HOST_WIDE_INT eltsz
	= int_size_in_bytes (eltype) * BITS_PER_UNIT;
      if (TYPE_PRECISION (eltype) != eltsz)
	return NULL;
      constructor_elt *elt;
      unsigned int i;
      tree type = build_nonstandard_integer_type (sz, 1);
      FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
	{
	  if (TREE_CODE (elt->value) != SSA_NAME
	      || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
	    return NULL;
	  struct symbolic_number n1;
	  gimple *source_stmt
	    = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
				   limit - 1);

	  if (!source_stmt)
	    return NULL;

	  n1.type = type;
	  if (!n1.base_addr)
	    n1.range = sz / BITS_PER_UNIT;

	  if (i == 0)
	    {
	      ins_stmt = source_stmt;
	      *n = n1;
	    }
	  else
	    {
	      if (n->vuse != n1.vuse)
		return NULL;

	      struct symbolic_number n0 = *n;

	      if (!BYTES_BIG_ENDIAN)
		{
		  if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
		    return NULL;
		}
	      else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
		return NULL;
	      ins_stmt
		= perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n);

	      if (!ins_stmt)
		return NULL;
	    }
	}
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}
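
/* A typical pattern recognized here is the open-coded byte swap of an
   unsigned 32-bit value Y:

     ((Y & 0xff) << 24) | ((Y & 0xff00) << 8)
     | ((Y >> 8) & 0xff00) | (Y >> 24)

   whose symbolic number matches CMPXCHG, as well as the equivalent sequence
   of single-byte loads from memory, whose symbolic number matches CMPNOP or
   CMPXCHG depending on the target endianness.  */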

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap

/* Helper function for bswap_replace.  Build VIEW_CONVERT_EXPR from
   VAL to TYPE.  If VAL has a different type size, emit a NOP_EXPR cast
   first.  */

static tree
bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val)
{
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val))
	      || POINTER_TYPE_P (TREE_TYPE (val)));
  if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
    {
      HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
      if (POINTER_TYPE_P (TREE_TYPE (val)))
	{
	  gimple *g
	    = gimple_build_assign (make_ssa_name (pointer_sized_int_node),
				   NOP_EXPR, val);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  val = gimple_assign_lhs (g);
	}
      tree itype = build_nonstandard_integer_type (prec, 1);
      gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      val = gimple_assign_lhs (g);
    }
  return build1 (VIEW_CONVERT_EXPR, type, val);
}

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;
  tree_code conv_code = NOP_EXPR;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    {
      tgt = gimple_assign_lhs (cur_stmt);
      if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
	  && tgt
	  && VECTOR_TYPE_P (TREE_TYPE (tgt)))
	conv_code = VIEW_CONVERT_EXPR;
    }

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the load of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      if (conv_code == VIEW_CONVERT_EXPR)
		val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp);
	      gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  if (conv_code == VIEW_CONVERT_EXPR)
	    src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src);
	  g = gimple_build_assign (tgt, conv_code, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, TDF_NONE);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are handled this way, as a rotation of 2N-bit values by N bits
     is generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      tree atmp = tmp;
      if (conv_code == VIEW_CONVERT_EXPR)
	atmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), tmp);
      convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, TDF_NONE);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
   using bswap optimizations.  CDI_DOMINATORS need to be
   computed on entry.  Return true if it has been optimized and
   TODO_update_ssa is needed.  */

static bool
maybe_optimize_vector_constructor (gimple *cur_stmt)
{
  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
  struct symbolic_number n;
  bool bswap;

  gcc_assert (is_gimple_assign (cur_stmt)
	      && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);

  tree rhs = gimple_assign_rhs1 (cur_stmt);
  if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
      || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
      || gimple_assign_lhs (cur_stmt) == NULL_TREE)
    return false;

  HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
  switch (sz)
    {
    case 16:
      load_type = bswap_type = uint16_type_node;
      break;
    case 32:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	{
	  load_type = uint32_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    case 64:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	  && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
	      || (word_mode == SImode
		  && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
		  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
	{
	  load_type = uint64_type_node;
	  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	  bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	}
      else
	return false;
      break;
    default:
      return false;
    }

  gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
  if (!ins_stmt || n.range != (unsigned HOST_WIDE_INT) sz)
    return false;

  if (bswap && !fndecl && n.range != 16)
    return false;

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			bswap_type, load_type, &n, bswap) != NULL_TREE;
}

/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to bswap builtin invocation or
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     here makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    case CONSTRUCTOR:
	      {
		tree rhs = gimple_assign_rhs1 (cur_stmt);
		if (VECTOR_TYPE_P (TREE_TYPE (rhs))
		    && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
		  break;
	      }
	      continue;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

namespace {

245f6de1 1513/* Struct recording one operand for the store, which is either a constant,
c94c3532
EB
1514 then VAL represents the constant and all the other fields are zero, or
1515 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1516 and the other fields also reflect the memory load, or an SSA name, then
1517 VAL represents the SSA name and all the other fields are zero, */
245f6de1 1518
6c1dae73 1519class store_operand_info
245f6de1 1520{
6c1dae73 1521public:
245f6de1
JJ
1522 tree val;
1523 tree base_addr;
8a91d545
RS
1524 poly_uint64 bitsize;
1525 poly_uint64 bitpos;
1526 poly_uint64 bitregion_start;
1527 poly_uint64 bitregion_end;
245f6de1 1528 gimple *stmt;
383ac8dc 1529 bool bit_not_p;
245f6de1
JJ
1530 store_operand_info ();
1531};
1532
1533store_operand_info::store_operand_info ()
1534 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
383ac8dc 1535 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
245f6de1
JJ
1536{
1537}
1538
f663d9ad
KT
1539/* Struct recording the information about a single store of an immediate
1540 to memory. These are created in the first phase and coalesced into
1541 merged_store_group objects in the second phase. */
1542
6c1dae73 1543class store_immediate_info
f663d9ad 1544{
6c1dae73 1545public:
f663d9ad
KT
1546 unsigned HOST_WIDE_INT bitsize;
1547 unsigned HOST_WIDE_INT bitpos;
a62b3dc5
JJ
1548 unsigned HOST_WIDE_INT bitregion_start;
1549 /* This is one past the last bit of the bit region. */
1550 unsigned HOST_WIDE_INT bitregion_end;
f663d9ad
KT
1551 gimple *stmt;
1552 unsigned int order;
e362a897
EB
1553 /* INTEGER_CST for constant store, STRING_CST for string store,
1554 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1555 BIT_INSERT_EXPR for bit insertion.
4b84d9b8
JJ
1556 LROTATE_EXPR if it can be only bswap optimized and
1557 ops are not really meaningful.
1558 NOP_EXPR if bswap optimization detected identity, ops
1559 are not meaningful. */
245f6de1 1560 enum tree_code rhs_code;
4b84d9b8
JJ
1561 /* Two fields for bswap optimization purposes. */
1562 struct symbolic_number n;
1563 gimple *ins_stmt;
127ef369 1564 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
d60edaba 1565 bool bit_not_p;
127ef369
JJ
1566 /* True if ops have been swapped and thus ops[1] represents
1567 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1568 bool ops_swapped_p;
629387a6
EB
1569 /* The index number of the landing pad, or 0 if there is none. */
1570 int lp_nr;
245f6de1
JJ
1571 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1572 just the first one. */
1573 store_operand_info ops[2];
b5926e23 1574 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
a62b3dc5 1575 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
4b84d9b8 1576 gimple *, unsigned int, enum tree_code,
629387a6 1577 struct symbolic_number &, gimple *, bool, int,
245f6de1
JJ
1578 const store_operand_info &,
1579 const store_operand_info &);
f663d9ad
KT
1580};
1581
1582store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
b5926e23 1583 unsigned HOST_WIDE_INT bp,
a62b3dc5
JJ
1584 unsigned HOST_WIDE_INT brs,
1585 unsigned HOST_WIDE_INT bre,
b5926e23 1586 gimple *st,
245f6de1
JJ
1587 unsigned int ord,
1588 enum tree_code rhscode,
4b84d9b8
JJ
1589 struct symbolic_number &nr,
1590 gimple *ins_stmtp,
d60edaba 1591 bool bitnotp,
629387a6 1592 int nr2,
245f6de1
JJ
1593 const store_operand_info &op0r,
1594 const store_operand_info &op1r)
a62b3dc5 1595 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
4b84d9b8 1596 stmt (st), order (ord), rhs_code (rhscode), n (nr),
629387a6 1597 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
4bc6fb21 1598 lp_nr (nr2), ops { op0r, op1r }
245f6de1
JJ
1599{
1600}
f663d9ad
KT
1601
1602/* Struct representing a group of stores to contiguous memory locations.
1603 These are produced by the second phase (coalescing) and consumed in the
1604 third phase that outputs the widened stores. */
1605
6c1dae73 1606class merged_store_group
f663d9ad 1607{
6c1dae73 1608public:
f663d9ad
KT
1609 unsigned HOST_WIDE_INT start;
1610 unsigned HOST_WIDE_INT width;
a62b3dc5
JJ
1611 unsigned HOST_WIDE_INT bitregion_start;
1612 unsigned HOST_WIDE_INT bitregion_end;
1613 /* The size of the allocated memory for val and mask. */
f663d9ad 1614 unsigned HOST_WIDE_INT buf_size;
a62b3dc5 1615 unsigned HOST_WIDE_INT align_base;
8a91d545 1616 poly_uint64 load_align_base[2];
f663d9ad
KT
1617
1618 unsigned int align;
245f6de1 1619 unsigned int load_align[2];
f663d9ad
KT
1620 unsigned int first_order;
1621 unsigned int last_order;
7f5a3982 1622 bool bit_insertion;
e362a897 1623 bool string_concatenation;
18e0c3d1 1624 bool only_constants;
1b3c9813 1625 bool consecutive;
18e0c3d1 1626 unsigned int first_nonmergeable_order;
629387a6 1627 int lp_nr;
f663d9ad 1628
a62b3dc5 1629 auto_vec<store_immediate_info *> stores;
f663d9ad
KT
1630 /* We record the first and last original statements in the sequence because
1631 we'll need their vuse/vdef and replacement position. It's easier to keep
1632 track of them separately as 'stores' is reordered by apply_stores. */
1633 gimple *last_stmt;
1634 gimple *first_stmt;
1635 unsigned char *val;
a62b3dc5 1636 unsigned char *mask;
f663d9ad
KT
1637
1638 merged_store_group (store_immediate_info *);
1639 ~merged_store_group ();
7f5a3982 1640 bool can_be_merged_into (store_immediate_info *);
f663d9ad
KT
1641 void merge_into (store_immediate_info *);
1642 void merge_overlapping (store_immediate_info *);
1643 bool apply_stores ();
a62b3dc5
JJ
1644private:
1645 void do_merge (store_immediate_info *);
f663d9ad
KT
1646};
1647
1648/* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1649
1650static void
1651dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1652{
1653 if (!fd)
1654 return;
1655
1656 for (unsigned int i = 0; i < len; i++)
c94c3532 1657 fprintf (fd, "%02x ", ptr[i]);
f663d9ad
KT
1658 fprintf (fd, "\n");
1659}
1660
f663d9ad
KT
1661/* Clear out LEN bits starting from bit START in the byte array
1662 PTR. This clears the bits to the *right* from START.
1663 START must be within [0, BITS_PER_UNIT) and counts starting from
1664 the least significant bit. */
1665
1666static void
1667clear_bit_region_be (unsigned char *ptr, unsigned int start,
1668 unsigned int len)
1669{
1670 if (len == 0)
1671 return;
1672 /* Clear len bits to the right of start. */
1673 else if (len <= start + 1)
1674 {
1675 unsigned char mask = (~(~0U << len));
1676 mask = mask << (start + 1U - len);
1677 ptr[0] &= ~mask;
1678 }
1679 else if (start != BITS_PER_UNIT - 1)
1680 {
1681 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1682 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1683 len - (start % BITS_PER_UNIT) - 1);
1684 }
1685 else if (start == BITS_PER_UNIT - 1
1686 && len > BITS_PER_UNIT)
1687 {
1688 unsigned int nbytes = len / BITS_PER_UNIT;
a62b3dc5 1689 memset (ptr, 0, nbytes);
f663d9ad
KT
1690 if (len % BITS_PER_UNIT != 0)
1691 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1692 len % BITS_PER_UNIT);
1693 }
1694 else
1695 gcc_unreachable ();
1696}
1697
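To make the big-endian convention of clear_bit_region_be concrete, here is a minimal standalone sketch (not part of this file; it assumes 8-bit bytes and keeps only the recursive cases) that clears a 9-bit region starting at bit 5 of the first byte:

#include <stdio.h>

/* Clear LEN bits to the right of (and including) bit START, where START
   counts from the least significant bit, spilling into following bytes.  */
static void
clear_be (unsigned char *p, unsigned start, unsigned len)
{
  if (len == 0)
    return;
  if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len)) << (start + 1U - len);
      p[0] &= ~mask;
    }
  else
    {
      clear_be (p, start, start + 1);		/* Finish this byte.  */
      clear_be (p + 1, 7, len - (start + 1));	/* Continue in the next.  */
    }
}

int
main (void)
{
  unsigned char buf[2] = { 0xff, 0xff };
  clear_be (buf, 5, 9);	  /* Bits 5..0 of byte 0 and bits 7..5 of byte 1.  */
  printf ("%02x %02x\n", buf[0], buf[1]);   /* Prints "c0 1f".  */
  return 0;
}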
1698/* In the byte array PTR clear the bit region starting at bit
1699 START that is LEN bits wide.
1700 For regions spanning multiple bytes do this recursively until we reach
1701 zero LEN or a region contained within a single byte. */
1702
1703static void
1704clear_bit_region (unsigned char *ptr, unsigned int start,
1705 unsigned int len)
1706{
1707 /* Degenerate base case. */
1708 if (len == 0)
1709 return;
1710 else if (start >= BITS_PER_UNIT)
1711 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1712 /* Second base case. */
1713 else if ((start + len) <= BITS_PER_UNIT)
1714 {
46a61395 1715 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
f663d9ad
KT
1716 mask >>= BITS_PER_UNIT - (start + len);
1717
1718 ptr[0] &= ~mask;
1719
1720 return;
1721 }
1722 /* Clear most significant bits in a byte and proceed with the next byte. */
1723 else if (start != 0)
1724 {
1725 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1f069ef5 1726 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
f663d9ad
KT
1727 }
1728 /* Whole bytes need to be cleared. */
1729 else if (start == 0 && len > BITS_PER_UNIT)
1730 {
1731 unsigned int nbytes = len / BITS_PER_UNIT;
a848c710
KT
1732 /* We could recurse on each byte but we clear whole bytes, so a simple
1733 memset will do. */
46a61395 1734 memset (ptr, '\0', nbytes);
f663d9ad
KT
1735 /* Clear the remaining sub-byte region if there is one. */
1736 if (len % BITS_PER_UNIT != 0)
1737 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1738 }
1739 else
1740 gcc_unreachable ();
1741}
1742
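A matching sketch for the little-endian helper above (again purely illustrative, 8-bit bytes assumed): here START counts from the LSB of the first byte and the region grows towards higher addresses.

#include <stdio.h>
#include <string.h>

/* Clear LEN bits starting at bit START (counted from the LSB of the first
   byte), continuing into following bytes as needed.  */
static void
clear_le (unsigned char *p, unsigned start, unsigned len)
{
  if (len == 0)
    return;
  if (start >= 8)
    clear_le (p + 1, start - 8, len);
  else if (start + len <= 8)
    {
      /* Single-byte case: build a mask of LEN set bits and slide it so
         that its top bit lands on bit START + LEN - 1.  */
      unsigned char mask = (~0U) << (8 - len);
      mask >>= 8 - (start + len);
      p[0] &= ~mask;
    }
  else
    {
      clear_le (p, start, 8 - start);		/* Finish this byte.  */
      clear_le (p + 1, 0, len - (8 - start));	/* Continue in the next.  */
    }
}

int
main (void)
{
  unsigned char buf[3];
  memset (buf, 0xff, sizeof buf);
  clear_le (buf, 5, 12);   /* Bits 5..7 of byte 0, all of byte 1, bit 0 of byte 2.  */
  printf ("%02x %02x %02x\n", buf[0], buf[1], buf[2]);   /* Prints "1f 00 fe".  */
  return 0;
}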
1743/* Write BITLEN bits of EXPR to the byte array PTR at
1744 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1745 Return true if the operation succeeded. */
1746
1747static bool
1748encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
46a61395 1749 unsigned int total_bytes)
f663d9ad
KT
1750{
1751 unsigned int first_byte = bitpos / BITS_PER_UNIT;
ad1de652
JJ
1752 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1753 || (bitpos % BITS_PER_UNIT)
f4b31647 1754 || !int_mode_for_size (bitlen, 0).exists ());
3afd514b
JJ
1755 bool empty_ctor_p
1756 = (TREE_CODE (expr) == CONSTRUCTOR
1757 && CONSTRUCTOR_NELTS (expr) == 0
1758 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1759 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
f663d9ad
KT
1760
1761 if (!sub_byte_op_p)
3afd514b
JJ
1762 {
1763 if (first_byte >= total_bytes)
1764 return false;
1765 total_bytes -= first_byte;
1766 if (empty_ctor_p)
1767 {
1768 unsigned HOST_WIDE_INT rhs_bytes
1769 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1770 if (rhs_bytes > total_bytes)
1771 return false;
1772 memset (ptr + first_byte, '\0', rhs_bytes);
1773 return true;
1774 }
1775 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1776 }
f663d9ad
KT
1777
1778 /* LITTLE-ENDIAN
1779 We are writing a non byte-sized quantity or at a position that is not
1780 at a byte boundary.
1781 |--------|--------|--------| ptr + first_byte
1782 ^ ^
1783 xxx xxxxxxxx xxx< bp>
1784 |______EXPR____|
1785
46a61395 1786 First native_encode_expr EXPR into a temporary buffer and shift each
f663d9ad
KT
1787 byte in the buffer by 'bp' (carrying the bits over as necessary).
1788 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1789 <------bitlen---->< bp>
1790 Then we clear the destination bits:
1791 |---00000|00000000|000-----| ptr + first_byte
1792 <-------bitlen--->< bp>
1793
1794 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1795 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1796
1797 BIG-ENDIAN
1798 We are writing a non byte-sized quantity or at a position that is not
1799 at a byte boundary.
1800 ptr + first_byte |--------|--------|--------|
1801 ^ ^
1802 <bp >xxx xxxxxxxx xxx
1803 |_____EXPR_____|
1804
46a61395 1805 First native_encode_expr EXPR into a temporary buffer and shift each
f663d9ad
KT
1806 byte in the buffer to the right (carrying the bits over as necessary).
1807 We shift by as much as needed to align the most significant bit of EXPR
1808 with bitpos:
1809 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1810 <---bitlen----> <bp ><-----bitlen----->
1811 Then we clear the destination bits:
1812 ptr + first_byte |-----000||00000000||00000---|
1813 <bp ><-------bitlen----->
1814
1815 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1816 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1817 The awkwardness comes from the fact that bitpos is counted from the
1818 most significant bit of a byte. */
1819
ef1d3b57
RS
1820 /* We must be dealing with fixed-size data at this point, since the
1821 total size is also fixed. */
3afd514b
JJ
1822 unsigned int byte_size;
1823 if (empty_ctor_p)
1824 {
1825 unsigned HOST_WIDE_INT rhs_bytes
1826 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1827 if (rhs_bytes > total_bytes)
1828 return false;
1829 byte_size = rhs_bytes;
1830 }
1831 else
1832 {
1833 fixed_size_mode mode
1834 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
e362a897
EB
1835 byte_size
1836 = mode == BLKmode
1837 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
1838 : GET_MODE_SIZE (mode);
3afd514b 1839 }
f663d9ad 1840 /* Allocate an extra byte so that we have space to shift into. */
3afd514b 1841 byte_size++;
f663d9ad 1842 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
46a61395 1843 memset (tmpbuf, '\0', byte_size);
f663d9ad 1844 /* The store detection code should only have allowed constants that are
3afd514b
JJ
1845 accepted by native_encode_expr or empty ctors. */
1846 if (!empty_ctor_p
1847 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
f663d9ad
KT
1848 gcc_unreachable ();
1849
1850 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1851 bytes to write. This means it can write more than
1852 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1853 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1854 bitlen and zero out the bits that are not relevant as well (that may
1855 contain a sign bit due to sign-extension). */
1856 unsigned int padding
1857 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
ad1de652
JJ
1858 /* On big-endian the padding is at the 'front' so just skip the initial
1859 bytes. */
1860 if (BYTES_BIG_ENDIAN)
1861 tmpbuf += padding;
1862
1863 byte_size -= padding;
1864
1865 if (bitlen % BITS_PER_UNIT != 0)
f663d9ad 1866 {
4b2c06f4 1867 if (BYTES_BIG_ENDIAN)
ad1de652
JJ
1868 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1869 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1870 else
1871 clear_bit_region (tmpbuf, bitlen,
1872 byte_size * BITS_PER_UNIT - bitlen);
f663d9ad 1873 }
ad1de652
JJ
1874 /* Left shifting relies on the last byte being clear if bitlen is
1875 a multiple of BITS_PER_UNIT, which might not be clear if
1876 there are padding bytes. */
1877 else if (!BYTES_BIG_ENDIAN)
1878 tmpbuf[byte_size - 1] = '\0';
f663d9ad
KT
1879
1880 /* Clear the bit region in PTR where the bits from TMPBUF will be
46a61395 1881 inserted into. */
f663d9ad
KT
1882 if (BYTES_BIG_ENDIAN)
1883 clear_bit_region_be (ptr + first_byte,
1884 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1885 else
1886 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1887
1888 int shift_amnt;
1889 int bitlen_mod = bitlen % BITS_PER_UNIT;
1890 int bitpos_mod = bitpos % BITS_PER_UNIT;
1891
1892 bool skip_byte = false;
1893 if (BYTES_BIG_ENDIAN)
1894 {
1895 /* BITPOS and BITLEN are exactly aligned and no shifting
1896 is necessary. */
1897 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1898 || (bitpos_mod == 0 && bitlen_mod == 0))
1899 shift_amnt = 0;
1900 /* |. . . . . . . .|
1901 <bp > <blen >.
1902 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1903 of the value until it aligns with 'bp' in the next byte over. */
1904 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1905 {
1906 shift_amnt = bitlen_mod + bitpos_mod;
1907 skip_byte = bitlen_mod != 0;
1908 }
1909 /* |. . . . . . . .|
1910 <----bp--->
1911 <---blen---->.
1912 Shift the value right within the same byte so it aligns with 'bp'. */
1913 else
1914 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1915 }
1916 else
1917 shift_amnt = bitpos % BITS_PER_UNIT;
1918
1919 /* Create the shifted version of EXPR. */
1920 if (!BYTES_BIG_ENDIAN)
46a61395 1921 {
8aba425f 1922 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
46a61395
JJ
1923 if (shift_amnt == 0)
1924 byte_size--;
1925 }
f663d9ad
KT
1926 else
1927 {
1928 gcc_assert (BYTES_BIG_ENDIAN);
1929 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1930 /* If shifting right forced us to move into the next byte skip the now
1931 empty byte. */
1932 if (skip_byte)
1933 {
1934 tmpbuf++;
1935 byte_size--;
1936 }
1937 }
1938
1939 /* Insert the bits from TMPBUF. */
1940 for (unsigned int i = 0; i < byte_size; i++)
1941 ptr[first_byte + i] |= tmpbuf[i];
1942
1943 return true;
1944}
1945
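The shift / clear / OR sequence described in the comments of encode_tree_to_bitpos can be reproduced on plain bytes. A hedged standalone sketch, little-endian convention, writing an 11-bit value at bit position 5 (the constants and buffer sizes are made up for illustration):

#include <stdio.h>

int
main (void)
{
  unsigned char dst[3] = { 0xff, 0xff, 0xff };
  unsigned int val = 0x5a5;		/* 11 significant bits.  */
  unsigned int bitpos = 5, bitlen = 11;

  /* Step 1: encode VAL into a temporary buffer shifted left by
     bitpos % 8, carrying bits into the next byte (the job of
     shift_bytes_in_array_left).  */
  unsigned char tmp[2];
  unsigned int shifted = val << (bitpos % 8);	/* 16 bits, fits in an int.  */
  tmp[0] = shifted & 0xff;
  tmp[1] = (shifted >> 8) & 0xff;

  /* Step 2: clear the destination bit region (the job of clear_bit_region).
     Here the region is bits 5..15, i.e. the top 3 bits of byte 0 and all
     of byte 1.  */
  dst[0] &= ~(0xffu << (bitpos % 8));
  dst[1] = 0;

  /* Step 3: OR the shifted bytes into the cleared region.  */
  for (int i = 0; i < 2; i++)
    dst[i] |= tmp[i];

  printf ("%02x %02x %02x\n", dst[0], dst[1], dst[2]);   /* Prints "bf b4 ff".  */
  return 0;
}

The unused variable bitlen only documents the size of the region; bytes outside bits 5..15 keep their original contents.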
1946/* Sorting function for store_immediate_info objects.
1947 Sorts them by bitposition. */
1948
1949static int
1950sort_by_bitpos (const void *x, const void *y)
1951{
1952 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1953 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1954
109cca3b 1955 if ((*tmp)->bitpos < (*tmp2)->bitpos)
f663d9ad
KT
1956 return -1;
1957 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1958 return 1;
109cca3b 1959 else
0f0027d1
KT
1960 /* If they are the same, use the order, which is guaranteed to
1961 be different. */
1962 return (*tmp)->order - (*tmp2)->order;
f663d9ad
KT
1963}
1964
1965/* Sorting function for store_immediate_info objects.
1966 Sorts them by the order field. */
1967
1968static int
1969sort_by_order (const void *x, const void *y)
1970{
1971 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1972 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1973
1974 if ((*tmp)->order < (*tmp2)->order)
1975 return -1;
1976 else if ((*tmp)->order > (*tmp2)->order)
1977 return 1;
1978
1979 gcc_unreachable ();
1980}
1981
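The two comparators above are used with vec::qsort on pointers to store_immediate_info. A standalone sketch of the same pointer-to-pointer comparator idiom with plain qsort (the rec struct and its fields are illustrative, not the real store_immediate_info layout):

#include <stdio.h>
#include <stdlib.h>

struct rec { unsigned bitpos; unsigned order; };

/* Same shape as sort_by_bitpos: the elements being sorted are pointers.  */
static int
by_bitpos (const void *x, const void *y)
{
  const struct rec *a = *(const struct rec *const *) x;
  const struct rec *b = *(const struct rec *const *) y;
  if (a->bitpos != b->bitpos)
    return a->bitpos < b->bitpos ? -1 : 1;
  /* Fall back to ORDER, which is unique, so ties cannot remain.  */
  return (int) a->order - (int) b->order;
}

int
main (void)
{
  struct rec r[3] = { { 16, 0 }, { 0, 1 }, { 8, 2 } };
  struct rec *p[3] = { &r[0], &r[1], &r[2] };
  qsort (p, 3, sizeof *p, by_bitpos);
  for (int i = 0; i < 3; i++)
    printf ("bitpos %u order %u\n", p[i]->bitpos, p[i]->order);   /* 0/1, 8/2, 16/0.  */
  return 0;
}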
1982/* Initialize a merged_store_group object from a store_immediate_info
1983 object. */
1984
1985merged_store_group::merged_store_group (store_immediate_info *info)
1986{
1987 start = info->bitpos;
1988 width = info->bitsize;
a62b3dc5
JJ
1989 bitregion_start = info->bitregion_start;
1990 bitregion_end = info->bitregion_end;
f663d9ad
KT
1991 /* VAL has memory allocated for it in apply_stores once the group
1992 width has been finalized. */
1993 val = NULL;
a62b3dc5 1994 mask = NULL;
e362a897
EB
1995 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1996 string_concatenation = info->rhs_code == STRING_CST;
18e0c3d1 1997 only_constants = info->rhs_code == INTEGER_CST;
1b3c9813 1998 consecutive = true;
18e0c3d1 1999 first_nonmergeable_order = ~0U;
629387a6 2000 lp_nr = info->lp_nr;
a62b3dc5
JJ
2001 unsigned HOST_WIDE_INT align_bitpos = 0;
2002 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2003 &align, &align_bitpos);
2004 align_base = start - align_bitpos;
245f6de1
JJ
2005 for (int i = 0; i < 2; ++i)
2006 {
2007 store_operand_info &op = info->ops[i];
2008 if (op.base_addr == NULL_TREE)
2009 {
2010 load_align[i] = 0;
2011 load_align_base[i] = 0;
2012 }
2013 else
2014 {
2015 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
2016 load_align_base[i] = op.bitpos - align_bitpos;
2017 }
2018 }
f663d9ad
KT
2019 stores.create (1);
2020 stores.safe_push (info);
2021 last_stmt = info->stmt;
2022 last_order = info->order;
2023 first_stmt = last_stmt;
2024 first_order = last_order;
2025 buf_size = 0;
2026}
2027
2028merged_store_group::~merged_store_group ()
2029{
2030 if (val)
2031 XDELETEVEC (val);
2032}
2033
7f5a3982
EB
2034/* Return true if the store described by INFO can be merged into the group. */
2035
2036bool
2037merged_store_group::can_be_merged_into (store_immediate_info *info)
2038{
2039 /* Do not merge bswap patterns. */
2040 if (info->rhs_code == LROTATE_EXPR)
2041 return false;
2042
629387a6
EB
2043 if (info->lp_nr != lp_nr)
2044 return false;
2045
7f5a3982
EB
2046 /* The canonical case. */
2047 if (info->rhs_code == stores[0]->rhs_code)
2048 return true;
2049
e362a897 2050 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
7f5a3982 2051 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
e362a897 2052 return !string_concatenation;
7f5a3982
EB
2053
2054 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
e362a897 2055 return !string_concatenation;
7f5a3982 2056
ed01d707
EB
2057 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
2058 only for small regions since this can generate a lot of instructions. */
7f5a3982
EB
2059 if (info->rhs_code == MEM_REF
2060 && (stores[0]->rhs_code == INTEGER_CST
2061 || stores[0]->rhs_code == BIT_INSERT_EXPR)
2062 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2063 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2064 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897 2065 return !string_concatenation;
7f5a3982
EB
2066
2067 if (stores[0]->rhs_code == MEM_REF
2068 && (info->rhs_code == INTEGER_CST
2069 || info->rhs_code == BIT_INSERT_EXPR)
2070 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2071 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2072 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897
EB
2073 return !string_concatenation;
2074
2075 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
2076 if (info->rhs_code == STRING_CST
2077 && stores[0]->rhs_code == INTEGER_CST
2078 && stores[0]->bitsize == CHAR_BIT)
2079 return !bit_insertion;
2080
2081 if (stores[0]->rhs_code == STRING_CST
2082 && info->rhs_code == INTEGER_CST
2083 && info->bitsize == CHAR_BIT)
2084 return !bit_insertion;
7f5a3982
EB
2085
2086 return false;
2087}
2088
a62b3dc5
JJ
2089/* Helper method for merge_into and merge_overlapping to do
2090 the common part. */
7f5a3982 2091
f663d9ad 2092void
a62b3dc5 2093merged_store_group::do_merge (store_immediate_info *info)
f663d9ad 2094{
a62b3dc5
JJ
2095 bitregion_start = MIN (bitregion_start, info->bitregion_start);
2096 bitregion_end = MAX (bitregion_end, info->bitregion_end);
2097
2098 unsigned int this_align;
2099 unsigned HOST_WIDE_INT align_bitpos = 0;
2100 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2101 &this_align, &align_bitpos);
2102 if (this_align > align)
2103 {
2104 align = this_align;
2105 align_base = info->bitpos - align_bitpos;
2106 }
245f6de1
JJ
2107 for (int i = 0; i < 2; ++i)
2108 {
2109 store_operand_info &op = info->ops[i];
2110 if (!op.base_addr)
2111 continue;
2112
2113 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
2114 if (this_align > load_align[i])
2115 {
2116 load_align[i] = this_align;
2117 load_align_base[i] = op.bitpos - align_bitpos;
2118 }
2119 }
f663d9ad 2120
f663d9ad
KT
2121 gimple *stmt = info->stmt;
2122 stores.safe_push (info);
2123 if (info->order > last_order)
2124 {
2125 last_order = info->order;
2126 last_stmt = stmt;
2127 }
2128 else if (info->order < first_order)
2129 {
2130 first_order = info->order;
2131 first_stmt = stmt;
2132 }
e362a897 2133
1b3c9813
EB
2134 if (info->bitpos != start + width)
2135 consecutive = false;
2136
e362a897
EB
2137 /* We need to use extraction if there is any bit-field. */
2138 if (info->rhs_code == BIT_INSERT_EXPR)
2139 {
2140 bit_insertion = true;
2141 gcc_assert (!string_concatenation);
2142 }
2143
1b3c9813 2144 /* We want to use concatenation if there is any string. */
e362a897
EB
2145 if (info->rhs_code == STRING_CST)
2146 {
2147 string_concatenation = true;
2148 gcc_assert (!bit_insertion);
2149 }
2150
1b3c9813
EB
2151 /* But we cannot use it if we don't have consecutive stores. */
2152 if (!consecutive)
2153 string_concatenation = false;
2154
18e0c3d1
JJ
2155 if (info->rhs_code != INTEGER_CST)
2156 only_constants = false;
f663d9ad
KT
2157}
2158
a62b3dc5
JJ
2159/* Merge a store recorded by INFO into this merged store.
2160 The store is not overlapping with the existing recorded
2161 stores. */
2162
2163void
2164merged_store_group::merge_into (store_immediate_info *info)
2165{
1b3c9813
EB
2166 do_merge (info);
2167
a62b3dc5
JJ
2168 /* Make sure we're inserting in the position we think we're inserting. */
2169 gcc_assert (info->bitpos >= start + width
2170 && info->bitregion_start <= bitregion_end);
2171
c5679c37 2172 width = info->bitpos + info->bitsize - start;
a62b3dc5
JJ
2173}
2174
f663d9ad
KT
2175/* Merge a store described by INFO into this merged store.
2176 INFO overlaps in some way with the current store (i.e. it's not contiguous
2177 which is handled by merged_store_group::merge_into). */
2178
2179void
2180merged_store_group::merge_overlapping (store_immediate_info *info)
2181{
1b3c9813
EB
2182 do_merge (info);
2183
f663d9ad 2184 /* If the store extends the size of the group, extend the width. */
a62b3dc5 2185 if (info->bitpos + info->bitsize > start + width)
c5679c37 2186 width = info->bitpos + info->bitsize - start;
f663d9ad
KT
2187}
2188
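The width bookkeeping done by merge_into and merge_overlapping boils down to simple interval arithmetic; a small sketch with illustrative values:

#include <stdio.h>

int
main (void)
{
  unsigned start = 0, width = 32;	  /* Group currently covers bits [0, 32).  */

  /* merge_into: a consecutive store at bitpos 32 of size 16.  */
  unsigned bitpos = 32, bitsize = 16;
  width = bitpos + bitsize - start;	  /* Group is now [0, 48).  */

  /* merge_overlapping: a store at bitpos 40 of size 16 sticks out past the
     current end, so the width grows; a fully contained store would not.  */
  bitpos = 40;
  bitsize = 16;
  if (bitpos + bitsize > start + width)
    width = bitpos + bitsize - start;	  /* Group is now [0, 56).  */

  printf ("group width: %u bits\n", width);   /* Prints 56.  */
  return 0;
}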
2189/* Go through all the recorded stores in this group in program order and
2190 apply their values to the VAL byte array to create the final merged
2191 value. Return true if the operation succeeded. */
2192
2193bool
2194merged_store_group::apply_stores ()
2195{
e362a897
EB
2196 store_immediate_info *info;
2197 unsigned int i;
2198
a62b3dc5
JJ
2199 /* Make sure we have more than one store in the group, otherwise we cannot
2200 merge anything. */
2201 if (bitregion_start % BITS_PER_UNIT != 0
2202 || bitregion_end % BITS_PER_UNIT != 0
f663d9ad
KT
2203 || stores.length () == 1)
2204 return false;
2205
e362a897
EB
2206 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2207
2208 /* Really do string concatenation for large strings only. */
2209 if (buf_size <= MOVE_MAX)
2210 string_concatenation = false;
2211
c94c3532 2212 /* Create a power-of-2-sized buffer for native_encode_expr. */
e362a897
EB
2213 if (!string_concatenation)
2214 buf_size = 1 << ceil_log2 (buf_size);
2215
a62b3dc5
JJ
2216 val = XNEWVEC (unsigned char, 2 * buf_size);
2217 mask = val + buf_size;
2218 memset (val, 0, buf_size);
2219 memset (mask, ~0U, buf_size);
f663d9ad 2220
e362a897
EB
2221 stores.qsort (sort_by_order);
2222
f663d9ad
KT
2223 FOR_EACH_VEC_ELT (stores, i, info)
2224 {
a62b3dc5 2225 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
c94c3532 2226 tree cst;
245f6de1
JJ
2227 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2228 cst = info->ops[0].val;
2229 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2230 cst = info->ops[1].val;
c94c3532
EB
2231 else
2232 cst = NULL_TREE;
245f6de1 2233 bool ret = true;
e362a897
EB
2234 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2235 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2236 buf_size);
c94c3532
EB
2237 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2238 if (BYTES_BIG_ENDIAN)
2239 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2240 - (pos_in_buffer % BITS_PER_UNIT)),
2241 info->bitsize);
2242 else
2243 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
245f6de1 2244 if (cst && dump_file && (dump_flags & TDF_DETAILS))
f663d9ad
KT
2245 {
2246 if (ret)
2247 {
c94c3532 2248 fputs ("After writing ", dump_file);
4af78ef8 2249 print_generic_expr (dump_file, cst, TDF_NONE);
f663d9ad 2250 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
c94c3532
EB
2251 " at position %d\n", info->bitsize, pos_in_buffer);
2252 fputs (" the merged value contains ", dump_file);
f663d9ad 2253 dump_char_array (dump_file, val, buf_size);
c94c3532
EB
2254 fputs (" the merged mask contains ", dump_file);
2255 dump_char_array (dump_file, mask, buf_size);
2256 if (bit_insertion)
2257 fputs (" bit insertion is required\n", dump_file);
e362a897
EB
2258 if (string_concatenation)
2259 fputs (" string concatenation is required\n", dump_file);
f663d9ad
KT
2260 }
2261 else
2262 fprintf (dump_file, "Failed to merge stores\n");
4b84d9b8 2263 }
f663d9ad
KT
2264 if (!ret)
2265 return false;
2266 }
4b84d9b8 2267 stores.qsort (sort_by_bitpos);
f663d9ad
KT
2268 return true;
2269}
2270
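What apply_stores builds can be pictured on plain byte buffers: VAL accumulates the merged bit pattern and MASK keeps a set bit for every bit that no store wrote. A minimal sketch for two adjacent constant byte stores inside a 4-byte bit region (values chosen arbitrarily):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned char val[4], mask[4];
  memset (val, 0, sizeof val);		/* Merged bit pattern, built up below.  */
  memset (mask, 0xff, sizeof mask);	/* Set bits = "no store wrote this bit".  */

  /* Store 1: constant 0x12 at bit position 0, size 8.  */
  val[0] = 0x12;
  mask[0] = 0;
  /* Store 2: constant 0x34 at bit position 8, size 8.  */
  val[1] = 0x34;
  mask[1] = 0;

  /* Bytes 2 and 3 were never written: their mask bits stay set, telling the
     output phase those bits must be preserved (read-modify-write).  */
  for (int i = 0; i < 4; i++)
    printf ("byte %d: val %02x mask %02x\n", i, val[i], mask[i]);
  return 0;
}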
2271/* Structure describing the store chain. */
2272
6c1dae73 2273class imm_store_chain_info
f663d9ad 2274{
6c1dae73 2275public:
50b6d676
AO
2276 /* Doubly-linked list that imposes an order on chain processing.
2277 PNXP (prev's next pointer) points to the head of a list, or to
2278 the next field in the previous chain in the list.
2279 See pass_store_merging::m_stores_head for more rationale. */
2280 imm_store_chain_info *next, **pnxp;
b5926e23 2281 tree base_addr;
a62b3dc5 2282 auto_vec<store_immediate_info *> m_store_info;
f663d9ad
KT
2283 auto_vec<merged_store_group *> m_merged_store_groups;
2284
50b6d676
AO
2285 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2286 : next (inspt), pnxp (&inspt), base_addr (b_a)
2287 {
2288 inspt = this;
2289 if (next)
2290 {
2291 gcc_checking_assert (pnxp == next->pnxp);
2292 next->pnxp = &next;
2293 }
2294 }
2295 ~imm_store_chain_info ()
2296 {
2297 *pnxp = next;
2298 if (next)
2299 {
2300 gcc_checking_assert (&next == next->pnxp);
2301 next->pnxp = pnxp;
2302 }
2303 }
b5926e23 2304 bool terminate_and_process_chain ();
bd909071
JJ
2305 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2306 unsigned int);
f663d9ad 2307 bool coalesce_immediate_stores ();
b5926e23
RB
2308 bool output_merged_store (merged_store_group *);
2309 bool output_merged_stores ();
f663d9ad
KT
2310};
2311
2312const pass_data pass_data_tree_store_merging = {
2313 GIMPLE_PASS, /* type */
2314 "store-merging", /* name */
2315 OPTGROUP_NONE, /* optinfo_flags */
2316 TV_GIMPLE_STORE_MERGING, /* tv_id */
2317 PROP_ssa, /* properties_required */
2318 0, /* properties_provided */
2319 0, /* properties_destroyed */
2320 0, /* todo_flags_start */
2321 TODO_update_ssa, /* todo_flags_finish */
2322};
2323
2324class pass_store_merging : public gimple_opt_pass
2325{
2326public:
2327 pass_store_merging (gcc::context *ctxt)
95d94b52
RB
2328 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head (),
2329 m_n_chains (0), m_n_stores (0)
f663d9ad
KT
2330 {
2331 }
2332
c94c3532
EB
2333 /* Pass not supported for PDP-endian, nor for insane hosts or
2334 target character sizes where native_{encode,interpret}_expr
a62b3dc5 2335 doesn't work properly. */
f663d9ad
KT
2336 virtual bool
2337 gate (function *)
2338 {
a62b3dc5 2339 return flag_store_merging
c94c3532 2340 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
a62b3dc5
JJ
2341 && CHAR_BIT == 8
2342 && BITS_PER_UNIT == 8;
f663d9ad
KT
2343 }
2344
2345 virtual unsigned int execute (function *);
2346
2347private:
99b1c316 2348 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
f663d9ad 2349
50b6d676
AO
2350 /* Form a doubly-linked stack of the elements of m_stores, so that
2351 we can iterate over them in a predictable way. Using this order
2352 avoids extraneous differences in the compiler output just because
2353 of tree pointer variations (e.g. different chains end up in
2354 different positions of m_stores, so they are handled in different
2355 orders, so they allocate or release SSA names in different
2356 orders, and when they get reused, subsequent passes end up
2357 getting different SSA names, which may ultimately change
2358 decisions when going out of SSA). */
2359 imm_store_chain_info *m_stores_head;
2360
95d94b52
RB
2361 /* The number of store chains currently tracked. */
2362 unsigned m_n_chains;
2363 /* The number of stores currently tracked. */
2364 unsigned m_n_stores;
2365
629387a6
EB
2366 bool process_store (gimple *);
2367 bool terminate_and_process_chain (imm_store_chain_info *);
383ac8dc 2368 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
629387a6 2369 bool terminate_and_process_all_chains ();
f663d9ad
KT
2370}; // class pass_store_merging
2371
2372/* Terminate and process all recorded chains. Return true if any changes
2373 were made. */
2374
2375bool
2376pass_store_merging::terminate_and_process_all_chains ()
2377{
f663d9ad 2378 bool ret = false;
50b6d676 2379 while (m_stores_head)
629387a6 2380 ret |= terminate_and_process_chain (m_stores_head);
b119c055 2381 gcc_assert (m_stores.is_empty ());
f663d9ad
KT
2382 return ret;
2383}
2384
383ac8dc
JJ
2385/* Terminate all chains that are affected by the statement STMT.
2386 CHAIN_INFO is the chain we should ignore from the checks if
629387a6 2387 non-NULL. Return true if any changes were made. */
f663d9ad
KT
2388
2389bool
20770eb8 2390pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
b5926e23 2391 **chain_info,
f663d9ad
KT
2392 gimple *stmt)
2393{
2394 bool ret = false;
2395
2396 /* If the statement doesn't touch memory it can't alias. */
2397 if (!gimple_vuse (stmt))
2398 return false;
2399
9e875fd8 2400 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
6b412bf6
RB
2401 ao_ref store_lhs_ref;
2402 ao_ref_init (&store_lhs_ref, store_lhs);
383ac8dc 2403 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
f663d9ad 2404 {
383ac8dc
JJ
2405 next = cur->next;
2406
2407 /* We already checked all the stores in chain_info and terminated the
2408 chain if necessary. Skip it here. */
2409 if (chain_info && *chain_info == cur)
2410 continue;
2411
245f6de1
JJ
2412 store_immediate_info *info;
2413 unsigned int i;
383ac8dc 2414 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
f663d9ad 2415 {
9e875fd8 2416 tree lhs = gimple_assign_lhs (info->stmt);
6b412bf6
RB
2417 ao_ref lhs_ref;
2418 ao_ref_init (&lhs_ref, lhs);
2419 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2420 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2421 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2422 &lhs_ref, false)))
f663d9ad 2423 {
245f6de1 2424 if (dump_file && (dump_flags & TDF_DETAILS))
f663d9ad 2425 {
245f6de1
JJ
2426 fprintf (dump_file, "stmt causes chain termination:\n");
2427 print_gimple_stmt (dump_file, stmt, 0);
f663d9ad 2428 }
629387a6 2429 ret |= terminate_and_process_chain (cur);
245f6de1 2430 break;
f663d9ad
KT
2431 }
2432 }
2433 }
2434
f663d9ad
KT
2435 return ret;
2436}
2437
2438/* Helper function. Terminate the recorded chain storing to base object
2439 BASE. Return true if the merging and output was successful. The m_stores
2440 entry is removed after the processing in any case. */
2441
2442bool
629387a6 2443pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
f663d9ad 2444{
95d94b52
RB
2445 m_n_stores -= chain_info->m_store_info.length ();
2446 m_n_chains--;
b5926e23
RB
2447 bool ret = chain_info->terminate_and_process_chain ();
2448 m_stores.remove (chain_info->base_addr);
2449 delete chain_info;
f663d9ad
KT
2450 return ret;
2451}
2452
245f6de1 2453/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
629387a6
EB
2454 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2455 be able to sink a load of REF across stores between FIRST and LAST, up
2456 to right before LAST. */
245f6de1
JJ
2457
2458bool
2459stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2460{
2461 ao_ref r;
2462 ao_ref_init (&r, ref);
2463 unsigned int count = 0;
2464 tree vop = gimple_vdef (last);
2465 gimple *stmt;
2466
629387a6
EB
2467 /* Return true conservatively if the basic blocks are different. */
2468 if (gimple_bb (first) != gimple_bb (last))
2469 return true;
2470
245f6de1
JJ
2471 do
2472 {
2473 stmt = SSA_NAME_DEF_STMT (vop);
2474 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2475 return true;
4b84d9b8
JJ
2476 if (gimple_store_p (stmt)
2477 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2478 return true;
245f6de1
JJ
2479 /* Avoid quadratic compile time by bounding the number of checks
2480 we perform. */
2481 if (++count > MAX_STORE_ALIAS_CHECKS)
2482 return true;
2483 vop = gimple_vuse (stmt);
2484 }
2485 while (stmt != first);
629387a6 2486
245f6de1
JJ
2487 return false;
2488}
2489
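The walk above follows virtual definitions backwards and answers conservatively once a limit of checks is hit. An abstract sketch of that pattern (the vdef struct and field names are illustrative, not the GIMPLE virtual-operand API):

#include <stdbool.h>
#include <stdio.h>

struct vdef { struct vdef *prev; bool clobbers_ref; };

/* Walk backwards from LAST to FIRST; answer "may clobber" if any step
   clobbers or if more than LIMIT steps are needed.  */
static bool
may_clobber (struct vdef *first, struct vdef *last, unsigned limit)
{
  unsigned count = 0;
  for (struct vdef *d = last; ; d = d->prev)
    {
      if (d->clobbers_ref)
	return true;
      if (++count > limit)
	return true;		/* Too many statements: be conservative.  */
      if (d == first)
	break;
    }
  return false;
}

int
main (void)
{
  struct vdef a = { NULL, false };
  struct vdef b = { &a, false };
  struct vdef c = { &b, true };
  printf ("%d\n", may_clobber (&a, &c, 16));   /* Prints 1: c clobbers.  */
  return 0;
}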
2490/* Return true if INFO->ops[IDX] is mergeable with the
2491 corresponding loads already in MERGED_STORE group.
2492 BASE_ADDR is the base address of the whole store group. */
2493
2494bool
2495compatible_load_p (merged_store_group *merged_store,
2496 store_immediate_info *info,
2497 tree base_addr, int idx)
2498{
2499 store_immediate_info *infof = merged_store->stores[0];
2500 if (!info->ops[idx].base_addr
8a91d545
RS
2501 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2502 info->bitpos - infof->bitpos)
245f6de1
JJ
2503 || !operand_equal_p (info->ops[idx].base_addr,
2504 infof->ops[idx].base_addr, 0))
2505 return false;
2506
2507 store_immediate_info *infol = merged_store->stores.last ();
2508 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2509 /* In this case all vuses should be the same, e.g.
2510 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2511 or
2512 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2513 and we can emit the coalesced load next to any of those loads. */
2514 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2515 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2516 return true;
2517
2518 /* Otherwise, at least for now require that the load has the same
2519 vuse as the store. See following examples. */
2520 if (gimple_vuse (info->stmt) != load_vuse)
2521 return false;
2522
2523 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2524 || (infof != infol
2525 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2526 return false;
2527
2528 /* If the load is from the same location as the store, already
2529 the construction of the immediate chain info guarantees no intervening
2530 stores, so no further checks are needed. Example:
2531 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
8a91d545 2532 if (known_eq (info->ops[idx].bitpos, info->bitpos)
245f6de1
JJ
2533 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2534 return true;
2535
2536 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2537 of the stores in the group, or any other stores in between those.
2538 Previous calls to compatible_load_p ensured that for all the
2539 merged_store->stores IDX loads, no stmts starting with
2540 merged_store->first_stmt and ending right before merged_store->last_stmt
2541 clobbers those loads. */
2542 gimple *first = merged_store->first_stmt;
2543 gimple *last = merged_store->last_stmt;
245f6de1
JJ
2544 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2545 comes before the so far first load, we'll be changing
2546 merged_store->first_stmt. In that case we need to give up if
2547 any of the earlier processed loads clobber with the stmts in the new
2548 range. */
2549 if (info->order < merged_store->first_order)
2550 {
3f207ab3 2551 for (store_immediate_info *infoc : merged_store->stores)
245f6de1
JJ
2552 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2553 return false;
2554 first = info->stmt;
2555 }
2556 /* Similarly, we could change merged_store->last_stmt, so ensure
2557 in that case no stmts in the new range clobber any of the earlier
2558 processed loads. */
2559 else if (info->order > merged_store->last_order)
2560 {
3f207ab3 2561 for (store_immediate_info *infoc : merged_store->stores)
245f6de1
JJ
2562 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2563 return false;
2564 last = info->stmt;
2565 }
2566 /* And finally, we'd be adding a new load to the set, ensure it isn't
2567 clobbered in the new range. */
2568 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2569 return false;
2570
2571 /* Otherwise, we are looking for:
2572 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2573 or
2574 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2575 return true;
2576}
2577
4b84d9b8
JJ
2578/* Add all refs loaded to compute VAL to REFS vector. */
2579
2580void
2581gather_bswap_load_refs (vec<tree> *refs, tree val)
2582{
2583 if (TREE_CODE (val) != SSA_NAME)
2584 return;
2585
2586 gimple *stmt = SSA_NAME_DEF_STMT (val);
2587 if (!is_gimple_assign (stmt))
2588 return;
2589
2590 if (gimple_assign_load_p (stmt))
2591 {
2592 refs->safe_push (gimple_assign_rhs1 (stmt));
2593 return;
2594 }
2595
2596 switch (gimple_assign_rhs_class (stmt))
2597 {
2598 case GIMPLE_BINARY_RHS:
2599 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2600 /* FALLTHRU */
2601 case GIMPLE_UNARY_RHS:
2602 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2603 break;
2604 default:
2605 gcc_unreachable ();
2606 }
2607}
2608
c5679c37
JJ
2609/* Check if there are any stores in M_STORE_INFO after index I
2610 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2611 a potential group ending at END and have their order
4d213bf6
JJ
2612 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2613 all the stores already merged and the one under consideration
2614 have rhs_code of INTEGER_CST. Return true if there are no such stores.
c5679c37
JJ
2615 Consider:
2616 MEM[(long long int *)p_28] = 0;
2617 MEM[(long long int *)p_28 + 8B] = 0;
2618 MEM[(long long int *)p_28 + 16B] = 0;
2619 MEM[(long long int *)p_28 + 24B] = 0;
2620 _129 = (int) _130;
2621 MEM[(int *)p_28 + 8B] = _129;
2622 MEM[(int *)p_28].a = -1;
2623 We already have
2624 MEM[(long long int *)p_28] = 0;
2625 MEM[(int *)p_28].a = -1;
2626 stmts in the current group and need to consider if it is safe to
2627 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2628 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2629 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2630 into the group and merging of those 3 stores is successful, merged
2631 stmts will be emitted at the latest store from that group, i.e.
2632 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2633 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2634 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2635 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2636 into the group. That way it will be its own store group and will
4d213bf6 2637 not be touched. If ALL_INTEGER_CST_P and there are overlapping
c5679c37 2638 INTEGER_CST stores, those are mergeable using merge_overlapping,
bd909071
JJ
2639 so don't return false for those.
2640
2641 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2642 (exclusive), whether they don't overlap the bitrange START to END
2643 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2644 prevent merging in cases like:
2645 MEM <char[12]> [&b + 8B] = {};
2646 MEM[(short *) &b] = 5;
2647 _5 = *x_4(D);
2648 MEM <long long unsigned int> [&b + 2B] = _5;
2649 MEM[(char *)&b + 16B] = 88;
2650 MEM[(int *)&b + 20B] = 1;
2651 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2652 be merged with it, because the = _5 store overlaps these and is in between
2653 them in sort_by_order ordering. If it was merged, the merged store would
2654 go after the = _5 store and thus change behavior. */
c5679c37
JJ
2655
2656static bool
2657check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
bd909071
JJ
2658 bool all_integer_cst_p, unsigned int first_order,
2659 unsigned int last_order, unsigned HOST_WIDE_INT start,
2660 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2661 unsigned end_earlier)
c5679c37
JJ
2662{
2663 unsigned int len = m_store_info.length ();
bd909071
JJ
2664 for (unsigned int j = first_earlier; j < end_earlier; j++)
2665 {
2666 store_immediate_info *info = m_store_info[j];
2667 if (info->order > first_order
2668 && info->order < last_order
2669 && info->bitpos + info->bitsize > start)
2670 return false;
2671 }
c5679c37
JJ
2672 for (++i; i < len; ++i)
2673 {
2674 store_immediate_info *info = m_store_info[i];
2675 if (info->bitpos >= end)
2676 break;
2677 if (info->order < last_order
4d213bf6 2678 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
c5679c37
JJ
2679 return false;
2680 }
2681 return true;
2682}
2683
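The core of the test above, stripped of the bswap and earlier-store details, is: a later-sorted store that still starts before the group's end but originally executed before the group's last store forces a refusal. A standalone sketch with made-up records:

#include <stdbool.h>
#include <stdio.h>

struct st { unsigned bitpos, bitsize, order; };

/* S is sorted by bitpos.  A candidate group covering bits up to END whose
   last store has order LAST_ORDER may only be formed if no store after
   index I both starts before END and originally executed earlier.  */
static bool
no_overlap (const struct st *s, unsigned n, unsigned i,
	    unsigned last_order, unsigned end)
{
  for (unsigned j = i + 1; j < n; j++)
    {
      if (s[j].bitpos >= end)
	break;			/* Sorted by bitpos: nothing later overlaps.  */
      if (s[j].order < last_order)
	return false;		/* An in-between store overlaps the group.  */
    }
  return true;
}

int
main (void)
{
  /* The store at bitpos 40 (order 1) originally executed between the
     group's stores (orders 0 and 3) and overlaps the candidate region
     [0, 64), so the merge must be refused.  */
  struct st s[] = { { 0, 32, 0 }, { 32, 32, 3 }, { 40, 8, 1 } };
  printf ("%d\n", no_overlap (s, 3, 1, 3, 64));   /* Prints 0.  */
  return 0;
}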
4b84d9b8
JJ
2684/* Return true if m_store_info[first] and at least one following store
2685 form a group which store try_size bitsize value which is byte swapped
2686 from a memory load or some value, or identity from some value.
2687 This uses the bswap pass APIs. */
2688
2689bool
2690imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2691 unsigned int first,
bd909071
JJ
2692 unsigned int try_size,
2693 unsigned int first_earlier)
4b84d9b8
JJ
2694{
2695 unsigned int len = m_store_info.length (), last = first;
2696 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2697 if (width >= try_size)
2698 return false;
2699 for (unsigned int i = first + 1; i < len; ++i)
2700 {
2701 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
cb76fcd7 2702 || m_store_info[i]->lp_nr != merged_store->lp_nr
4b84d9b8
JJ
2703 || m_store_info[i]->ins_stmt == NULL)
2704 return false;
2705 width += m_store_info[i]->bitsize;
2706 if (width >= try_size)
2707 {
2708 last = i;
2709 break;
2710 }
2711 }
2712 if (width != try_size)
2713 return false;
2714
2715 bool allow_unaligned
028d4092 2716 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
4b84d9b8
JJ
2717 /* Punt if the combined store would not be aligned and we need alignment. */
2718 if (!allow_unaligned)
2719 {
2720 unsigned int align = merged_store->align;
2721 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2722 for (unsigned int i = first + 1; i <= last; ++i)
2723 {
2724 unsigned int this_align;
2725 unsigned HOST_WIDE_INT align_bitpos = 0;
2726 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2727 &this_align, &align_bitpos);
2728 if (this_align > align)
2729 {
2730 align = this_align;
2731 align_base = m_store_info[i]->bitpos - align_bitpos;
2732 }
2733 }
2734 unsigned HOST_WIDE_INT align_bitpos
2735 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2736 if (align_bitpos)
2737 align = least_bit_hwi (align_bitpos);
2738 if (align < try_size)
2739 return false;
2740 }
2741
2742 tree type;
2743 switch (try_size)
2744 {
2745 case 16: type = uint16_type_node; break;
2746 case 32: type = uint32_type_node; break;
2747 case 64: type = uint64_type_node; break;
2748 default: gcc_unreachable ();
2749 }
2750 struct symbolic_number n;
2751 gimple *ins_stmt = NULL;
2752 int vuse_store = -1;
2753 unsigned int first_order = merged_store->first_order;
2754 unsigned int last_order = merged_store->last_order;
2755 gimple *first_stmt = merged_store->first_stmt;
2756 gimple *last_stmt = merged_store->last_stmt;
c5679c37 2757 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
4b84d9b8
JJ
2758 store_immediate_info *infof = m_store_info[first];
2759
2760 for (unsigned int i = first; i <= last; ++i)
2761 {
2762 store_immediate_info *info = m_store_info[i];
2763 struct symbolic_number this_n = info->n;
2764 this_n.type = type;
2765 if (!this_n.base_addr)
2766 this_n.range = try_size / BITS_PER_UNIT;
30fa8e9c
JJ
2767 else
2768 /* Update vuse in case it has changed by output_merged_stores. */
2769 this_n.vuse = gimple_vuse (info->ins_stmt);
4b84d9b8
JJ
2770 unsigned int bitpos = info->bitpos - infof->bitpos;
2771 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2772 BYTES_BIG_ENDIAN
2773 ? try_size - info->bitsize - bitpos
2774 : bitpos))
2775 return false;
aa11164a 2776 if (this_n.base_addr && vuse_store)
4b84d9b8
JJ
2777 {
2778 unsigned int j;
2779 for (j = first; j <= last; ++j)
2780 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2781 break;
2782 if (j > last)
2783 {
2784 if (vuse_store == 1)
2785 return false;
2786 vuse_store = 0;
2787 }
2788 }
2789 if (i == first)
2790 {
2791 n = this_n;
2792 ins_stmt = info->ins_stmt;
2793 }
2794 else
2795 {
c5679c37 2796 if (n.base_addr && n.vuse != this_n.vuse)
4b84d9b8 2797 {
c5679c37
JJ
2798 if (vuse_store == 0)
2799 return false;
2800 vuse_store = 1;
4b84d9b8 2801 }
c5679c37
JJ
2802 if (info->order > last_order)
2803 {
2804 last_order = info->order;
2805 last_stmt = info->stmt;
2806 }
2807 else if (info->order < first_order)
2808 {
2809 first_order = info->order;
2810 first_stmt = info->stmt;
2811 }
2812 end = MAX (end, info->bitpos + info->bitsize);
4b84d9b8
JJ
2813
2814 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2815 &this_n, &n);
2816 if (ins_stmt == NULL)
2817 return false;
2818 }
2819 }
2820
2821 uint64_t cmpxchg, cmpnop;
2822 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2823
2824 /* A complete byte swap should make the symbolic number to start with
2825 the largest digit in the highest order byte. Unchanged symbolic
2826 number indicates a read with same endianness as target architecture. */
2827 if (n.n != cmpnop && n.n != cmpxchg)
2828 return false;
2829
2830 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2831 return false;
2832
bd909071
JJ
2833 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2834 merged_store->start, end, first_earlier, first))
c5679c37
JJ
2835 return false;
2836
4b84d9b8
JJ
2837 /* Don't handle memory copy this way if normal non-bswap processing
2838 would handle it too. */
2839 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2840 {
2841 unsigned int i;
2842 for (i = first; i <= last; ++i)
2843 if (m_store_info[i]->rhs_code != MEM_REF)
2844 break;
2845 if (i == last + 1)
2846 return false;
2847 }
2848
2849 if (n.n == cmpxchg)
2850 switch (try_size)
2851 {
2852 case 16:
2853 /* Will emit LROTATE_EXPR. */
2854 break;
2855 case 32:
2856 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2857 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2858 break;
2859 return false;
2860 case 64:
2861 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2862 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2863 break;
2864 return false;
2865 default:
2866 gcc_unreachable ();
2867 }
2868
2869 if (!allow_unaligned && n.base_addr)
2870 {
2871 unsigned int align = get_object_alignment (n.src);
2872 if (align < try_size)
2873 return false;
2874 }
2875
2876 /* If each load has vuse of the corresponding store, need to verify
2877 the loads can be sunk right before the last store. */
2878 if (vuse_store == 1)
2879 {
2880 auto_vec<tree, 64> refs;
2881 for (unsigned int i = first; i <= last; ++i)
2882 gather_bswap_load_refs (&refs,
2883 gimple_assign_rhs1 (m_store_info[i]->stmt));
2884
3f207ab3 2885 for (tree ref : refs)
4b84d9b8
JJ
2886 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2887 return false;
2888 n.vuse = NULL_TREE;
2889 }
2890
2891 infof->n = n;
2892 infof->ins_stmt = ins_stmt;
2893 for (unsigned int i = first; i <= last; ++i)
2894 {
2895 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2896 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2897 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2898 if (i != first)
2899 merged_store->merge_into (m_store_info[i]);
2900 }
2901
2902 return true;
2903}
2904
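The kind of group try_coalesce_bswap is after looks like this at the source level; on a little-endian target the four narrow stores can be replaced by one byte-swapped 32-bit store (a rough equivalent, not the exact code the pass emits):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

void
store_bytes (unsigned char *p, uint32_t data)
{
  p[0] = data >> 24;
  p[1] = data >> 16;
  p[2] = data >> 8;
  p[3] = data;
}

/* What the pass can emit instead on a little-endian target (roughly).  */
void
store_merged (unsigned char *p, uint32_t data)
{
  uint32_t tmp = __builtin_bswap32 (data);
  memcpy (p, &tmp, sizeof tmp);		/* Single 4-byte store.  */
}

int
main (void)
{
  unsigned char a[4], b[4];
  store_bytes (a, 0x11223344);
  store_merged (b, 0x11223344);
  printf ("%d\n", memcmp (a, b, 4) == 0);   /* Prints 1 on little-endian hosts.  */
  return 0;
}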
f663d9ad
KT
2905/* Go through the candidate stores recorded in m_store_info and merge them
2906 into merged_store_group objects recorded into m_merged_store_groups
2907 representing the widened stores. Return true if coalescing was successful
2908 and the number of widened stores is smaller than the original number
2909 of stores. */
2910
2911bool
2912imm_store_chain_info::coalesce_immediate_stores ()
2913{
2914 /* Anything less can't be processed. */
2915 if (m_store_info.length () < 2)
2916 return false;
2917
2918 if (dump_file && (dump_flags & TDF_DETAILS))
c94c3532 2919 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
f663d9ad
KT
2920 m_store_info.length ());
2921
2922 store_immediate_info *info;
4b84d9b8 2923 unsigned int i, ignore = 0;
bd909071
JJ
2924 unsigned int first_earlier = 0;
2925 unsigned int end_earlier = 0;
f663d9ad
KT
2926
2927 /* Order the stores by the bitposition they write to. */
2928 m_store_info.qsort (sort_by_bitpos);
2929
2930 info = m_store_info[0];
2931 merged_store_group *merged_store = new merged_store_group (info);
c94c3532
EB
2932 if (dump_file && (dump_flags & TDF_DETAILS))
2933 fputs ("New store group\n", dump_file);
f663d9ad
KT
2934
2935 FOR_EACH_VEC_ELT (m_store_info, i, info)
2936 {
3afd514b
JJ
2937 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2938
4b84d9b8 2939 if (i <= ignore)
c94c3532 2940 goto done;
f663d9ad 2941
bd909071
JJ
2942 while (first_earlier < end_earlier
2943 && (m_store_info[first_earlier]->bitpos
2944 + m_store_info[first_earlier]->bitsize
2945 <= merged_store->start))
2946 first_earlier++;
2947
4b84d9b8
JJ
2948 /* First try to handle group of stores like:
2949 p[0] = data >> 24;
2950 p[1] = data >> 16;
2951 p[2] = data >> 8;
2952 p[3] = data;
2953 using the bswap framework. */
2954 if (info->bitpos == merged_store->start + merged_store->width
2955 && merged_store->stores.length () == 1
2956 && merged_store->stores[0]->ins_stmt != NULL
cb76fcd7 2957 && info->lp_nr == merged_store->lp_nr
4b84d9b8
JJ
2958 && info->ins_stmt != NULL)
2959 {
2960 unsigned int try_size;
2961 for (try_size = 64; try_size >= 16; try_size >>= 1)
bd909071
JJ
2962 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2963 first_earlier))
4b84d9b8
JJ
2964 break;
2965
2966 if (try_size >= 16)
2967 {
2968 ignore = i + merged_store->stores.length () - 1;
2969 m_merged_store_groups.safe_push (merged_store);
2970 if (ignore < m_store_info.length ())
bd909071
JJ
2971 {
2972 merged_store = new merged_store_group (m_store_info[ignore]);
2973 end_earlier = ignore;
2974 }
4b84d9b8
JJ
2975 else
2976 merged_store = NULL;
c94c3532 2977 goto done;
4b84d9b8
JJ
2978 }
2979 }
2980
3afd514b
JJ
2981 new_bitregion_start
2982 = MIN (merged_store->bitregion_start, info->bitregion_start);
2983 new_bitregion_end
2984 = MAX (merged_store->bitregion_end, info->bitregion_end);
2985
2986 if (info->order >= merged_store->first_nonmergeable_order
2987 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
028d4092 2988 > (unsigned) param_store_merging_max_size))
18e0c3d1
JJ
2989 ;
2990
f663d9ad
KT
2991 /* |---store 1---|
2992 |---store 2---|
4b84d9b8 2993 Overlapping stores. */
18e0c3d1 2994 else if (IN_RANGE (info->bitpos, merged_store->start,
4d213bf6
JJ
2995 merged_store->start + merged_store->width - 1)
2996 /* |---store 1---||---store 2---|
2997 Handle also the consecutive INTEGER_CST stores case here,
2998 as we have here the code to deal with overlaps. */
2999 || (info->bitregion_start <= merged_store->bitregion_end
3000 && info->rhs_code == INTEGER_CST
3001 && merged_store->only_constants
3002 && merged_store->can_be_merged_into (info)))
f663d9ad 3003 {
245f6de1 3004 /* Only allow overlapping stores of constants. */
629387a6
EB
3005 if (info->rhs_code == INTEGER_CST
3006 && merged_store->only_constants
3007 && info->lp_nr == merged_store->lp_nr)
245f6de1 3008 {
bd909071
JJ
3009 unsigned int first_order
3010 = MIN (merged_store->first_order, info->order);
6cd4c66e
JJ
3011 unsigned int last_order
3012 = MAX (merged_store->last_order, info->order);
3013 unsigned HOST_WIDE_INT end
3014 = MAX (merged_store->start + merged_store->width,
3015 info->bitpos + info->bitsize);
bd909071
JJ
3016 if (check_no_overlap (m_store_info, i, true, first_order,
3017 last_order, merged_store->start, end,
3018 first_earlier, end_earlier))
6cd4c66e
JJ
3019 {
3020 /* check_no_overlap call above made sure there are no
3021 overlapping stores with non-INTEGER_CST rhs_code
3022 in between the first and last of the stores we've
3023 just merged. If there are any INTEGER_CST rhs_code
3024 stores in between, we need to merge_overlapping them
3025 even if in the sort_by_bitpos order there are other
3026 overlapping stores in between. Keep those stores as is.
3027 Example:
3028 MEM[(int *)p_28] = 0;
3029 MEM[(char *)p_28 + 3B] = 1;
3030 MEM[(char *)p_28 + 1B] = 2;
3031 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
3032 We can't merge the zero store with the store of two and
3033 not merge anything else, because the store of one is
3034 in the original order in between those two, but in
3035 store_by_bitpos order it comes after the last store that
3036 we can't merge with them. We can merge the first 3 stores
3037 and keep the last store as is though. */
18e0c3d1
JJ
3038 unsigned int len = m_store_info.length ();
3039 unsigned int try_order = last_order;
3040 unsigned int first_nonmergeable_order;
3041 unsigned int k;
3042 bool last_iter = false;
3043 int attempts = 0;
3044 do
6cd4c66e 3045 {
18e0c3d1 3046 unsigned int max_order = 0;
bd909071 3047 unsigned int min_order = first_order;
18e0c3d1
JJ
3048 unsigned first_nonmergeable_int_order = ~0U;
3049 unsigned HOST_WIDE_INT this_end = end;
3050 k = i;
3051 first_nonmergeable_order = ~0U;
3052 for (unsigned int j = i + 1; j < len; ++j)
6cd4c66e 3053 {
18e0c3d1
JJ
3054 store_immediate_info *info2 = m_store_info[j];
3055 if (info2->bitpos >= this_end)
3056 break;
3057 if (info2->order < try_order)
6cd4c66e 3058 {
4119cd69
JJ
3059 if (info2->rhs_code != INTEGER_CST
3060 || info2->lp_nr != merged_store->lp_nr)
18e0c3d1
JJ
3061 {
3062 /* Normally check_no_overlap makes sure this
3063 doesn't happen, but if end grows below,
3064 then we need to process more stores than
3065 check_no_overlap verified. Example:
3066 MEM[(int *)p_5] = 0;
3067 MEM[(short *)p_5 + 3B] = 1;
3068 MEM[(char *)p_5 + 4B] = _9;
3069 MEM[(char *)p_5 + 2B] = 2; */
3070 k = 0;
3071 break;
3072 }
3073 k = j;
bd909071 3074 min_order = MIN (min_order, info2->order);
18e0c3d1
JJ
3075 this_end = MAX (this_end,
3076 info2->bitpos + info2->bitsize);
6cd4c66e 3077 }
18e0c3d1 3078 else if (info2->rhs_code == INTEGER_CST
4119cd69 3079 && info2->lp_nr == merged_store->lp_nr
18e0c3d1
JJ
3080 && !last_iter)
3081 {
3082 max_order = MAX (max_order, info2->order + 1);
3083 first_nonmergeable_int_order
3084 = MIN (first_nonmergeable_int_order,
3085 info2->order);
3086 }
3087 else
3088 first_nonmergeable_order
3089 = MIN (first_nonmergeable_order, info2->order);
6cd4c66e 3090 }
bd909071
JJ
3091 if (k > i
3092 && !check_no_overlap (m_store_info, len - 1, true,
3093 min_order, try_order,
3094 merged_store->start, this_end,
3095 first_earlier, end_earlier))
3096 k = 0;
18e0c3d1
JJ
3097 if (k == 0)
3098 {
3099 if (last_order == try_order)
3100 break;
3101 /* If this failed, but only because we grew
3102 try_order, retry with the last working one,
3103 so that we merge at least something. */
3104 try_order = last_order;
3105 last_iter = true;
3106 continue;
3107 }
3108 last_order = try_order;
3109 /* Retry with a larger try_order to see if we could
3110 merge some further INTEGER_CST stores. */
3111 if (max_order
3112 && (first_nonmergeable_int_order
3113 < first_nonmergeable_order))
3114 {
3115 try_order = MIN (max_order,
3116 first_nonmergeable_order);
3117 try_order
3118 = MIN (try_order,
3119 merged_store->first_nonmergeable_order);
3120 if (try_order > last_order && ++attempts < 16)
3121 continue;
3122 }
3123 first_nonmergeable_order
3124 = MIN (first_nonmergeable_order,
3125 first_nonmergeable_int_order);
3126 end = this_end;
3127 break;
6cd4c66e 3128 }
18e0c3d1 3129 while (1);
6cd4c66e
JJ
3130
3131 if (k != 0)
3132 {
3133 merged_store->merge_overlapping (info);
3134
18e0c3d1
JJ
3135 merged_store->first_nonmergeable_order
3136 = MIN (merged_store->first_nonmergeable_order,
3137 first_nonmergeable_order);
3138
6cd4c66e
JJ
3139 for (unsigned int j = i + 1; j <= k; j++)
3140 {
3141 store_immediate_info *info2 = m_store_info[j];
3142 gcc_assert (info2->bitpos < end);
3143 if (info2->order < last_order)
3144 {
3145 gcc_assert (info2->rhs_code == INTEGER_CST);
18e0c3d1
JJ
3146 if (info != info2)
3147 merged_store->merge_overlapping (info2);
6cd4c66e
JJ
3148 }
3149 /* Other stores are kept and not merged in any
3150 way. */
3151 }
3152 ignore = k;
3153 goto done;
3154 }
3155 }
245f6de1 3156 }
f663d9ad 3157 }
245f6de1
JJ
3158 /* |---store 1---||---store 2---|
3159 This store is consecutive to the previous one.
3160 Merge it into the current store group. There can be gaps in between
3161 the stores, but there can't be gaps in between bitregions. */
c94c3532 3162 else if (info->bitregion_start <= merged_store->bitregion_end
7f5a3982 3163 && merged_store->can_be_merged_into (info))
f663d9ad 3164 {
245f6de1
JJ
3165 store_immediate_info *infof = merged_store->stores[0];
3166
3167 /* All the rhs_code ops that take 2 operands are commutative,
3168 swap the operands if it could make the operands compatible. */
3169 if (infof->ops[0].base_addr
3170 && infof->ops[1].base_addr
3171 && info->ops[0].base_addr
3172 && info->ops[1].base_addr
8a91d545
RS
3173 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
3174 info->bitpos - infof->bitpos)
245f6de1
JJ
3175 && operand_equal_p (info->ops[1].base_addr,
3176 infof->ops[0].base_addr, 0))
127ef369
JJ
3177 {
3178 std::swap (info->ops[0], info->ops[1]);
3179 info->ops_swapped_p = true;
3180 }
4d213bf6 3181 if (check_no_overlap (m_store_info, i, false,
bd909071 3182 MIN (merged_store->first_order, info->order),
a7fe6482 3183 MAX (merged_store->last_order, info->order),
bd909071 3184 merged_store->start,
a7fe6482 3185 MAX (merged_store->start + merged_store->width,
bd909071
JJ
3186 info->bitpos + info->bitsize),
3187 first_earlier, end_earlier))
245f6de1 3188 {
7f5a3982
EB
3189 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3190 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3191 {
3192 info->rhs_code = BIT_INSERT_EXPR;
3193 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3194 info->ops[0].base_addr = NULL_TREE;
3195 }
3196 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3197 {
3f207ab3 3198 for (store_immediate_info *infoj : merged_store->stores)
7f5a3982
EB
3199 {
3200 infoj->rhs_code = BIT_INSERT_EXPR;
3201 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3202 infoj->ops[0].base_addr = NULL_TREE;
3203 }
e362a897 3204 merged_store->bit_insertion = true;
7f5a3982
EB
3205 }
3206 if ((infof->ops[0].base_addr
3207 ? compatible_load_p (merged_store, info, base_addr, 0)
3208 : !info->ops[0].base_addr)
3209 && (infof->ops[1].base_addr
3210 ? compatible_load_p (merged_store, info, base_addr, 1)
3211 : !info->ops[1].base_addr))
3212 {
3213 merged_store->merge_into (info);
3214 goto done;
3215 }
245f6de1
JJ
3216 }
3217 }
f663d9ad 3218
245f6de1
JJ
3219 /* |---store 1---| <gap> |---store 2---|.
3220 Gap between stores or the rhs not compatible. Start a new group. */
f663d9ad 3221
245f6de1
JJ
3222 /* Try to apply all the stores recorded for the group to determine
3223 the bitpattern they write and discard it if that fails.
3224 This will also reject single-store groups. */
c94c3532 3225 if (merged_store->apply_stores ())
245f6de1 3226 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3227 else
3228 delete merged_store;
f663d9ad 3229
245f6de1 3230 merged_store = new merged_store_group (info);
bd909071 3231 end_earlier = i;
c94c3532
EB
3232 if (dump_file && (dump_flags & TDF_DETAILS))
3233 fputs ("New store group\n", dump_file);
3234
3235 done:
3236 if (dump_file && (dump_flags & TDF_DETAILS))
3237 {
3238 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3239 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3240 i, info->bitsize, info->bitpos);
3241 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3242 fputc ('\n', dump_file);
3243 }
f663d9ad
KT
3244 }
3245
a62b3dc5 3246 /* Record or discard the last store group. */
4b84d9b8
JJ
3247 if (merged_store)
3248 {
c94c3532 3249 if (merged_store->apply_stores ())
4b84d9b8 3250 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3251 else
3252 delete merged_store;
4b84d9b8 3253 }
f663d9ad
KT
3254
3255 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
c94c3532 3256
f663d9ad
KT
3257 bool success
3258 = !m_merged_store_groups.is_empty ()
3259 && m_merged_store_groups.length () < m_store_info.length ();
3260
3261 if (success && dump_file)
c94c3532 3262 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
a62b3dc5 3263 m_merged_store_groups.length ());
f663d9ad
KT
3264
3265 return success;
3266}
3267
245f6de1
JJ
3268/* Return the type to use for the merged stores or loads described by STMTS.
3269 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3270 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3271 of the MEM_REFs if any. */
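/* For instance, if one access goes through an 'int *' lvalue and another
   through a 'long *' lvalue, the alias pointer types are not compatible and
   the function below conservatively returns ptr_type_node (alias set 0) for
   the merged access; *CLIQUEP and *BASEP are likewise cleared as soon as any
   access disagrees.  */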
f663d9ad
KT
3272
3273static tree
245f6de1
JJ
3274get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3275 unsigned short *cliquep, unsigned short *basep)
f663d9ad
KT
3276{
3277 gimple *stmt;
3278 unsigned int i;
245f6de1
JJ
3279 tree type = NULL_TREE;
3280 tree ret = NULL_TREE;
3281 *cliquep = 0;
3282 *basep = 0;
f663d9ad
KT
3283
3284 FOR_EACH_VEC_ELT (stmts, i, stmt)
3285 {
245f6de1
JJ
3286 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3287 : gimple_assign_lhs (stmt);
3288 tree type1 = reference_alias_ptr_type (ref);
3289 tree base = get_base_address (ref);
f663d9ad 3290
245f6de1
JJ
3291 if (i == 0)
3292 {
3293 if (TREE_CODE (base) == MEM_REF)
3294 {
3295 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3296 *basep = MR_DEPENDENCE_BASE (base);
3297 }
3298 ret = type = type1;
3299 continue;
3300 }
f663d9ad 3301 if (!alias_ptr_types_compatible_p (type, type1))
245f6de1
JJ
3302 ret = ptr_type_node;
3303 if (TREE_CODE (base) != MEM_REF
3304 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3305 || *basep != MR_DEPENDENCE_BASE (base))
3306 {
3307 *cliquep = 0;
3308 *basep = 0;
3309 }
f663d9ad 3310 }
245f6de1 3311 return ret;
f663d9ad
KT
3312}
3313
3314/* Return the location_t information we can find among the statements
3315 in STMTS. */
3316
3317static location_t
245f6de1 3318get_location_for_stmts (vec<gimple *> &stmts)
f663d9ad 3319{
3f207ab3 3320 for (gimple *stmt : stmts)
f663d9ad
KT
3321 if (gimple_has_location (stmt))
3322 return gimple_location (stmt);
3323
3324 return UNKNOWN_LOCATION;
3325}
3326
3327/* Used to describe a store resulting from splitting a wide store into smaller
3328 regularly-sized stores in split_group. */
3329
6c1dae73 3330class split_store
f663d9ad 3331{
6c1dae73 3332public:
f663d9ad
KT
3333 unsigned HOST_WIDE_INT bytepos;
3334 unsigned HOST_WIDE_INT size;
3335 unsigned HOST_WIDE_INT align;
245f6de1 3336 auto_vec<store_immediate_info *> orig_stores;
a62b3dc5
JJ
3337 /* True if there is a single orig stmt covering the whole split store. */
3338 bool orig;
f663d9ad
KT
3339 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3340 unsigned HOST_WIDE_INT);
3341};
3342
3343/* Simple constructor. */
3344
3345split_store::split_store (unsigned HOST_WIDE_INT bp,
3346 unsigned HOST_WIDE_INT sz,
3347 unsigned HOST_WIDE_INT al)
a62b3dc5 3348 : bytepos (bp), size (sz), align (al), orig (false)
f663d9ad 3349{
245f6de1 3350 orig_stores.create (0);
f663d9ad
KT
3351}
3352
245f6de1
JJ
3353/* Record all stores in GROUP that write to the region starting at BITPOS and
3354   of size BITSIZE.  Record infos for such statements in STORES if
3355 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
5384a802
JJ
3356 if there is exactly one original store in the range (in that case ignore
3357 clobber stmts, unless there are only clobber stmts). */
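/* Illustrative example: if GROUP holds stores covering bit ranges [0, 32),
   [32, 48) and [48, 64), a query with BITPOS 32 and BITSIZE 16 returns the
   middle store, whereas a query with BITPOS 0 and BITSIZE 64 returns NULL
   because more than one non-clobber store overlaps the range (all three
   infos are still pushed into STORES when it is non-NULL).  */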
f663d9ad 3358
a62b3dc5 3359static store_immediate_info *
99b1c316 3360find_constituent_stores (class merged_store_group *group,
245f6de1
JJ
3361 vec<store_immediate_info *> *stores,
3362 unsigned int *first,
3363 unsigned HOST_WIDE_INT bitpos,
3364 unsigned HOST_WIDE_INT bitsize)
f663d9ad 3365{
a62b3dc5 3366 store_immediate_info *info, *ret = NULL;
f663d9ad 3367 unsigned int i;
a62b3dc5
JJ
3368 bool second = false;
3369 bool update_first = true;
f663d9ad 3370 unsigned HOST_WIDE_INT end = bitpos + bitsize;
a62b3dc5 3371 for (i = *first; group->stores.iterate (i, &info); ++i)
f663d9ad
KT
3372 {
3373 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3374 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
a62b3dc5
JJ
3375 if (stmt_end <= bitpos)
3376 {
3377 /* BITPOS passed to this function never decreases from within the
3378 same split_group call, so optimize and don't scan info records
3379 which are known to end before or at BITPOS next time.
3380 Only do it if all stores before this one also pass this. */
3381 if (update_first)
3382 *first = i + 1;
3383 continue;
3384 }
3385 else
3386 update_first = false;
3387
f663d9ad 3388 /* The stores in GROUP are ordered by bitposition so if we're past
a62b3dc5
JJ
3389 the region for this group return early. */
3390 if (stmt_start >= end)
3391 return ret;
3392
5384a802
JJ
3393 if (gimple_clobber_p (info->stmt))
3394 {
3395 if (stores)
3396 stores->safe_push (info);
3397 if (ret == NULL)
3398 ret = info;
3399 continue;
3400 }
245f6de1 3401 if (stores)
a62b3dc5 3402 {
245f6de1 3403 stores->safe_push (info);
5384a802 3404 if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3405 {
3406 ret = NULL;
3407 second = true;
3408 }
3409 }
5384a802 3410 else if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3411 return NULL;
3412 if (!second)
3413 ret = info;
f663d9ad 3414 }
a62b3dc5 3415 return ret;
f663d9ad
KT
3416}
3417
d7a9512e
JJ
3418/* Return how many SSA_NAMEs used to compute the value to store in the INFO
3419 store have multiple uses. If any SSA_NAME has multiple uses, also
3420 count statements needed to compute it. */
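/* Illustrative example: for a store computed as
     _1 = MEM[q];
     _2 = _1 ^ imm;
     MEM[p] = _2;
   (rhs_code BIT_XOR_EXPR), if _1 also has uses outside this chain, the load
   defining it must survive the merge, so it is counted here; split_group
   adds these counts to the estimated number of new statements and subtracts
   them again for split stores that simply reuse the original statement.  */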
3421
3422static unsigned
3423count_multiple_uses (store_immediate_info *info)
3424{
3425 gimple *stmt = info->stmt;
3426 unsigned ret = 0;
3427 switch (info->rhs_code)
3428 {
3429 case INTEGER_CST:
e362a897 3430 case STRING_CST:
d7a9512e
JJ
3431 return 0;
3432 case BIT_AND_EXPR:
3433 case BIT_IOR_EXPR:
3434 case BIT_XOR_EXPR:
d60edaba
JJ
3435 if (info->bit_not_p)
3436 {
3437 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3438 ret = 1; /* Fall through below to return
3439 the BIT_NOT_EXPR stmt and then
3440 BIT_{AND,IOR,XOR}_EXPR and anything it
3441 uses. */
3442 else
3443	  /* After this, stmt is the BIT_NOT_EXPR.  */
3444 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3445 }
d7a9512e
JJ
3446 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3447 {
3448 ret += 1 + info->ops[0].bit_not_p;
3449 if (info->ops[1].base_addr)
3450 ret += 1 + info->ops[1].bit_not_p;
3451 return ret + 1;
3452 }
3453 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3454 /* stmt is now the BIT_*_EXPR. */
3455 if (!has_single_use (gimple_assign_rhs1 (stmt)))
127ef369
JJ
3456 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3457 else if (info->ops[info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3458 {
3459 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3460 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3461 ++ret;
3462 }
3463 if (info->ops[1].base_addr == NULL_TREE)
127ef369
JJ
3464 {
3465 gcc_checking_assert (!info->ops_swapped_p);
3466 return ret;
3467 }
d7a9512e 3468 if (!has_single_use (gimple_assign_rhs2 (stmt)))
127ef369
JJ
3469 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3470 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3471 {
3472 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3473 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3474 ++ret;
3475 }
3476 return ret;
3477 case MEM_REF:
3478 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3479 return 1 + info->ops[0].bit_not_p;
3480 else if (info->ops[0].bit_not_p)
3481 {
3482 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3483 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3484 return 1;
3485 }
3486 return 0;
c94c3532
EB
3487 case BIT_INSERT_EXPR:
3488 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
d7a9512e
JJ
3489 default:
3490 gcc_unreachable ();
3491 }
3492}
3493
f663d9ad 3494/* Split a merged store described by GROUP by populating the SPLIT_STORES
a62b3dc5
JJ
3495 vector (if non-NULL) with split_store structs describing the byte offset
3496 (from the base), the bit size and alignment of each store as well as the
3497 original statements involved in each such split group.
f663d9ad
KT
3498 This is to separate the splitting strategy from the statement
3499 building/emission/linking done in output_merged_store.
a62b3dc5 3500 Return number of new stores.
245f6de1
JJ
3501 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3502 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3afd514b
JJ
3503 BZERO_FIRST may be true only when the first store covers the whole group
3504 and clears it; if BZERO_FIRST is true, keep that first store in the set
3505 unmodified and emit further stores for the overrides only.
a62b3dc5
JJ
3506 If SPLIT_STORES is NULL, it is just a dry run to count number of
3507 new stores. */
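/* Rough illustration (assuming MAX_STORE_BITSIZE of at least 32 bits and no
   padding bytes): for a group whose bit region spans 6 bytes starting at a
   4-byte aligned position, a dry run with ALLOW_UNALIGNED_STORE false would
   typically yield one 4-byte split store followed by one 2-byte split store,
   while whole padding bytes (mask bytes of ~0) may shrink or skip individual
   split stores.  */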
f663d9ad 3508
a62b3dc5 3509static unsigned int
245f6de1 3510split_group (merged_store_group *group, bool allow_unaligned_store,
3afd514b 3511 bool allow_unaligned_load, bool bzero_first,
99b1c316 3512 vec<split_store *> *split_stores,
d7a9512e
JJ
3513 unsigned *total_orig,
3514 unsigned *total_new)
f663d9ad 3515{
a62b3dc5
JJ
3516 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3517 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
f663d9ad 3518 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
a62b3dc5
JJ
3519 unsigned HOST_WIDE_INT group_align = group->align;
3520 unsigned HOST_WIDE_INT align_base = group->align_base;
245f6de1 3521 unsigned HOST_WIDE_INT group_load_align = group_align;
d7a9512e 3522 bool any_orig = false;
f663d9ad 3523
f663d9ad
KT
3524 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3525
e362a897
EB
3526 /* For bswap framework using sets of stores, all the checking has been done
3527 earlier in try_coalesce_bswap and the result always needs to be emitted
3528     as a single store.  Likewise for string concatenation.  */
4b84d9b8 3529 if (group->stores[0]->rhs_code == LROTATE_EXPR
e362a897
EB
3530 || group->stores[0]->rhs_code == NOP_EXPR
3531 || group->string_concatenation)
4b84d9b8 3532 {
3afd514b 3533 gcc_assert (!bzero_first);
4b84d9b8
JJ
3534 if (total_orig)
3535 {
3536 /* Avoid the old/new stmt count heuristics. It should be
3537 always beneficial. */
3538 total_new[0] = 1;
3539 total_orig[0] = 2;
3540 }
3541
3542 if (split_stores)
3543 {
3544 unsigned HOST_WIDE_INT align_bitpos
3545 = (group->start - align_base) & (group_align - 1);
3546 unsigned HOST_WIDE_INT align = group_align;
3547 if (align_bitpos)
3548 align = least_bit_hwi (align_bitpos);
3549 bytepos = group->start / BITS_PER_UNIT;
99b1c316 3550 split_store *store
4b84d9b8
JJ
3551 = new split_store (bytepos, group->width, align);
3552 unsigned int first = 0;
3553 find_constituent_stores (group, &store->orig_stores,
3554 &first, group->start, group->width);
3555 split_stores->safe_push (store);
3556 }
3557
3558 return 1;
3559 }
3560
a62b3dc5 3561 unsigned int ret = 0, first = 0;
f663d9ad 3562 unsigned HOST_WIDE_INT try_pos = bytepos;
f663d9ad 3563
d7a9512e
JJ
3564 if (total_orig)
3565 {
3566 unsigned int i;
3567 store_immediate_info *info = group->stores[0];
3568
3569 total_new[0] = 0;
3570 total_orig[0] = 1; /* The orig store. */
3571 info = group->stores[0];
3572 if (info->ops[0].base_addr)
a6fbd154 3573 total_orig[0]++;
d7a9512e 3574 if (info->ops[1].base_addr)
a6fbd154 3575 total_orig[0]++;
d7a9512e
JJ
3576 switch (info->rhs_code)
3577 {
3578 case BIT_AND_EXPR:
3579 case BIT_IOR_EXPR:
3580 case BIT_XOR_EXPR:
3581 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3582 break;
3583 default:
3584 break;
3585 }
3586 total_orig[0] *= group->stores.length ();
3587
3588 FOR_EACH_VEC_ELT (group->stores, i, info)
a6fbd154
JJ
3589 {
3590 total_new[0] += count_multiple_uses (info);
3591 total_orig[0] += (info->bit_not_p
3592 + info->ops[0].bit_not_p
3593 + info->ops[1].bit_not_p);
3594 }
d7a9512e
JJ
3595 }
3596
245f6de1
JJ
3597 if (!allow_unaligned_load)
3598 for (int i = 0; i < 2; ++i)
3599 if (group->load_align[i])
3600 group_load_align = MIN (group_load_align, group->load_align[i]);
3601
3afd514b
JJ
3602 if (bzero_first)
3603 {
5384a802
JJ
3604 store_immediate_info *gstore;
3605 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3606 if (!gimple_clobber_p (gstore->stmt))
3607 break;
3608 ++first;
3afd514b
JJ
3609 ret = 1;
3610 if (split_stores)
3611 {
99b1c316 3612 split_store *store
5384a802
JJ
3613 = new split_store (bytepos, gstore->bitsize, align_base);
3614 store->orig_stores.safe_push (gstore);
3afd514b
JJ
3615 store->orig = true;
3616 any_orig = true;
3617 split_stores->safe_push (store);
3618 }
3619 }
3620
f663d9ad
KT
3621 while (size > 0)
3622 {
245f6de1 3623 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3afd514b
JJ
3624 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3625 || (bzero_first && group->val[try_pos - bytepos] == 0)))
a62b3dc5
JJ
3626 {
3627 /* Skip padding bytes. */
3628 ++try_pos;
3629 size -= BITS_PER_UNIT;
3630 continue;
3631 }
3632
f663d9ad 3633 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
a62b3dc5
JJ
3634 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3635 unsigned HOST_WIDE_INT align_bitpos
3636 = (try_bitpos - align_base) & (group_align - 1);
3637 unsigned HOST_WIDE_INT align = group_align;
5384a802 3638 bool found_orig = false;
a62b3dc5
JJ
3639 if (align_bitpos)
3640 align = least_bit_hwi (align_bitpos);
245f6de1 3641 if (!allow_unaligned_store)
a62b3dc5 3642 try_size = MIN (try_size, align);
245f6de1
JJ
3643 if (!allow_unaligned_load)
3644 {
3645 /* If we can't do or don't want to do unaligned stores
3646 as well as loads, we need to take the loads into account
3647 as well. */
3648 unsigned HOST_WIDE_INT load_align = group_load_align;
3649 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3650 if (align_bitpos)
3651 load_align = least_bit_hwi (align_bitpos);
3652 for (int i = 0; i < 2; ++i)
3653 if (group->load_align[i])
3654 {
8a91d545
RS
3655 align_bitpos
3656 = known_alignment (try_bitpos
3657 - group->stores[0]->bitpos
3658 + group->stores[0]->ops[i].bitpos
3659 - group->load_align_base[i]);
3660 if (align_bitpos & (group_load_align - 1))
245f6de1
JJ
3661 {
3662 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3663 load_align = MIN (load_align, a);
3664 }
3665 }
3666 try_size = MIN (try_size, load_align);
3667 }
a62b3dc5 3668 store_immediate_info *info
245f6de1 3669 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
5384a802 3670 if (info && !gimple_clobber_p (info->stmt))
a62b3dc5
JJ
3671 {
3672 /* If there is just one original statement for the range, see if
3673 we can just reuse the original store which could be even larger
3674 than try_size. */
3675 unsigned HOST_WIDE_INT stmt_end
3676 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
245f6de1
JJ
3677 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3678 stmt_end - try_bitpos);
a62b3dc5
JJ
3679 if (info && info->bitpos >= try_bitpos)
3680 {
5384a802
JJ
3681 store_immediate_info *info2 = NULL;
3682 unsigned int first_copy = first;
3683 if (info->bitpos > try_bitpos
3684 && stmt_end - try_bitpos <= try_size)
3685 {
3686 info2 = find_constituent_stores (group, NULL, &first_copy,
3687 try_bitpos,
3688 info->bitpos - try_bitpos);
3689 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3690 }
3691 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3692 {
3693 info2 = find_constituent_stores (group, NULL, &first_copy,
3694 stmt_end,
3695 (try_bitpos + try_size)
3696 - stmt_end);
3697 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3698 }
3699 if (info2 == NULL)
3700 {
3701 try_size = stmt_end - try_bitpos;
3702 found_orig = true;
3703 goto found;
3704 }
a62b3dc5
JJ
3705 }
3706 }
f663d9ad 3707
a62b3dc5
JJ
3708 /* Approximate store bitsize for the case when there are no padding
3709 bits. */
3710 while (try_size > size)
3711 try_size /= 2;
3712 /* Now look for whole padding bytes at the end of that bitsize. */
3713 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3714 if (group->mask[try_pos - bytepos + nonmasked - 1]
3afd514b
JJ
3715 != (unsigned char) ~0U
3716 && (!bzero_first
3717 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
a62b3dc5 3718 break;
5384a802 3719 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
a62b3dc5
JJ
3720 {
3721 /* If entire try_size range is padding, skip it. */
3722 try_pos += try_size / BITS_PER_UNIT;
3723 size -= try_size;
3724 continue;
3725 }
3726 /* Otherwise try to decrease try_size if second half, last 3 quarters
3727 etc. are padding. */
3728 nonmasked *= BITS_PER_UNIT;
3729 while (nonmasked <= try_size / 2)
3730 try_size /= 2;
245f6de1 3731 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
a62b3dc5
JJ
3732 {
3733 /* Now look for whole padding bytes at the start of that bitsize. */
3734 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3735 for (masked = 0; masked < try_bytesize; ++masked)
3afd514b
JJ
3736 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3737 && (!bzero_first
3738 || group->val[try_pos - bytepos + masked] != 0))
a62b3dc5
JJ
3739 break;
3740 masked *= BITS_PER_UNIT;
3741 gcc_assert (masked < try_size);
3742 if (masked >= try_size / 2)
3743 {
3744 while (masked >= try_size / 2)
3745 {
3746 try_size /= 2;
3747 try_pos += try_size / BITS_PER_UNIT;
3748 size -= try_size;
3749 masked -= try_size;
3750 }
3751 /* Need to recompute the alignment, so just retry at the new
3752 position. */
3753 continue;
3754 }
3755 }
3756
3757 found:
3758 ++ret;
f663d9ad 3759
a62b3dc5
JJ
3760 if (split_stores)
3761 {
99b1c316 3762 split_store *store
a62b3dc5 3763 = new split_store (try_pos, try_size, align);
245f6de1
JJ
3764 info = find_constituent_stores (group, &store->orig_stores,
3765 &first, try_bitpos, try_size);
a62b3dc5 3766 if (info
5384a802 3767 && !gimple_clobber_p (info->stmt)
a62b3dc5 3768 && info->bitpos >= try_bitpos
5384a802
JJ
3769 && info->bitpos + info->bitsize <= try_bitpos + try_size
3770 && (store->orig_stores.length () == 1
3771 || found_orig
3772 || (info->bitpos == try_bitpos
3773 && (info->bitpos + info->bitsize
3774 == try_bitpos + try_size))))
d7a9512e
JJ
3775 {
3776 store->orig = true;
3777 any_orig = true;
3778 }
a62b3dc5
JJ
3779 split_stores->safe_push (store);
3780 }
3781
3782 try_pos += try_size / BITS_PER_UNIT;
f663d9ad 3783 size -= try_size;
f663d9ad 3784 }
a62b3dc5 3785
d7a9512e
JJ
3786 if (total_orig)
3787 {
a6fbd154 3788 unsigned int i;
99b1c316 3789 split_store *store;
d7a9512e
JJ
3790 /* If we are reusing some original stores and any of the
3791 original SSA_NAMEs had multiple uses, we need to subtract
3792 those now before we add the new ones. */
3793 if (total_new[0] && any_orig)
3794 {
d7a9512e
JJ
3795 FOR_EACH_VEC_ELT (*split_stores, i, store)
3796 if (store->orig)
3797 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3798 }
3799 total_new[0] += ret; /* The new store. */
3800 store_immediate_info *info = group->stores[0];
3801 if (info->ops[0].base_addr)
a6fbd154 3802 total_new[0] += ret;
d7a9512e 3803 if (info->ops[1].base_addr)
a6fbd154 3804 total_new[0] += ret;
d7a9512e
JJ
3805 switch (info->rhs_code)
3806 {
3807 case BIT_AND_EXPR:
3808 case BIT_IOR_EXPR:
3809 case BIT_XOR_EXPR:
3810 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3811 break;
3812 default:
3813 break;
3814 }
a6fbd154
JJ
3815 FOR_EACH_VEC_ELT (*split_stores, i, store)
3816 {
3817 unsigned int j;
3818 bool bit_not_p[3] = { false, false, false };
3819 /* If all orig_stores have certain bit_not_p set, then
3820 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3821 If some orig_stores have certain bit_not_p set, then
3822 we'd use a BIT_XOR_EXPR with a mask and need to account for
3823 it. */
3824 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3825 {
3826 if (info->ops[0].bit_not_p)
3827 bit_not_p[0] = true;
3828 if (info->ops[1].bit_not_p)
3829 bit_not_p[1] = true;
3830 if (info->bit_not_p)
3831 bit_not_p[2] = true;
3832 }
3833 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3834 }
3835
d7a9512e
JJ
3836 }
3837
a62b3dc5 3838 return ret;
f663d9ad
KT
3839}
3840
a6fbd154
JJ
3841/* Return the operation through which the operand IDX (if < 2) or
3842 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3843 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
3844 the bits should be xored with mask. */
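/* Illustrative example: if a split store covers two constituent stores and
   only the first one has bit_not_p set, returning BIT_NOT_EXPR would wrongly
   invert both, so the function below returns BIT_XOR_EXPR together with a
   MASK whose bits are set only over the first store's range; gaps between
   the constituent stores are left clear in the mask.  */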
3845
3846static enum tree_code
3847invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3848{
3849 unsigned int i;
3850 store_immediate_info *info;
3851 unsigned int cnt = 0;
e215422f 3852 bool any_paddings = false;
a6fbd154
JJ
3853 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3854 {
3855 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3856 if (bit_not_p)
e215422f
JJ
3857 {
3858 ++cnt;
3859 tree lhs = gimple_assign_lhs (info->stmt);
3860 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3861 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3862 any_paddings = true;
3863 }
a6fbd154
JJ
3864 }
3865 mask = NULL_TREE;
3866 if (cnt == 0)
3867 return NOP_EXPR;
e215422f 3868 if (cnt == split_store->orig_stores.length () && !any_paddings)
a6fbd154
JJ
3869 return BIT_NOT_EXPR;
3870
3871 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3872 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3873 unsigned char *buf
3874 = XALLOCAVEC (unsigned char, buf_size);
3875 memset (buf, ~0U, buf_size);
3876 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3877 {
3878 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3879 if (!bit_not_p)
3880 continue;
3881 /* Clear regions with bit_not_p and invert afterwards, rather than
3882 clear regions with !bit_not_p, so that gaps in between stores aren't
3883 set in the mask. */
3884 unsigned HOST_WIDE_INT bitsize = info->bitsize;
e215422f 3885 unsigned HOST_WIDE_INT prec = bitsize;
a6fbd154 3886 unsigned int pos_in_buffer = 0;
e215422f
JJ
3887 if (any_paddings)
3888 {
3889 tree lhs = gimple_assign_lhs (info->stmt);
3890 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3891 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3892 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3893 }
a6fbd154
JJ
3894 if (info->bitpos < try_bitpos)
3895 {
3896 gcc_assert (info->bitpos + bitsize > try_bitpos);
e215422f
JJ
3897 if (!BYTES_BIG_ENDIAN)
3898 {
3899 if (prec <= try_bitpos - info->bitpos)
3900 continue;
3901 prec -= try_bitpos - info->bitpos;
3902 }
3903 bitsize -= try_bitpos - info->bitpos;
3904 if (BYTES_BIG_ENDIAN && prec > bitsize)
3905 prec = bitsize;
a6fbd154
JJ
3906 }
3907 else
3908 pos_in_buffer = info->bitpos - try_bitpos;
e215422f
JJ
3909 if (prec < bitsize)
3910 {
3911 /* If this is a bool inversion, invert just the least significant
3912 prec bits rather than all bits of it. */
3913 if (BYTES_BIG_ENDIAN)
3914 {
3915 pos_in_buffer += bitsize - prec;
3916 if (pos_in_buffer >= split_store->size)
3917 continue;
3918 }
3919 bitsize = prec;
3920 }
a6fbd154
JJ
3921 if (pos_in_buffer + bitsize > split_store->size)
3922 bitsize = split_store->size - pos_in_buffer;
3923 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3924 if (BYTES_BIG_ENDIAN)
3925 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3926 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3927 else
3928 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3929 }
3930 for (unsigned int i = 0; i < buf_size; ++i)
3931 buf[i] = ~buf[i];
3932 mask = native_interpret_expr (int_type, buf, buf_size);
3933 return BIT_XOR_EXPR;
3934}
3935
f663d9ad
KT
3936/* Given a merged store group GROUP output the widened version of it.
3937 The store chain is against the base object BASE.
3938 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3939 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3940 Make sure that the number of statements output is less than the number of
3941 original statements. If a better sequence is possible emit it and
3942 return true. */
3943
3944bool
b5926e23 3945imm_store_chain_info::output_merged_store (merged_store_group *group)
f663d9ad 3946{
e362a897 3947 const unsigned HOST_WIDE_INT start_byte_pos
a62b3dc5 3948 = group->bitregion_start / BITS_PER_UNIT;
f663d9ad
KT
3949 unsigned int orig_num_stmts = group->stores.length ();
3950 if (orig_num_stmts < 2)
3951 return false;
3952
245f6de1 3953 bool allow_unaligned_store
028d4092 3954 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
245f6de1 3955 bool allow_unaligned_load = allow_unaligned_store;
3afd514b 3956 bool bzero_first = false;
5384a802
JJ
3957 store_immediate_info *store;
3958 unsigned int num_clobber_stmts = 0;
3959 if (group->stores[0]->rhs_code == INTEGER_CST)
3960 {
e362a897 3961 unsigned int i;
5384a802
JJ
3962 FOR_EACH_VEC_ELT (group->stores, i, store)
3963 if (gimple_clobber_p (store->stmt))
3964 num_clobber_stmts++;
3965 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3966 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3967 && group->start == store->bitpos
3968 && group->width == store->bitsize
3969 && (group->start % BITS_PER_UNIT) == 0
3970 && (group->width % BITS_PER_UNIT) == 0)
3971 {
3972 bzero_first = true;
3973 break;
3974 }
3975 else
3976 break;
3977 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3978 if (gimple_clobber_p (store->stmt))
3979 num_clobber_stmts++;
3980 if (num_clobber_stmts == orig_num_stmts)
3981 return false;
3982 orig_num_stmts -= num_clobber_stmts;
3983 }
3afd514b 3984 if (allow_unaligned_store || bzero_first)
a62b3dc5
JJ
3985 {
3986 /* If unaligned stores are allowed, see how many stores we'd emit
3987 for unaligned and how many stores we'd emit for aligned stores.
3afd514b
JJ
3988 Only use unaligned stores if it allows fewer stores than aligned.
3989 Similarly, if there is a whole region clear first, prefer expanding
3990 it together compared to expanding clear first followed by merged
3991 further stores. */
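      /* The four passes below correspond to the combinations of the two
	 flags: bit 0 selects unaligned stores, bit 1 selects keeping the
	 initial clearing store (bzero_first).  Passes whose flag is not
	 actually available are skipped, and the dry run producing the
	 fewest split stores decides the final values of
	 allow_unaligned_store and bzero_first.  */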
21f65995 3992 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
3afd514b
JJ
3993 int pass_min = 0;
3994 for (int pass = 0; pass < 4; ++pass)
3995 {
3996 if (!allow_unaligned_store && (pass & 1) != 0)
3997 continue;
3998 if (!bzero_first && (pass & 2) != 0)
3999 continue;
4000 cnt[pass] = split_group (group, (pass & 1) != 0,
4001 allow_unaligned_load, (pass & 2) != 0,
4002 NULL, NULL, NULL);
4003 if (cnt[pass] < cnt[pass_min])
4004 pass_min = pass;
4005 }
4006 if ((pass_min & 1) == 0)
245f6de1 4007 allow_unaligned_store = false;
3afd514b
JJ
4008 if ((pass_min & 2) == 0)
4009 bzero_first = false;
a62b3dc5 4010 }
e362a897
EB
4011
4012 auto_vec<class split_store *, 32> split_stores;
4013 split_store *split_store;
4014 unsigned total_orig, total_new, i;
3afd514b 4015 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
d7a9512e 4016 &split_stores, &total_orig, &total_new);
a62b3dc5 4017
5384a802
JJ
4018 /* Determine if there is a clobber covering the whole group at the start,
4019 followed by proposed split stores that cover the whole group. In that
4020 case, prefer the transformation even if
4021 split_stores.length () == orig_num_stmts. */
4022 bool clobber_first = false;
4023 if (num_clobber_stmts
4024 && gimple_clobber_p (group->stores[0]->stmt)
4025 && group->start == group->stores[0]->bitpos
4026 && group->width == group->stores[0]->bitsize
4027 && (group->start % BITS_PER_UNIT) == 0
4028 && (group->width % BITS_PER_UNIT) == 0)
4029 {
4030 clobber_first = true;
4031 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
4032 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4033 if (split_store->bytepos != pos)
4034 {
4035 clobber_first = false;
4036 break;
4037 }
4038 else
4039 pos += split_store->size / BITS_PER_UNIT;
4040 if (pos != (group->start + group->width) / BITS_PER_UNIT)
4041 clobber_first = false;
4042 }
4043
4044 if (split_stores.length () >= orig_num_stmts + clobber_first)
a62b3dc5 4045 {
5384a802 4046
a62b3dc5
JJ
4047 /* We didn't manage to reduce the number of statements. Bail out. */
4048 if (dump_file && (dump_flags & TDF_DETAILS))
d7a9512e
JJ
4049 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4050 " Not profitable to emit new sequence.\n",
4051 orig_num_stmts);
dd172744
RB
4052 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4053 delete split_store;
a62b3dc5
JJ
4054 return false;
4055 }
d7a9512e
JJ
4056 if (total_orig <= total_new)
4057 {
4058 /* If number of estimated new statements is above estimated original
4059 statements, bail out too. */
4060 if (dump_file && (dump_flags & TDF_DETAILS))
4061 fprintf (dump_file, "Estimated number of original stmts (%u)"
4062 " not larger than estimated number of new"
4063 " stmts (%u).\n",
4064 total_orig, total_new);
dd172744
RB
4065 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4066 delete split_store;
4b84d9b8 4067 return false;
d7a9512e 4068 }
5384a802
JJ
4069 if (group->stores[0]->rhs_code == INTEGER_CST)
4070 {
4071 bool all_orig = true;
4072 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4073 if (!split_store->orig)
4074 {
4075 all_orig = false;
4076 break;
4077 }
4078 if (all_orig)
4079 {
4080 unsigned int cnt = split_stores.length ();
4081 store_immediate_info *store;
4082 FOR_EACH_VEC_ELT (group->stores, i, store)
4083 if (gimple_clobber_p (store->stmt))
4084 ++cnt;
4085 /* Punt if we wouldn't make any real changes, i.e. keep all
4086 orig stmts + all clobbers. */
4087 if (cnt == group->stores.length ())
4088 {
4089 if (dump_file && (dump_flags & TDF_DETAILS))
4090 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4091 " Not profitable to emit new sequence.\n",
4092 orig_num_stmts);
4093 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4094 delete split_store;
4095 return false;
4096 }
4097 }
4098 }
f663d9ad
KT
4099
4100 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
4101 gimple_seq seq = NULL;
f663d9ad
KT
4102 tree last_vdef, new_vuse;
4103 last_vdef = gimple_vdef (group->last_stmt);
4104 new_vuse = gimple_vuse (group->last_stmt);
4b84d9b8
JJ
4105 tree bswap_res = NULL_TREE;
4106
5384a802
JJ
4107 /* Clobbers are not removed. */
4108 if (gimple_clobber_p (group->last_stmt))
4109 {
4110 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4111 gimple_set_vdef (group->last_stmt, new_vuse);
4112 }
4113
4b84d9b8
JJ
4114 if (group->stores[0]->rhs_code == LROTATE_EXPR
4115 || group->stores[0]->rhs_code == NOP_EXPR)
4116 {
4117 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
4118 gimple *ins_stmt = group->stores[0]->ins_stmt;
4119 struct symbolic_number *n = &group->stores[0]->n;
4120 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
4121
4122 switch (n->range)
4123 {
4124 case 16:
4125 load_type = bswap_type = uint16_type_node;
4126 break;
4127 case 32:
4128 load_type = uint32_type_node;
4129 if (bswap)
4130 {
4131 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4132 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4133 }
4134 break;
4135 case 64:
4136 load_type = uint64_type_node;
4137 if (bswap)
4138 {
4139 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4140 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4141 }
4142 break;
4143 default:
4144 gcc_unreachable ();
4145 }
4146
4147      /* If the loads each have the vuse of the corresponding store,
4148	 we've checked the aliasing already in try_coalesce_bswap and
4149	 we want to sink the needed load into seq, so we need to use
4150	 new_vuse on the load.  */
30fa8e9c 4151 if (n->base_addr)
4b84d9b8 4152 {
30fa8e9c
JJ
4153 if (n->vuse == NULL)
4154 {
4155 n->vuse = new_vuse;
4156 ins_stmt = NULL;
4157 }
4158 else
4159 /* Update vuse in case it has changed by output_merged_stores. */
4160 n->vuse = gimple_vuse (ins_stmt);
4b84d9b8
JJ
4161 }
4162 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4163 bswap_type, load_type, n, bswap);
4164 gcc_assert (bswap_res);
4165 }
f663d9ad
KT
4166
4167 gimple *stmt = NULL;
245f6de1 4168 auto_vec<gimple *, 32> orig_stmts;
4b84d9b8
JJ
4169 gimple_seq this_seq;
4170 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
aa55dc0c 4171 is_gimple_mem_ref_addr, NULL_TREE);
4b84d9b8 4172 gimple_seq_add_seq_without_update (&seq, this_seq);
245f6de1
JJ
4173
4174 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4175 gimple_seq load_seq[2] = { NULL, NULL };
4176 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4177 for (int j = 0; j < 2; ++j)
4178 {
4179 store_operand_info &op = group->stores[0]->ops[j];
4180 if (op.base_addr == NULL_TREE)
4181 continue;
4182
4183 store_immediate_info *infol = group->stores.last ();
4184 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4185 {
97031af7
JJ
4186 /* We can't pick the location randomly; while we've verified
4187	     all the loads have the same vuse, they can still be in different
4188 basic blocks and we need to pick the one from the last bb:
4189 int x = q[0];
4190 if (x == N) return;
4191 int y = q[1];
4192 p[0] = x;
4193 p[1] = y;
4194 otherwise if we put the wider load at the q[0] load, we might
4195 segfault if q[1] is not mapped. */
4196 basic_block bb = gimple_bb (op.stmt);
4197 gimple *ostmt = op.stmt;
4198 store_immediate_info *info;
4199 FOR_EACH_VEC_ELT (group->stores, i, info)
4200 {
4201 gimple *tstmt = info->ops[j].stmt;
4202 basic_block tbb = gimple_bb (tstmt);
4203 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4204 {
4205 ostmt = tstmt;
4206 bb = tbb;
4207 }
4208 }
4209 load_gsi[j] = gsi_for_stmt (ostmt);
245f6de1
JJ
4210 load_addr[j]
4211 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4212 &load_seq[j], is_gimple_mem_ref_addr,
4213 NULL_TREE);
4214 }
4215 else if (operand_equal_p (base_addr, op.base_addr, 0))
4216 load_addr[j] = addr;
4217 else
3e2927a1 4218 {
3e2927a1
JJ
4219 load_addr[j]
4220 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4221 &this_seq, is_gimple_mem_ref_addr,
4222 NULL_TREE);
4223 gimple_seq_add_seq_without_update (&seq, this_seq);
4224 }
245f6de1
JJ
4225 }
4226
f663d9ad
KT
4227 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4228 {
e362a897
EB
4229 const unsigned HOST_WIDE_INT try_size = split_store->size;
4230 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4231 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4232 const unsigned HOST_WIDE_INT try_align = split_store->align;
4233 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
a62b3dc5
JJ
4234 tree dest, src;
4235 location_t loc;
e362a897 4236
a62b3dc5
JJ
4237 if (split_store->orig)
4238 {
5384a802
JJ
4239 /* If there is just a single non-clobber constituent store
4240 which covers the whole area, just reuse the lhs and rhs. */
4241 gimple *orig_stmt = NULL;
4242 store_immediate_info *store;
4243 unsigned int j;
4244 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4245 if (!gimple_clobber_p (store->stmt))
4246 {
4247 orig_stmt = store->stmt;
4248 break;
4249 }
245f6de1
JJ
4250 dest = gimple_assign_lhs (orig_stmt);
4251 src = gimple_assign_rhs1 (orig_stmt);
4252 loc = gimple_location (orig_stmt);
a62b3dc5
JJ
4253 }
4254 else
4255 {
245f6de1
JJ
4256 store_immediate_info *info;
4257 unsigned short clique, base;
4258 unsigned int k;
4259 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4260 orig_stmts.safe_push (info->stmt);
a62b3dc5 4261 tree offset_type
245f6de1 4262 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
e362a897 4263 tree dest_type;
245f6de1
JJ
4264 loc = get_location_for_stmts (orig_stmts);
4265 orig_stmts.truncate (0);
a62b3dc5 4266
e362a897
EB
4267 if (group->string_concatenation)
4268 dest_type
4269 = build_array_type_nelts (char_type_node,
4270 try_size / BITS_PER_UNIT);
4271 else
4272 {
4273 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4274 dest_type = build_aligned_type (dest_type, try_align);
4275 }
4276 dest = fold_build2 (MEM_REF, dest_type, addr,
a62b3dc5 4277 build_int_cst (offset_type, try_pos));
245f6de1
JJ
4278 if (TREE_CODE (dest) == MEM_REF)
4279 {
4280 MR_DEPENDENCE_CLIQUE (dest) = clique;
4281 MR_DEPENDENCE_BASE (dest) = base;
4282 }
4283
c94c3532 4284 tree mask;
e362a897 4285 if (bswap_res || group->string_concatenation)
c94c3532
EB
4286 mask = integer_zero_node;
4287 else
e362a897
EB
4288 mask = native_interpret_expr (dest_type,
4289 group->mask + try_offset,
4b84d9b8 4290 group->buf_size);
245f6de1
JJ
4291
4292 tree ops[2];
4293 for (int j = 0;
4294 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4295 ++j)
4296 {
4297 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4b84d9b8
JJ
4298 if (bswap_res)
4299 ops[j] = bswap_res;
e362a897
EB
4300 else if (group->string_concatenation)
4301 {
4302 ops[j] = build_string (try_size / BITS_PER_UNIT,
4303 (const char *) group->val + try_offset);
4304 TREE_TYPE (ops[j]) = dest_type;
4305 }
4b84d9b8 4306 else if (op.base_addr)
245f6de1
JJ
4307 {
4308 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4309 orig_stmts.safe_push (info->ops[j].stmt);
4310
4311 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4312 &clique, &base);
4313 location_t load_loc = get_location_for_stmts (orig_stmts);
4314 orig_stmts.truncate (0);
4315
4316 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4317 unsigned HOST_WIDE_INT align_bitpos
c94c3532 4318 = known_alignment (try_bitpos
8a91d545
RS
4319 - split_store->orig_stores[0]->bitpos
4320 + op.bitpos);
4321 if (align_bitpos & (load_align - 1))
245f6de1
JJ
4322 load_align = least_bit_hwi (align_bitpos);
4323
4324 tree load_int_type
4325 = build_nonstandard_integer_type (try_size, UNSIGNED);
4326 load_int_type
4327 = build_aligned_type (load_int_type, load_align);
4328
8a91d545 4329 poly_uint64 load_pos
c94c3532 4330 = exact_div (try_bitpos
8a91d545
RS
4331 - split_store->orig_stores[0]->bitpos
4332 + op.bitpos,
4333 BITS_PER_UNIT);
245f6de1
JJ
4334 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4335 build_int_cst (offset_type, load_pos));
4336 if (TREE_CODE (ops[j]) == MEM_REF)
4337 {
4338 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4339 MR_DEPENDENCE_BASE (ops[j]) = base;
4340 }
4341 if (!integer_zerop (mask))
4342 /* The load might load some bits (that will be masked off
4343		 later on) uninitialized; avoid -W*uninitialized
4344 warnings in that case. */
4345 TREE_NO_WARNING (ops[j]) = 1;
4346
e362a897 4347 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
245f6de1
JJ
4348 gimple_set_location (stmt, load_loc);
4349 if (gsi_bb (load_gsi[j]))
4350 {
4351 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4352 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4353 }
4354 else
4355 {
4356 gimple_set_vuse (stmt, new_vuse);
4357 gimple_seq_add_stmt_without_update (&seq, stmt);
4358 }
4359 ops[j] = gimple_assign_lhs (stmt);
a6fbd154
JJ
4360 tree xor_mask;
4361 enum tree_code inv_op
e362a897 4362 = invert_op (split_store, j, dest_type, xor_mask);
a6fbd154 4363 if (inv_op != NOP_EXPR)
383ac8dc 4364 {
e362a897 4365 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4366 inv_op, ops[j], xor_mask);
383ac8dc
JJ
4367 gimple_set_location (stmt, load_loc);
4368 ops[j] = gimple_assign_lhs (stmt);
4369
4370 if (gsi_bb (load_gsi[j]))
4371 gimple_seq_add_stmt_without_update (&load_seq[j],
4372 stmt);
4373 else
4374 gimple_seq_add_stmt_without_update (&seq, stmt);
4375 }
245f6de1
JJ
4376 }
4377 else
e362a897
EB
4378 ops[j] = native_interpret_expr (dest_type,
4379 group->val + try_offset,
245f6de1
JJ
4380 group->buf_size);
4381 }
4382
4383 switch (split_store->orig_stores[0]->rhs_code)
4384 {
4385 case BIT_AND_EXPR:
4386 case BIT_IOR_EXPR:
4387 case BIT_XOR_EXPR:
4388 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4389 {
4390 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4391 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4392 }
4393 location_t bit_loc;
4394 bit_loc = get_location_for_stmts (orig_stmts);
4395 orig_stmts.truncate (0);
4396
4397 stmt
e362a897 4398 = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4399 split_store->orig_stores[0]->rhs_code,
4400 ops[0], ops[1]);
4401 gimple_set_location (stmt, bit_loc);
4402 /* If there is just one load and there is a separate
4403 load_seq[0], emit the bitwise op right after it. */
4404 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4405 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4406 /* Otherwise, if at least one load is in seq, we need to
4407 emit the bitwise op right before the store. If there
4408 are two loads and are emitted somewhere else, it would
4409 be better to emit the bitwise op as early as possible;
4410 we don't track where that would be possible right now
4411 though. */
4412 else
4413 gimple_seq_add_stmt_without_update (&seq, stmt);
4414 src = gimple_assign_lhs (stmt);
a6fbd154
JJ
4415 tree xor_mask;
4416 enum tree_code inv_op;
e362a897 4417 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
a6fbd154 4418 if (inv_op != NOP_EXPR)
d60edaba 4419 {
e362a897 4420 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4421 inv_op, src, xor_mask);
d60edaba
JJ
4422 gimple_set_location (stmt, bit_loc);
4423 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4424 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4425 else
4426 gimple_seq_add_stmt_without_update (&seq, stmt);
4427 src = gimple_assign_lhs (stmt);
4428 }
245f6de1 4429 break;
4b84d9b8
JJ
4430 case LROTATE_EXPR:
4431 case NOP_EXPR:
4432 src = ops[0];
4433 if (!is_gimple_val (src))
4434 {
4435 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4436 src);
4437 gimple_seq_add_stmt_without_update (&seq, stmt);
4438 src = gimple_assign_lhs (stmt);
4439 }
e362a897 4440 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4b84d9b8 4441 {
e362a897 4442 stmt = gimple_build_assign (make_ssa_name (dest_type),
4b84d9b8
JJ
4443 NOP_EXPR, src);
4444 gimple_seq_add_stmt_without_update (&seq, stmt);
4445 src = gimple_assign_lhs (stmt);
4446 }
e362a897 4447 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
be52ac73
JJ
4448 if (inv_op != NOP_EXPR)
4449 {
e362a897 4450 stmt = gimple_build_assign (make_ssa_name (dest_type),
be52ac73
JJ
4451 inv_op, src, xor_mask);
4452 gimple_set_location (stmt, loc);
4453 gimple_seq_add_stmt_without_update (&seq, stmt);
4454 src = gimple_assign_lhs (stmt);
4455 }
4b84d9b8 4456 break;
245f6de1
JJ
4457 default:
4458 src = ops[0];
4459 break;
4460 }
4461
c94c3532
EB
4462 /* If bit insertion is required, we use the source as an accumulator
4463 into which the successive bit-field values are manually inserted.
4464 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
4465 if (group->bit_insertion)
4466 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4467 if (info->rhs_code == BIT_INSERT_EXPR
4468 && info->bitpos < try_bitpos + try_size
4469 && info->bitpos + info->bitsize > try_bitpos)
4470 {
4471 /* Mask, truncate, convert to final type, shift and ior into
4472 the accumulator. Note that every step can be a no-op. */
4473 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4474 const HOST_WIDE_INT end_gap
4475 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4476 tree tem = info->ops[0].val;
ed01d707
EB
4477 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4478 {
4479 const unsigned HOST_WIDE_INT size
4480 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4481 tree integer_type
4482 = build_nonstandard_integer_type (size, UNSIGNED);
4483 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4484 integer_type, tem);
4485 }
c14add82
EB
4486 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4487 {
4488 tree bitfield_type
4489 = build_nonstandard_integer_type (info->bitsize,
4490 UNSIGNED);
4491 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4492 }
4493 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
c94c3532
EB
4494 {
4495 const unsigned HOST_WIDE_INT imask
4496 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4497 tem = gimple_build (&seq, loc,
4498 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4499 build_int_cst (TREE_TYPE (tem),
4500 imask));
4501 }
4502 const HOST_WIDE_INT shift
4503 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4504 if (shift < 0)
4505 tem = gimple_build (&seq, loc,
4506 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4507 build_int_cst (NULL_TREE, -shift));
e362a897 4508 tem = gimple_convert (&seq, loc, dest_type, tem);
c94c3532
EB
4509 if (shift > 0)
4510 tem = gimple_build (&seq, loc,
e362a897 4511 LSHIFT_EXPR, dest_type, tem,
c94c3532
EB
4512 build_int_cst (NULL_TREE, shift));
4513 src = gimple_build (&seq, loc,
e362a897 4514 BIT_IOR_EXPR, dest_type, tem, src);
c94c3532
EB
4515 }
4516
a62b3dc5
JJ
4517 if (!integer_zerop (mask))
4518 {
e362a897 4519 tree tem = make_ssa_name (dest_type);
a62b3dc5
JJ
4520 tree load_src = unshare_expr (dest);
4521	  /* The load might load some or all bits uninitialized;
4522	     avoid -W*uninitialized warnings in that case.
4523	     As an optimization, if all the bits were provably
4524	     uninitialized (no stores at all yet, or the previous
4525	     store was a CLOBBER), we could optimize away the load
4526	     and replace it e.g. with 0.  */
4527 TREE_NO_WARNING (load_src) = 1;
4528 stmt = gimple_build_assign (tem, load_src);
4529 gimple_set_location (stmt, loc);
4530 gimple_set_vuse (stmt, new_vuse);
4531 gimple_seq_add_stmt_without_update (&seq, stmt);
4532
4533 /* FIXME: If there is a single chunk of zero bits in mask,
4534 perhaps use BIT_INSERT_EXPR instead? */
e362a897 4535 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4536 BIT_AND_EXPR, tem, mask);
4537 gimple_set_location (stmt, loc);
4538 gimple_seq_add_stmt_without_update (&seq, stmt);
4539 tem = gimple_assign_lhs (stmt);
4540
245f6de1 4541 if (TREE_CODE (src) == INTEGER_CST)
e362a897 4542 src = wide_int_to_tree (dest_type,
245f6de1
JJ
4543 wi::bit_and_not (wi::to_wide (src),
4544 wi::to_wide (mask)));
4545 else
4546 {
4547 tree nmask
e362a897 4548 = wide_int_to_tree (dest_type,
245f6de1 4549 wi::bit_not (wi::to_wide (mask)));
e362a897 4550 stmt = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4551 BIT_AND_EXPR, src, nmask);
4552 gimple_set_location (stmt, loc);
4553 gimple_seq_add_stmt_without_update (&seq, stmt);
4554 src = gimple_assign_lhs (stmt);
4555 }
e362a897 4556 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4557 BIT_IOR_EXPR, tem, src);
4558 gimple_set_location (stmt, loc);
4559 gimple_seq_add_stmt_without_update (&seq, stmt);
4560 src = gimple_assign_lhs (stmt);
4561 }
4562 }
f663d9ad
KT
4563
4564 stmt = gimple_build_assign (dest, src);
4565 gimple_set_location (stmt, loc);
4566 gimple_set_vuse (stmt, new_vuse);
4567 gimple_seq_add_stmt_without_update (&seq, stmt);
4568
629387a6
EB
4569 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4570 add_stmt_to_eh_lp (stmt, group->lp_nr);
4571
f663d9ad
KT
4572 tree new_vdef;
4573 if (i < split_stores.length () - 1)
a62b3dc5 4574 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
f663d9ad
KT
4575 else
4576 new_vdef = last_vdef;
4577
4578 gimple_set_vdef (stmt, new_vdef);
4579 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4580 new_vuse = new_vdef;
4581 }
4582
4583 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4584 delete split_store;
4585
f663d9ad
KT
4586 gcc_assert (seq);
4587 if (dump_file)
4588 {
4589 fprintf (dump_file,
c94c3532 4590 "New sequence of %u stores to replace old one of %u stores\n",
a62b3dc5 4591 split_stores.length (), orig_num_stmts);
f663d9ad
KT
4592 if (dump_flags & TDF_DETAILS)
4593 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4594 }
629387a6 4595
5384a802
JJ
4596 if (gimple_clobber_p (group->last_stmt))
4597 update_stmt (group->last_stmt);
4598
629387a6
EB
4599 if (group->lp_nr > 0)
4600 {
4601 /* We're going to insert a sequence of (potentially) throwing stores
4602 into an active EH region. This means that we're going to create
4603 new basic blocks with EH edges pointing to the post landing pad
4604 and, therefore, to have to update its PHI nodes, if any. For the
4605 virtual PHI node, we're going to use the VDEFs created above, but
4606 for the other nodes, we need to record the original reaching defs. */
4607 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4608 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4609 basic_block last_bb = gimple_bb (group->last_stmt);
4610 edge last_edge = find_edge (last_bb, lp_bb);
4611 auto_vec<tree, 16> last_defs;
4612 gphi_iterator gpi;
4613 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4614 {
4615 gphi *phi = gpi.phi ();
4616 tree last_def;
4617 if (virtual_operand_p (gimple_phi_result (phi)))
4618 last_def = NULL_TREE;
4619 else
4620 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4621 last_defs.safe_push (last_def);
4622 }
4623
4624 /* Do the insertion. Then, if new basic blocks have been created in the
4625	 process, rewind the chain of VDEFs created above to walk the new basic
4626 blocks and update the corresponding arguments of the PHI nodes. */
4627 update_modified_stmts (seq);
4628 if (gimple_find_sub_bbs (seq, &last_gsi))
4629 while (last_vdef != gimple_vuse (group->last_stmt))
4630 {
4631 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4632 if (stmt_could_throw_p (cfun, stmt))
4633 {
4634 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4635 unsigned int i;
4636 for (gpi = gsi_start_phis (lp_bb), i = 0;
4637 !gsi_end_p (gpi);
4638 gsi_next (&gpi), i++)
4639 {
4640 gphi *phi = gpi.phi ();
4641 tree new_def;
4642 if (virtual_operand_p (gimple_phi_result (phi)))
4643 new_def = last_vdef;
4644 else
4645 new_def = last_defs[i];
4646 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4647 }
4648 }
4649 last_vdef = gimple_vuse (stmt);
4650 }
4651 }
4652 else
4653 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4654
245f6de1
JJ
4655 for (int j = 0; j < 2; ++j)
4656 if (load_seq[j])
4657 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
f663d9ad
KT
4658
4659 return true;
4660}
4661
4662/* Process the merged_store_group objects created in the coalescing phase.
4663 The stores are all against the base object BASE.
4664 Try to output the widened stores and delete the original statements if
4665 successful. Return true iff any changes were made. */
4666
4667bool
b5926e23 4668imm_store_chain_info::output_merged_stores ()
f663d9ad
KT
4669{
4670 unsigned int i;
4671 merged_store_group *merged_store;
4672 bool ret = false;
4673 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4674 {
a95b474a
ML
4675 if (dbg_cnt (store_merging)
4676 && output_merged_store (merged_store))
f663d9ad
KT
4677 {
4678 unsigned int j;
4679 store_immediate_info *store;
4680 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4681 {
4682 gimple *stmt = store->stmt;
4683 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5384a802
JJ
4684 /* Don't remove clobbers, they are still useful even if
4685 everything is overwritten afterwards. */
4686 if (gimple_clobber_p (stmt))
4687 continue;
f663d9ad 4688 gsi_remove (&gsi, true);
629387a6
EB
4689 if (store->lp_nr)
4690 remove_stmt_from_eh_lp (stmt);
f663d9ad
KT
4691 if (stmt != merged_store->last_stmt)
4692 {
4693 unlink_stmt_vdef (stmt);
4694 release_defs (stmt);
4695 }
4696 }
4697 ret = true;
4698 }
4699 }
4700 if (ret && dump_file)
4701 fprintf (dump_file, "Merging successful!\n");
4702
4703 return ret;
4704}
4705
4706/* Coalesce the store_immediate_info objects recorded against the base object
4707 BASE in the first phase and output them.
4708 Delete the allocated structures.
4709 Return true if any changes were made. */
4710
4711bool
b5926e23 4712imm_store_chain_info::terminate_and_process_chain ()
f663d9ad 4713{
95d94b52
RB
4714 if (dump_file && (dump_flags & TDF_DETAILS))
4715 fprintf (dump_file, "Terminating chain with %u stores\n",
4716 m_store_info.length ());
f663d9ad
KT
4717 /* Process store chain. */
4718 bool ret = false;
4719 if (m_store_info.length () > 1)
4720 {
4721 ret = coalesce_immediate_stores ();
4722 if (ret)
b5926e23 4723 ret = output_merged_stores ();
f663d9ad
KT
4724 }
4725
4726 /* Delete all the entries we allocated ourselves. */
4727 store_immediate_info *info;
4728 unsigned int i;
4729 FOR_EACH_VEC_ELT (m_store_info, i, info)
4730 delete info;
4731
4732 merged_store_group *merged_info;
4733 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4734 delete merged_info;
4735
4736 return ret;
4737}
4738
4739/* Return true iff LHS is a destination potentially interesting for
4740 store merging. In practice these are the codes that get_inner_reference
4741 can process. */
4742
4743static bool
4744lhs_valid_for_store_merging_p (tree lhs)
4745{
629387a6 4746 if (DECL_P (lhs))
f663d9ad
KT
4747 return true;
4748
629387a6
EB
4749 switch (TREE_CODE (lhs))
4750 {
4751 case ARRAY_REF:
4752 case ARRAY_RANGE_REF:
4753 case BIT_FIELD_REF:
4754 case COMPONENT_REF:
4755 case MEM_REF:
e362a897 4756 case VIEW_CONVERT_EXPR:
629387a6
EB
4757 return true;
4758 default:
4759 return false;
4760 }
4761
4762 gcc_unreachable ();
f663d9ad
KT
4763}
4764
4765/* Return true if the tree RHS is a constant we want to consider
4766 during store merging. In practice accept all codes that
4767 native_encode_expr accepts. */
4768
4769static bool
4770rhs_valid_for_store_merging_p (tree rhs)
4771{
cf098191 4772 unsigned HOST_WIDE_INT size;
3afd514b 4773 if (TREE_CODE (rhs) == CONSTRUCTOR
3afd514b
JJ
4774 && CONSTRUCTOR_NELTS (rhs) == 0
4775 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4776 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4777 return true;
cf098191
RS
4778 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4779 && native_encode_expr (rhs, NULL, size) != 0);
f663d9ad
KT
4780}
4781
629387a6
EB
4782/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4783 and return true on success or false on failure. */
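/* For example, a BYTE_OFF of 4 shifts *PBITPOS (and, when a bit region is
   present, *PBITREGION_START and *PBITREGION_END) by 32 bits; failure is
   returned when the adjusted bit position would be negative or would not
   fit in the respective poly_int type.  */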
4784
4785static bool
4786adjust_bit_pos (poly_offset_int byte_off,
4787 poly_int64 *pbitpos,
4788 poly_uint64 *pbitregion_start,
4789 poly_uint64 *pbitregion_end)
4790{
4791 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4792 bit_off += *pbitpos;
4793
4794 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4795 {
4796 if (maybe_ne (*pbitregion_end, 0U))
4797 {
4798 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4799 bit_off += *pbitregion_start;
4800 if (bit_off.to_uhwi (pbitregion_start))
4801 {
4802 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4803 bit_off += *pbitregion_end;
4804 if (!bit_off.to_uhwi (pbitregion_end))
4805 *pbitregion_end = 0;
4806 }
4807 else
4808 *pbitregion_end = 0;
4809 }
4810 return true;
4811 }
4812 else
4813 return false;
4814}
4815
245f6de1
JJ
4816/* If MEM is a memory reference usable for store merging (either as
4817 store destination or for loads), return the non-NULL base_addr
4818 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4819   Otherwise return NULL; *PBITPOS should still be valid even in that
4820 case. */
4821
4822static tree
8a91d545
RS
4823mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4824 poly_uint64 *pbitpos,
4825 poly_uint64 *pbitregion_start,
4826 poly_uint64 *pbitregion_end)
245f6de1 4827{
8a91d545
RS
4828 poly_int64 bitsize, bitpos;
4829 poly_uint64 bitregion_start = 0, bitregion_end = 0;
245f6de1
JJ
4830 machine_mode mode;
4831 int unsignedp = 0, reversep = 0, volatilep = 0;
4832 tree offset;
4833 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4834 &unsignedp, &reversep, &volatilep);
4835 *pbitsize = bitsize;
8a91d545 4836 if (known_eq (bitsize, 0))
245f6de1
JJ
4837 return NULL_TREE;
4838
4839 if (TREE_CODE (mem) == COMPONENT_REF
4840 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4841 {
4842 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
8a91d545
RS
4843 if (maybe_ne (bitregion_end, 0U))
4844 bitregion_end += 1;
245f6de1
JJ
4845 }
4846
4847 if (reversep)
4848 return NULL_TREE;
4849
4850 /* We do not want to rewrite TARGET_MEM_REFs. */
4851 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4852 return NULL_TREE;
4853 /* In some cases get_inner_reference may return a
4854 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4855 canonicalize the base_addr to MEM_REF [ptr] and take
4856 byteoffset into account in the bitpos. This occurs in
4857 PR 23684 and this way we can catch more chains. */
4858 else if (TREE_CODE (base_addr) == MEM_REF)
4859 {
629387a6
EB
4860 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4861 &bitregion_start, &bitregion_end))
245f6de1
JJ
4862 return NULL_TREE;
4863 base_addr = TREE_OPERAND (base_addr, 0);
4864 }
4865 /* get_inner_reference returns the base object, get at its
4866 address now. */
4867 else
4868 {
8a91d545 4869 if (maybe_lt (bitpos, 0))
245f6de1
JJ
4870 return NULL_TREE;
4871 base_addr = build_fold_addr_expr (base_addr);
4872 }
4873
629387a6 4874 if (offset)
245f6de1
JJ
4875 {
4876 /* If the access is variable offset then a base decl has to be
4877 address-taken to be able to emit pointer-based stores to it.
4878 ??? We might be able to get away with re-using the original
4879 base up to the first variable part and then wrapping that inside
4880 a BIT_FIELD_REF. */
4881 tree base = get_base_address (base_addr);
629387a6 4882 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
4883 return NULL_TREE;
4884
4885 /* As for the base above, fold a constant offset term into the bit position. */
4886 if (TREE_CODE (offset) == PLUS_EXPR
4887 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4888 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4889 &bitpos, &bitregion_start, &bitregion_end))
4890 offset = TREE_OPERAND (offset, 0);
4891
4892 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4893 base_addr, offset);
4894 }
4895
4896 if (known_eq (bitregion_end, 0U))
4897 {
4898 bitregion_start = round_down_to_byte_boundary (bitpos);
4899 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4900 }
4901
4902 *pbitsize = bitsize;
4903 *pbitpos = bitpos;
4904 *pbitregion_start = bitregion_start;
4905 *pbitregion_end = bitregion_end;
4906 return base_addr;
4907}
4908
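/* Illustrative sketch (not used by the pass): for constant positions the
   default bit region computed above is simply the smallest byte-aligned
   region enclosing the access, e.g. a 17-bit store at bit position 3 gets
   the region [0, 24).  The helper names below are made up for the example.  */
#if 0
static inline unsigned HOST_WIDE_INT
bitregion_start_sketch (unsigned HOST_WIDE_INT bitpos)
{
  return bitpos & ~(unsigned HOST_WIDE_INT) (BITS_PER_UNIT - 1);
}

static inline unsigned HOST_WIDE_INT
bitregion_end_sketch (unsigned HOST_WIDE_INT bitpos,
                      unsigned HOST_WIDE_INT bitsize)
{
  return ((bitpos + bitsize + BITS_PER_UNIT - 1)
          & ~(unsigned HOST_WIDE_INT) (BITS_PER_UNIT - 1));
}
#endif
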
4909/* Return true if STMT is a load that can be used for store merging.
4910 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4911 BITREGION_END are properties of the corresponding store. */
4912
4913static bool
4914handled_load (gimple *stmt, store_operand_info *op,
4915 poly_uint64 bitsize, poly_uint64 bitpos,
4916 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
245f6de1 4917{
383ac8dc 4918 if (!is_gimple_assign (stmt))
245f6de1 4919 return false;
4920 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4921 {
4922 tree rhs1 = gimple_assign_rhs1 (stmt);
4923 if (TREE_CODE (rhs1) == SSA_NAME
4924 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4925 bitregion_start, bitregion_end))
4926 {
4927 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4928 been optimized earlier, but if allowed here, would confuse the
4929 multiple uses counting. */
4930 if (op->bit_not_p)
4931 return false;
4932 op->bit_not_p = !op->bit_not_p;
4933 return true;
4934 }
4935 return false;
4936 }
4937 if (gimple_vuse (stmt)
4938 && gimple_assign_load_p (stmt)
36bbc05d 4939 && !stmt_can_throw_internal (cfun, stmt)
4940 && !gimple_has_volatile_ops (stmt))
4941 {
4942 tree mem = gimple_assign_rhs1 (stmt);
4943 op->base_addr
4944 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4945 &op->bitregion_start,
4946 &op->bitregion_end);
4947 if (op->base_addr != NULL_TREE
4948 && known_eq (op->bitsize, bitsize)
4949 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4950 && known_ge (op->bitpos - op->bitregion_start,
4951 bitpos - bitregion_start)
4952 && known_ge (op->bitregion_end - op->bitpos,
4953 bitregion_end - bitpos))
4954 {
4955 op->stmt = stmt;
4956 op->val = mem;
383ac8dc 4957 op->bit_not_p = false;
4958 return true;
4959 }
4960 }
4961 return false;
4962}
4963
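/* As an illustration (example statements, not taken from the sources or
   testsuite), handled_load accepts the value definition in sequences like

     _1 = MEM[(char *)q_2(D) + 1B];
     MEM[(char *)p_3(D) + 1B] = _1;

   and also the inverted form

     _1 = MEM[(char *)q_2(D) + 1B];
     _2 = ~_1;
     MEM[(char *)p_3(D) + 1B] = _2;

   recording the load in *OP (with bit_not_p set in the second case),
   provided the load has the same bitsize as the store, is byte-aligned
   relative to it and its bit region covers at least as much as the
   store's.  */
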
4964/* Return the index number of the landing pad for STMT, if any. */
4965
4966static int
4967lp_nr_for_store (gimple *stmt)
4968{
4969 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4970 return 0;
4971
4972 if (!stmt_could_throw_p (cfun, stmt))
4973 return 0;
4974
4975 return lookup_stmt_eh_lp (stmt);
4976}
4977
245f6de1 4978/* Record the store STMT for store merging optimization if it can be
629387a6 4979 optimized. Return true if any changes were made. */
245f6de1 4980
629387a6 4981bool
4982pass_store_merging::process_store (gimple *stmt)
4983{
4984 tree lhs = gimple_assign_lhs (stmt);
4985 tree rhs = gimple_assign_rhs1 (stmt);
4986 poly_uint64 bitsize, bitpos = 0;
4987 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4988 tree base_addr
4989 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4990 &bitregion_start, &bitregion_end);
8a91d545 4991 if (known_eq (bitsize, 0U))
629387a6 4992 return false;
4993
4994 bool invalid = (base_addr == NULL_TREE
4995 || (maybe_gt (bitsize,
4996 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4997 && TREE_CODE (rhs) != INTEGER_CST
4998 && (TREE_CODE (rhs) != CONSTRUCTOR
4999 || CONSTRUCTOR_NELTS (rhs) != 0)));
245f6de1 5000 enum tree_code rhs_code = ERROR_MARK;
d60edaba 5001 bool bit_not_p = false;
5002 struct symbolic_number n;
5003 gimple *ins_stmt = NULL;
5004 store_operand_info ops[2];
5005 if (invalid)
5006 ;
5007 else if (TREE_CODE (rhs) == STRING_CST)
5008 {
5009 rhs_code = STRING_CST;
5010 ops[0].val = rhs;
5011 }
5012 else if (rhs_valid_for_store_merging_p (rhs))
5013 {
5014 rhs_code = INTEGER_CST;
5015 ops[0].val = rhs;
5016 }
e362a897 5017 else if (TREE_CODE (rhs) == SSA_NAME)
5018 {
5019 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
5020 if (!is_gimple_assign (def_stmt))
5021 invalid = true;
5022 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
5023 bitregion_start, bitregion_end))
5024 rhs_code = MEM_REF;
5025 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
5026 {
5027 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5028 if (TREE_CODE (rhs1) == SSA_NAME
5029 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
5030 {
5031 bit_not_p = true;
5032 def_stmt = SSA_NAME_DEF_STMT (rhs1);
5033 }
5034 }
c94c3532 5035
d60edaba 5036 if (rhs_code == ERROR_MARK && !invalid)
5037 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
5038 {
5039 case BIT_AND_EXPR:
5040 case BIT_IOR_EXPR:
5041 case BIT_XOR_EXPR:
5042 tree rhs1, rhs2;
5043 rhs1 = gimple_assign_rhs1 (def_stmt);
5044 rhs2 = gimple_assign_rhs2 (def_stmt);
5045 invalid = true;
d7a9512e 5046 if (TREE_CODE (rhs1) != SSA_NAME)
5047 break;
5048 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
5049 if (!is_gimple_assign (def_stmt1)
5050 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
5051 bitregion_start, bitregion_end))
5052 break;
5053 if (rhs_valid_for_store_merging_p (rhs2))
5054 ops[1].val = rhs2;
d7a9512e 5055 else if (TREE_CODE (rhs2) != SSA_NAME)
5056 break;
5057 else
5058 {
5059 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
5060 if (!is_gimple_assign (def_stmt2))
5061 break;
5062 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
5063 bitregion_start, bitregion_end))
5064 break;
5065 }
5066 invalid = false;
5067 break;
5068 default:
5069 invalid = true;
5070 break;
5071 }
c94c3532 5072
5073 unsigned HOST_WIDE_INT const_bitsize;
5074 if (bitsize.is_constant (&const_bitsize)
c94c3532 5075 && (const_bitsize % BITS_PER_UNIT) == 0
8a91d545 5076 && const_bitsize <= 64
c94c3532 5077 && multiple_p (bitpos, BITS_PER_UNIT))
5078 {
5079 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
5080 if (ins_stmt)
5081 {
5082 uint64_t nn = n.n;
5083 for (unsigned HOST_WIDE_INT i = 0;
5084 i < const_bitsize;
5085 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
5086 if ((nn & MARKER_MASK) == 0
5087 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
5088 {
5089 ins_stmt = NULL;
5090 break;
5091 }
5092 if (ins_stmt)
5093 {
5094 if (invalid)
5095 {
5096 rhs_code = LROTATE_EXPR;
5097 ops[0].base_addr = NULL_TREE;
5098 ops[1].base_addr = NULL_TREE;
5099 }
5100 invalid = false;
5101 }
5102 }
5103 }
5104
5105 if (invalid
5106 && bitsize.is_constant (&const_bitsize)
5107 && ((const_bitsize % BITS_PER_UNIT) != 0
5108 || !multiple_p (bitpos, BITS_PER_UNIT))
ed01d707 5109 && const_bitsize <= MAX_FIXED_MODE_SIZE)
c94c3532 5110 {
c14add82 5111 /* Bypass a conversion to the bit-field type. */
5112 if (!bit_not_p
5113 && is_gimple_assign (def_stmt)
5114 && CONVERT_EXPR_CODE_P (rhs_code))
5115 {
5116 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5117 if (TREE_CODE (rhs1) == SSA_NAME
c14add82 5118 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
5119 rhs = rhs1;
5120 }
5121 rhs_code = BIT_INSERT_EXPR;
31a5d8c5 5122 bit_not_p = false;
5123 ops[0].val = rhs;
5124 ops[0].base_addr = NULL_TREE;
5125 ops[1].base_addr = NULL_TREE;
5126 invalid = false;
5127 }
245f6de1 5128 }
5129 else
5130 invalid = true;
245f6de1 5131
5132 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
5133 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
5134 if (invalid
5135 || !bitsize.is_constant (&const_bitsize)
5136 || !bitpos.is_constant (&const_bitpos)
5137 || !bitregion_start.is_constant (&const_bitregion_start)
5138 || !bitregion_end.is_constant (&const_bitregion_end))
629387a6 5139 return terminate_all_aliasing_chains (NULL, stmt);
245f6de1 5140
5141 if (!ins_stmt)
5142 memset (&n, 0, sizeof (n));
5143
99b1c316 5144 class imm_store_chain_info **chain_info = NULL;
629387a6 5145 bool ret = false;
5146 if (base_addr)
5147 chain_info = m_stores.get (base_addr);
5148
5149 store_immediate_info *info;
5150 if (chain_info)
5151 {
5152 unsigned int ord = (*chain_info)->m_store_info.length ();
5153 info = new store_immediate_info (const_bitsize, const_bitpos,
5154 const_bitregion_start,
5155 const_bitregion_end,
5156 stmt, ord, rhs_code, n, ins_stmt,
5157 bit_not_p, lp_nr_for_store (stmt),
5158 ops[0], ops[1]);
5159 if (dump_file && (dump_flags & TDF_DETAILS))
5160 {
5161 fprintf (dump_file, "Recording immediate store from stmt:\n");
5162 print_gimple_stmt (dump_file, stmt, 0);
5163 }
5164 (*chain_info)->m_store_info.safe_push (info);
95d94b52 5165 m_n_stores++;
629387a6 5166 ret |= terminate_all_aliasing_chains (chain_info, stmt);
5167 /* If we reach the limit of stores to merge in a chain terminate and
5168 process the chain now. */
5169 if ((*chain_info)->m_store_info.length ()
028d4092 5170 == (unsigned int) param_max_stores_to_merge)
5171 {
5172 if (dump_file && (dump_flags & TDF_DETAILS))
5173 fprintf (dump_file,
5174 "Reached maximum number of statements to merge:\n");
629387a6 5175 ret |= terminate_and_process_chain (*chain_info);
245f6de1 5176 }
245f6de1 5177 }
5178 else
5179 {
5180 /* Store aliases any existing chain? */
5181 ret |= terminate_all_aliasing_chains (NULL, stmt);
245f6de1 5182
5183 /* Start a new chain. */
5184 class imm_store_chain_info *new_chain
5185 = new imm_store_chain_info (m_stores_head, base_addr);
5186 info = new store_immediate_info (const_bitsize, const_bitpos,
5187 const_bitregion_start,
5188 const_bitregion_end,
5189 stmt, 0, rhs_code, n, ins_stmt,
5190 bit_not_p, lp_nr_for_store (stmt),
5191 ops[0], ops[1]);
5192 new_chain->m_store_info.safe_push (info);
5193 m_n_stores++;
5194 m_stores.put (base_addr, new_chain);
5195 m_n_chains++;
5196 if (dump_file && (dump_flags & TDF_DETAILS))
5197 {
5198 fprintf (dump_file, "Starting active chain number %u with statement:\n",
5199 m_n_chains);
5200 print_gimple_stmt (dump_file, stmt, 0);
5201 fprintf (dump_file, "The base object is:\n");
5202 print_generic_expr (dump_file, base_addr);
5203 fprintf (dump_file, "\n");
5204 }
5205 }
5206
5207 /* Prune oldest chains so that after adding the chain or store above
5208 we're again within the limits set by the params. */
5209 if (m_n_chains > (unsigned)param_max_store_chains_to_track
5210 || m_n_stores > (unsigned)param_max_stores_to_track)
245f6de1 5211 {
5212 if (dump_file && (dump_flags & TDF_DETAILS))
5213 fprintf (dump_file, "Too many chains (%u > %d) or stores (%u > %d), "
5214 "terminating oldest chain(s).\n", m_n_chains,
5215 param_max_store_chains_to_track, m_n_stores,
5216 param_max_stores_to_track);
5217 imm_store_chain_info **e = &m_stores_head;
5218 unsigned idx = 0;
5219 unsigned n_stores = 0;
5220 while (*e)
5221 {
5222 if (idx >= (unsigned)param_max_store_chains_to_track
5223 || (n_stores + (*e)->m_store_info.length ()
5224 > (unsigned)param_max_stores_to_track))
8a8eee6b 5225 ret |= terminate_and_process_chain (*e);
5226 else
5227 {
5228 n_stores += (*e)->m_store_info.length ();
5229 e = &(*e)->next;
5230 ++idx;
5231 }
5232 }
245f6de1 5233 }
95d94b52 5234
5235 return ret;
5236}
5237
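/* For illustration (example statements, not from the testsuite), a store
   whose value is a bitwise operation on a load and a constant, such as

     _1 = MEM[(char *)q_2(D) + 2B];
     _3 = _1 ^ 99;
     MEM[(char *)p_4(D) + 2B] = _3;

   is recorded by process_store with rhs_code BIT_XOR_EXPR, ops[0]
   describing the load and ops[1] holding the constant, so that a chain of
   such stores can later be emitted as one wide load, one xor and one wide
   store.  */
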
5238/* Return true if STMT is a store valid for store merging. */
5239
5240static bool
5241store_valid_for_store_merging_p (gimple *stmt)
5242{
5243 return gimple_assign_single_p (stmt)
5244 && gimple_vdef (stmt)
5245 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
5384a802 5246 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
5247}
5248
5249enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
5250
5251/* Return the status of basic block BB wrt store merging. */
5252
5253static enum basic_block_status
5254get_status_for_store_merging (basic_block bb)
5255{
5256 unsigned int num_statements = 0;
a7553ad6 5257 unsigned int num_constructors = 0;
5258 gimple_stmt_iterator gsi;
5259 edge e;
5260
5261 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5262 {
5263 gimple *stmt = gsi_stmt (gsi);
5264
5265 if (is_gimple_debug (stmt))
5266 continue;
5267
5268 if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
5269 break;
5270
5271 if (is_gimple_assign (stmt)
5272 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
5273 {
5274 tree rhs = gimple_assign_rhs1 (stmt);
5275 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
5276 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
5277 && gimple_assign_lhs (stmt) != NULL_TREE)
5278 {
5279 HOST_WIDE_INT sz
5280 = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
5281 if (sz == 16 || sz == 32 || sz == 64)
5282 {
5283 num_constructors = 1;
5284 break;
5285 }
5286 }
5287 }
5288 }
5289
a7553ad6 5290 if (num_statements == 0 && num_constructors == 0)
5291 return BB_INVALID;
5292
5293 if (cfun->can_throw_non_call_exceptions && cfun->eh
5294 && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
5295 && (e = find_fallthru_edge (bb->succs))
5296 && e->dest == bb->next_bb)
5297 return BB_EXTENDED_VALID;
5298
a7553ad6 5299 return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
5300}
5301
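/* For example (illustrative only), a block whose only candidate statement
   is a small integer-vector constructor store such as

     MEM <vector(8) char> [(char *)p_1(D)] = { _2, _3, _4, _5, _6, _7, _8, _9 };

   is also treated as valid: the 64-bit constructor store is a candidate
   for maybe_optimize_vector_constructor below.  */
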
f663d9ad 5302/* Entry point for the pass. Go over each basic block recording chains of
5303 immediate stores. Upon encountering a terminating statement (as defined
5304 by stmt_terminates_chain_p) process the recorded stores and emit the widened
5305 variants. */
5306
5307unsigned int
5308pass_store_merging::execute (function *fun)
5309{
5310 basic_block bb;
5311 hash_set<gimple *> orig_stmts;
5312 bool changed = false, open_chains = false;
5313
5314 /* If the function can throw and catch non-call exceptions, we'll be trying
5315 to merge stores across different basic blocks so we need to first unsplit
5316 the EH edges in order to streamline the CFG of the function. */
5317 if (cfun->can_throw_non_call_exceptions && cfun->eh)
5318 unsplit_eh_edges ();
f663d9ad 5319
5320 calculate_dominance_info (CDI_DOMINATORS);
5321
5322 FOR_EACH_BB_FN (bb, fun)
5323 {
629387a6 5324 const basic_block_status bb_status = get_status_for_store_merging (bb);
f663d9ad 5325 gimple_stmt_iterator gsi;
f663d9ad 5326
5327 if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
5328 {
5329 changed |= terminate_and_process_all_chains ();
5330 open_chains = false;
5331 }
5332
629387a6 5333 if (bb_status == BB_INVALID)
5334 continue;
5335
5336 if (dump_file && (dump_flags & TDF_DETAILS))
5337 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
5338
a7553ad6 5339 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
5340 {
5341 gimple *stmt = gsi_stmt (gsi);
a7553ad6 5342 gsi_next (&gsi);
f663d9ad 5343
5344 if (is_gimple_debug (stmt))
5345 continue;
5346
5384a802 5347 if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
5348 {
5349 /* Terminate all chains. */
5350 if (dump_file && (dump_flags & TDF_DETAILS))
5351 fprintf (dump_file, "Volatile access terminates "
5352 "all chains\n");
5353 changed |= terminate_and_process_all_chains ();
5354 open_chains = false;
5355 continue;
5356 }
5357
5358 if (is_gimple_assign (stmt)
5359 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
5360 && maybe_optimize_vector_constructor (stmt))
5361 continue;
5362
5363 if (store_valid_for_store_merging_p (stmt))
5364 changed |= process_store (stmt);
245f6de1 5365 else
5366 changed |= terminate_all_aliasing_chains (NULL, stmt);
5367 }
5368
5369 if (bb_status == BB_EXTENDED_VALID)
5370 open_chains = true;
5371 else
5372 {
5373 changed |= terminate_and_process_all_chains ();
5374 open_chains = false;
f663d9ad 5375 }
f663d9ad 5376 }
5377
5378 if (open_chains)
5379 changed |= terminate_and_process_all_chains ();
5380
5381 /* If the function can throw and catch non-call exceptions and something
5382 changed during the pass, then the CFG has (very likely) changed too. */
5383 if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5384 {
5385 free_dominance_info (CDI_DOMINATORS);
5386 return TODO_cleanup_cfg;
5387 }
5388
5389 return 0;
5390}
5391
5392} // anon namespace
5393
5394/* Construct and return a store merging pass object. */
5395
5396gimple_opt_pass *
5397make_pass_store_merging (gcc::context *ctxt)
5398{
5399 return new pass_store_merging (ctxt);
5400}
5401
5402#if CHECKING_P
5403
5404namespace selftest {
5405
5406/* Selftests for store merging helpers. */
5407
5408/* Assert that all elements of the byte arrays X and Y, both of length N,
5409 are equal. */
5410
5411static void
5412verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5413{
5414 for (unsigned int i = 0; i < n; i++)
5415 {
5416 if (x[i] != y[i])
5417 {
5418 fprintf (stderr, "Arrays do not match. X:\n");
5419 dump_char_array (stderr, x, n);
5420 fprintf (stderr, "Y:\n");
5421 dump_char_array (stderr, y, n);
5422 }
5423 ASSERT_EQ (x[i], y[i]);
5424 }
5425}
5426
8aba425f 5427/* Test shift_bytes_in_array_left and check that it carries bits across
5428 bytes correctly. */
5429
5430static void
8aba425f 5431verify_shift_bytes_in_array_left (void)
5432{
5433 /* byte 1 | byte 0
5434 00011111 | 11100000. */
5435 unsigned char orig[2] = { 0xe0, 0x1f };
5436 unsigned char in[2];
5437 memcpy (in, orig, sizeof orig);
5438
5439 unsigned char expected[2] = { 0x80, 0x7f };
8aba425f 5440 shift_bytes_in_array_left (in, sizeof (in), 2);
5441 verify_array_eq (in, expected, sizeof (in));
5442
5443 memcpy (in, orig, sizeof orig);
5444 memcpy (expected, orig, sizeof orig);
5445 /* Check that shifting by zero doesn't change anything. */
8aba425f 5446 shift_bytes_in_array_left (in, sizeof (in), 0);
5447 verify_array_eq (in, expected, sizeof (in));
5448
5449}
5450
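/* A minimal model of the behaviour checked above (illustrative only and
   kept out of the build; the real shift_bytes_in_array_left is defined
   elsewhere in the sources).  Byte 0 is the least significant byte, and a
   shift by fewer than BITS_PER_UNIT bits carries the top bits of each
   byte into the next one.  */
#if 0
static void
shift_bytes_left_model (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  /* Assumes 8-bit bytes and 0 <= amnt < 8.  */
  unsigned int carry = 0;
  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned int v = ((unsigned int) ptr[i] << amnt) | carry;
      ptr[i] = v & 0xff;
      carry = v >> 8;
    }
}
#endif
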
5451/* Test shift_bytes_in_array_right and check that it carries bits across
5452 bytes correctly. */
5453
5454static void
5455verify_shift_bytes_in_array_right (void)
5456{
5457 /* byte 1 | byte 0
5458 11100000 | 00011111. */
5459 unsigned char orig[2] = { 0x1f, 0xe0};
5460 unsigned char in[2];
5461 memcpy (in, orig, sizeof orig);
5462 unsigned char expected[2] = { 0x07, 0xf8};
5463 shift_bytes_in_array_right (in, sizeof (in), 2);
5464 verify_array_eq (in, expected, sizeof (in));
5465
5466 memcpy (in, orig, sizeof orig);
5467 memcpy (expected, orig, sizeof orig);
5468 /* Check that shifting by zero doesn't change anything. */
5469 shift_bytes_in_array_right (in, sizeof (in), 0);
5470 verify_array_eq (in, expected, sizeof (in));
5471}
5472
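/* A matching model for the right shift (illustrative only, kept out of
   the build): here byte 0 acts as the most significant byte, and bits
   shifted out of one byte move into the following one.  */
#if 0
static void
shift_bytes_right_model (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  /* Assumes 8-bit bytes and 0 <= amnt < 8.  */
  unsigned int carry = 0;
  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned int v = ptr[i];
      ptr[i] = (v >> amnt) | ((carry << (8 - amnt)) & 0xff);
      carry = v & ((1u << amnt) - 1);
    }
}
#endif
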
5473/* Test that clear_bit_region clears exactly the bits asked for and
5474 nothing more. */
5475
5476static void
5477verify_clear_bit_region (void)
5478{
5479 /* Start with all bits set and test clearing various patterns in them. */
5480 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5481 unsigned char in[3];
5482 unsigned char expected[3];
5483 memcpy (in, orig, sizeof in);
5484
5485 /* Check zeroing out all the bits. */
5486 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5487 expected[0] = expected[1] = expected[2] = 0;
5488 verify_array_eq (in, expected, sizeof in);
5489
5490 memcpy (in, orig, sizeof in);
5491 /* Leave the first and last bits intact. */
5492 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5493 expected[0] = 0x1;
5494 expected[1] = 0;
5495 expected[2] = 0x80;
5496 verify_array_eq (in, expected, sizeof in);
5497}
5498
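/* A minimal model of what the test above expects (illustrative only, kept
   out of the build; the real clear_bit_region also has fast paths for
   byte-aligned regions).  Bit 0 is the least significant bit of ptr[0],
   and bits outside [START, START + LEN) are left untouched.  */
#if 0
static void
clear_bit_region_model (unsigned char *ptr, unsigned int start, unsigned int len)
{
  for (unsigned int i = start; i < start + len; i++)
    ptr[i / BITS_PER_UNIT] &= ~(1u << (i % BITS_PER_UNIT));
}
#endif
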
5384a802 5499/* Test that clear_bit_region_be clears exactly the bits asked for and
5500 nothing more. */
5501
5502static void
5503verify_clear_bit_region_be (void)
5504{
5505 /* Start with all bits set and test clearing various patterns in them. */
5506 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5507 unsigned char in[3];
5508 unsigned char expected[3];
5509 memcpy (in, orig, sizeof in);
5510
5511 /* Check zeroing out all the bits. */
5512 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5513 expected[0] = expected[1] = expected[2] = 0;
5514 verify_array_eq (in, expected, sizeof in);
5515
5516 memcpy (in, orig, sizeof in);
5517 /* Leave the first and last bits intact. */
5518 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5519 expected[0] = 0x80;
5520 expected[1] = 0;
5521 expected[2] = 0x1;
5522 verify_array_eq (in, expected, sizeof in);
5523}
5524
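/* The corresponding model for the big-endian variant (illustrative only,
   kept out of the build): START counts down from BITS_PER_UNIT - 1, the
   most significant bit of ptr[0], and clearing continues into the most
   significant bits of the following bytes.  */
#if 0
static void
clear_bit_region_be_model (unsigned char *ptr, unsigned int start, unsigned int len)
{
  for (unsigned int i = 0; i < len; i++)
    {
      unsigned int byte, bit;
      if (i <= start)
        {
          byte = 0;
          bit = start - i;
        }
      else
        {
          byte = 1 + (i - start - 1) / BITS_PER_UNIT;
          bit = BITS_PER_UNIT - 1 - (i - start - 1) % BITS_PER_UNIT;
        }
      ptr[byte] &= ~(1u << bit);
    }
}
#endif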
5525
5526/* Run all of the selftests within this file. */
5527
5528void
5529store_merging_c_tests (void)
5530{
8aba425f 5531 verify_shift_bytes_in_array_left ();
5532 verify_shift_bytes_in_array_right ();
5533 verify_clear_bit_region ();
5534 verify_clear_bit_region_be ();
5535}
5536
5537} // namespace selftest
5538#endif /* CHECKING_P. */