/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap can be transformed into a single 4-byte
   load followed by single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in the store_operand_info struct.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
   objects are not adjacent, a new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to them.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //    val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     //    (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; //    (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //    val & 0x00000000ffff;  */
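
/* As a concrete C-level illustration (a hypothetical example, not taken
   from any particular testcase), source such as:

     struct S { unsigned char a, b, c, d; };

     void
     init (struct S *s)
     {
       s->a = 1;
       s->b = 2;
       s->c = 3;
       s->d = 4;
     }

   is the kind of input this pass is meant to improve: the four single-byte
   constant stores would typically be recorded in phase 1, coalesced into one
   merged_store_group in phase 2, and emitted in phase 3 as a single 4-byte
   store of the concatenated constant, subject to the alignment and target
   checks described below.  */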

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "except.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "dbgcnt.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0       - target byte has the value 0
   FF      - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ORed together to represent
     the approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)

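/* Informal illustration of how the two constants above are used: for a
   32-bit (4 byte) symbolic number, CMPNOP masked down to four markers is
   0x04030201, i.e. byte 0 of the result comes from source byte 1 (the lsb),
   byte 1 from source byte 2, and so on -- the identity permutation.
   CMPXCHG shifted down to four markers is 0x01020304, the fully reversed
   permutation corresponding to a byte swap.  A computation whose final
   symbolic number matches neither pattern is left alone.  */
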
256/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
259
260inline bool
261do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
264{
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 unsigned head_marker;
267
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
273
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
278
279 switch (code)
280 {
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
301 }
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
306}
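
/* Informal example of the above: for a 4-byte unsigned value whose symbolic
   number is the identity 0x04030201, a logical right shift by 8 bits shifts
   one whole marker out, giving 0x00040302 -- the low byte of the result now
   comes from source byte 2 and the top byte is known to be zero.  An
   arithmetic right shift of a signed value would instead set the top marker
   to MARKER_BYTE_UNKNOWN, since that byte depends on the sign bit.  */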
307
308/* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
310
311inline bool
312verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
313{
314 tree lhs_type;
315
650c70a9 316 lhs_type = TREE_TYPE (gimple_get_lhs (stmt));
dffec8eb 317
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
321
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
324
325 return true;
326}
327
328/* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
330
331bool
332init_symbolic_number (struct symbolic_number *n, tree src)
333{
334 int size;
335
5b9a65ec 336 if (!INTEGRAL_TYPE_P (TREE_TYPE (src)) && !POINTER_TYPE_P (TREE_TYPE (src)))
337 return false;
338
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
341
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of rhs1. The highest order byte is set to n->size and the
344 lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
355
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
358
359 return true;
360}
361
/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */
365
366bool
367find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
368{
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
f37fac2b 371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
375
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
379
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
382
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
385
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
dffec8eb 390 {
3fed2ce9 391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
393
394 if (!integer_zerop (off))
395 {
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
399 }
400
401 base_addr = TREE_OPERAND (base_addr, 0);
402
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
3fed2ce9 404 if (maybe_lt (bit_offset, 0))
dffec8eb 405 {
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
dffec8eb 409 if (offset)
3fed2ce9 410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
dffec8eb 411 else
3fed2ce9 412 offset = byte_offset;
413 }
414
3fed2ce9 415 bitpos += bit_offset.force_shwi ();
dffec8eb 416 }
417 else
418 base_addr = build_fold_addr_expr (base_addr);
dffec8eb 419
f37fac2b 420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
dffec8eb 421 return false;
f37fac2b 422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
426
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
f37fac2b 431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
435}
436
/* Compute the symbolic number N representing the result of a bitwise OR on
   the two symbolic numbers N1 and N2 whose source statements are
   respectively SOURCE_STMT1 and SOURCE_STMT2.  */
440
441gimple *
442perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n)
445{
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
450
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
459
  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
462 if (rhs1 != rhs2)
463 {
464 uint64_t inc;
4a022c70 465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
468
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
472
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
476
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
480
481 if (start1 < start2)
482 {
483 n_start = n1;
4a022c70 484 start_sub = start2 - start1;
485 }
486 else
487 {
488 n_start = n2;
4a022c70 489 start_sub = start1 - start2;
490 }
491
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
498
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
504 {
505 end = end2;
506 end_sub = end2 - end1;
507 }
508 else
509 {
510 end = end1;
511 end_sub = end1 - end2;
512 }
513 n_end = (end2 > end1) ? n2 : n1;
514
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
520
4a022c70 521 n->range = end - MIN (start1, start2) + 1;
522
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
527
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
533 {
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
538 }
539 }
540 else
541 {
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
545 }
546
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
559
560 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
561 {
562 uint64_t masked1, masked2;
563
564 masked1 = n1->n & mask;
565 masked2 = n2->n & mask;
566 if (masked1 && masked2 && masked1 != masked2)
567 return NULL;
568 }
569 n->n = n1->n | n2->n;
570 n->n_ops = n1->n_ops + n2->n_ops;
571
572 return source_stmt;
573}
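
/* Informal illustration of the merge above, for a little-endian target:
   suppose N1 describes a 16-bit load of a[0..1] widened to 32 bits (markers
   0x0201 in the low marker slots) and N2 describes a[2..3] loaded the same
   way but shifted left by 16 bits before the OR (markers 0x02010000).  The
   two sources share the same base and N2's load starts 2 bytes further, so
   its non-zero markers are incremented by 2, giving 0x04030000.  ORing the
   two marker numbers then yields 0x04030201 over a 4-byte range -- exactly
   CMPNOP, i.e. a plain 32-bit load in target byte order.  */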
574
575/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
576 the operation given by the rhs of STMT on the result. If the operation
577 could successfully be executed the function returns a gimple stmt whose
578 rhs's first tree is the expression of the source operand and NULL
579 otherwise. */
580
581gimple *
582find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
583{
584 enum tree_code code;
585 tree rhs1, rhs2 = NULL;
586 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
587 enum gimple_rhs_class rhs_class;
588
589 if (!limit || !is_gimple_assign (stmt))
590 return NULL;
591
592 rhs1 = gimple_assign_rhs1 (stmt);
593
594 if (find_bswap_or_nop_load (stmt, rhs1, n))
595 return stmt;
596
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
600 {
601 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
602 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
603 return NULL;
604
605 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
606 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
607 if (bitpos % BITS_PER_UNIT == 0
608 && bitsize % BITS_PER_UNIT == 0
609 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
610 {
611 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
612 if (BYTES_BIG_ENDIAN)
613 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
614
615 /* Shift. */
616 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
617 return NULL;
618
619 /* Mask. */
620 uint64_t mask = 0;
621 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
622 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
623 i++, tmp <<= BITS_PER_UNIT)
624 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
625 n->n &= mask;
626
627 /* Convert. */
628 n->type = TREE_TYPE (rhs1);
629 if (!n->base_addr)
630 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
631
632 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
633 }
634
635 return NULL;
636 }
637
638 if (TREE_CODE (rhs1) != SSA_NAME)
639 return NULL;
640
641 code = gimple_assign_rhs_code (stmt);
642 rhs_class = gimple_assign_rhs_class (stmt);
643 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
644
645 if (rhs_class == GIMPLE_BINARY_RHS)
646 rhs2 = gimple_assign_rhs2 (stmt);
647
648 /* Handle unary rhs and binary rhs with integer constants as second
649 operand. */
650
651 if (rhs_class == GIMPLE_UNARY_RHS
652 || (rhs_class == GIMPLE_BINARY_RHS
653 && TREE_CODE (rhs2) == INTEGER_CST))
654 {
655 if (code != BIT_AND_EXPR
656 && code != LSHIFT_EXPR
657 && code != RSHIFT_EXPR
658 && code != LROTATE_EXPR
659 && code != RROTATE_EXPR
660 && !CONVERT_EXPR_CODE_P (code))
661 return NULL;
662
663 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
664
665 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
666 we have to initialize the symbolic number. */
667 if (!source_stmt1)
668 {
669 if (gimple_assign_load_p (stmt)
670 || !init_symbolic_number (n, rhs1))
671 return NULL;
672 source_stmt1 = stmt;
673 }
674
675 switch (code)
676 {
677 case BIT_AND_EXPR:
678 {
679 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
680 uint64_t val = int_cst_value (rhs2), mask = 0;
681 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
682
683 /* Only constants masking full bytes are allowed. */
684 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
685 if ((val & tmp) != 0 && (val & tmp) != tmp)
686 return NULL;
687 else if (val & tmp)
688 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
689
690 n->n &= mask;
691 }
692 break;
693 case LSHIFT_EXPR:
694 case RSHIFT_EXPR:
695 case LROTATE_EXPR:
696 case RROTATE_EXPR:
697 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
698 return NULL;
699 break;
700 CASE_CONVERT:
701 {
702 int i, type_size, old_type_size;
703 tree type;
704
650c70a9 705 type = TREE_TYPE (gimple_assign_lhs (stmt));
706 type_size = TYPE_PRECISION (type);
707 if (type_size % BITS_PER_UNIT != 0)
708 return NULL;
709 type_size /= BITS_PER_UNIT;
710 if (type_size > 64 / BITS_PER_MARKER)
711 return NULL;
712
713 /* Sign extension: result is dependent on the value. */
714 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
715 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
716 && HEAD_MARKER (n->n, old_type_size))
717 for (i = 0; i < type_size - old_type_size; i++)
718 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
719 << ((type_size - 1 - i) * BITS_PER_MARKER);
720
721 if (type_size < 64 / BITS_PER_MARKER)
722 {
723 /* If STMT casts to a smaller type mask out the bits not
724 belonging to the target type. */
725 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
726 }
727 n->type = type;
728 if (!n->base_addr)
729 n->range = type_size;
730 }
731 break;
732 default:
733 return NULL;
734 };
735 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
736 }
737
738 /* Handle binary rhs. */
739
740 if (rhs_class == GIMPLE_BINARY_RHS)
741 {
742 struct symbolic_number n1, n2;
743 gimple *source_stmt, *source_stmt2;
744
745 if (code != BIT_IOR_EXPR)
746 return NULL;
747
748 if (TREE_CODE (rhs2) != SSA_NAME)
749 return NULL;
750
751 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
752
753 switch (code)
754 {
755 case BIT_IOR_EXPR:
756 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
757
758 if (!source_stmt1)
759 return NULL;
760
761 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
762
763 if (!source_stmt2)
764 return NULL;
765
766 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
767 return NULL;
768
4b84d9b8 769 if (n1.vuse != n2.vuse)
770 return NULL;
771
772 source_stmt
773 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
774
775 if (!source_stmt)
776 return NULL;
777
778 if (!verify_symbolic_number_p (n, stmt))
779 return NULL;
780
781 break;
782 default:
783 return NULL;
784 }
785 return source_stmt;
786 }
787 return NULL;
788}
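
/* As a concrete (hypothetical) example of the kind of source the recursion
   above is meant to recognize, a hand-written 32-bit byte swap such as:

     unsigned int
     my_bswap32 (unsigned int x)
     {
       return ((x & 0x000000ffu) << 24)
              | ((x & 0x0000ff00u) << 8)
              | ((x & 0x00ff0000u) >> 8)
              | ((x & 0xff000000u) >> 24);
     }

   reduces, through the BIT_AND_EXPR, shift and BIT_IOR_EXPR cases handled
   above, to a symbolic number whose markers equal CMPXCHG, which
   find_bswap_or_nop below can then turn into a __builtin_bswap32 call when
   the target provides a suitable optab.  */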
789
790/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
791 *CMPXCHG, *CMPNOP and adjust *N. */
dffec8eb 792
793void
794find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
795 uint64_t *cmpnop)
796{
797 unsigned rsize;
798 uint64_t tmpn, mask;
dffec8eb 799
800 /* The number which the find_bswap_or_nop_1 result should match in order
801 to have a full byte swap. The number is shifted to the right
802 according to the size of the symbolic number before using it. */
803 *cmpxchg = CMPXCHG;
804 *cmpnop = CMPNOP;
805
806 /* Find real size of result (highest non-zero byte). */
807 if (n->base_addr)
808 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
809 else
810 rsize = n->range;
811
812 /* Zero out the bits corresponding to untouched bytes in original gimple
813 expression. */
814 if (n->range < (int) sizeof (int64_t))
815 {
816 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
817 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
818 *cmpnop &= mask;
819 }
820
821 /* Zero out the bits corresponding to unused bytes in the result of the
822 gimple expression. */
823 if (rsize < n->range)
824 {
825 if (BYTES_BIG_ENDIAN)
826 {
827 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
828 *cmpxchg &= mask;
829 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
830 }
831 else
832 {
833 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
834 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
835 *cmpnop &= mask;
836 }
837 n->range = rsize;
838 }
839
840 n->range *= BITS_PER_UNIT;
841}
842
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */
850
851gimple *
852find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
853{
650c70a9 854 tree type_size = TYPE_SIZE_UNIT (TREE_TYPE (gimple_get_lhs (stmt)));
855 if (!tree_fits_uhwi_p (type_size))
856 return NULL;
857
858 /* The last parameter determines the depth search limit. It usually
859 correlates directly to the number n of bytes to be touched. We
0f507a36 860 increase that number by 2 * (log2(n) + 1) here in order to also
861 cover signed -> unsigned conversions of the src operand as can be seen
862 in libgcc, and for initial shift/and operation of the src operand. */
7f0ce82a 863 int limit = tree_to_uhwi (type_size);
0f507a36 864 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
865 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
866
867 if (!ins_stmt)
868 {
869 if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
870 || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
871 return NULL;
872 unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
873 if (sz != 16 && sz != 32 && sz != 64)
874 return NULL;
875 tree rhs = gimple_assign_rhs1 (stmt);
876 if (CONSTRUCTOR_NELTS (rhs) == 0)
877 return NULL;
878 tree eltype = TREE_TYPE (TREE_TYPE (rhs));
879 unsigned HOST_WIDE_INT eltsz
880 = int_size_in_bytes (eltype) * BITS_PER_UNIT;
881 if (TYPE_PRECISION (eltype) != eltsz)
882 return NULL;
883 constructor_elt *elt;
884 unsigned int i;
885 tree type = build_nonstandard_integer_type (sz, 1);
886 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
887 {
888 if (TREE_CODE (elt->value) != SSA_NAME
889 || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
890 return NULL;
891 struct symbolic_number n1;
892 gimple *source_stmt
893 = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
894 limit - 1);
895
896 if (!source_stmt)
897 return NULL;
898
899 n1.type = type;
900 if (!n1.base_addr)
901 n1.range = sz / BITS_PER_UNIT;
902
903 if (i == 0)
904 {
905 ins_stmt = source_stmt;
906 *n = n1;
907 }
908 else
909 {
910 if (n->vuse != n1.vuse)
911 return NULL;
912
913 struct symbolic_number n0 = *n;
914
915 if (!BYTES_BIG_ENDIAN)
916 {
917 if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
918 return NULL;
919 }
920 else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
921 return NULL;
922 ins_stmt
923 = perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n);
924
925 if (!ins_stmt)
926 return NULL;
927 }
928 }
929 }
930
931 uint64_t cmpxchg, cmpnop;
932 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
933
934 /* A complete byte swap should make the symbolic number to start with
935 the largest digit in the highest order byte. Unchanged symbolic
936 number indicates a read with same endianness as target architecture. */
937 if (n->n == cmpnop)
938 *bswap = false;
939 else if (n->n == cmpxchg)
940 *bswap = true;
941 else
942 return NULL;
943
944 /* Useless bit manipulation performed by code. */
945 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
946 return NULL;
947
948 return ins_stmt;
949}
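
/* Another (hypothetical) shape of input handled here is an explicit endian
   read assembled from individual bytes, e.g.:

     unsigned int
     read_le32 (const unsigned char *p)
     {
       return (unsigned int) p[0]
              | ((unsigned int) p[1] << 8)
              | ((unsigned int) p[2] << 16)
              | ((unsigned int) p[3] << 24);
     }

   On a little-endian target the resulting symbolic number matches CMPNOP,
   so *BSWAP is false and the sequence is a candidate for a plain (possibly
   unaligned) 32-bit load; on a big-endian target it matches CMPXCHG and is
   a candidate for a load followed by a byte swap.  */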
950
951const pass_data pass_data_optimize_bswap =
952{
953 GIMPLE_PASS, /* type */
954 "bswap", /* name */
955 OPTGROUP_NONE, /* optinfo_flags */
956 TV_NONE, /* tv_id */
957 PROP_ssa, /* properties_required */
958 0, /* properties_provided */
959 0, /* properties_destroyed */
960 0, /* todo_flags_start */
961 0, /* todo_flags_finish */
962};
963
964class pass_optimize_bswap : public gimple_opt_pass
965{
966public:
967 pass_optimize_bswap (gcc::context *ctxt)
968 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
969 {}
970
971 /* opt_pass methods: */
972 virtual bool gate (function *)
973 {
974 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
975 }
976
977 virtual unsigned int execute (function *);
978
979}; // class pass_optimize_bswap
980
981/* Helper function for bswap_replace. Build VIEW_CONVERT_EXPR from
982 VAL to TYPE. If VAL has different type size, emit a NOP_EXPR cast
983 first. */
984
985static tree
986bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val)
987{
988 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val))
989 || POINTER_TYPE_P (TREE_TYPE (val)));
990 if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
991 {
992 HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
993 if (POINTER_TYPE_P (TREE_TYPE (val)))
994 {
995 gimple *g
996 = gimple_build_assign (make_ssa_name (pointer_sized_int_node),
997 NOP_EXPR, val);
998 gsi_insert_before (gsi, g, GSI_SAME_STMT);
999 val = gimple_assign_lhs (g);
1000 }
1001 tree itype = build_nonstandard_integer_type (prec, 1);
1002 gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
1003 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1004 val = gimple_assign_lhs (g);
1005 }
1006 return build1 (VIEW_CONVERT_EXPR, type, val);
1007}
1008
/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr
   (non-null if a load is needed) and BSWAP.  The type, VUSE and alias-set
   of the load to perform are also given in N while the builtin bswap to
   invoke is given in FNDECL.  Finally, if a load is involved, INS_STMT
   refers to one of the load statements involved to construct the rhs in
   gsi_stmt (GSI) and N->range gives the size of the rhs expression for
   maintaining some statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE, which can lead to gsi_stmt (GSI) changing basic block.  */

1024tree
1025bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
1026 tree bswap_type, tree load_type, struct symbolic_number *n,
1027 bool bswap)
1028{
4b84d9b8 1029 tree src, tmp, tgt = NULL_TREE;
dffec8eb 1030 gimple *bswap_stmt;
cd676dfa 1031 tree_code conv_code = NOP_EXPR;
dffec8eb 1032
4b84d9b8 1033 gimple *cur_stmt = gsi_stmt (gsi);
dffec8eb 1034 src = n->src;
4b84d9b8 1035 if (cur_stmt)
1036 {
1037 tgt = gimple_assign_lhs (cur_stmt);
1038 if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
1039 && tgt
1040 && VECTOR_TYPE_P (TREE_TYPE (tgt)))
1041 conv_code = VIEW_CONVERT_EXPR;
1042 }
1043
1044 /* Need to load the value from memory first. */
1045 if (n->base_addr)
1046 {
1047 gimple_stmt_iterator gsi_ins = gsi;
1048 if (ins_stmt)
1049 gsi_ins = gsi_for_stmt (ins_stmt);
1050 tree addr_expr, addr_tmp, val_expr, val_tmp;
1051 tree load_offset_ptr, aligned_load_type;
1052 gimple *load_stmt;
1053 unsigned align = get_object_alignment (src);
4a022c70 1054 poly_int64 load_offset = 0;
dffec8eb 1055
1056 if (cur_stmt)
1057 {
1058 basic_block ins_bb = gimple_bb (ins_stmt);
1059 basic_block cur_bb = gimple_bb (cur_stmt);
1060 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
1061 return NULL_TREE;
1062
1063 /* Move cur_stmt just before one of the load of the original
1064 to ensure it has the same VUSE. See PR61517 for what could
1065 go wrong. */
1066 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
1067 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
1068 gsi_move_before (&gsi, &gsi_ins);
1069 gsi = gsi_for_stmt (cur_stmt);
1070 }
1071 else
1072 gsi = gsi_ins;
1073
1074 /* Compute address to load from and cast according to the size
1075 of the load. */
4b84d9b8 1076 addr_expr = build_fold_addr_expr (src);
dffec8eb 1077 if (is_gimple_mem_ref_addr (addr_expr))
4b84d9b8 1078 addr_tmp = unshare_expr (addr_expr);
1079 else
1080 {
1081 addr_tmp = unshare_expr (n->base_addr);
1082 if (!is_gimple_mem_ref_addr (addr_tmp))
1083 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
1084 is_gimple_mem_ref_addr,
1085 NULL_TREE, true,
1086 GSI_SAME_STMT);
1087 load_offset = n->bytepos;
1088 if (n->offset)
1089 {
1090 tree off
1091 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
1092 true, NULL_TREE, true,
1093 GSI_SAME_STMT);
1094 gimple *stmt
1095 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
1096 POINTER_PLUS_EXPR, addr_tmp, off);
1097 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1098 addr_tmp = gimple_assign_lhs (stmt);
1099 }
1100 }
1101
1102 /* Perform the load. */
1103 aligned_load_type = load_type;
1104 if (align < TYPE_ALIGN (load_type))
1105 aligned_load_type = build_aligned_type (load_type, align);
1106 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1107 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1108 load_offset_ptr);
1109
1110 if (!bswap)
1111 {
1112 if (n->range == 16)
1113 nop_stats.found_16bit++;
1114 else if (n->range == 32)
1115 nop_stats.found_32bit++;
1116 else
1117 {
1118 gcc_assert (n->range == 64);
1119 nop_stats.found_64bit++;
1120 }
1121
1122 /* Convert the result of load if necessary. */
4b84d9b8 1123 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
dffec8eb
JJ
1124 {
1125 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1126 "load_dst");
1127 load_stmt = gimple_build_assign (val_tmp, val_expr);
1128 gimple_set_vuse (load_stmt, n->vuse);
1129 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
cd676dfa 1130 if (conv_code == VIEW_CONVERT_EXPR)
d02a8b63 1131 val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp);
cd676dfa 1132 gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
4b84d9b8 1133 update_stmt (cur_stmt);
dffec8eb 1134 }
4b84d9b8 1135 else if (cur_stmt)
dffec8eb
JJ
1136 {
1137 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1138 gimple_set_vuse (cur_stmt, n->vuse);
4b84d9b8
JJ
1139 update_stmt (cur_stmt);
1140 }
1141 else
1142 {
1143 tgt = make_ssa_name (load_type);
1144 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1145 gimple_set_vuse (cur_stmt, n->vuse);
1146 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
dffec8eb 1147 }
dffec8eb
JJ
1148
1149 if (dump_file)
1150 {
1151 fprintf (dump_file,
1152 "%d bit load in target endianness found at: ",
1153 (int) n->range);
1154 print_gimple_stmt (dump_file, cur_stmt, 0);
1155 }
4b84d9b8 1156 return tgt;
dffec8eb
JJ
1157 }
1158 else
1159 {
1160 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1161 load_stmt = gimple_build_assign (val_tmp, val_expr);
1162 gimple_set_vuse (load_stmt, n->vuse);
1163 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1164 }
1165 src = val_tmp;
1166 }
1167 else if (!bswap)
1168 {
4b84d9b8
JJ
1169 gimple *g = NULL;
1170 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
dffec8eb
JJ
1171 {
1172 if (!is_gimple_val (src))
4b84d9b8 1173 return NULL_TREE;
cd676dfa 1174 if (conv_code == VIEW_CONVERT_EXPR)
d02a8b63 1175 src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src);
cd676dfa 1176 g = gimple_build_assign (tgt, conv_code, src);
dffec8eb 1177 }
4b84d9b8 1178 else if (cur_stmt)
dffec8eb 1179 g = gimple_build_assign (tgt, src);
4b84d9b8
JJ
1180 else
1181 tgt = src;
dffec8eb
JJ
1182 if (n->range == 16)
1183 nop_stats.found_16bit++;
1184 else if (n->range == 32)
1185 nop_stats.found_32bit++;
1186 else
1187 {
1188 gcc_assert (n->range == 64);
1189 nop_stats.found_64bit++;
1190 }
1191 if (dump_file)
1192 {
1193 fprintf (dump_file,
1194 "%d bit reshuffle in target endianness found at: ",
1195 (int) n->range);
1196 if (cur_stmt)
1197 print_gimple_stmt (dump_file, cur_stmt, 0);
1198 else
1199 {
4af78ef8 1200 print_generic_expr (dump_file, tgt, TDF_NONE);
1201 fprintf (dump_file, "\n");
1202 }
dffec8eb 1203 }
1204 if (cur_stmt)
1205 gsi_replace (&gsi, g, true);
1206 return tgt;
1207 }
1208 else if (TREE_CODE (src) == BIT_FIELD_REF)
1209 src = TREE_OPERAND (src, 0);
1210
1211 if (n->range == 16)
1212 bswap_stats.found_16bit++;
1213 else if (n->range == 32)
1214 bswap_stats.found_32bit++;
1215 else
1216 {
1217 gcc_assert (n->range == 64);
1218 bswap_stats.found_64bit++;
1219 }
1220
1221 tmp = src;
1222
1223 /* Convert the src expression if necessary. */
1224 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1225 {
1226 gimple *convert_stmt;
1227
1228 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1229 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1230 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1231 }
1232
1233 /* Canonical form for 16 bit bswap is a rotate expression. Only 16bit values
1234 are considered as rotation of 2N bit values by N bits is generally not
1235 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
1236 gives 0x03040102 while a bswap for that value is 0x04030201. */
1237 if (bswap && n->range == 16)
1238 {
1239 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1240 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1241 bswap_stmt = gimple_build_assign (NULL, src);
1242 }
1243 else
1244 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1245
1246 if (tgt == NULL_TREE)
1247 tgt = make_ssa_name (bswap_type);
1248 tmp = tgt;
1249
1250 /* Convert the result if necessary. */
1251 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1252 {
1253 gimple *convert_stmt;
1254
1255 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1256 tree atmp = tmp;
1257 if (conv_code == VIEW_CONVERT_EXPR)
d02a8b63 1258 atmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), tmp);
cd676dfa 1259 convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
1260 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1261 }
1262
1263 gimple_set_lhs (bswap_stmt, tmp);
1264
1265 if (dump_file)
1266 {
1267 fprintf (dump_file, "%d bit bswap implementation found at: ",
1268 (int) n->range);
1269 if (cur_stmt)
1270 print_gimple_stmt (dump_file, cur_stmt, 0);
1271 else
1272 {
4af78ef8 1273 print_generic_expr (dump_file, tgt, TDF_NONE);
1274 fprintf (dump_file, "\n");
1275 }
1276 }
1277
1278 if (cur_stmt)
1279 {
1280 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1281 gsi_remove (&gsi, true);
1282 }
1283 else
1284 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1285 return tgt;
1286}
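
/* Informal sketch of the effect of bswap_replace: for a hand-written 32-bit
   byte swap of a variable x recognized by the machinery above, the whole
   chain of masks, shifts and ORs collapses into a single __builtin_bswap32
   call on x (preceded by a load when the source is memory and followed by a
   conversion of the result when needed), while a 16-bit swap is emitted in
   its canonical form, a rotate by BITS_PER_UNIT bits instead of a call.  */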
1287
/* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
   using bswap optimizations.  CDI_DOMINATORS needs to be
   computed on entry.  Return true if it has been optimized and
   TODO_update_ssa is needed.  */
1292
1293static bool
1294maybe_optimize_vector_constructor (gimple *cur_stmt)
1295{
1296 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1297 struct symbolic_number n;
1298 bool bswap;
1299
1300 gcc_assert (is_gimple_assign (cur_stmt)
1301 && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);
1302
1303 tree rhs = gimple_assign_rhs1 (cur_stmt);
1304 if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
1305 || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
1306 || gimple_assign_lhs (cur_stmt) == NULL_TREE)
1307 return false;
1308
1309 HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
1310 switch (sz)
1311 {
1312 case 16:
1313 load_type = bswap_type = uint16_type_node;
1314 break;
1315 case 32:
1316 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1317 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
1318 {
1319 load_type = uint32_type_node;
1320 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1321 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1322 }
1323 else
1324 return false;
1325 break;
1326 case 64:
1327 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1328 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1329 || (word_mode == SImode
1330 && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1331 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
1332 {
1333 load_type = uint64_type_node;
1334 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1335 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1336 }
1337 else
1338 return false;
1339 break;
1340 default:
1341 return false;
1342 }
1343
1344 gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1345 if (!ins_stmt || n.range != (unsigned HOST_WIDE_INT) sz)
1346 return false;
1347
1348 if (bswap && !fndecl && n.range != 16)
1349 return false;
1350
1351 memset (&nop_stats, 0, sizeof (nop_stats));
1352 memset (&bswap_stats, 0, sizeof (bswap_stats));
1353 return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1354 bswap_type, load_type, &n, bswap) != NULL_TREE;
1355}
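
/* A (hypothetical) example of the vector-constructor case handled above:

     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi
     to_bytes (unsigned int x)
     {
       return (v4qi) { x >> 24, x >> 16, x >> 8, x };
     }

   On a little-endian target the four narrowing element initializers, taken
   together, lay out the bytes of X in reversed order, so find_bswap_or_nop
   detects a byte swap and the constructor can be replaced by
   __builtin_bswap32 (x) view-converted to the vector type.  Whether the
   pattern is matched in practice depends on how the front end lowers the
   constructor (the elements must end up as integral SSA names).  */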
1356
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   a simple load according to the target endianness.  */
1361
1362unsigned int
1363pass_optimize_bswap::execute (function *fun)
1364{
1365 basic_block bb;
1366 bool bswap32_p, bswap64_p;
1367 bool changed = false;
1368 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1369
1370 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1371 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1372 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1373 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1374 || (bswap32_p && word_mode == SImode)));
1375
1376 /* Determine the argument type of the builtins. The code later on
1377 assumes that the return and argument type are the same. */
1378 if (bswap32_p)
1379 {
1380 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1381 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1382 }
1383
1384 if (bswap64_p)
1385 {
1386 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1387 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1388 }
1389
1390 memset (&nop_stats, 0, sizeof (nop_stats));
1391 memset (&bswap_stats, 0, sizeof (bswap_stats));
1392 calculate_dominance_info (CDI_DOMINATORS);
1393
1394 FOR_EACH_BB_FN (bb, fun)
1395 {
1396 gimple_stmt_iterator gsi;
1397
1398 /* We do a reverse scan for bswap patterns to make sure we get the
1399 widest match. As bswap pattern matching doesn't handle previously
1400 inserted smaller bswap replacements as sub-patterns, the wider
1401 variant wouldn't be detected. */
1402 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1403 {
1404 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1405 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1406 enum tree_code code;
1407 struct symbolic_number n;
1408 bool bswap;
1409
	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     there makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
1416 gsi_prev (&gsi);
1417
1418 if (!is_gimple_assign (cur_stmt))
1419 continue;
1420
1421 code = gimple_assign_rhs_code (cur_stmt);
1422 switch (code)
1423 {
1424 case LROTATE_EXPR:
1425 case RROTATE_EXPR:
1426 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1427 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1428 % BITS_PER_UNIT)
1429 continue;
1430 /* Fall through. */
1431 case BIT_IOR_EXPR:
1432 break;
1433 case CONSTRUCTOR:
1434 {
1435 tree rhs = gimple_assign_rhs1 (cur_stmt);
1436 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
1437 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
1438 break;
1439 }
1440 continue;
1441 default:
1442 continue;
1443 }
1444
1445 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1446
1447 if (!ins_stmt)
1448 continue;
1449
1450 switch (n.range)
1451 {
1452 case 16:
1453 /* Already in canonical form, nothing to do. */
1454 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1455 continue;
1456 load_type = bswap_type = uint16_type_node;
1457 break;
1458 case 32:
1459 load_type = uint32_type_node;
1460 if (bswap32_p)
1461 {
1462 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1463 bswap_type = bswap32_type;
1464 }
1465 break;
1466 case 64:
1467 load_type = uint64_type_node;
1468 if (bswap64_p)
1469 {
1470 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1471 bswap_type = bswap64_type;
1472 }
1473 break;
1474 default:
1475 continue;
1476 }
1477
1478 if (bswap && !fndecl && n.range != 16)
1479 continue;
1480
1481 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1482 bswap_type, load_type, &n, bswap))
1483 changed = true;
1484 }
1485 }
1486
1487 statistics_counter_event (fun, "16-bit nop implementations found",
1488 nop_stats.found_16bit);
1489 statistics_counter_event (fun, "32-bit nop implementations found",
1490 nop_stats.found_32bit);
1491 statistics_counter_event (fun, "64-bit nop implementations found",
1492 nop_stats.found_64bit);
1493 statistics_counter_event (fun, "16-bit bswap implementations found",
1494 bswap_stats.found_16bit);
1495 statistics_counter_event (fun, "32-bit bswap implementations found",
1496 bswap_stats.found_32bit);
1497 statistics_counter_event (fun, "64-bit bswap implementations found",
1498 bswap_stats.found_64bit);
1499
1500 return (changed ? TODO_update_ssa : 0);
1501}
1502
1503} // anon namespace
1504
1505gimple_opt_pass *
1506make_pass_optimize_bswap (gcc::context *ctxt)
1507{
1508 return new pass_optimize_bswap (ctxt);
1509}
1510
1511namespace {
1512
/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */
245f6de1 1518
6c1dae73 1519class store_operand_info
245f6de1 1520{
6c1dae73 1521public:
1522 tree val;
1523 tree base_addr;
1524 poly_uint64 bitsize;
1525 poly_uint64 bitpos;
1526 poly_uint64 bitregion_start;
1527 poly_uint64 bitregion_end;
245f6de1 1528 gimple *stmt;
383ac8dc 1529 bool bit_not_p;
1530 store_operand_info ();
1531};
1532
1533store_operand_info::store_operand_info ()
1534 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
383ac8dc 1535 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1536{
1537}
1538
1539/* Struct recording the information about a single store of an immediate
1540 to memory. These are created in the first phase and coalesced into
1541 merged_store_group objects in the second phase. */
1542
6c1dae73 1543class store_immediate_info
f663d9ad 1544{
6c1dae73 1545public:
1546 unsigned HOST_WIDE_INT bitsize;
1547 unsigned HOST_WIDE_INT bitpos;
1548 unsigned HOST_WIDE_INT bitregion_start;
1549 /* This is one past the last bit of the bit region. */
1550 unsigned HOST_WIDE_INT bitregion_end;
1551 gimple *stmt;
1552 unsigned int order;
1553 /* INTEGER_CST for constant store, STRING_CST for string store,
1554 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1555 BIT_INSERT_EXPR for bit insertion.
1556 LROTATE_EXPR if it can be only bswap optimized and
1557 ops are not really meaningful.
1558 NOP_EXPR if bswap optimization detected identity, ops
1559 are not meaningful. */
245f6de1 1560 enum tree_code rhs_code;
1561 /* Two fields for bswap optimization purposes. */
1562 struct symbolic_number n;
1563 gimple *ins_stmt;
127ef369 1564 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
d60edaba 1565 bool bit_not_p;
1566 /* True if ops have been swapped and thus ops[1] represents
1567 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1568 bool ops_swapped_p;
1569 /* The index number of the landing pad, or 0 if there is none. */
1570 int lp_nr;
1571 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1572 just the first one. */
1573 store_operand_info ops[2];
b5926e23 1574 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
a62b3dc5 1575 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
4b84d9b8 1576 gimple *, unsigned int, enum tree_code,
629387a6 1577 struct symbolic_number &, gimple *, bool, int,
1578 const store_operand_info &,
1579 const store_operand_info &);
1580};
1581
1582store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
b5926e23 1583 unsigned HOST_WIDE_INT bp,
1584 unsigned HOST_WIDE_INT brs,
1585 unsigned HOST_WIDE_INT bre,
b5926e23 1586 gimple *st,
1587 unsigned int ord,
1588 enum tree_code rhscode,
1589 struct symbolic_number &nr,
1590 gimple *ins_stmtp,
d60edaba 1591 bool bitnotp,
629387a6 1592 int nr2,
1593 const store_operand_info &op0r,
1594 const store_operand_info &op1r)
a62b3dc5 1595 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
4b84d9b8 1596 stmt (st), order (ord), rhs_code (rhscode), n (nr),
629387a6 1597 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
4bc6fb21 1598 lp_nr (nr2), ops { op0r, op1r }
1599{
1600}
1601
1602/* Struct representing a group of stores to contiguous memory locations.
1603 These are produced by the second phase (coalescing) and consumed in the
1604 third phase that outputs the widened stores. */
1605
6c1dae73 1606class merged_store_group
f663d9ad 1607{
6c1dae73 1608public:
1609 unsigned HOST_WIDE_INT start;
1610 unsigned HOST_WIDE_INT width;
1611 unsigned HOST_WIDE_INT bitregion_start;
1612 unsigned HOST_WIDE_INT bitregion_end;
1613 /* The size of the allocated memory for val and mask. */
f663d9ad 1614 unsigned HOST_WIDE_INT buf_size;
a62b3dc5 1615 unsigned HOST_WIDE_INT align_base;
8a91d545 1616 poly_uint64 load_align_base[2];
1617
1618 unsigned int align;
245f6de1 1619 unsigned int load_align[2];
1620 unsigned int first_order;
1621 unsigned int last_order;
7f5a3982 1622 bool bit_insertion;
e362a897 1623 bool string_concatenation;
18e0c3d1 1624 bool only_constants;
1b3c9813 1625 bool consecutive;
18e0c3d1 1626 unsigned int first_nonmergeable_order;
629387a6 1627 int lp_nr;
f663d9ad 1628
a62b3dc5 1629 auto_vec<store_immediate_info *> stores;
1630 /* We record the first and last original statements in the sequence because
1631 we'll need their vuse/vdef and replacement position. It's easier to keep
1632 track of them separately as 'stores' is reordered by apply_stores. */
1633 gimple *last_stmt;
1634 gimple *first_stmt;
1635 unsigned char *val;
a62b3dc5 1636 unsigned char *mask;
1637
1638 merged_store_group (store_immediate_info *);
1639 ~merged_store_group ();
7f5a3982 1640 bool can_be_merged_into (store_immediate_info *);
1641 void merge_into (store_immediate_info *);
1642 void merge_overlapping (store_immediate_info *);
1643 bool apply_stores ();
1644private:
1645 void do_merge (store_immediate_info *);
1646};
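
/* Tying this back to the example in the comment at the top of the file
   (informal sketch): for the stores [p] := 0, [p + 1B] := 1, [p + 3B] := 0,
   [p + 4B] := 1, [p + 5B] := 0, [p + 6B] := 0, the coalescing phase would
   build two of these groups: one whose start is the bit position of [p]
   with width 16, and one whose start is the bit position of [p + 3B] with
   width 32, each carrying its participating store_immediate_info objects
   in STORES and the encoded bytes in VAL and MASK.  */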
1647
1648/* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1649
1650static void
1651dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1652{
1653 if (!fd)
1654 return;
1655
1656 for (unsigned int i = 0; i < len; i++)
c94c3532 1657 fprintf (fd, "%02x ", ptr[i]);
1658 fprintf (fd, "\n");
1659}
1660
1661/* Clear out LEN bits starting from bit START in the byte array
1662 PTR. This clears the bits to the *right* from START.
1663 START must be within [0, BITS_PER_UNIT) and counts starting from
1664 the least significant bit. */
1665
1666static void
1667clear_bit_region_be (unsigned char *ptr, unsigned int start,
1668 unsigned int len)
1669{
1670 if (len == 0)
1671 return;
1672 /* Clear len bits to the right of start. */
1673 else if (len <= start + 1)
1674 {
1675 unsigned char mask = (~(~0U << len));
1676 mask = mask << (start + 1U - len);
1677 ptr[0] &= ~mask;
1678 }
1679 else if (start != BITS_PER_UNIT - 1)
1680 {
1681 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1682 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1683 len - (start % BITS_PER_UNIT) - 1);
1684 }
1685 else if (start == BITS_PER_UNIT - 1
1686 && len > BITS_PER_UNIT)
1687 {
1688 unsigned int nbytes = len / BITS_PER_UNIT;
a62b3dc5 1689 memset (ptr, 0, nbytes);
f663d9ad
KT
1690 if (len % BITS_PER_UNIT != 0)
1691 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1692 len % BITS_PER_UNIT);
1693 }
1694 else
1695 gcc_unreachable ();
1696}
1697
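/* Editorial sketch, not part of the pass: a worked example of
   clear_bit_region_be above, assuming BITS_PER_UNIT == 8 (which the
   pass's gate requires).  START counts from the least significant bit
   and the LEN cleared bits extend towards less significant positions.

     unsigned char a[1] = { 0xff };
     clear_bit_region_be (a, 6, 5);    // clear bits 6..2: a[0] == 0x83

     unsigned char b[2] = { 0xff, 0xff };
     clear_bit_region_be (b, 7, 12);   // clear a whole byte plus 4 bits:
                                       // b[0] == 0x00, b[1] == 0x0f
*/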
1698/* In the byte array PTR clear the bit region that starts at bit
1699 START and is LEN bits wide.
1700 For regions spanning multiple bytes do this recursively until we reach
1701 zero LEN or a region contained within a single byte. */
1702
1703static void
1704clear_bit_region (unsigned char *ptr, unsigned int start,
1705 unsigned int len)
1706{
1707 /* Degenerate base case. */
1708 if (len == 0)
1709 return;
1710 else if (start >= BITS_PER_UNIT)
1711 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1712 /* Second base case. */
1713 else if ((start + len) <= BITS_PER_UNIT)
1714 {
46a61395 1715 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
f663d9ad
KT
1716 mask >>= BITS_PER_UNIT - (start + len);
1717
1718 ptr[0] &= ~mask;
1719
1720 return;
1721 }
1722 /* Clear most significant bits in a byte and proceed with the next byte. */
1723 else if (start != 0)
1724 {
1725 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1f069ef5 1726 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
f663d9ad
KT
1727 }
1728 /* Whole bytes need to be cleared. */
1729 else if (start == 0 && len > BITS_PER_UNIT)
1730 {
1731 unsigned int nbytes = len / BITS_PER_UNIT;
a848c710
KT
1732 /* We could recurse on each byte but we clear whole bytes, so a simple
1733 memset will do. */
46a61395 1734 memset (ptr, '\0', nbytes);
f663d9ad
KT
1735 /* Clear the remaining sub-byte region if there is one. */
1736 if (len % BITS_PER_UNIT != 0)
1737 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1738 }
1739 else
1740 gcc_unreachable ();
1741}
1742
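/* Editorial sketch, not part of the pass: the little-endian counterpart of
   the example above for clear_bit_region, where START counts upwards from
   bit 0 of PTR[0].

     unsigned char a[1] = { 0xff };
     clear_bit_region (a, 2, 3);       // clear bits 2..4: a[0] == 0xe3

     unsigned char b[2] = { 0xff, 0xff };
     clear_bit_region (b, 6, 5);       // spans the byte boundary:
                                       // b[0] == 0x3f, b[1] == 0xf8
*/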
1743/* Write BITLEN bits of EXPR to the byte array PTR at
1744 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1745 Return true if the operation succeeded. */
1746
1747static bool
1748encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
46a61395 1749 unsigned int total_bytes)
f663d9ad
KT
1750{
1751 unsigned int first_byte = bitpos / BITS_PER_UNIT;
ad1de652
JJ
1752 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1753 || (bitpos % BITS_PER_UNIT)
f4b31647 1754 || !int_mode_for_size (bitlen, 0).exists ());
3afd514b
JJ
1755 bool empty_ctor_p
1756 = (TREE_CODE (expr) == CONSTRUCTOR
1757 && CONSTRUCTOR_NELTS (expr) == 0
1758 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1759 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
f663d9ad
KT
1760
1761 if (!sub_byte_op_p)
3afd514b
JJ
1762 {
1763 if (first_byte >= total_bytes)
1764 return false;
1765 total_bytes -= first_byte;
1766 if (empty_ctor_p)
1767 {
1768 unsigned HOST_WIDE_INT rhs_bytes
1769 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1770 if (rhs_bytes > total_bytes)
1771 return false;
1772 memset (ptr + first_byte, '\0', rhs_bytes);
1773 return true;
1774 }
1775 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1776 }
f663d9ad
KT
1777
1778 /* LITTLE-ENDIAN
1779 We are writing a non-byte-sized quantity or at a position that is not
1780 at a byte boundary.
1781 |--------|--------|--------| ptr + first_byte
1782 ^ ^
1783 xxx xxxxxxxx xxx< bp>
1784 |______EXPR____|
1785
46a61395 1786 First native_encode_expr EXPR into a temporary buffer and shift each
f663d9ad
KT
1787 byte in the buffer by 'bp' (carrying the bits over as necessary).
1788 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1789 <------bitlen---->< bp>
1790 Then we clear the destination bits:
1791 |---00000|00000000|000-----| ptr + first_byte
1792 <-------bitlen--->< bp>
1793
1794 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1795 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1796
1797 BIG-ENDIAN
1798 We are writing a non-byte-sized quantity or at a position that is not
1799 at a byte boundary.
1800 ptr + first_byte |--------|--------|--------|
1801 ^ ^
1802 <bp >xxx xxxxxxxx xxx
1803 |_____EXPR_____|
1804
46a61395 1805 First native_encode_expr EXPR into a temporary buffer and shift each
f663d9ad
KT
1806 byte in the buffer to the right (carrying the bits over as necessary).
1807 We shift by as much as needed to align the most significant bit of EXPR
1808 with bitpos:
1809 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1810 <---bitlen----> <bp ><-----bitlen----->
1811 Then we clear the destination bits:
1812 ptr + first_byte |-----000||00000000||00000---|
1813 <bp ><-------bitlen----->
1814
1815 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1816 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1817 The awkwardness comes from the fact that bitpos is counted from the
1818 most significant bit of a byte. */
1819
ef1d3b57
RS
1820 /* We must be dealing with fixed-size data at this point, since the
1821 total size is also fixed. */
3afd514b
JJ
1822 unsigned int byte_size;
1823 if (empty_ctor_p)
1824 {
1825 unsigned HOST_WIDE_INT rhs_bytes
1826 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1827 if (rhs_bytes > total_bytes)
1828 return false;
1829 byte_size = rhs_bytes;
1830 }
1831 else
1832 {
1833 fixed_size_mode mode
1834 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
e362a897
EB
1835 byte_size
1836 = mode == BLKmode
1837 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
1838 : GET_MODE_SIZE (mode);
3afd514b 1839 }
f663d9ad 1840 /* Allocate an extra byte so that we have space to shift into. */
3afd514b 1841 byte_size++;
f663d9ad 1842 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
46a61395 1843 memset (tmpbuf, '\0', byte_size);
f663d9ad 1844 /* The store detection code should only have allowed constants that are
3afd514b
JJ
1845 accepted by native_encode_expr or empty ctors. */
1846 if (!empty_ctor_p
1847 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
f663d9ad
KT
1848 gcc_unreachable ();
1849
1850 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1851 bytes to write. This means it can write more than
1852 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1853 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1854 bitlen and zero out the bits that are not relevant as well (that may
1855 contain a sign bit due to sign-extension). */
1856 unsigned int padding
1857 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
ad1de652
JJ
1858 /* On big-endian the padding is at the 'front' so just skip the initial
1859 bytes. */
1860 if (BYTES_BIG_ENDIAN)
1861 tmpbuf += padding;
1862
1863 byte_size -= padding;
1864
1865 if (bitlen % BITS_PER_UNIT != 0)
f663d9ad 1866 {
4b2c06f4 1867 if (BYTES_BIG_ENDIAN)
ad1de652
JJ
1868 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1869 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1870 else
1871 clear_bit_region (tmpbuf, bitlen,
1872 byte_size * BITS_PER_UNIT - bitlen);
f663d9ad 1873 }
ad1de652
JJ
1874 /* Left shifting relies on the last byte being clear if bitlen is
1875 a multiple of BITS_PER_UNIT, which might not be clear if
1876 there are padding bytes. */
1877 else if (!BYTES_BIG_ENDIAN)
1878 tmpbuf[byte_size - 1] = '\0';
f663d9ad
KT
1879
1880 /* Clear the bit region in PTR where the bits from TMPBUF will be
46a61395 1881 inserted into. */
f663d9ad
KT
1882 if (BYTES_BIG_ENDIAN)
1883 clear_bit_region_be (ptr + first_byte,
1884 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1885 else
1886 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1887
1888 int shift_amnt;
1889 int bitlen_mod = bitlen % BITS_PER_UNIT;
1890 int bitpos_mod = bitpos % BITS_PER_UNIT;
1891
1892 bool skip_byte = false;
1893 if (BYTES_BIG_ENDIAN)
1894 {
1895 /* BITPOS and BITLEN are exactly aligned and no shifting
1896 is necessary. */
1897 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1898 || (bitpos_mod == 0 && bitlen_mod == 0))
1899 shift_amnt = 0;
1900 /* |. . . . . . . .|
1901 <bp > <blen >.
1902 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1903 of the value until it aligns with 'bp' in the next byte over. */
1904 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1905 {
1906 shift_amnt = bitlen_mod + bitpos_mod;
1907 skip_byte = bitlen_mod != 0;
1908 }
1909 /* |. . . . . . . .|
1910 <----bp--->
1911 <---blen---->.
1912 Shift the value right within the same byte so it aligns with 'bp'. */
1913 else
1914 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1915 }
1916 else
1917 shift_amnt = bitpos % BITS_PER_UNIT;
1918
1919 /* Create the shifted version of EXPR. */
1920 if (!BYTES_BIG_ENDIAN)
46a61395 1921 {
8aba425f 1922 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
46a61395
JJ
1923 if (shift_amnt == 0)
1924 byte_size--;
1925 }
f663d9ad
KT
1926 else
1927 {
1928 gcc_assert (BYTES_BIG_ENDIAN);
1929 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1930 /* If shifting right forced us to move into the next byte skip the now
1931 empty byte. */
1932 if (skip_byte)
1933 {
1934 tmpbuf++;
1935 byte_size--;
1936 }
1937 }
1938
1939 /* Insert the bits from TMPBUF. */
1940 for (unsigned int i = 0; i < byte_size; i++)
1941 ptr[first_byte + i] |= tmpbuf[i];
1942
1943 return true;
1944}
1945
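/* Editorial sketch, not part of the pass: how encode_tree_to_bitpos above
   is typically driven by apply_stores further down.  The constant, type and
   sizes are made up for illustration; on a little-endian target the buffer
   read back as an integer should then contain 0x1abc << 3.

     unsigned char buf[4] = { 0, 0, 0, 0 };
     tree cst = build_int_cst (short_unsigned_type_node, 0x1abc);
     // Write the 13 significant bits of CST at bit position 3 of BUF.
     bool ok = encode_tree_to_bitpos (cst, buf, 13, 3, sizeof buf);
     // Expected little-endian result: buf == { 0xe0, 0xd5, 0x00, 0x00 }.
*/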
1946/* Sorting function for store_immediate_info objects.
1947 Sorts them by bitposition. */
1948
1949static int
1950sort_by_bitpos (const void *x, const void *y)
1951{
1952 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1953 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1954
109cca3b 1955 if ((*tmp)->bitpos < (*tmp2)->bitpos)
f663d9ad
KT
1956 return -1;
1957 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1958 return 1;
109cca3b 1959 else
0f0027d1
KT
1960 /* If they are the same, fall back to the order, which is guaranteed
1961 to be different. */
1962 return (*tmp)->order - (*tmp2)->order;
f663d9ad
KT
1963}
1964
1965/* Sorting function for store_immediate_info objects.
1966 Sorts them by the order field. */
1967
1968static int
1969sort_by_order (const void *x, const void *y)
1970{
1971 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1972 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1973
1974 if ((*tmp)->order < (*tmp2)->order)
1975 return -1;
1976 else if ((*tmp)->order > (*tmp2)->order)
1977 return 1;
1978
1979 gcc_unreachable ();
1980}
1981
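/* Editorial note, not part of the pass: both comparators are meant for
   vec::qsort on the vectors used below, e.g.

     m_store_info.qsort (sort_by_bitpos);
     group->stores.qsort (sort_by_order);

   Equal bit positions fall back to ORDER, which is unique per recorded
   store, so sort_by_bitpos is a deterministic total order and
   sort_by_order may assert that ties never happen.  */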
1982/* Initialize a merged_store_group object from a store_immediate_info
1983 object. */
1984
1985merged_store_group::merged_store_group (store_immediate_info *info)
1986{
1987 start = info->bitpos;
1988 width = info->bitsize;
a62b3dc5
JJ
1989 bitregion_start = info->bitregion_start;
1990 bitregion_end = info->bitregion_end;
f663d9ad
KT
1991 /* VAL has memory allocated for it in apply_stores once the group
1992 width has been finalized. */
1993 val = NULL;
a62b3dc5 1994 mask = NULL;
e362a897
EB
1995 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1996 string_concatenation = info->rhs_code == STRING_CST;
18e0c3d1 1997 only_constants = info->rhs_code == INTEGER_CST;
1b3c9813 1998 consecutive = true;
18e0c3d1 1999 first_nonmergeable_order = ~0U;
629387a6 2000 lp_nr = info->lp_nr;
a62b3dc5
JJ
2001 unsigned HOST_WIDE_INT align_bitpos = 0;
2002 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2003 &align, &align_bitpos);
2004 align_base = start - align_bitpos;
245f6de1
JJ
2005 for (int i = 0; i < 2; ++i)
2006 {
2007 store_operand_info &op = info->ops[i];
2008 if (op.base_addr == NULL_TREE)
2009 {
2010 load_align[i] = 0;
2011 load_align_base[i] = 0;
2012 }
2013 else
2014 {
2015 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
2016 load_align_base[i] = op.bitpos - align_bitpos;
2017 }
2018 }
f663d9ad
KT
2019 stores.create (1);
2020 stores.safe_push (info);
2021 last_stmt = info->stmt;
2022 last_order = info->order;
2023 first_stmt = last_stmt;
2024 first_order = last_order;
2025 buf_size = 0;
2026}
2027
2028merged_store_group::~merged_store_group ()
2029{
2030 if (val)
2031 XDELETEVEC (val);
2032}
2033
7f5a3982
EB
2034/* Return true if the store described by INFO can be merged into the group. */
2035
2036bool
2037merged_store_group::can_be_merged_into (store_immediate_info *info)
2038{
2039 /* Do not merge bswap patterns. */
2040 if (info->rhs_code == LROTATE_EXPR)
2041 return false;
2042
629387a6
EB
2043 if (info->lp_nr != lp_nr)
2044 return false;
2045
7f5a3982
EB
2046 /* The canonical case. */
2047 if (info->rhs_code == stores[0]->rhs_code)
2048 return true;
2049
e362a897 2050 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
7f5a3982 2051 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
e362a897 2052 return !string_concatenation;
7f5a3982
EB
2053
2054 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
e362a897 2055 return !string_concatenation;
7f5a3982 2056
ed01d707
EB
2057 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
2058 only for small regions since this can generate a lot of instructions. */
7f5a3982
EB
2059 if (info->rhs_code == MEM_REF
2060 && (stores[0]->rhs_code == INTEGER_CST
2061 || stores[0]->rhs_code == BIT_INSERT_EXPR)
2062 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2063 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2064 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897 2065 return !string_concatenation;
7f5a3982
EB
2066
2067 if (stores[0]->rhs_code == MEM_REF
2068 && (info->rhs_code == INTEGER_CST
2069 || info->rhs_code == BIT_INSERT_EXPR)
2070 && info->bitregion_start == stores[0]->bitregion_start
ed01d707 2071 && info->bitregion_end == stores[0]->bitregion_end
2815558a 2072 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
e362a897
EB
2073 return !string_concatenation;
2074
2075 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
2076 if (info->rhs_code == STRING_CST
2077 && stores[0]->rhs_code == INTEGER_CST
2078 && stores[0]->bitsize == CHAR_BIT)
2079 return !bit_insertion;
2080
2081 if (stores[0]->rhs_code == STRING_CST
2082 && info->rhs_code == INTEGER_CST
2083 && info->bitsize == CHAR_BIT)
2084 return !bit_insertion;
7f5a3982
EB
2085
2086 return false;
2087}
2088
a62b3dc5
JJ
2089/* Helper method for merge_into and merge_overlapping to do
2090 the common part. */
7f5a3982 2091
f663d9ad 2092void
a62b3dc5 2093merged_store_group::do_merge (store_immediate_info *info)
f663d9ad 2094{
a62b3dc5
JJ
2095 bitregion_start = MIN (bitregion_start, info->bitregion_start);
2096 bitregion_end = MAX (bitregion_end, info->bitregion_end);
2097
2098 unsigned int this_align;
2099 unsigned HOST_WIDE_INT align_bitpos = 0;
2100 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2101 &this_align, &align_bitpos);
2102 if (this_align > align)
2103 {
2104 align = this_align;
2105 align_base = info->bitpos - align_bitpos;
2106 }
245f6de1
JJ
2107 for (int i = 0; i < 2; ++i)
2108 {
2109 store_operand_info &op = info->ops[i];
2110 if (!op.base_addr)
2111 continue;
2112
2113 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
2114 if (this_align > load_align[i])
2115 {
2116 load_align[i] = this_align;
2117 load_align_base[i] = op.bitpos - align_bitpos;
2118 }
2119 }
f663d9ad 2120
f663d9ad
KT
2121 gimple *stmt = info->stmt;
2122 stores.safe_push (info);
2123 if (info->order > last_order)
2124 {
2125 last_order = info->order;
2126 last_stmt = stmt;
2127 }
2128 else if (info->order < first_order)
2129 {
2130 first_order = info->order;
2131 first_stmt = stmt;
2132 }
e362a897 2133
1b3c9813
EB
2134 if (info->bitpos != start + width)
2135 consecutive = false;
2136
e362a897
EB
2137 /* We need to use extraction if there is any bit-field. */
2138 if (info->rhs_code == BIT_INSERT_EXPR)
2139 {
2140 bit_insertion = true;
2141 gcc_assert (!string_concatenation);
2142 }
2143
1b3c9813 2144 /* We want to use concatenation if there is any string. */
e362a897
EB
2145 if (info->rhs_code == STRING_CST)
2146 {
2147 string_concatenation = true;
2148 gcc_assert (!bit_insertion);
2149 }
2150
1b3c9813
EB
2151 /* But we cannot use it if we don't have consecutive stores. */
2152 if (!consecutive)
2153 string_concatenation = false;
2154
18e0c3d1
JJ
2155 if (info->rhs_code != INTEGER_CST)
2156 only_constants = false;
f663d9ad
KT
2157}
2158
a62b3dc5
JJ
2159/* Merge a store recorded by INFO into this merged store.
2160 The store is not overlapping with the existing recorded
2161 stores. */
2162
2163void
2164merged_store_group::merge_into (store_immediate_info *info)
2165{
1b3c9813
EB
2166 do_merge (info);
2167
a62b3dc5
JJ
2168 /* Make sure we're inserting in the position we think we're inserting. */
2169 gcc_assert (info->bitpos >= start + width
2170 && info->bitregion_start <= bitregion_end);
2171
c5679c37 2172 width = info->bitpos + info->bitsize - start;
a62b3dc5
JJ
2173}
2174
f663d9ad
KT
2175/* Merge a store described by INFO into this merged store.
2176 INFO overlaps in some way with the current store (i.e. it's not the
2177 contiguous case, which is handled by merged_store_group::merge_into). */
2178
2179void
2180merged_store_group::merge_overlapping (store_immediate_info *info)
2181{
1b3c9813
EB
2182 do_merge (info);
2183
f663d9ad 2184 /* If the store extends the size of the group, extend the width. */
a62b3dc5 2185 if (info->bitpos + info->bitsize > start + width)
c5679c37 2186 width = info->bitpos + info->bitsize - start;
f663d9ad
KT
2187}
2188
2189/* Go through all the recorded stores in this group in program order and
2190 apply their values to the VAL byte array to create the final merged
2191 value. Return true if the operation succeeded. */
2192
2193bool
2194merged_store_group::apply_stores ()
2195{
e362a897
EB
2196 store_immediate_info *info;
2197 unsigned int i;
2198
a62b3dc5
JJ
2199 /* Make sure we have more than one store in the group, otherwise we cannot
2200 merge anything. */
2201 if (bitregion_start % BITS_PER_UNIT != 0
2202 || bitregion_end % BITS_PER_UNIT != 0
f663d9ad
KT
2203 || stores.length () == 1)
2204 return false;
2205
e362a897
EB
2206 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2207
2208 /* Really do string concatenation for large strings only. */
2209 if (buf_size <= MOVE_MAX)
2210 string_concatenation = false;
2211
c94c3532 2212 /* Create a power-of-2-sized buffer for native_encode_expr. */
e362a897
EB
2213 if (!string_concatenation)
2214 buf_size = 1 << ceil_log2 (buf_size);
2215
a62b3dc5
JJ
2216 val = XNEWVEC (unsigned char, 2 * buf_size);
2217 mask = val + buf_size;
2218 memset (val, 0, buf_size);
2219 memset (mask, ~0U, buf_size);
f663d9ad 2220
e362a897
EB
2221 stores.qsort (sort_by_order);
2222
f663d9ad
KT
2223 FOR_EACH_VEC_ELT (stores, i, info)
2224 {
a62b3dc5 2225 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
c94c3532 2226 tree cst;
245f6de1
JJ
2227 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2228 cst = info->ops[0].val;
2229 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2230 cst = info->ops[1].val;
c94c3532
EB
2231 else
2232 cst = NULL_TREE;
245f6de1 2233 bool ret = true;
e362a897
EB
2234 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2235 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2236 buf_size);
c94c3532
EB
2237 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2238 if (BYTES_BIG_ENDIAN)
2239 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2240 - (pos_in_buffer % BITS_PER_UNIT)),
2241 info->bitsize);
2242 else
2243 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
245f6de1 2244 if (cst && dump_file && (dump_flags & TDF_DETAILS))
f663d9ad
KT
2245 {
2246 if (ret)
2247 {
c94c3532 2248 fputs ("After writing ", dump_file);
4af78ef8 2249 print_generic_expr (dump_file, cst, TDF_NONE);
f663d9ad 2250 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
c94c3532
EB
2251 " at position %d\n", info->bitsize, pos_in_buffer);
2252 fputs (" the merged value contains ", dump_file);
f663d9ad 2253 dump_char_array (dump_file, val, buf_size);
c94c3532
EB
2254 fputs (" the merged mask contains ", dump_file);
2255 dump_char_array (dump_file, mask, buf_size);
2256 if (bit_insertion)
2257 fputs (" bit insertion is required\n", dump_file);
e362a897
EB
2258 if (string_concatenation)
2259 fputs (" string concatenation is required\n", dump_file);
f663d9ad
KT
2260 }
2261 else
2262 fprintf (dump_file, "Failed to merge stores\n");
4b84d9b8 2263 }
f663d9ad
KT
2264 if (!ret)
2265 return false;
2266 }
4b84d9b8 2267 stores.qsort (sort_by_bitpos);
f663d9ad
KT
2268 return true;
2269}
2270
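/* Editorial sketch, not part of the pass: what apply_stores computes for
   two adjacent constant byte stores p[0] = 0x12; p[1] = 0x34; with a
   2-byte bit region.

     val  == { 0x12, 0x34 }   // image of the merged constant
     mask == { 0x00, 0x00 }   // a 0 bit means the group writes that bit

   Bits left as 1 in MASK (e.g. the untouched part of a bit-field's byte)
   tell the output phase that the original memory contents must be
   preserved there, via a masked read-modify-write rather than a plain
   wide store.  */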
2271/* Structure describing the store chain. */
2272
6c1dae73 2273class imm_store_chain_info
f663d9ad 2274{
6c1dae73 2275public:
50b6d676
AO
2276 /* Doubly-linked list that imposes an order on chain processing.
2277 PNXP (prev's next pointer) points to the head of a list, or to
2278 the next field in the previous chain in the list.
2279 See pass_store_merging::m_stores_head for more rationale. */
2280 imm_store_chain_info *next, **pnxp;
b5926e23 2281 tree base_addr;
a62b3dc5 2282 auto_vec<store_immediate_info *> m_store_info;
f663d9ad
KT
2283 auto_vec<merged_store_group *> m_merged_store_groups;
2284
50b6d676
AO
2285 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2286 : next (inspt), pnxp (&inspt), base_addr (b_a)
2287 {
2288 inspt = this;
2289 if (next)
2290 {
2291 gcc_checking_assert (pnxp == next->pnxp);
2292 next->pnxp = &next;
2293 }
2294 }
2295 ~imm_store_chain_info ()
2296 {
2297 *pnxp = next;
2298 if (next)
2299 {
2300 gcc_checking_assert (&next == next->pnxp);
2301 next->pnxp = pnxp;
2302 }
2303 }
b5926e23 2304 bool terminate_and_process_chain ();
bd909071
JJ
2305 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2306 unsigned int);
f663d9ad 2307 bool coalesce_immediate_stores ();
b5926e23
RB
2308 bool output_merged_store (merged_store_group *);
2309 bool output_merged_stores ();
f663d9ad
KT
2310};
2311
2312const pass_data pass_data_tree_store_merging = {
2313 GIMPLE_PASS, /* type */
2314 "store-merging", /* name */
2315 OPTGROUP_NONE, /* optinfo_flags */
2316 TV_GIMPLE_STORE_MERGING, /* tv_id */
2317 PROP_ssa, /* properties_required */
2318 0, /* properties_provided */
2319 0, /* properties_destroyed */
2320 0, /* todo_flags_start */
2321 TODO_update_ssa, /* todo_flags_finish */
2322};
2323
2324class pass_store_merging : public gimple_opt_pass
2325{
2326public:
2327 pass_store_merging (gcc::context *ctxt)
95d94b52
RB
2328 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head (),
2329 m_n_chains (0), m_n_stores (0)
f663d9ad
KT
2330 {
2331 }
2332
c94c3532
EB
2333 /* Pass not supported for PDP-endian, nor for insane hosts or
2334 target character sizes where native_{encode,interpret}_expr
a62b3dc5 2335 doesn't work properly. */
f663d9ad
KT
2336 virtual bool
2337 gate (function *)
2338 {
a62b3dc5 2339 return flag_store_merging
c94c3532 2340 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
a62b3dc5
JJ
2341 && CHAR_BIT == 8
2342 && BITS_PER_UNIT == 8;
f663d9ad
KT
2343 }
2344
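/* Editorial note, not part of the pass: assuming the usual GCC option and
   dump-name mapping, the gate above means the transformation is controlled
   by -fstore-merging / -fno-store-merging, and the TDF_DETAILS diagnostics
   used throughout this file can be requested with e.g.

     gcc -O2 -fdump-tree-store-merging-details foo.c  */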
2345 virtual unsigned int execute (function *);
2346
2347private:
99b1c316 2348 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
f663d9ad 2349
50b6d676
AO
2350 /* Form a doubly-linked stack of the elements of m_stores, so that
2351 we can iterate over them in a predictable way. Using this order
2352 avoids extraneous differences in the compiler output just because
2353 of tree pointer variations (e.g. different chains end up in
2354 different positions of m_stores, so they are handled in different
2355 orders, so they allocate or release SSA names in different
2356 orders, and when they get reused, subsequent passes end up
2357 getting different SSA names, which may ultimately change
2358 decisions when going out of SSA). */
2359 imm_store_chain_info *m_stores_head;
2360
95d94b52
RB
2361 /* The number of store chains currently tracked. */
2362 unsigned m_n_chains;
2363 /* The number of stores currently tracked. */
2364 unsigned m_n_stores;
2365
629387a6
EB
2366 bool process_store (gimple *);
2367 bool terminate_and_process_chain (imm_store_chain_info *);
383ac8dc 2368 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
629387a6 2369 bool terminate_and_process_all_chains ();
f663d9ad
KT
2370}; // class pass_store_merging
2371
2372/* Terminate and process all recorded chains. Return true if any changes
2373 were made. */
2374
2375bool
2376pass_store_merging::terminate_and_process_all_chains ()
2377{
f663d9ad 2378 bool ret = false;
50b6d676 2379 while (m_stores_head)
629387a6 2380 ret |= terminate_and_process_chain (m_stores_head);
b119c055 2381 gcc_assert (m_stores.is_empty ());
f663d9ad
KT
2382 return ret;
2383}
2384
383ac8dc
JJ
2385/* Terminate all chains that are affected by the statement STMT.
2386 CHAIN_INFO is the chain we should ignore from the checks if
629387a6 2387 non-NULL. Return true if any changes were made. */
f663d9ad
KT
2388
2389bool
20770eb8 2390pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
b5926e23 2391 **chain_info,
f663d9ad
KT
2392 gimple *stmt)
2393{
2394 bool ret = false;
2395
2396 /* If the statement doesn't touch memory it can't alias. */
2397 if (!gimple_vuse (stmt))
2398 return false;
2399
9e875fd8 2400 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
6b412bf6
RB
2401 ao_ref store_lhs_ref;
2402 ao_ref_init (&store_lhs_ref, store_lhs);
383ac8dc 2403 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
f663d9ad 2404 {
383ac8dc
JJ
2405 next = cur->next;
2406
2407 /* We already checked all the stores in chain_info and terminated the
2408 chain if necessary. Skip it here. */
2409 if (chain_info && *chain_info == cur)
2410 continue;
2411
245f6de1
JJ
2412 store_immediate_info *info;
2413 unsigned int i;
383ac8dc 2414 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
f663d9ad 2415 {
9e875fd8 2416 tree lhs = gimple_assign_lhs (info->stmt);
6b412bf6
RB
2417 ao_ref lhs_ref;
2418 ao_ref_init (&lhs_ref, lhs);
2419 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2420 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2421 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2422 &lhs_ref, false)))
f663d9ad 2423 {
245f6de1 2424 if (dump_file && (dump_flags & TDF_DETAILS))
f663d9ad 2425 {
245f6de1
JJ
2426 fprintf (dump_file, "stmt causes chain termination:\n");
2427 print_gimple_stmt (dump_file, stmt, 0);
f663d9ad 2428 }
629387a6 2429 ret |= terminate_and_process_chain (cur);
245f6de1 2430 break;
f663d9ad
KT
2431 }
2432 }
2433 }
2434
f663d9ad
KT
2435 return ret;
2436}
2437
2438/* Helper function. Terminate the chain recorded in CHAIN_INFO, which
2439 stores to its base object. Return true if the merging and output was
2440 successful. The m_stores entry is removed after the processing in any case. */
2441
2442bool
629387a6 2443pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
f663d9ad 2444{
95d94b52
RB
2445 m_n_stores -= chain_info->m_store_info.length ();
2446 m_n_chains--;
b5926e23
RB
2447 bool ret = chain_info->terminate_and_process_chain ();
2448 m_stores.remove (chain_info->base_addr);
2449 delete chain_info;
f663d9ad
KT
2450 return ret;
2451}
2452
245f6de1 2453/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
629387a6
EB
2454 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2455 be able to sink load of REF across stores between FIRST and LAST, up
2456 to right before LAST. */
245f6de1
JJ
2457
2458bool
2459stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2460{
2461 ao_ref r;
2462 ao_ref_init (&r, ref);
2463 unsigned int count = 0;
2464 tree vop = gimple_vdef (last);
2465 gimple *stmt;
2466
629387a6
EB
2467 /* Return true conservatively if the basic blocks are different. */
2468 if (gimple_bb (first) != gimple_bb (last))
2469 return true;
2470
245f6de1
JJ
2471 do
2472 {
2473 stmt = SSA_NAME_DEF_STMT (vop);
2474 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2475 return true;
4b84d9b8
JJ
2476 if (gimple_store_p (stmt)
2477 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2478 return true;
245f6de1
JJ
2479 /* Avoid quadratic compile time by bounding the number of checks
2480 we perform. */
2481 if (++count > MAX_STORE_ALIAS_CHECKS)
2482 return true;
2483 vop = gimple_vuse (stmt);
2484 }
2485 while (stmt != first);
629387a6 2486
245f6de1
JJ
2487 return false;
2488}
2489
2490/* Return true if INFO->ops[IDX] is mergeable with the
2491 corresponding loads already in MERGED_STORE group.
2492 BASE_ADDR is the base address of the whole store group. */
2493
2494bool
2495compatible_load_p (merged_store_group *merged_store,
2496 store_immediate_info *info,
2497 tree base_addr, int idx)
2498{
2499 store_immediate_info *infof = merged_store->stores[0];
2500 if (!info->ops[idx].base_addr
8a91d545
RS
2501 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2502 info->bitpos - infof->bitpos)
245f6de1
JJ
2503 || !operand_equal_p (info->ops[idx].base_addr,
2504 infof->ops[idx].base_addr, 0))
2505 return false;
2506
2507 store_immediate_info *infol = merged_store->stores.last ();
2508 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2509 /* In this case all vuses should be the same, e.g.
2510 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2511 or
2512 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2513 and we can emit the coalesced load next to any of those loads. */
2514 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2515 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2516 return true;
2517
2518 /* Otherwise, at least for now require that the load has the same
2519 vuse as the store. See following examples. */
2520 if (gimple_vuse (info->stmt) != load_vuse)
2521 return false;
2522
2523 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2524 || (infof != infol
2525 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2526 return false;
2527
2528 /* If the load is from the same location as the store, already
2529 the construction of the immediate chain info guarantees no intervening
2530 stores, so no further checks are needed. Example:
2531 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
8a91d545 2532 if (known_eq (info->ops[idx].bitpos, info->bitpos)
245f6de1
JJ
2533 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2534 return true;
2535
2536 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2537 of the stores in the group, or any other stores in between those.
2538 Previous calls to compatible_load_p ensured that for all the
2539 merged_store->stores IDX loads, no stmts starting with
2540 merged_store->first_stmt and ending right before merged_store->last_stmt
2541 clobbers those loads. */
2542 gimple *first = merged_store->first_stmt;
2543 gimple *last = merged_store->last_stmt;
245f6de1
JJ
2544 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2545 comes before the so far first load, we'll be changing
2546 merged_store->first_stmt. In that case we need to give up if
2547 any of the earlier processed loads clobber with the stmts in the new
2548 range. */
2549 if (info->order < merged_store->first_order)
2550 {
3f207ab3 2551 for (store_immediate_info *infoc : merged_store->stores)
245f6de1
JJ
2552 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2553 return false;
2554 first = info->stmt;
2555 }
2556 /* Similarly, we could change merged_store->last_stmt, so ensure
2557 in that case no stmts in the new range clobber any of the earlier
2558 processed loads. */
2559 else if (info->order > merged_store->last_order)
2560 {
3f207ab3 2561 for (store_immediate_info *infoc : merged_store->stores)
245f6de1
JJ
2562 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2563 return false;
2564 last = info->stmt;
2565 }
2566 /* And finally, we'd be adding a new load to the set, ensure it isn't
2567 clobbered in the new range. */
2568 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2569 return false;
2570
2571 /* Otherwise, we are looking for:
2572 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2573 or
2574 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2575 return true;
2576}
2577
4b84d9b8
JJ
2578/* Add all refs loaded to compute VAL to REFS vector. */
2579
2580void
2581gather_bswap_load_refs (vec<tree> *refs, tree val)
2582{
2583 if (TREE_CODE (val) != SSA_NAME)
2584 return;
2585
2586 gimple *stmt = SSA_NAME_DEF_STMT (val);
2587 if (!is_gimple_assign (stmt))
2588 return;
2589
2590 if (gimple_assign_load_p (stmt))
2591 {
2592 refs->safe_push (gimple_assign_rhs1 (stmt));
2593 return;
2594 }
2595
2596 switch (gimple_assign_rhs_class (stmt))
2597 {
2598 case GIMPLE_BINARY_RHS:
2599 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2600 /* FALLTHRU */
2601 case GIMPLE_UNARY_RHS:
2602 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2603 break;
2604 default:
2605 gcc_unreachable ();
2606 }
2607}
2608
c5679c37
JJ
2609/* Check if there are any stores in M_STORE_INFO after index I
2610 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2611 a potential group ending with END that have their order
4d213bf6
JJ
2612 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2613 all the stores already merged and the one under consideration
2614 have rhs_code of INTEGER_CST. Return true if there are no such stores.
c5679c37
JJ
2615 Consider:
2616 MEM[(long long int *)p_28] = 0;
2617 MEM[(long long int *)p_28 + 8B] = 0;
2618 MEM[(long long int *)p_28 + 16B] = 0;
2619 MEM[(long long int *)p_28 + 24B] = 0;
2620 _129 = (int) _130;
2621 MEM[(int *)p_28 + 8B] = _129;
2622 MEM[(int *)p_28].a = -1;
2623 We already have
2624 MEM[(long long int *)p_28] = 0;
2625 MEM[(int *)p_28].a = -1;
2626 stmts in the current group and need to consider if it is safe to
2627 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2628 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2629 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2630 into the group and merging of those 3 stores is successful, merged
2631 stmts will be emitted at the latest store from that group, i.e.
2632 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2633 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2634 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2635 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2636 into the group. That way it will be its own store group and will
4d213bf6 2637 not be touched. If ALL_INTEGER_CST_P and there are overlapping
c5679c37 2638 INTEGER_CST stores, those are mergeable using merge_overlapping,
bd909071
JJ
2639 so don't return false for those.
2640
2641 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2642 (exclusive), whether they don't overlap the bitrange START to END
2643 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2644 prevent merging in cases like:
2645 MEM <char[12]> [&b + 8B] = {};
2646 MEM[(short *) &b] = 5;
2647 _5 = *x_4(D);
2648 MEM <long long unsigned int> [&b + 2B] = _5;
2649 MEM[(char *)&b + 16B] = 88;
2650 MEM[(int *)&b + 20B] = 1;
2651 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2652 be merged with it, because the = _5 store overlaps these and is in between
2653 them in sort_by_order ordering. If it was merged, the merged store would
2654 go after the = _5 store and thus change behavior. */
c5679c37
JJ
2655
2656static bool
00dcc88a
MS
2657check_no_overlap (const vec<store_immediate_info *> &m_store_info,
2658 unsigned int i,
bd909071
JJ
2659 bool all_integer_cst_p, unsigned int first_order,
2660 unsigned int last_order, unsigned HOST_WIDE_INT start,
2661 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2662 unsigned end_earlier)
c5679c37
JJ
2663{
2664 unsigned int len = m_store_info.length ();
bd909071
JJ
2665 for (unsigned int j = first_earlier; j < end_earlier; j++)
2666 {
2667 store_immediate_info *info = m_store_info[j];
2668 if (info->order > first_order
2669 && info->order < last_order
2670 && info->bitpos + info->bitsize > start)
2671 return false;
2672 }
c5679c37
JJ
2673 for (++i; i < len; ++i)
2674 {
2675 store_immediate_info *info = m_store_info[i];
2676 if (info->bitpos >= end)
2677 break;
2678 if (info->order < last_order
4d213bf6 2679 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
c5679c37
JJ
2680 return false;
2681 }
2682 return true;
2683}
2684
4b84d9b8
JJ
2685/* Return true if m_store_info[first] and at least one following store
2686 form a group which store try_size bitsize value which is byte swapped
2687 from a memory load or some value, or identity from some value.
2688 This uses the bswap pass APIs. */
2689
2690bool
2691imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2692 unsigned int first,
bd909071
JJ
2693 unsigned int try_size,
2694 unsigned int first_earlier)
4b84d9b8
JJ
2695{
2696 unsigned int len = m_store_info.length (), last = first;
2697 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2698 if (width >= try_size)
2699 return false;
2700 for (unsigned int i = first + 1; i < len; ++i)
2701 {
2702 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
cb76fcd7 2703 || m_store_info[i]->lp_nr != merged_store->lp_nr
4b84d9b8
JJ
2704 || m_store_info[i]->ins_stmt == NULL)
2705 return false;
2706 width += m_store_info[i]->bitsize;
2707 if (width >= try_size)
2708 {
2709 last = i;
2710 break;
2711 }
2712 }
2713 if (width != try_size)
2714 return false;
2715
2716 bool allow_unaligned
028d4092 2717 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
4b84d9b8
JJ
2718 /* Punt if the combined store would not be aligned and we need alignment. */
2719 if (!allow_unaligned)
2720 {
2721 unsigned int align = merged_store->align;
2722 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2723 for (unsigned int i = first + 1; i <= last; ++i)
2724 {
2725 unsigned int this_align;
2726 unsigned HOST_WIDE_INT align_bitpos = 0;
2727 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2728 &this_align, &align_bitpos);
2729 if (this_align > align)
2730 {
2731 align = this_align;
2732 align_base = m_store_info[i]->bitpos - align_bitpos;
2733 }
2734 }
2735 unsigned HOST_WIDE_INT align_bitpos
2736 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2737 if (align_bitpos)
2738 align = least_bit_hwi (align_bitpos);
2739 if (align < try_size)
2740 return false;
2741 }
2742
2743 tree type;
2744 switch (try_size)
2745 {
2746 case 16: type = uint16_type_node; break;
2747 case 32: type = uint32_type_node; break;
2748 case 64: type = uint64_type_node; break;
2749 default: gcc_unreachable ();
2750 }
2751 struct symbolic_number n;
2752 gimple *ins_stmt = NULL;
2753 int vuse_store = -1;
2754 unsigned int first_order = merged_store->first_order;
2755 unsigned int last_order = merged_store->last_order;
2756 gimple *first_stmt = merged_store->first_stmt;
2757 gimple *last_stmt = merged_store->last_stmt;
c5679c37 2758 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
4b84d9b8
JJ
2759 store_immediate_info *infof = m_store_info[first];
2760
2761 for (unsigned int i = first; i <= last; ++i)
2762 {
2763 store_immediate_info *info = m_store_info[i];
2764 struct symbolic_number this_n = info->n;
2765 this_n.type = type;
2766 if (!this_n.base_addr)
2767 this_n.range = try_size / BITS_PER_UNIT;
30fa8e9c
JJ
2768 else
2769 /* Update vuse in case it has changed by output_merged_stores. */
2770 this_n.vuse = gimple_vuse (info->ins_stmt);
4b84d9b8
JJ
2771 unsigned int bitpos = info->bitpos - infof->bitpos;
2772 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2773 BYTES_BIG_ENDIAN
2774 ? try_size - info->bitsize - bitpos
2775 : bitpos))
2776 return false;
aa11164a 2777 if (this_n.base_addr && vuse_store)
4b84d9b8
JJ
2778 {
2779 unsigned int j;
2780 for (j = first; j <= last; ++j)
2781 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2782 break;
2783 if (j > last)
2784 {
2785 if (vuse_store == 1)
2786 return false;
2787 vuse_store = 0;
2788 }
2789 }
2790 if (i == first)
2791 {
2792 n = this_n;
2793 ins_stmt = info->ins_stmt;
2794 }
2795 else
2796 {
c5679c37 2797 if (n.base_addr && n.vuse != this_n.vuse)
4b84d9b8 2798 {
c5679c37
JJ
2799 if (vuse_store == 0)
2800 return false;
2801 vuse_store = 1;
4b84d9b8 2802 }
c5679c37
JJ
2803 if (info->order > last_order)
2804 {
2805 last_order = info->order;
2806 last_stmt = info->stmt;
2807 }
2808 else if (info->order < first_order)
2809 {
2810 first_order = info->order;
2811 first_stmt = info->stmt;
2812 }
2813 end = MAX (end, info->bitpos + info->bitsize);
4b84d9b8
JJ
2814
2815 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2816 &this_n, &n);
2817 if (ins_stmt == NULL)
2818 return false;
2819 }
2820 }
2821
2822 uint64_t cmpxchg, cmpnop;
2823 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2824
2825 /* A complete byte swap should make the symbolic number to start with
2826 the largest digit in the highest order byte. Unchanged symbolic
2827 number indicates a read with same endianness as target architecture. */
2828 if (n.n != cmpnop && n.n != cmpxchg)
2829 return false;
2830
2831 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2832 return false;
2833
bd909071
JJ
2834 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2835 merged_store->start, end, first_earlier, first))
c5679c37
JJ
2836 return false;
2837
4b84d9b8
JJ
2838 /* Don't handle memory copy this way if normal non-bswap processing
2839 would handle it too. */
2840 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2841 {
2842 unsigned int i;
2843 for (i = first; i <= last; ++i)
2844 if (m_store_info[i]->rhs_code != MEM_REF)
2845 break;
2846 if (i == last + 1)
2847 return false;
2848 }
2849
2850 if (n.n == cmpxchg)
2851 switch (try_size)
2852 {
2853 case 16:
2854 /* Will emit LROTATE_EXPR. */
2855 break;
2856 case 32:
2857 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2858 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2859 break;
2860 return false;
2861 case 64:
2862 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2863 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2864 break;
2865 return false;
2866 default:
2867 gcc_unreachable ();
2868 }
2869
2870 if (!allow_unaligned && n.base_addr)
2871 {
2872 unsigned int align = get_object_alignment (n.src);
2873 if (align < try_size)
2874 return false;
2875 }
2876
2877 /* If each load has vuse of the corresponding store, need to verify
2878 the loads can be sunk right before the last store. */
2879 if (vuse_store == 1)
2880 {
2881 auto_vec<tree, 64> refs;
2882 for (unsigned int i = first; i <= last; ++i)
2883 gather_bswap_load_refs (&refs,
2884 gimple_assign_rhs1 (m_store_info[i]->stmt));
2885
3f207ab3 2886 for (tree ref : refs)
4b84d9b8
JJ
2887 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2888 return false;
2889 n.vuse = NULL_TREE;
2890 }
2891
2892 infof->n = n;
2893 infof->ins_stmt = ins_stmt;
2894 for (unsigned int i = first; i <= last; ++i)
2895 {
2896 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2897 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2898 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2899 if (i != first)
2900 merged_store->merge_into (m_store_info[i]);
2901 }
2902
2903 return true;
2904}
2905
f663d9ad
KT
2906/* Go through the candidate stores recorded in m_store_info and merge them
2907 into merged_store_group objects recorded into m_merged_store_groups
2908 representing the widened stores. Return true if coalescing was successful
2909 and the number of widened stores is fewer than the original number
2910 of stores. */
2911
2912bool
2913imm_store_chain_info::coalesce_immediate_stores ()
2914{
2915 /* Anything less can't be processed. */
2916 if (m_store_info.length () < 2)
2917 return false;
2918
2919 if (dump_file && (dump_flags & TDF_DETAILS))
c94c3532 2920 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
f663d9ad
KT
2921 m_store_info.length ());
2922
2923 store_immediate_info *info;
4b84d9b8 2924 unsigned int i, ignore = 0;
bd909071
JJ
2925 unsigned int first_earlier = 0;
2926 unsigned int end_earlier = 0;
f663d9ad
KT
2927
2928 /* Order the stores by the bitposition they write to. */
2929 m_store_info.qsort (sort_by_bitpos);
2930
2931 info = m_store_info[0];
2932 merged_store_group *merged_store = new merged_store_group (info);
c94c3532
EB
2933 if (dump_file && (dump_flags & TDF_DETAILS))
2934 fputs ("New store group\n", dump_file);
f663d9ad
KT
2935
2936 FOR_EACH_VEC_ELT (m_store_info, i, info)
2937 {
3afd514b
JJ
2938 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2939
4b84d9b8 2940 if (i <= ignore)
c94c3532 2941 goto done;
f663d9ad 2942
bd909071
JJ
2943 while (first_earlier < end_earlier
2944 && (m_store_info[first_earlier]->bitpos
2945 + m_store_info[first_earlier]->bitsize
2946 <= merged_store->start))
2947 first_earlier++;
2948
4b84d9b8
JJ
2949 /* First try to handle group of stores like:
2950 p[0] = data >> 24;
2951 p[1] = data >> 16;
2952 p[2] = data >> 8;
2953 p[3] = data;
2954 using the bswap framework. */
2955 if (info->bitpos == merged_store->start + merged_store->width
2956 && merged_store->stores.length () == 1
2957 && merged_store->stores[0]->ins_stmt != NULL
cb76fcd7 2958 && info->lp_nr == merged_store->lp_nr
4b84d9b8
JJ
2959 && info->ins_stmt != NULL)
2960 {
2961 unsigned int try_size;
2962 for (try_size = 64; try_size >= 16; try_size >>= 1)
bd909071
JJ
2963 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2964 first_earlier))
4b84d9b8
JJ
2965 break;
2966
2967 if (try_size >= 16)
2968 {
2969 ignore = i + merged_store->stores.length () - 1;
2970 m_merged_store_groups.safe_push (merged_store);
2971 if (ignore < m_store_info.length ())
bd909071
JJ
2972 {
2973 merged_store = new merged_store_group (m_store_info[ignore]);
2974 end_earlier = ignore;
2975 }
4b84d9b8
JJ
2976 else
2977 merged_store = NULL;
c94c3532 2978 goto done;
4b84d9b8
JJ
2979 }
2980 }
2981
3afd514b
JJ
2982 new_bitregion_start
2983 = MIN (merged_store->bitregion_start, info->bitregion_start);
2984 new_bitregion_end
2985 = MAX (merged_store->bitregion_end, info->bitregion_end);
2986
2987 if (info->order >= merged_store->first_nonmergeable_order
2988 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
028d4092 2989 > (unsigned) param_store_merging_max_size))
18e0c3d1
JJ
2990 ;
2991
f663d9ad
KT
2992 /* |---store 1---|
2993 |---store 2---|
4b84d9b8 2994 Overlapping stores. */
18e0c3d1 2995 else if (IN_RANGE (info->bitpos, merged_store->start,
4d213bf6
JJ
2996 merged_store->start + merged_store->width - 1)
2997 /* |---store 1---||---store 2---|
2998 Handle also the consecutive INTEGER_CST stores case here,
2999 as we have here the code to deal with overlaps. */
3000 || (info->bitregion_start <= merged_store->bitregion_end
3001 && info->rhs_code == INTEGER_CST
3002 && merged_store->only_constants
3003 && merged_store->can_be_merged_into (info)))
f663d9ad 3004 {
245f6de1 3005 /* Only allow overlapping stores of constants. */
629387a6
EB
3006 if (info->rhs_code == INTEGER_CST
3007 && merged_store->only_constants
3008 && info->lp_nr == merged_store->lp_nr)
245f6de1 3009 {
bd909071
JJ
3010 unsigned int first_order
3011 = MIN (merged_store->first_order, info->order);
6cd4c66e
JJ
3012 unsigned int last_order
3013 = MAX (merged_store->last_order, info->order);
3014 unsigned HOST_WIDE_INT end
3015 = MAX (merged_store->start + merged_store->width,
3016 info->bitpos + info->bitsize);
bd909071
JJ
3017 if (check_no_overlap (m_store_info, i, true, first_order,
3018 last_order, merged_store->start, end,
3019 first_earlier, end_earlier))
6cd4c66e
JJ
3020 {
3021 /* check_no_overlap call above made sure there are no
3022 overlapping stores with non-INTEGER_CST rhs_code
3023 in between the first and last of the stores we've
3024 just merged. If there are any INTEGER_CST rhs_code
3025 stores in between, we need to merge_overlapping them
3026 even if in the sort_by_bitpos order there are other
3027 overlapping stores in between. Keep those stores as is.
3028 Example:
3029 MEM[(int *)p_28] = 0;
3030 MEM[(char *)p_28 + 3B] = 1;
3031 MEM[(char *)p_28 + 1B] = 2;
3032 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
3033 We can't merge the zero store with the store of two and
3034 not merge anything else, because the store of one is
3035 in the original order in between those two, but in
3036 store_by_bitpos order it comes after the last store that
3037 we can't merge with them. We can merge the first 3 stores
3038 and keep the last store as is though. */
18e0c3d1
JJ
3039 unsigned int len = m_store_info.length ();
3040 unsigned int try_order = last_order;
3041 unsigned int first_nonmergeable_order;
3042 unsigned int k;
3043 bool last_iter = false;
3044 int attempts = 0;
3045 do
6cd4c66e 3046 {
18e0c3d1 3047 unsigned int max_order = 0;
bd909071 3048 unsigned int min_order = first_order;
18e0c3d1
JJ
3049 unsigned first_nonmergeable_int_order = ~0U;
3050 unsigned HOST_WIDE_INT this_end = end;
3051 k = i;
3052 first_nonmergeable_order = ~0U;
3053 for (unsigned int j = i + 1; j < len; ++j)
6cd4c66e 3054 {
18e0c3d1
JJ
3055 store_immediate_info *info2 = m_store_info[j];
3056 if (info2->bitpos >= this_end)
3057 break;
3058 if (info2->order < try_order)
6cd4c66e 3059 {
4119cd69
JJ
3060 if (info2->rhs_code != INTEGER_CST
3061 || info2->lp_nr != merged_store->lp_nr)
18e0c3d1
JJ
3062 {
3063 /* Normally check_no_overlap makes sure this
3064 doesn't happen, but if end grows below,
3065 then we need to process more stores than
3066 check_no_overlap verified. Example:
3067 MEM[(int *)p_5] = 0;
3068 MEM[(short *)p_5 + 3B] = 1;
3069 MEM[(char *)p_5 + 4B] = _9;
3070 MEM[(char *)p_5 + 2B] = 2; */
3071 k = 0;
3072 break;
3073 }
3074 k = j;
bd909071 3075 min_order = MIN (min_order, info2->order);
18e0c3d1
JJ
3076 this_end = MAX (this_end,
3077 info2->bitpos + info2->bitsize);
6cd4c66e 3078 }
18e0c3d1 3079 else if (info2->rhs_code == INTEGER_CST
4119cd69 3080 && info2->lp_nr == merged_store->lp_nr
18e0c3d1
JJ
3081 && !last_iter)
3082 {
3083 max_order = MAX (max_order, info2->order + 1);
3084 first_nonmergeable_int_order
3085 = MIN (first_nonmergeable_int_order,
3086 info2->order);
3087 }
3088 else
3089 first_nonmergeable_order
3090 = MIN (first_nonmergeable_order, info2->order);
6cd4c66e 3091 }
bd909071
JJ
3092 if (k > i
3093 && !check_no_overlap (m_store_info, len - 1, true,
3094 min_order, try_order,
3095 merged_store->start, this_end,
3096 first_earlier, end_earlier))
3097 k = 0;
18e0c3d1
JJ
3098 if (k == 0)
3099 {
3100 if (last_order == try_order)
3101 break;
3102 /* If this failed, but only because we grew
3103 try_order, retry with the last working one,
3104 so that we merge at least something. */
3105 try_order = last_order;
3106 last_iter = true;
3107 continue;
3108 }
3109 last_order = try_order;
3110 /* Retry with a larger try_order to see if we could
3111 merge some further INTEGER_CST stores. */
3112 if (max_order
3113 && (first_nonmergeable_int_order
3114 < first_nonmergeable_order))
3115 {
3116 try_order = MIN (max_order,
3117 first_nonmergeable_order);
3118 try_order
3119 = MIN (try_order,
3120 merged_store->first_nonmergeable_order);
3121 if (try_order > last_order && ++attempts < 16)
3122 continue;
3123 }
3124 first_nonmergeable_order
3125 = MIN (first_nonmergeable_order,
3126 first_nonmergeable_int_order);
3127 end = this_end;
3128 break;
6cd4c66e 3129 }
18e0c3d1 3130 while (1);
6cd4c66e
JJ
3131
3132 if (k != 0)
3133 {
3134 merged_store->merge_overlapping (info);
3135
18e0c3d1
JJ
3136 merged_store->first_nonmergeable_order
3137 = MIN (merged_store->first_nonmergeable_order,
3138 first_nonmergeable_order);
3139
6cd4c66e
JJ
3140 for (unsigned int j = i + 1; j <= k; j++)
3141 {
3142 store_immediate_info *info2 = m_store_info[j];
3143 gcc_assert (info2->bitpos < end);
3144 if (info2->order < last_order)
3145 {
3146 gcc_assert (info2->rhs_code == INTEGER_CST);
18e0c3d1
JJ
3147 if (info != info2)
3148 merged_store->merge_overlapping (info2);
6cd4c66e
JJ
3149 }
3150 /* Other stores are kept and not merged in any
3151 way. */
3152 }
3153 ignore = k;
3154 goto done;
3155 }
3156 }
245f6de1 3157 }
f663d9ad 3158 }
245f6de1
JJ
3159 /* |---store 1---||---store 2---|
3160 This store is consecutive to the previous one.
3161 Merge it into the current store group. There can be gaps in between
3162 the stores, but there can't be gaps in between bitregions. */
c94c3532 3163 else if (info->bitregion_start <= merged_store->bitregion_end
7f5a3982 3164 && merged_store->can_be_merged_into (info))
f663d9ad 3165 {
245f6de1
JJ
3166 store_immediate_info *infof = merged_store->stores[0];
3167
3168 /* All the rhs_code ops that take 2 operands are commutative,
3169 swap the operands if it could make the operands compatible. */
3170 if (infof->ops[0].base_addr
3171 && infof->ops[1].base_addr
3172 && info->ops[0].base_addr
3173 && info->ops[1].base_addr
8a91d545
RS
3174 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
3175 info->bitpos - infof->bitpos)
245f6de1
JJ
3176 && operand_equal_p (info->ops[1].base_addr,
3177 infof->ops[0].base_addr, 0))
127ef369
JJ
3178 {
3179 std::swap (info->ops[0], info->ops[1]);
3180 info->ops_swapped_p = true;
3181 }
4d213bf6 3182 if (check_no_overlap (m_store_info, i, false,
bd909071 3183 MIN (merged_store->first_order, info->order),
a7fe6482 3184 MAX (merged_store->last_order, info->order),
bd909071 3185 merged_store->start,
a7fe6482 3186 MAX (merged_store->start + merged_store->width,
bd909071
JJ
3187 info->bitpos + info->bitsize),
3188 first_earlier, end_earlier))
245f6de1 3189 {
7f5a3982
EB
3190 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3191 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3192 {
3193 info->rhs_code = BIT_INSERT_EXPR;
3194 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3195 info->ops[0].base_addr = NULL_TREE;
3196 }
3197 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3198 {
3f207ab3 3199 for (store_immediate_info *infoj : merged_store->stores)
7f5a3982
EB
3200 {
3201 infoj->rhs_code = BIT_INSERT_EXPR;
3202 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3203 infoj->ops[0].base_addr = NULL_TREE;
3204 }
e362a897 3205 merged_store->bit_insertion = true;
7f5a3982
EB
3206 }
3207 if ((infof->ops[0].base_addr
3208 ? compatible_load_p (merged_store, info, base_addr, 0)
3209 : !info->ops[0].base_addr)
3210 && (infof->ops[1].base_addr
3211 ? compatible_load_p (merged_store, info, base_addr, 1)
3212 : !info->ops[1].base_addr))
3213 {
3214 merged_store->merge_into (info);
3215 goto done;
3216 }
245f6de1
JJ
3217 }
3218 }
f663d9ad 3219
245f6de1
JJ
3220 /* |---store 1---| <gap> |---store 2---|.
3221 Gap between stores or the rhs not compatible. Start a new group. */
f663d9ad 3222
245f6de1
JJ
3223 /* Try to apply all the stores recorded for the group to determine
3224 the bitpattern they write and discard it if that fails.
3225 This will also reject single-store groups. */
c94c3532 3226 if (merged_store->apply_stores ())
245f6de1 3227 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3228 else
3229 delete merged_store;
f663d9ad 3230
245f6de1 3231 merged_store = new merged_store_group (info);
bd909071 3232 end_earlier = i;
c94c3532
EB
3233 if (dump_file && (dump_flags & TDF_DETAILS))
3234 fputs ("New store group\n", dump_file);
3235
3236 done:
3237 if (dump_file && (dump_flags & TDF_DETAILS))
3238 {
3239 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3240 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3241 i, info->bitsize, info->bitpos);
3242 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3243 fputc ('\n', dump_file);
3244 }
f663d9ad
KT
3245 }
3246
a62b3dc5 3247 /* Record or discard the last store group. */
4b84d9b8
JJ
3248 if (merged_store)
3249 {
c94c3532 3250 if (merged_store->apply_stores ())
4b84d9b8 3251 m_merged_store_groups.safe_push (merged_store);
c94c3532
EB
3252 else
3253 delete merged_store;
4b84d9b8 3254 }
f663d9ad
KT
3255
3256 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
c94c3532 3257
f663d9ad
KT
3258 bool success
3259 = !m_merged_store_groups.is_empty ()
3260 && m_merged_store_groups.length () < m_store_info.length ();
3261
3262 if (success && dump_file)
c94c3532 3263 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
a62b3dc5 3264 m_merged_store_groups.length ());
f663d9ad
KT
3265
3266 return success;
3267}
3268
245f6de1
JJ
3269/* Return the type to use for the merged stores or loads described by STMTS.
3270 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3271 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3272 of the MEM_REFs if any. */
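/* As an illustrative sketch (not lifted from actual pass output): if all
   constituent accesses share the same alias pointer type, e.g. two
   component stores into the same struct, that common type is returned;
   as soon as two accesses disagree, the conservative ptr_type_node is
   returned instead.  Likewise the dependence info is kept only if every
   access is a MEM_REF with identical MR_DEPENDENCE_CLIQUE and
   MR_DEPENDENCE_BASE, otherwise *CLIQUEP and *BASEP are cleared. */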
f663d9ad
KT
3273
3274static tree
245f6de1
JJ
3275get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3276 unsigned short *cliquep, unsigned short *basep)
f663d9ad
KT
3277{
3278 gimple *stmt;
3279 unsigned int i;
245f6de1
JJ
3280 tree type = NULL_TREE;
3281 tree ret = NULL_TREE;
3282 *cliquep = 0;
3283 *basep = 0;
f663d9ad
KT
3284
3285 FOR_EACH_VEC_ELT (stmts, i, stmt)
3286 {
245f6de1
JJ
3287 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3288 : gimple_assign_lhs (stmt);
3289 tree type1 = reference_alias_ptr_type (ref);
3290 tree base = get_base_address (ref);
f663d9ad 3291
245f6de1
JJ
3292 if (i == 0)
3293 {
3294 if (TREE_CODE (base) == MEM_REF)
3295 {
3296 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3297 *basep = MR_DEPENDENCE_BASE (base);
3298 }
3299 ret = type = type1;
3300 continue;
3301 }
f663d9ad 3302 if (!alias_ptr_types_compatible_p (type, type1))
245f6de1
JJ
3303 ret = ptr_type_node;
3304 if (TREE_CODE (base) != MEM_REF
3305 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3306 || *basep != MR_DEPENDENCE_BASE (base))
3307 {
3308 *cliquep = 0;
3309 *basep = 0;
3310 }
f663d9ad 3311 }
245f6de1 3312 return ret;
f663d9ad
KT
3313}
3314
3315/* Return the location_t information we can find among the statements
3316 in STMTS. */
3317
3318static location_t
245f6de1 3319get_location_for_stmts (vec<gimple *> &stmts)
f663d9ad 3320{
3f207ab3 3321 for (gimple *stmt : stmts)
f663d9ad
KT
3322 if (gimple_has_location (stmt))
3323 return gimple_location (stmt);
3324
3325 return UNKNOWN_LOCATION;
3326}
3327
3328/* Used to describe a store resulting from splitting a wide store into
3329 smaller regularly-sized stores in split_group. */
3330
6c1dae73 3331class split_store
f663d9ad 3332{
6c1dae73 3333public:
f663d9ad
KT
3334 unsigned HOST_WIDE_INT bytepos;
3335 unsigned HOST_WIDE_INT size;
3336 unsigned HOST_WIDE_INT align;
245f6de1 3337 auto_vec<store_immediate_info *> orig_stores;
a62b3dc5
JJ
3338 /* True if there is a single orig stmt covering the whole split store. */
3339 bool orig;
f663d9ad
KT
3340 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3341 unsigned HOST_WIDE_INT);
3342};
3343
3344/* Simple constructor. */
3345
3346split_store::split_store (unsigned HOST_WIDE_INT bp,
3347 unsigned HOST_WIDE_INT sz,
3348 unsigned HOST_WIDE_INT al)
a62b3dc5 3349 : bytepos (bp), size (sz), align (al), orig (false)
f663d9ad 3350{
245f6de1 3351 orig_stores.create (0);
f663d9ad
KT
3352}
3353
245f6de1
JJ
3354/* Record all stores in GROUP that write to the region starting at BITPOS
3355 and of size BITSIZE. Record infos for such statements in STORES if
3356 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
5384a802
JJ
3357 if there is exactly one original store in the range (in that case ignore
3358 clobber stmts, unless there are only clobber stmts). */
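/* A hypothetical illustration of the interface: with GROUP holding stores
   at bit ranges [0, 16), [16, 24) and [24, 32), a query for BITPOS 0 and
   BITSIZE 32 records all three infos in STORES but returns NULL because
   more than one non-clobber store falls into the range, whereas a query
   for BITPOS 16 and BITSIZE 8 returns the single [16, 24) store. */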
f663d9ad 3359
a62b3dc5 3360static store_immediate_info *
99b1c316 3361find_constituent_stores (class merged_store_group *group,
245f6de1
JJ
3362 vec<store_immediate_info *> *stores,
3363 unsigned int *first,
3364 unsigned HOST_WIDE_INT bitpos,
3365 unsigned HOST_WIDE_INT bitsize)
f663d9ad 3366{
a62b3dc5 3367 store_immediate_info *info, *ret = NULL;
f663d9ad 3368 unsigned int i;
a62b3dc5
JJ
3369 bool second = false;
3370 bool update_first = true;
f663d9ad 3371 unsigned HOST_WIDE_INT end = bitpos + bitsize;
a62b3dc5 3372 for (i = *first; group->stores.iterate (i, &info); ++i)
f663d9ad
KT
3373 {
3374 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3375 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
a62b3dc5
JJ
3376 if (stmt_end <= bitpos)
3377 {
3378 /* BITPOS passed to this function never decreases from within the
3379 same split_group call, so optimize and don't scan info records
3380 which are known to end before or at BITPOS next time.
3381 Only do it if all stores before this one also pass this. */
3382 if (update_first)
3383 *first = i + 1;
3384 continue;
3385 }
3386 else
3387 update_first = false;
3388
f663d9ad 3389 /* The stores in GROUP are ordered by bitposition so if we're past
a62b3dc5
JJ
3390 the region for this group return early. */
3391 if (stmt_start >= end)
3392 return ret;
3393
5384a802
JJ
3394 if (gimple_clobber_p (info->stmt))
3395 {
3396 if (stores)
3397 stores->safe_push (info);
3398 if (ret == NULL)
3399 ret = info;
3400 continue;
3401 }
245f6de1 3402 if (stores)
a62b3dc5 3403 {
245f6de1 3404 stores->safe_push (info);
5384a802 3405 if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3406 {
3407 ret = NULL;
3408 second = true;
3409 }
3410 }
5384a802 3411 else if (ret && !gimple_clobber_p (ret->stmt))
a62b3dc5
JJ
3412 return NULL;
3413 if (!second)
3414 ret = info;
f663d9ad 3415 }
a62b3dc5 3416 return ret;
f663d9ad
KT
3417}
3418
d7a9512e
JJ
3419/* Return how many SSA_NAMEs used to compute the value stored by INFO
3420 have multiple uses. If any SSA_NAME has multiple uses, also
3421 count the statements needed to compute it. */
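/* A sketch of the intent (hypothetical, simplified GIMPLE):

     _1 = *q;
     _2 = _1 ^ 3;
     *p = _2;

   yields 0 when _1 and _2 each have a single use, because no extra
   statement has to be kept around to rematerialize the stored value;
   if _1 is also used by an unrelated statement, the load has to stay
   and 1 is returned. */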
3422
3423static unsigned
3424count_multiple_uses (store_immediate_info *info)
3425{
3426 gimple *stmt = info->stmt;
3427 unsigned ret = 0;
3428 switch (info->rhs_code)
3429 {
3430 case INTEGER_CST:
e362a897 3431 case STRING_CST:
d7a9512e
JJ
3432 return 0;
3433 case BIT_AND_EXPR:
3434 case BIT_IOR_EXPR:
3435 case BIT_XOR_EXPR:
d60edaba
JJ
3436 if (info->bit_not_p)
3437 {
3438 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3439 ret = 1; /* Fall through below to return
3440 the BIT_NOT_EXPR stmt and then
3441 BIT_{AND,IOR,XOR}_EXPR and anything it
3442 uses. */
3443 else
3444 /* stmt is after this the BIT_NOT_EXPR. */
3445 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3446 }
d7a9512e
JJ
3447 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3448 {
3449 ret += 1 + info->ops[0].bit_not_p;
3450 if (info->ops[1].base_addr)
3451 ret += 1 + info->ops[1].bit_not_p;
3452 return ret + 1;
3453 }
3454 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3455 /* stmt is now the BIT_*_EXPR. */
3456 if (!has_single_use (gimple_assign_rhs1 (stmt)))
127ef369
JJ
3457 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3458 else if (info->ops[info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3459 {
3460 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3461 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3462 ++ret;
3463 }
3464 if (info->ops[1].base_addr == NULL_TREE)
127ef369
JJ
3465 {
3466 gcc_checking_assert (!info->ops_swapped_p);
3467 return ret;
3468 }
d7a9512e 3469 if (!has_single_use (gimple_assign_rhs2 (stmt)))
127ef369
JJ
3470 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3471 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
d7a9512e
JJ
3472 {
3473 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3474 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3475 ++ret;
3476 }
3477 return ret;
3478 case MEM_REF:
3479 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3480 return 1 + info->ops[0].bit_not_p;
3481 else if (info->ops[0].bit_not_p)
3482 {
3483 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3484 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3485 return 1;
3486 }
3487 return 0;
c94c3532
EB
3488 case BIT_INSERT_EXPR:
3489 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
d7a9512e
JJ
3490 default:
3491 gcc_unreachable ();
3492 }
3493}
3494
f663d9ad 3495/* Split a merged store described by GROUP by populating the SPLIT_STORES
a62b3dc5
JJ
3496 vector (if non-NULL) with split_store structs describing the byte offset
3497 (from the base), the bit size and alignment of each store as well as the
3498 original statements involved in each such split group.
f663d9ad
KT
3499 This is to separate the splitting strategy from the statement
3500 building/emission/linking done in output_merged_store.
a62b3dc5 3501 Return number of new stores.
245f6de1
JJ
3502 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3503 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3afd514b
JJ
3504 BZERO_FIRST may be true only when the first store covers the whole group
3505 and clears it; if BZERO_FIRST is true, keep that first store in the set
3506 unmodified and emit further stores for the overrides only.
a62b3dc5
JJ
3507 If SPLIT_STORES is NULL, it is just a dry run to count number of
3508 new stores. */
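/* An illustrative scenario (numbers are hypothetical): for a group
   covering 8 bytes at a destination whose known alignment is 4 bytes, on
   a target where MAX_STORE_BITSIZE is 64, a dry run with
   ALLOW_UNALIGNED_STORE false would propose two 4-byte split stores,
   while allowing unaligned stores would propose a single 8-byte store,
   assuming constant values and no padding bytes to skip in either
   case. */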
f663d9ad 3509
a62b3dc5 3510static unsigned int
245f6de1 3511split_group (merged_store_group *group, bool allow_unaligned_store,
3afd514b 3512 bool allow_unaligned_load, bool bzero_first,
99b1c316 3513 vec<split_store *> *split_stores,
d7a9512e
JJ
3514 unsigned *total_orig,
3515 unsigned *total_new)
f663d9ad 3516{
a62b3dc5
JJ
3517 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3518 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
f663d9ad 3519 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
a62b3dc5
JJ
3520 unsigned HOST_WIDE_INT group_align = group->align;
3521 unsigned HOST_WIDE_INT align_base = group->align_base;
245f6de1 3522 unsigned HOST_WIDE_INT group_load_align = group_align;
d7a9512e 3523 bool any_orig = false;
f663d9ad 3524
f663d9ad
KT
3525 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3526
e362a897
EB
3527 /* For the bswap framework using sets of stores, all the checking has been
3528 done earlier in try_coalesce_bswap and the result always needs to be
3529 emitted as a single store. Likewise for string concatenation. */
4b84d9b8 3530 if (group->stores[0]->rhs_code == LROTATE_EXPR
e362a897
EB
3531 || group->stores[0]->rhs_code == NOP_EXPR
3532 || group->string_concatenation)
4b84d9b8 3533 {
3afd514b 3534 gcc_assert (!bzero_first);
4b84d9b8
JJ
3535 if (total_orig)
3536 {
3537 /* Avoid the old/new stmt count heuristics. It should
3538 always be beneficial. */
3539 total_new[0] = 1;
3540 total_orig[0] = 2;
3541 }
3542
3543 if (split_stores)
3544 {
3545 unsigned HOST_WIDE_INT align_bitpos
3546 = (group->start - align_base) & (group_align - 1);
3547 unsigned HOST_WIDE_INT align = group_align;
3548 if (align_bitpos)
3549 align = least_bit_hwi (align_bitpos);
3550 bytepos = group->start / BITS_PER_UNIT;
99b1c316 3551 split_store *store
4b84d9b8
JJ
3552 = new split_store (bytepos, group->width, align);
3553 unsigned int first = 0;
3554 find_constituent_stores (group, &store->orig_stores,
3555 &first, group->start, group->width);
3556 split_stores->safe_push (store);
3557 }
3558
3559 return 1;
3560 }
3561
a62b3dc5 3562 unsigned int ret = 0, first = 0;
f663d9ad 3563 unsigned HOST_WIDE_INT try_pos = bytepos;
f663d9ad 3564
d7a9512e
JJ
3565 if (total_orig)
3566 {
3567 unsigned int i;
3568 store_immediate_info *info = group->stores[0];
3569
3570 total_new[0] = 0;
3571 total_orig[0] = 1; /* The orig store. */
3572 info = group->stores[0];
3573 if (info->ops[0].base_addr)
a6fbd154 3574 total_orig[0]++;
d7a9512e 3575 if (info->ops[1].base_addr)
a6fbd154 3576 total_orig[0]++;
d7a9512e
JJ
3577 switch (info->rhs_code)
3578 {
3579 case BIT_AND_EXPR:
3580 case BIT_IOR_EXPR:
3581 case BIT_XOR_EXPR:
3582 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3583 break;
3584 default:
3585 break;
3586 }
3587 total_orig[0] *= group->stores.length ();
3588
3589 FOR_EACH_VEC_ELT (group->stores, i, info)
a6fbd154
JJ
3590 {
3591 total_new[0] += count_multiple_uses (info);
3592 total_orig[0] += (info->bit_not_p
3593 + info->ops[0].bit_not_p
3594 + info->ops[1].bit_not_p);
3595 }
d7a9512e
JJ
3596 }
3597
245f6de1
JJ
3598 if (!allow_unaligned_load)
3599 for (int i = 0; i < 2; ++i)
3600 if (group->load_align[i])
3601 group_load_align = MIN (group_load_align, group->load_align[i]);
3602
3afd514b
JJ
3603 if (bzero_first)
3604 {
5384a802
JJ
3605 store_immediate_info *gstore;
3606 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3607 if (!gimple_clobber_p (gstore->stmt))
3608 break;
3609 ++first;
3afd514b
JJ
3610 ret = 1;
3611 if (split_stores)
3612 {
99b1c316 3613 split_store *store
5384a802
JJ
3614 = new split_store (bytepos, gstore->bitsize, align_base);
3615 store->orig_stores.safe_push (gstore);
3afd514b
JJ
3616 store->orig = true;
3617 any_orig = true;
3618 split_stores->safe_push (store);
3619 }
3620 }
3621
f663d9ad
KT
3622 while (size > 0)
3623 {
245f6de1 3624 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3afd514b
JJ
3625 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3626 || (bzero_first && group->val[try_pos - bytepos] == 0)))
a62b3dc5
JJ
3627 {
3628 /* Skip padding bytes. */
3629 ++try_pos;
3630 size -= BITS_PER_UNIT;
3631 continue;
3632 }
3633
f663d9ad 3634 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
a62b3dc5
JJ
3635 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3636 unsigned HOST_WIDE_INT align_bitpos
3637 = (try_bitpos - align_base) & (group_align - 1);
3638 unsigned HOST_WIDE_INT align = group_align;
5384a802 3639 bool found_orig = false;
a62b3dc5
JJ
3640 if (align_bitpos)
3641 align = least_bit_hwi (align_bitpos);
245f6de1 3642 if (!allow_unaligned_store)
a62b3dc5 3643 try_size = MIN (try_size, align);
245f6de1
JJ
3644 if (!allow_unaligned_load)
3645 {
3646 /* If we can't do or don't want to do unaligned stores
3647 as well as loads, we need to take the loads into account
3648 as well. */
3649 unsigned HOST_WIDE_INT load_align = group_load_align;
3650 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3651 if (align_bitpos)
3652 load_align = least_bit_hwi (align_bitpos);
3653 for (int i = 0; i < 2; ++i)
3654 if (group->load_align[i])
3655 {
8a91d545
RS
3656 align_bitpos
3657 = known_alignment (try_bitpos
3658 - group->stores[0]->bitpos
3659 + group->stores[0]->ops[i].bitpos
3660 - group->load_align_base[i]);
3661 if (align_bitpos & (group_load_align - 1))
245f6de1
JJ
3662 {
3663 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3664 load_align = MIN (load_align, a);
3665 }
3666 }
3667 try_size = MIN (try_size, load_align);
3668 }
a62b3dc5 3669 store_immediate_info *info
245f6de1 3670 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
5384a802 3671 if (info && !gimple_clobber_p (info->stmt))
a62b3dc5
JJ
3672 {
3673 /* If there is just one original statement for the range, see if
3674 we can just reuse the original store which could be even larger
3675 than try_size. */
3676 unsigned HOST_WIDE_INT stmt_end
3677 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
245f6de1
JJ
3678 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3679 stmt_end - try_bitpos);
a62b3dc5
JJ
3680 if (info && info->bitpos >= try_bitpos)
3681 {
5384a802
JJ
3682 store_immediate_info *info2 = NULL;
3683 unsigned int first_copy = first;
3684 if (info->bitpos > try_bitpos
3685 && stmt_end - try_bitpos <= try_size)
3686 {
3687 info2 = find_constituent_stores (group, NULL, &first_copy,
3688 try_bitpos,
3689 info->bitpos - try_bitpos);
3690 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3691 }
3692 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3693 {
3694 info2 = find_constituent_stores (group, NULL, &first_copy,
3695 stmt_end,
3696 (try_bitpos + try_size)
3697 - stmt_end);
3698 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3699 }
3700 if (info2 == NULL)
3701 {
3702 try_size = stmt_end - try_bitpos;
3703 found_orig = true;
3704 goto found;
3705 }
a62b3dc5
JJ
3706 }
3707 }
f663d9ad 3708
a62b3dc5
JJ
3709 /* Approximate store bitsize for the case when there are no padding
3710 bits. */
3711 while (try_size > size)
3712 try_size /= 2;
3713 /* Now look for whole padding bytes at the end of that bitsize. */
3714 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3715 if (group->mask[try_pos - bytepos + nonmasked - 1]
3afd514b
JJ
3716 != (unsigned char) ~0U
3717 && (!bzero_first
3718 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
a62b3dc5 3719 break;
5384a802 3720 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
a62b3dc5
JJ
3721 {
3722 /* If entire try_size range is padding, skip it. */
3723 try_pos += try_size / BITS_PER_UNIT;
3724 size -= try_size;
3725 continue;
3726 }
3727 /* Otherwise try to decrease try_size if the second half, the last 3
3728 quarters etc. are padding. */
3729 nonmasked *= BITS_PER_UNIT;
3730 while (nonmasked <= try_size / 2)
3731 try_size /= 2;
245f6de1 3732 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
a62b3dc5
JJ
3733 {
3734 /* Now look for whole padding bytes at the start of that bitsize. */
3735 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3736 for (masked = 0; masked < try_bytesize; ++masked)
3afd514b
JJ
3737 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3738 && (!bzero_first
3739 || group->val[try_pos - bytepos + masked] != 0))
a62b3dc5
JJ
3740 break;
3741 masked *= BITS_PER_UNIT;
3742 gcc_assert (masked < try_size);
3743 if (masked >= try_size / 2)
3744 {
3745 while (masked >= try_size / 2)
3746 {
3747 try_size /= 2;
3748 try_pos += try_size / BITS_PER_UNIT;
3749 size -= try_size;
3750 masked -= try_size;
3751 }
3752 /* Need to recompute the alignment, so just retry at the new
3753 position. */
3754 continue;
3755 }
3756 }
3757
3758 found:
3759 ++ret;
f663d9ad 3760
a62b3dc5
JJ
3761 if (split_stores)
3762 {
99b1c316 3763 split_store *store
a62b3dc5 3764 = new split_store (try_pos, try_size, align);
245f6de1
JJ
3765 info = find_constituent_stores (group, &store->orig_stores,
3766 &first, try_bitpos, try_size);
a62b3dc5 3767 if (info
5384a802 3768 && !gimple_clobber_p (info->stmt)
a62b3dc5 3769 && info->bitpos >= try_bitpos
5384a802
JJ
3770 && info->bitpos + info->bitsize <= try_bitpos + try_size
3771 && (store->orig_stores.length () == 1
3772 || found_orig
3773 || (info->bitpos == try_bitpos
3774 && (info->bitpos + info->bitsize
3775 == try_bitpos + try_size))))
d7a9512e
JJ
3776 {
3777 store->orig = true;
3778 any_orig = true;
3779 }
a62b3dc5
JJ
3780 split_stores->safe_push (store);
3781 }
3782
3783 try_pos += try_size / BITS_PER_UNIT;
f663d9ad 3784 size -= try_size;
f663d9ad 3785 }
a62b3dc5 3786
d7a9512e
JJ
3787 if (total_orig)
3788 {
a6fbd154 3789 unsigned int i;
99b1c316 3790 split_store *store;
d7a9512e
JJ
3791 /* If we are reusing some original stores and any of the
3792 original SSA_NAMEs had multiple uses, we need to subtract
3793 those now before we add the new ones. */
3794 if (total_new[0] && any_orig)
3795 {
d7a9512e
JJ
3796 FOR_EACH_VEC_ELT (*split_stores, i, store)
3797 if (store->orig)
3798 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3799 }
3800 total_new[0] += ret; /* The new store. */
3801 store_immediate_info *info = group->stores[0];
3802 if (info->ops[0].base_addr)
a6fbd154 3803 total_new[0] += ret;
d7a9512e 3804 if (info->ops[1].base_addr)
a6fbd154 3805 total_new[0] += ret;
d7a9512e
JJ
3806 switch (info->rhs_code)
3807 {
3808 case BIT_AND_EXPR:
3809 case BIT_IOR_EXPR:
3810 case BIT_XOR_EXPR:
3811 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3812 break;
3813 default:
3814 break;
3815 }
a6fbd154
JJ
3816 FOR_EACH_VEC_ELT (*split_stores, i, store)
3817 {
3818 unsigned int j;
3819 bool bit_not_p[3] = { false, false, false };
3820 /* If all orig_stores have certain bit_not_p set, then
3821 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3822 If some orig_stores have certain bit_not_p set, then
3823 we'd use a BIT_XOR_EXPR with a mask and need to account for
3824 it. */
3825 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3826 {
3827 if (info->ops[0].bit_not_p)
3828 bit_not_p[0] = true;
3829 if (info->ops[1].bit_not_p)
3830 bit_not_p[1] = true;
3831 if (info->bit_not_p)
3832 bit_not_p[2] = true;
3833 }
3834 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3835 }
3836
d7a9512e
JJ
3837 }
3838
a62b3dc5 3839 return ret;
f663d9ad
KT
3840}
3841
a6fbd154
JJ
3842/* Return the operation through which the operand IDX (if < 2) or
3843 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3844 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
3845 the bits should be XORed with MASK. */
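/* A hypothetical example: if only one of the two constituent stores of
   SPLIT_STORE has its bit_not_p flag set, inverting the whole value with
   BIT_NOT_EXPR would flip too many bits, so BIT_XOR_EXPR is returned and
   MASK gets all-one bits exactly over the bit range of the negated store
   (clipped to the split store) and zero bits elsewhere. */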
3846
3847static enum tree_code
3848invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3849{
3850 unsigned int i;
3851 store_immediate_info *info;
3852 unsigned int cnt = 0;
e215422f 3853 bool any_paddings = false;
a6fbd154
JJ
3854 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3855 {
3856 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3857 if (bit_not_p)
e215422f
JJ
3858 {
3859 ++cnt;
3860 tree lhs = gimple_assign_lhs (info->stmt);
3861 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3862 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3863 any_paddings = true;
3864 }
a6fbd154
JJ
3865 }
3866 mask = NULL_TREE;
3867 if (cnt == 0)
3868 return NOP_EXPR;
e215422f 3869 if (cnt == split_store->orig_stores.length () && !any_paddings)
a6fbd154
JJ
3870 return BIT_NOT_EXPR;
3871
3872 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3873 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3874 unsigned char *buf
3875 = XALLOCAVEC (unsigned char, buf_size);
3876 memset (buf, ~0U, buf_size);
3877 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3878 {
3879 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3880 if (!bit_not_p)
3881 continue;
3882 /* Clear regions with bit_not_p and invert afterwards, rather than
3883 clear regions with !bit_not_p, so that gaps in between stores aren't
3884 set in the mask. */
3885 unsigned HOST_WIDE_INT bitsize = info->bitsize;
e215422f 3886 unsigned HOST_WIDE_INT prec = bitsize;
a6fbd154 3887 unsigned int pos_in_buffer = 0;
e215422f
JJ
3888 if (any_paddings)
3889 {
3890 tree lhs = gimple_assign_lhs (info->stmt);
3891 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3892 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3893 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3894 }
a6fbd154
JJ
3895 if (info->bitpos < try_bitpos)
3896 {
3897 gcc_assert (info->bitpos + bitsize > try_bitpos);
e215422f
JJ
3898 if (!BYTES_BIG_ENDIAN)
3899 {
3900 if (prec <= try_bitpos - info->bitpos)
3901 continue;
3902 prec -= try_bitpos - info->bitpos;
3903 }
3904 bitsize -= try_bitpos - info->bitpos;
3905 if (BYTES_BIG_ENDIAN && prec > bitsize)
3906 prec = bitsize;
a6fbd154
JJ
3907 }
3908 else
3909 pos_in_buffer = info->bitpos - try_bitpos;
e215422f
JJ
3910 if (prec < bitsize)
3911 {
3912 /* If this is a bool inversion, invert just the least significant
3913 prec bits rather than all bits of it. */
3914 if (BYTES_BIG_ENDIAN)
3915 {
3916 pos_in_buffer += bitsize - prec;
3917 if (pos_in_buffer >= split_store->size)
3918 continue;
3919 }
3920 bitsize = prec;
3921 }
a6fbd154
JJ
3922 if (pos_in_buffer + bitsize > split_store->size)
3923 bitsize = split_store->size - pos_in_buffer;
3924 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3925 if (BYTES_BIG_ENDIAN)
3926 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3927 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3928 else
3929 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3930 }
3931 for (unsigned int i = 0; i < buf_size; ++i)
3932 buf[i] = ~buf[i];
3933 mask = native_interpret_expr (int_type, buf, buf_size);
3934 return BIT_XOR_EXPR;
3935}
3936
f663d9ad
KT
3937/* Given a merged store group GROUP output the widened version of it.
3938 The store chain is against the base object BASE.
3939 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3940 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3941 Make sure that the number of statements output is less than the number of
3942 original statements. If a better sequence is possible emit it and
3943 return true. */
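/* A rough example of the profitability test (hypothetical): a group of
   two adjacent 2-byte constant stores to a 4-byte-aligned location can
   be rewritten as one aligned 4-byte store, so 1 < 2 and the new
   sequence is emitted; if the best split still needs as many stores as
   the original group had, the function bails out and leaves the code
   untouched. */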
3944
3945bool
b5926e23 3946imm_store_chain_info::output_merged_store (merged_store_group *group)
f663d9ad 3947{
e362a897 3948 const unsigned HOST_WIDE_INT start_byte_pos
a62b3dc5 3949 = group->bitregion_start / BITS_PER_UNIT;
f663d9ad
KT
3950 unsigned int orig_num_stmts = group->stores.length ();
3951 if (orig_num_stmts < 2)
3952 return false;
3953
245f6de1 3954 bool allow_unaligned_store
028d4092 3955 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
245f6de1 3956 bool allow_unaligned_load = allow_unaligned_store;
3afd514b 3957 bool bzero_first = false;
5384a802
JJ
3958 store_immediate_info *store;
3959 unsigned int num_clobber_stmts = 0;
3960 if (group->stores[0]->rhs_code == INTEGER_CST)
3961 {
e362a897 3962 unsigned int i;
5384a802
JJ
3963 FOR_EACH_VEC_ELT (group->stores, i, store)
3964 if (gimple_clobber_p (store->stmt))
3965 num_clobber_stmts++;
3966 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3967 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3968 && group->start == store->bitpos
3969 && group->width == store->bitsize
3970 && (group->start % BITS_PER_UNIT) == 0
3971 && (group->width % BITS_PER_UNIT) == 0)
3972 {
3973 bzero_first = true;
3974 break;
3975 }
3976 else
3977 break;
3978 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3979 if (gimple_clobber_p (store->stmt))
3980 num_clobber_stmts++;
3981 if (num_clobber_stmts == orig_num_stmts)
3982 return false;
3983 orig_num_stmts -= num_clobber_stmts;
3984 }
3afd514b 3985 if (allow_unaligned_store || bzero_first)
a62b3dc5
JJ
3986 {
3987 /* If unaligned stores are allowed, see how many stores we'd emit
3988 for unaligned and how many stores we'd emit for aligned stores.
3afd514b
JJ
3989 Only use unaligned stores if it allows fewer stores than aligned.
3990 Similarly, if the whole region is cleared first, prefer expanding
3991 that clear together with the later stores rather than expanding the
3992 clear first followed by further merged stores. */
21f65995 3993 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
3afd514b
JJ
3994 int pass_min = 0;
3995 for (int pass = 0; pass < 4; ++pass)
3996 {
3997 if (!allow_unaligned_store && (pass & 1) != 0)
3998 continue;
3999 if (!bzero_first && (pass & 2) != 0)
4000 continue;
4001 cnt[pass] = split_group (group, (pass & 1) != 0,
4002 allow_unaligned_load, (pass & 2) != 0,
4003 NULL, NULL, NULL);
4004 if (cnt[pass] < cnt[pass_min])
4005 pass_min = pass;
4006 }
4007 if ((pass_min & 1) == 0)
245f6de1 4008 allow_unaligned_store = false;
3afd514b
JJ
4009 if ((pass_min & 2) == 0)
4010 bzero_first = false;
a62b3dc5 4011 }
e362a897
EB
4012
4013 auto_vec<class split_store *, 32> split_stores;
4014 split_store *split_store;
4015 unsigned total_orig, total_new, i;
3afd514b 4016 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
d7a9512e 4017 &split_stores, &total_orig, &total_new);
a62b3dc5 4018
5384a802
JJ
4019 /* Determine if there is a clobber covering the whole group at the start,
4020 followed by proposed split stores that cover the whole group. In that
4021 case, prefer the transformation even if
4022 split_stores.length () == orig_num_stmts. */
4023 bool clobber_first = false;
4024 if (num_clobber_stmts
4025 && gimple_clobber_p (group->stores[0]->stmt)
4026 && group->start == group->stores[0]->bitpos
4027 && group->width == group->stores[0]->bitsize
4028 && (group->start % BITS_PER_UNIT) == 0
4029 && (group->width % BITS_PER_UNIT) == 0)
4030 {
4031 clobber_first = true;
4032 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
4033 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4034 if (split_store->bytepos != pos)
4035 {
4036 clobber_first = false;
4037 break;
4038 }
4039 else
4040 pos += split_store->size / BITS_PER_UNIT;
4041 if (pos != (group->start + group->width) / BITS_PER_UNIT)
4042 clobber_first = false;
4043 }
4044
4045 if (split_stores.length () >= orig_num_stmts + clobber_first)
a62b3dc5 4046 {
5384a802 4047
a62b3dc5
JJ
4048 /* We didn't manage to reduce the number of statements. Bail out. */
4049 if (dump_file && (dump_flags & TDF_DETAILS))
d7a9512e
JJ
4050 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4051 " Not profitable to emit new sequence.\n",
4052 orig_num_stmts);
dd172744
RB
4053 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4054 delete split_store;
a62b3dc5
JJ
4055 return false;
4056 }
d7a9512e
JJ
4057 if (total_orig <= total_new)
4058 {
4059 /* If number of estimated new statements is above estimated original
4060 statements, bail out too. */
4061 if (dump_file && (dump_flags & TDF_DETAILS))
4062 fprintf (dump_file, "Estimated number of original stmts (%u)"
4063 " not larger than estimated number of new"
4064 " stmts (%u).\n",
4065 total_orig, total_new);
dd172744
RB
4066 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4067 delete split_store;
4b84d9b8 4068 return false;
d7a9512e 4069 }
5384a802
JJ
4070 if (group->stores[0]->rhs_code == INTEGER_CST)
4071 {
4072 bool all_orig = true;
4073 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4074 if (!split_store->orig)
4075 {
4076 all_orig = false;
4077 break;
4078 }
4079 if (all_orig)
4080 {
4081 unsigned int cnt = split_stores.length ();
4082 store_immediate_info *store;
4083 FOR_EACH_VEC_ELT (group->stores, i, store)
4084 if (gimple_clobber_p (store->stmt))
4085 ++cnt;
4086 /* Punt if we wouldn't make any real changes, i.e. keep all
4087 orig stmts + all clobbers. */
4088 if (cnt == group->stores.length ())
4089 {
4090 if (dump_file && (dump_flags & TDF_DETAILS))
4091 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4092 " Not profitable to emit new sequence.\n",
4093 orig_num_stmts);
4094 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4095 delete split_store;
4096 return false;
4097 }
4098 }
4099 }
f663d9ad
KT
4100
4101 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
4102 gimple_seq seq = NULL;
f663d9ad
KT
4103 tree last_vdef, new_vuse;
4104 last_vdef = gimple_vdef (group->last_stmt);
4105 new_vuse = gimple_vuse (group->last_stmt);
4b84d9b8
JJ
4106 tree bswap_res = NULL_TREE;
4107
5384a802
JJ
4108 /* Clobbers are not removed. */
4109 if (gimple_clobber_p (group->last_stmt))
4110 {
4111 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4112 gimple_set_vdef (group->last_stmt, new_vuse);
4113 }
4114
4b84d9b8
JJ
4115 if (group->stores[0]->rhs_code == LROTATE_EXPR
4116 || group->stores[0]->rhs_code == NOP_EXPR)
4117 {
4118 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
4119 gimple *ins_stmt = group->stores[0]->ins_stmt;
4120 struct symbolic_number *n = &group->stores[0]->n;
4121 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
4122
4123 switch (n->range)
4124 {
4125 case 16:
4126 load_type = bswap_type = uint16_type_node;
4127 break;
4128 case 32:
4129 load_type = uint32_type_node;
4130 if (bswap)
4131 {
4132 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4133 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4134 }
4135 break;
4136 case 64:
4137 load_type = uint64_type_node;
4138 if (bswap)
4139 {
4140 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4141 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4142 }
4143 break;
4144 default:
4145 gcc_unreachable ();
4146 }
4147
4148 /* If each load has the vuse of the corresponding store,
4149 we've already checked the aliasing in try_coalesce_bswap and
4150 we want to sink the needed load into seq. So we need to use new_vuse
4151 on the load. */
30fa8e9c 4152 if (n->base_addr)
4b84d9b8 4153 {
30fa8e9c
JJ
4154 if (n->vuse == NULL)
4155 {
4156 n->vuse = new_vuse;
4157 ins_stmt = NULL;
4158 }
4159 else
4160 /* Update vuse in case it has changed by output_merged_stores. */
4161 n->vuse = gimple_vuse (ins_stmt);
4b84d9b8
JJ
4162 }
4163 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4164 bswap_type, load_type, n, bswap);
4165 gcc_assert (bswap_res);
4166 }
f663d9ad
KT
4167
4168 gimple *stmt = NULL;
245f6de1 4169 auto_vec<gimple *, 32> orig_stmts;
4b84d9b8
JJ
4170 gimple_seq this_seq;
4171 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
aa55dc0c 4172 is_gimple_mem_ref_addr, NULL_TREE);
4b84d9b8 4173 gimple_seq_add_seq_without_update (&seq, this_seq);
245f6de1
JJ
4174
4175 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4176 gimple_seq load_seq[2] = { NULL, NULL };
4177 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4178 for (int j = 0; j < 2; ++j)
4179 {
4180 store_operand_info &op = group->stores[0]->ops[j];
4181 if (op.base_addr == NULL_TREE)
4182 continue;
4183
4184 store_immediate_info *infol = group->stores.last ();
4185 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4186 {
97031af7
JJ
4187 /* We can't pick the location randomly; while we've verified
4188 all the loads have the same vuse, they can still be in different
4189 basic blocks and we need to pick the one from the last bb:
4190 int x = q[0];
4191 if (x == N) return;
4192 int y = q[1];
4193 p[0] = x;
4194 p[1] = y;
4195 otherwise if we put the wider load at the q[0] load, we might
4196 segfault if q[1] is not mapped. */
4197 basic_block bb = gimple_bb (op.stmt);
4198 gimple *ostmt = op.stmt;
4199 store_immediate_info *info;
4200 FOR_EACH_VEC_ELT (group->stores, i, info)
4201 {
4202 gimple *tstmt = info->ops[j].stmt;
4203 basic_block tbb = gimple_bb (tstmt);
4204 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4205 {
4206 ostmt = tstmt;
4207 bb = tbb;
4208 }
4209 }
4210 load_gsi[j] = gsi_for_stmt (ostmt);
245f6de1
JJ
4211 load_addr[j]
4212 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4213 &load_seq[j], is_gimple_mem_ref_addr,
4214 NULL_TREE);
4215 }
4216 else if (operand_equal_p (base_addr, op.base_addr, 0))
4217 load_addr[j] = addr;
4218 else
3e2927a1 4219 {
3e2927a1
JJ
4220 load_addr[j]
4221 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4222 &this_seq, is_gimple_mem_ref_addr,
4223 NULL_TREE);
4224 gimple_seq_add_seq_without_update (&seq, this_seq);
4225 }
245f6de1
JJ
4226 }
4227
f663d9ad
KT
4228 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4229 {
e362a897
EB
4230 const unsigned HOST_WIDE_INT try_size = split_store->size;
4231 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4232 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4233 const unsigned HOST_WIDE_INT try_align = split_store->align;
4234 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
a62b3dc5
JJ
4235 tree dest, src;
4236 location_t loc;
e362a897 4237
a62b3dc5
JJ
4238 if (split_store->orig)
4239 {
5384a802
JJ
4240 /* If there is just a single non-clobber constituent store
4241 which covers the whole area, just reuse the lhs and rhs. */
4242 gimple *orig_stmt = NULL;
4243 store_immediate_info *store;
4244 unsigned int j;
4245 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4246 if (!gimple_clobber_p (store->stmt))
4247 {
4248 orig_stmt = store->stmt;
4249 break;
4250 }
245f6de1
JJ
4251 dest = gimple_assign_lhs (orig_stmt);
4252 src = gimple_assign_rhs1 (orig_stmt);
4253 loc = gimple_location (orig_stmt);
a62b3dc5
JJ
4254 }
4255 else
4256 {
245f6de1
JJ
4257 store_immediate_info *info;
4258 unsigned short clique, base;
4259 unsigned int k;
4260 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4261 orig_stmts.safe_push (info->stmt);
a62b3dc5 4262 tree offset_type
245f6de1 4263 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
e362a897 4264 tree dest_type;
245f6de1
JJ
4265 loc = get_location_for_stmts (orig_stmts);
4266 orig_stmts.truncate (0);
a62b3dc5 4267
e362a897
EB
4268 if (group->string_concatenation)
4269 dest_type
4270 = build_array_type_nelts (char_type_node,
4271 try_size / BITS_PER_UNIT);
4272 else
4273 {
4274 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4275 dest_type = build_aligned_type (dest_type, try_align);
4276 }
4277 dest = fold_build2 (MEM_REF, dest_type, addr,
a62b3dc5 4278 build_int_cst (offset_type, try_pos));
245f6de1
JJ
4279 if (TREE_CODE (dest) == MEM_REF)
4280 {
4281 MR_DEPENDENCE_CLIQUE (dest) = clique;
4282 MR_DEPENDENCE_BASE (dest) = base;
4283 }
4284
c94c3532 4285 tree mask;
e362a897 4286 if (bswap_res || group->string_concatenation)
c94c3532
EB
4287 mask = integer_zero_node;
4288 else
e362a897
EB
4289 mask = native_interpret_expr (dest_type,
4290 group->mask + try_offset,
4b84d9b8 4291 group->buf_size);
245f6de1
JJ
4292
4293 tree ops[2];
4294 for (int j = 0;
4295 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4296 ++j)
4297 {
4298 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4b84d9b8
JJ
4299 if (bswap_res)
4300 ops[j] = bswap_res;
e362a897
EB
4301 else if (group->string_concatenation)
4302 {
4303 ops[j] = build_string (try_size / BITS_PER_UNIT,
4304 (const char *) group->val + try_offset);
4305 TREE_TYPE (ops[j]) = dest_type;
4306 }
4b84d9b8 4307 else if (op.base_addr)
245f6de1
JJ
4308 {
4309 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4310 orig_stmts.safe_push (info->ops[j].stmt);
4311
4312 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4313 &clique, &base);
4314 location_t load_loc = get_location_for_stmts (orig_stmts);
4315 orig_stmts.truncate (0);
4316
4317 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4318 unsigned HOST_WIDE_INT align_bitpos
c94c3532 4319 = known_alignment (try_bitpos
8a91d545
RS
4320 - split_store->orig_stores[0]->bitpos
4321 + op.bitpos);
4322 if (align_bitpos & (load_align - 1))
245f6de1
JJ
4323 load_align = least_bit_hwi (align_bitpos);
4324
4325 tree load_int_type
4326 = build_nonstandard_integer_type (try_size, UNSIGNED);
4327 load_int_type
4328 = build_aligned_type (load_int_type, load_align);
4329
8a91d545 4330 poly_uint64 load_pos
c94c3532 4331 = exact_div (try_bitpos
8a91d545
RS
4332 - split_store->orig_stores[0]->bitpos
4333 + op.bitpos,
4334 BITS_PER_UNIT);
245f6de1
JJ
4335 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4336 build_int_cst (offset_type, load_pos));
4337 if (TREE_CODE (ops[j]) == MEM_REF)
4338 {
4339 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4340 MR_DEPENDENCE_BASE (ops[j]) = base;
4341 }
4342 if (!integer_zerop (mask))
e9e2bad7
MS
4343 {
4344 /* The load might load some bits (that will be masked
4345 off later on) uninitialized, avoid -W*uninitialized
4346 warnings in that case. */
4347 suppress_warning (ops[j], OPT_Wuninitialized);
4348 }
245f6de1 4349
e362a897 4350 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
245f6de1
JJ
4351 gimple_set_location (stmt, load_loc);
4352 if (gsi_bb (load_gsi[j]))
4353 {
4354 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4355 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4356 }
4357 else
4358 {
4359 gimple_set_vuse (stmt, new_vuse);
4360 gimple_seq_add_stmt_without_update (&seq, stmt);
4361 }
4362 ops[j] = gimple_assign_lhs (stmt);
a6fbd154
JJ
4363 tree xor_mask;
4364 enum tree_code inv_op
e362a897 4365 = invert_op (split_store, j, dest_type, xor_mask);
a6fbd154 4366 if (inv_op != NOP_EXPR)
383ac8dc 4367 {
e362a897 4368 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4369 inv_op, ops[j], xor_mask);
383ac8dc
JJ
4370 gimple_set_location (stmt, load_loc);
4371 ops[j] = gimple_assign_lhs (stmt);
4372
4373 if (gsi_bb (load_gsi[j]))
4374 gimple_seq_add_stmt_without_update (&load_seq[j],
4375 stmt);
4376 else
4377 gimple_seq_add_stmt_without_update (&seq, stmt);
4378 }
245f6de1
JJ
4379 }
4380 else
e362a897
EB
4381 ops[j] = native_interpret_expr (dest_type,
4382 group->val + try_offset,
245f6de1
JJ
4383 group->buf_size);
4384 }
4385
4386 switch (split_store->orig_stores[0]->rhs_code)
4387 {
4388 case BIT_AND_EXPR:
4389 case BIT_IOR_EXPR:
4390 case BIT_XOR_EXPR:
4391 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4392 {
4393 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4394 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4395 }
4396 location_t bit_loc;
4397 bit_loc = get_location_for_stmts (orig_stmts);
4398 orig_stmts.truncate (0);
4399
4400 stmt
e362a897 4401 = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4402 split_store->orig_stores[0]->rhs_code,
4403 ops[0], ops[1]);
4404 gimple_set_location (stmt, bit_loc);
4405 /* If there is just one load and there is a separate
4406 load_seq[0], emit the bitwise op right after it. */
4407 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4408 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4409 /* Otherwise, if at least one load is in seq, we need to
4410 emit the bitwise op right before the store. If there
4411 are two loads and they are emitted somewhere else, it would
4412 be better to emit the bitwise op as early as possible;
4413 we don't track where that would be possible right now
4414 though. */
4415 else
4416 gimple_seq_add_stmt_without_update (&seq, stmt);
4417 src = gimple_assign_lhs (stmt);
a6fbd154
JJ
4418 tree xor_mask;
4419 enum tree_code inv_op;
e362a897 4420 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
a6fbd154 4421 if (inv_op != NOP_EXPR)
d60edaba 4422 {
e362a897 4423 stmt = gimple_build_assign (make_ssa_name (dest_type),
a6fbd154 4424 inv_op, src, xor_mask);
d60edaba
JJ
4425 gimple_set_location (stmt, bit_loc);
4426 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4427 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4428 else
4429 gimple_seq_add_stmt_without_update (&seq, stmt);
4430 src = gimple_assign_lhs (stmt);
4431 }
245f6de1 4432 break;
4b84d9b8
JJ
4433 case LROTATE_EXPR:
4434 case NOP_EXPR:
4435 src = ops[0];
4436 if (!is_gimple_val (src))
4437 {
4438 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4439 src);
4440 gimple_seq_add_stmt_without_update (&seq, stmt);
4441 src = gimple_assign_lhs (stmt);
4442 }
e362a897 4443 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4b84d9b8 4444 {
e362a897 4445 stmt = gimple_build_assign (make_ssa_name (dest_type),
4b84d9b8
JJ
4446 NOP_EXPR, src);
4447 gimple_seq_add_stmt_without_update (&seq, stmt);
4448 src = gimple_assign_lhs (stmt);
4449 }
e362a897 4450 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
be52ac73
JJ
4451 if (inv_op != NOP_EXPR)
4452 {
e362a897 4453 stmt = gimple_build_assign (make_ssa_name (dest_type),
be52ac73
JJ
4454 inv_op, src, xor_mask);
4455 gimple_set_location (stmt, loc);
4456 gimple_seq_add_stmt_without_update (&seq, stmt);
4457 src = gimple_assign_lhs (stmt);
4458 }
4b84d9b8 4459 break;
245f6de1
JJ
4460 default:
4461 src = ops[0];
4462 break;
4463 }
4464
c94c3532
EB
4465 /* If bit insertion is required, we use the source as an accumulator
4466 into which the successive bit-field values are manually inserted.
4467 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
4468 if (group->bit_insertion)
4469 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4470 if (info->rhs_code == BIT_INSERT_EXPR
4471 && info->bitpos < try_bitpos + try_size
4472 && info->bitpos + info->bitsize > try_bitpos)
4473 {
4474 /* Mask, truncate, convert to final type, shift and ior into
4475 the accumulator. Note that every step can be a no-op. */
4476 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4477 const HOST_WIDE_INT end_gap
4478 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4479 tree tem = info->ops[0].val;
ed01d707
EB
4480 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4481 {
4482 const unsigned HOST_WIDE_INT size
4483 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4484 tree integer_type
4485 = build_nonstandard_integer_type (size, UNSIGNED);
4486 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4487 integer_type, tem);
4488 }
c14add82
EB
4489 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4490 {
4491 tree bitfield_type
4492 = build_nonstandard_integer_type (info->bitsize,
4493 UNSIGNED);
4494 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4495 }
4496 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
c94c3532
EB
4497 {
4498 const unsigned HOST_WIDE_INT imask
4499 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4500 tem = gimple_build (&seq, loc,
4501 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4502 build_int_cst (TREE_TYPE (tem),
4503 imask));
4504 }
4505 const HOST_WIDE_INT shift
4506 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4507 if (shift < 0)
4508 tem = gimple_build (&seq, loc,
4509 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4510 build_int_cst (NULL_TREE, -shift));
e362a897 4511 tem = gimple_convert (&seq, loc, dest_type, tem);
c94c3532
EB
4512 if (shift > 0)
4513 tem = gimple_build (&seq, loc,
e362a897 4514 LSHIFT_EXPR, dest_type, tem,
c94c3532
EB
4515 build_int_cst (NULL_TREE, shift));
4516 src = gimple_build (&seq, loc,
e362a897 4517 BIT_IOR_EXPR, dest_type, tem, src);
c94c3532
EB
4518 }
4519
a62b3dc5
JJ
4520 if (!integer_zerop (mask))
4521 {
e362a897 4522 tree tem = make_ssa_name (dest_type);
a62b3dc5
JJ
4523 tree load_src = unshare_expr (dest);
4524 /* The load might load some or all bits uninitialized,
4525 avoid -W*uninitialized warnings in that case.
4526 As an optimization, if all the bits were provably
4527 uninitialized (no stores at all yet, or the previous
4528 store was a CLOBBER), we could optimize away the load and
4529 replace it e.g. with 0. */
e9e2bad7 4530 suppress_warning (load_src, OPT_Wuninitialized);
a62b3dc5
JJ
4531 stmt = gimple_build_assign (tem, load_src);
4532 gimple_set_location (stmt, loc);
4533 gimple_set_vuse (stmt, new_vuse);
4534 gimple_seq_add_stmt_without_update (&seq, stmt);
4535
4536 /* FIXME: If there is a single chunk of zero bits in mask,
4537 perhaps use BIT_INSERT_EXPR instead? */
e362a897 4538 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4539 BIT_AND_EXPR, tem, mask);
4540 gimple_set_location (stmt, loc);
4541 gimple_seq_add_stmt_without_update (&seq, stmt);
4542 tem = gimple_assign_lhs (stmt);
4543
245f6de1 4544 if (TREE_CODE (src) == INTEGER_CST)
e362a897 4545 src = wide_int_to_tree (dest_type,
245f6de1
JJ
4546 wi::bit_and_not (wi::to_wide (src),
4547 wi::to_wide (mask)));
4548 else
4549 {
4550 tree nmask
e362a897 4551 = wide_int_to_tree (dest_type,
245f6de1 4552 wi::bit_not (wi::to_wide (mask)));
e362a897 4553 stmt = gimple_build_assign (make_ssa_name (dest_type),
245f6de1
JJ
4554 BIT_AND_EXPR, src, nmask);
4555 gimple_set_location (stmt, loc);
4556 gimple_seq_add_stmt_without_update (&seq, stmt);
4557 src = gimple_assign_lhs (stmt);
4558 }
e362a897 4559 stmt = gimple_build_assign (make_ssa_name (dest_type),
a62b3dc5
JJ
4560 BIT_IOR_EXPR, tem, src);
4561 gimple_set_location (stmt, loc);
4562 gimple_seq_add_stmt_without_update (&seq, stmt);
4563 src = gimple_assign_lhs (stmt);
4564 }
4565 }
f663d9ad
KT
4566
4567 stmt = gimple_build_assign (dest, src);
4568 gimple_set_location (stmt, loc);
4569 gimple_set_vuse (stmt, new_vuse);
4570 gimple_seq_add_stmt_without_update (&seq, stmt);
4571
629387a6
EB
4572 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4573 add_stmt_to_eh_lp (stmt, group->lp_nr);
4574
f663d9ad
KT
4575 tree new_vdef;
4576 if (i < split_stores.length () - 1)
a62b3dc5 4577 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
f663d9ad
KT
4578 else
4579 new_vdef = last_vdef;
4580
4581 gimple_set_vdef (stmt, new_vdef);
4582 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4583 new_vuse = new_vdef;
4584 }
4585
4586 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4587 delete split_store;
4588
f663d9ad
KT
4589 gcc_assert (seq);
4590 if (dump_file)
4591 {
4592 fprintf (dump_file,
c94c3532 4593 "New sequence of %u stores to replace old one of %u stores\n",
a62b3dc5 4594 split_stores.length (), orig_num_stmts);
f663d9ad
KT
4595 if (dump_flags & TDF_DETAILS)
4596 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4597 }
629387a6 4598
5384a802
JJ
4599 if (gimple_clobber_p (group->last_stmt))
4600 update_stmt (group->last_stmt);
4601
629387a6
EB
4602 if (group->lp_nr > 0)
4603 {
4604 /* We're going to insert a sequence of (potentially) throwing stores
4605 into an active EH region. This means that we're going to create
4606 new basic blocks with EH edges pointing to the post landing pad
4607 and, therefore, to have to update its PHI nodes, if any. For the
4608 virtual PHI node, we're going to use the VDEFs created above, but
4609 for the other nodes, we need to record the original reaching defs. */
4610 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4611 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4612 basic_block last_bb = gimple_bb (group->last_stmt);
4613 edge last_edge = find_edge (last_bb, lp_bb);
4614 auto_vec<tree, 16> last_defs;
4615 gphi_iterator gpi;
4616 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4617 {
4618 gphi *phi = gpi.phi ();
4619 tree last_def;
4620 if (virtual_operand_p (gimple_phi_result (phi)))
4621 last_def = NULL_TREE;
4622 else
4623 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4624 last_defs.safe_push (last_def);
4625 }
4626
4627 /* Do the insertion. Then, if new basic blocks have been created in the
4628 process, rewind the chain of VDEFs created above to walk the new basic
4629 blocks and update the corresponding arguments of the PHI nodes. */
4630 update_modified_stmts (seq);
4631 if (gimple_find_sub_bbs (seq, &last_gsi))
4632 while (last_vdef != gimple_vuse (group->last_stmt))
4633 {
4634 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4635 if (stmt_could_throw_p (cfun, stmt))
4636 {
4637 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4638 unsigned int i;
4639 for (gpi = gsi_start_phis (lp_bb), i = 0;
4640 !gsi_end_p (gpi);
4641 gsi_next (&gpi), i++)
4642 {
4643 gphi *phi = gpi.phi ();
4644 tree new_def;
4645 if (virtual_operand_p (gimple_phi_result (phi)))
4646 new_def = last_vdef;
4647 else
4648 new_def = last_defs[i];
4649 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4650 }
4651 }
4652 last_vdef = gimple_vuse (stmt);
4653 }
4654 }
4655 else
4656 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4657
245f6de1
JJ
4658 for (int j = 0; j < 2; ++j)
4659 if (load_seq[j])
4660 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
f663d9ad
KT
4661
4662 return true;
4663}
4664
4665/* Process the merged_store_group objects created in the coalescing phase.
4666 The stores are all against the base object BASE.
4667 Try to output the widened stores and delete the original statements if
4668 successful. Return true iff any changes were made. */
4669
4670bool
b5926e23 4671imm_store_chain_info::output_merged_stores ()
f663d9ad
KT
4672{
4673 unsigned int i;
4674 merged_store_group *merged_store;
4675 bool ret = false;
4676 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4677 {
a95b474a
ML
4678 if (dbg_cnt (store_merging)
4679 && output_merged_store (merged_store))
f663d9ad
KT
4680 {
4681 unsigned int j;
4682 store_immediate_info *store;
4683 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4684 {
4685 gimple *stmt = store->stmt;
4686 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5384a802
JJ
4687 /* Don't remove clobbers; they are still useful even if
4688 everything is overwritten afterwards. */
4689 if (gimple_clobber_p (stmt))
4690 continue;
f663d9ad 4691 gsi_remove (&gsi, true);
629387a6
EB
4692 if (store->lp_nr)
4693 remove_stmt_from_eh_lp (stmt);
f663d9ad
KT
4694 if (stmt != merged_store->last_stmt)
4695 {
4696 unlink_stmt_vdef (stmt);
4697 release_defs (stmt);
4698 }
4699 }
4700 ret = true;
4701 }
4702 }
4703 if (ret && dump_file)
4704 fprintf (dump_file, "Merging successful!\n");
4705
4706 return ret;
4707}
4708
4709/* Coalesce the store_immediate_info objects recorded against the base object
4710 BASE in the first phase and output them.
4711 Delete the allocated structures.
4712 Return true if any changes were made. */
4713
4714bool
b5926e23 4715imm_store_chain_info::terminate_and_process_chain ()
f663d9ad 4716{
95d94b52
RB
4717 if (dump_file && (dump_flags & TDF_DETAILS))
4718 fprintf (dump_file, "Terminating chain with %u stores\n",
4719 m_store_info.length ());
f663d9ad
KT
4720 /* Process store chain. */
4721 bool ret = false;
4722 if (m_store_info.length () > 1)
4723 {
4724 ret = coalesce_immediate_stores ();
4725 if (ret)
b5926e23 4726 ret = output_merged_stores ();
f663d9ad
KT
4727 }
4728
4729 /* Delete all the entries we allocated ourselves. */
4730 store_immediate_info *info;
4731 unsigned int i;
4732 FOR_EACH_VEC_ELT (m_store_info, i, info)
4733 delete info;
4734
4735 merged_store_group *merged_info;
4736 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4737 delete merged_info;
4738
4739 return ret;
4740}
4741
4742/* Return true iff LHS is a destination potentially interesting for
4743 store merging. In practice these are the codes that get_inner_reference
4744 can process. */
4745
4746static bool
4747lhs_valid_for_store_merging_p (tree lhs)
4748{
629387a6 4749 if (DECL_P (lhs))
f663d9ad
KT
4750 return true;
4751
629387a6
EB
4752 switch (TREE_CODE (lhs))
4753 {
4754 case ARRAY_REF:
4755 case ARRAY_RANGE_REF:
4756 case BIT_FIELD_REF:
4757 case COMPONENT_REF:
4758 case MEM_REF:
e362a897 4759 case VIEW_CONVERT_EXPR:
629387a6
EB
4760 return true;
4761 default:
4762 return false;
4763 }
4764
4765 gcc_unreachable ();
f663d9ad
KT
4766}
4767
4768/* Return true if the tree RHS is a constant we want to consider
4769 during store merging. In practice accept all codes that
4770 native_encode_expr accepts. */
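/* For instance (a sketch): integer literals and other fixed-size
   constants whose byte image native_encode_expr can produce are
   accepted, and so is an empty CONSTRUCTOR such as the RHS of
   x = (struct S){}; an RHS whose mode size is not a compile-time
   constant is rejected. */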
4771
4772static bool
4773rhs_valid_for_store_merging_p (tree rhs)
4774{
cf098191 4775 unsigned HOST_WIDE_INT size;
3afd514b 4776 if (TREE_CODE (rhs) == CONSTRUCTOR
3afd514b
JJ
4777 && CONSTRUCTOR_NELTS (rhs) == 0
4778 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4779 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4780 return true;
cf098191
RS
4781 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4782 && native_encode_expr (rhs, NULL, size) != 0);
f663d9ad
KT
4783}
4784
629387a6
EB
4785/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4786 and return true on success or false on failure. */
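/* E.g. (sketch): a BYTE_OFF of 2 adds 16 bits to *PBITPOS and, when a
   bit region is being tracked, shifts *PBITREGION_START and
   *PBITREGION_END by the same amount; if the adjusted region bounds do
   not fit, the region is simply dropped by zeroing *PBITREGION_END,
   while an adjusted bit position that does not fit makes the function
   return false. */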
4787
4788static bool
4789adjust_bit_pos (poly_offset_int byte_off,
4790 poly_int64 *pbitpos,
4791 poly_uint64 *pbitregion_start,
4792 poly_uint64 *pbitregion_end)
4793{
4794 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4795 bit_off += *pbitpos;
4796
4797 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4798 {
4799 if (maybe_ne (*pbitregion_end, 0U))
4800 {
4801 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4802 bit_off += *pbitregion_start;
4803 if (bit_off.to_uhwi (pbitregion_start))
4804 {
4805 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4806 bit_off += *pbitregion_end;
4807 if (!bit_off.to_uhwi (pbitregion_end))
4808 *pbitregion_end = 0;
4809 }
4810 else
4811 *pbitregion_end = 0;
4812 }
4813 return true;
4814 }
4815 else
4816 return false;
4817}
4818
245f6de1
JJ
4819/* If MEM is a memory reference usable for store merging (either as
4820 store destination or for loads), return the non-NULL base_addr
4821 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4822 Otherwise return NULL; *PBITSIZE should still be valid even for that
4823 case. */
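/* An illustrative case (hypothetical): for a 32-bit store to
   MEM_REF [p + 4] this returns p as the base address with *PBITPOS 32
   and the byte-aligned bit region [32, 64), whereas a TARGET_MEM_REF
   destination or a reverse storage order access makes it return
   NULL_TREE. */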
4824
static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
                             poly_uint64 *pbitpos,
                             poly_uint64 *pbitregion_start,
                             poly_uint64 *pbitregion_end)
{
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
                                        &unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))
    return NULL_TREE;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))
        bitregion_end += 1;
    }

  if (reversep)
    return NULL_TREE;

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
                           &bitregion_start, &bitregion_end))
        return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returns the base object, get at its
     address now.  */
  else
    {
      if (maybe_lt (bitpos, 0))
        return NULL_TREE;
      base_addr = build_fold_addr_expr (base_addr);
    }

  if (offset)
    {
      /* If the access is variable offset then a base decl has to be
         address-taken to be able to emit pointer-based stores to it.
         ??? We might be able to get away with re-using the original
         base up to the first variable part and then wrapping that inside
         a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
        return NULL_TREE;

      /* Similarly to above for the base, remove constant from the offset.  */
      if (TREE_CODE (offset) == PLUS_EXPR
          && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
          && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
                             &bitpos, &bitregion_start, &bitregion_end))
        offset = TREE_OPERAND (offset, 0);

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
                          base_addr, offset);
    }

  if (known_eq (bitregion_end, 0U))
    {
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}
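
/* Hypothetical example for illustration (not from the original sources):
   for a bit-field store "s.f = x" where field f occupies bits 3..10 of a
   4-byte struct s, get_inner_reference yields bitsize 8 and bitpos 3, and
   get_bit_range typically widens the surrounding bit region to the
   byte-aligned [0, 32).  The returned base address is &s, so that several
   such stores to s can later be chained together.  */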

/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
              poly_uint64 bitsize, poly_uint64 bitpos,
              poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
          && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
                           bitregion_start, bitregion_end))
        {
          /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
             been optimized earlier, but if allowed here, would confuse the
             multiple uses counting.  */
          if (op->bit_not_p)
            return false;
          op->bit_not_p = !op->bit_not_p;
          return true;
        }
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
        = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
                                       &op->bitregion_start,
                                       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
          && known_eq (op->bitsize, bitsize)
          && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
          && known_ge (op->bitpos - op->bitregion_start,
                       bitpos - bitregion_start)
          && known_ge (op->bitregion_end - op->bitpos,
                       bitregion_end - bitpos))
        {
          op->stmt = stmt;
          op->val = mem;
          op->bit_not_p = false;
          return true;
        }
    }
  return false;
}
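
/* Hypothetical GIMPLE for illustration (not from the original sources):
   for the store "*p = _1" with "_1 = *q", handled_load fills in OP with
   the base address of *q provided the load has the same bit size as the
   store, sits at a byte-aligned bit distance from it, and its bit region
   covers at least as much as the store's; a single intervening
   BIT_NOT_EXPR is folded into op->bit_not_p rather than treated as a
   separate operand.  */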

/* Return the index number of the landing pad for STMT, if any.  */

static int
lp_nr_for_store (gimple *stmt)
{
  if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
    return 0;

  if (!stmt_could_throw_p (cfun, stmt))
    return 0;

  return lookup_stmt_eh_lp (stmt);
}
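
/* For example, when compiling with -fnon-call-exceptions a trapping store
   inside a try/catch region is assigned a nonzero landing-pad number here,
   while a store that cannot throw (or a function without EH data) yields
   0.  */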

/* Record the store STMT for store merging optimization if it can be
   optimized.  Return true if any changes were made.  */

bool
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos = 0;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
                                   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))
    return false;

  bool invalid = (base_addr == NULL_TREE
                  || (maybe_gt (bitsize,
                                (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
                      && TREE_CODE (rhs) != INTEGER_CST
                      && (TREE_CODE (rhs) != CONSTRUCTOR
                          || CONSTRUCTOR_NELTS (rhs) != 0)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  if (invalid)
    ;
  else if (TREE_CODE (rhs) == STRING_CST)
    {
      rhs_code = STRING_CST;
      ops[0].val = rhs;
    }
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
        invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
                             bitregion_start, bitregion_end))
        rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
        {
          tree rhs1 = gimple_assign_rhs1 (def_stmt);
          if (TREE_CODE (rhs1) == SSA_NAME
              && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
            {
              bit_not_p = true;
              def_stmt = SSA_NAME_DEF_STMT (rhs1);
            }
        }

      if (rhs_code == ERROR_MARK && !invalid)
        switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
          {
          case BIT_AND_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            tree rhs1, rhs2;
            rhs1 = gimple_assign_rhs1 (def_stmt);
            rhs2 = gimple_assign_rhs2 (def_stmt);
            invalid = true;
            if (TREE_CODE (rhs1) != SSA_NAME)
              break;
            def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
            if (!is_gimple_assign (def_stmt1)
                || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
                                  bitregion_start, bitregion_end))
              break;
            if (rhs_valid_for_store_merging_p (rhs2))
              ops[1].val = rhs2;
            else if (TREE_CODE (rhs2) != SSA_NAME)
              break;
            else
              {
                def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
                if (!is_gimple_assign (def_stmt2))
                  break;
                else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
                                        bitregion_start, bitregion_end))
                  break;
              }
            invalid = false;
            break;
          default:
            invalid = true;
            break;
          }

      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
          && (const_bitsize % BITS_PER_UNIT) == 0
          && const_bitsize <= 64
          && multiple_p (bitpos, BITS_PER_UNIT))
        {
          ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
          if (ins_stmt)
            {
              uint64_t nn = n.n;
              for (unsigned HOST_WIDE_INT i = 0;
                   i < const_bitsize;
                   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
                if ((nn & MARKER_MASK) == 0
                    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
                  {
                    ins_stmt = NULL;
                    break;
                  }
              if (ins_stmt)
                {
                  if (invalid)
                    {
                      rhs_code = LROTATE_EXPR;
                      ops[0].base_addr = NULL_TREE;
                      ops[1].base_addr = NULL_TREE;
                    }
                  invalid = false;
                }
            }
        }

      if (invalid
          && bitsize.is_constant (&const_bitsize)
          && ((const_bitsize % BITS_PER_UNIT) != 0
              || !multiple_p (bitpos, BITS_PER_UNIT))
          && const_bitsize <= MAX_FIXED_MODE_SIZE)
        {
          /* Bypass a conversion to the bit-field type.  */
          if (!bit_not_p
              && is_gimple_assign (def_stmt)
              && CONVERT_EXPR_CODE_P (rhs_code))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              if (TREE_CODE (rhs1) == SSA_NAME
                  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
                rhs = rhs1;
            }
          rhs_code = BIT_INSERT_EXPR;
          bit_not_p = false;
          ops[0].val = rhs;
          ops[0].base_addr = NULL_TREE;
          ops[1].base_addr = NULL_TREE;
          invalid = false;
        }
    }
  else
    invalid = true;

  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    return terminate_all_aliasing_chains (NULL, stmt);

  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  class imm_store_chain_info **chain_info = NULL;
  bool ret = false;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
                                       const_bitregion_start,
                                       const_bitregion_end,
                                       stmt, ord, rhs_code, n, ins_stmt,
                                       bit_not_p, lp_nr_for_store (stmt),
                                       ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Recording immediate store from stmt:\n");
          print_gimple_stmt (dump_file, stmt, 0);
        }
      (*chain_info)->m_store_info.safe_push (info);
      m_n_stores++;
      ret |= terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
         process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
          == (unsigned int) param_max_stores_to_merge)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Reached maximum number of statements to merge:\n");
          ret |= terminate_and_process_chain (*chain_info);
        }
    }
  else
    {
      /* Store aliases any existing chain?  */
      ret |= terminate_all_aliasing_chains (NULL, stmt);

      /* Start a new chain.  */
      class imm_store_chain_info *new_chain
        = new imm_store_chain_info (m_stores_head, base_addr);
      info = new store_immediate_info (const_bitsize, const_bitpos,
                                       const_bitregion_start,
                                       const_bitregion_end,
                                       stmt, 0, rhs_code, n, ins_stmt,
                                       bit_not_p, lp_nr_for_store (stmt),
                                       ops[0], ops[1]);
      new_chain->m_store_info.safe_push (info);
      m_n_stores++;
      m_stores.put (base_addr, new_chain);
      m_n_chains++;
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Starting active chain number %u with statement:\n",
                   m_n_chains);
          print_gimple_stmt (dump_file, stmt, 0);
          fprintf (dump_file, "The base object is:\n");
          print_generic_expr (dump_file, base_addr);
          fprintf (dump_file, "\n");
        }
    }

  /* Prune oldest chains so that after adding the chain or store above
     we're again within the limits set by the params.  */
  if (m_n_chains > (unsigned)param_max_store_chains_to_track
      || m_n_stores > (unsigned)param_max_stores_to_track)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Too many chains (%u > %d) or stores (%u > %d), "
                 "terminating oldest chain(s).\n", m_n_chains,
                 param_max_store_chains_to_track, m_n_stores,
                 param_max_stores_to_track);
      imm_store_chain_info **e = &m_stores_head;
      unsigned idx = 0;
      unsigned n_stores = 0;
      while (*e)
        {
          if (idx >= (unsigned)param_max_store_chains_to_track
              || (n_stores + (*e)->m_store_info.length ()
                  > (unsigned)param_max_stores_to_track))
            ret |= terminate_and_process_chain (*e);
          else
            {
              n_stores += (*e)->m_store_info.length ();
              e = &(*e)->next;
              ++idx;
            }
        }
    }

  return ret;
}
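
/* Illustrative summary of the classification above (not from the original
   sources): a store "*p = 5" is recorded with rhs_code INTEGER_CST, a
   string initializer with STRING_CST, a store "*p = *q" (through an SSA
   temporary) with MEM_REF, "*p = *q ^ 3" with BIT_XOR_EXPR and two
   operands, a store that is not byte-sized or byte-aligned with
   BIT_INSERT_EXPR, and a value find_bswap_or_nop_1 recognizes as a
   byte-swapped or rotated quantity with LROTATE_EXPR.  Any store that
   cannot be classified simply terminates the chains it may alias.  */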

/* Return true if STMT is a store valid for store merging.  */

static bool
store_valid_for_store_merging_p (gimple *stmt)
{
  return gimple_assign_single_p (stmt)
         && gimple_vdef (stmt)
         && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
         && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
}
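
/* For illustration: a plain assignment such as "x.a = 1" with a virtual
   definition qualifies, whereas a volatile store or a statement without a
   VDEF (e.g. a register-only assignment) does not; clobbers are allowed
   even though they carry volatile operands.  */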

enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };

/* Return the status of basic block BB wrt store merging.  */

static enum basic_block_status
get_status_for_store_merging (basic_block bb)
{
  unsigned int num_statements = 0;
  unsigned int num_constructors = 0;
  gimple_stmt_iterator gsi;
  edge e;

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
        continue;

      if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
        break;

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          if (VECTOR_TYPE_P (TREE_TYPE (rhs))
              && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
              && gimple_assign_lhs (stmt) != NULL_TREE)
            {
              HOST_WIDE_INT sz
                = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
              if (sz == 16 || sz == 32 || sz == 64)
                {
                  num_constructors = 1;
                  break;
                }
            }
        }
    }

  if (num_statements == 0 && num_constructors == 0)
    return BB_INVALID;

  if (cfun->can_throw_non_call_exceptions && cfun->eh
      && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
      && (e = find_fallthru_edge (bb->succs))
      && e->dest == bb->next_bb)
    return BB_EXTENDED_VALID;

  return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
}
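
/* For illustration: a block containing two or more mergeable stores, or a
   suitable 16/32/64-bit vector CONSTRUCTOR, is BB_VALID; under
   -fnon-call-exceptions, a block whose last statement is a mergeable but
   potentially-throwing store with a fallthru edge to the immediately
   following block is BB_EXTENDED_VALID, so an open chain may be continued
   there; otherwise a block with fewer than two mergeable stores is
   BB_INVALID.  */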

/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;
  bool changed = false, open_chains = false;

  /* If the function can throw and catch non-call exceptions, we'll be trying
     to merge stores across different basic blocks so we need to first unsplit
     the EH edges in order to streamline the CFG of the function.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh)
    unsplit_eh_edges ();

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      const basic_block_status bb_status = get_status_for_store_merging (bb);
      gimple_stmt_iterator gsi;

      if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
        {
          changed |= terminate_and_process_all_chains ();
          open_chains = false;
        }

      if (bb_status == BB_INVALID)
        continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple *stmt = gsi_stmt (gsi);
          gsi_next (&gsi);

          if (is_gimple_debug (stmt))
            continue;

          if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
            {
              /* Terminate all chains.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Volatile access terminates "
                         "all chains\n");
              changed |= terminate_and_process_all_chains ();
              open_chains = false;
              continue;
            }

          if (is_gimple_assign (stmt)
              && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
              && maybe_optimize_vector_constructor (stmt))
            continue;

          if (store_valid_for_store_merging_p (stmt))
            changed |= process_store (stmt);
          else
            changed |= terminate_all_aliasing_chains (NULL, stmt);
        }

      if (bb_status == BB_EXTENDED_VALID)
        open_chains = true;
      else
        {
          changed |= terminate_and_process_all_chains ();
          open_chains = false;
        }
    }

  if (open_chains)
    changed |= terminate_and_process_all_chains ();

  /* If the function can throw and catch non-call exceptions and something
     changed during the pass, then the CFG has (very likely) changed too.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      return TODO_cleanup_cfg;
    }

  return 0;
}

} // anon namespace

/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
        {
          fprintf (stderr, "Arrays do not match. X:\n");
          dump_char_array (stderr, x, n);
          fprintf (stderr, "Y:\n");
          dump_char_array (stderr, y, n);
        }
      ASSERT_EQ (x[i], y[i]);
    }
}

/* Test shift_bytes_in_array_left and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_left (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array_left (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}

/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
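
/* Worked carry arithmetic for the test above (illustrative): shifting
   { 0x1f, 0xe0 } right by 2 bits shifts each byte right and carries the
   two low bits of in[0] (0b11) into the high bits of in[1]:
   in[0] = 0x1f >> 2 = 0x07, in[1] = (0xe0 >> 2) | 0xc0 = 0xf8,
   matching the expected { 0x07, 0xf8 }.  */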

/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}

/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}

/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array_left ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */