/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory or bitwise operations on
   those to consecutive memory locations into fewer wider stores.
   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness.

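   As a concrete C-level illustration (assuming a little-endian target for
   the byte order shown), four stores of the constants 1, 2, 3 and 4 to
   adjacent unsigned char fields can be implemented by one 32-bit store
   of the immediate 0x04030201.
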
   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   which, if there is no overlap, can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   which, if there is no overlap, can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block recording assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from expressions we can handle.  For bit-fields we also record
   the surrounding bit region, that is the bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be an immediate constant
   or a memory load, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in a store_operand_info struct.

   2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores we don't require the
   stores themselves to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
   objects are not adjacent, a new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   this phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS rules.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little-endian we emit:
   [p]      (32-bit) := 0x56781234; // val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "params.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "stor-layout.h"
#include "timevar.h"
#include "tree-cfg.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ORed together to represent the
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

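/* As a worked illustration of the marker encoding (little-endian target
   assumed): loading the char array element a[0] yields N == 0x01 with a
   range of 1, and the expression a[0] | (a[1] << 8) merges into
   N == 0x0201 with a range of 2, i.e. the identity permutation for a
   16-bit load (see CMPNOP below).  */
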
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)

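/* For example, for a 2-byte symbolic number the relevant comparison
   values are CMPNOP masked down to its low two markers, 0x0201, and
   CMPXCHG shifted down to its low two markers, 0x0102 (see
   find_bswap_or_nop_finalize below), matching a plain 16-bit load and a
   16-bit byte swap respectively.  */
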
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}

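/* For instance, shifting the 4-byte identity number 0x04030201 left by
   8 bits gives 0x03020100 after masking to 4 markers: the low byte of
   the result is known to be zero, just as in x << 8.  */
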
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}

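/* To make the marker reinterpretation above concrete: merging the loads
   in a[0] | (a[1] << 8) on a little-endian target gives N1 == 0x01
   (bytepos 0) and N2 == 0x0100 (bytepos 1); N2's marker is incremented
   to 0x0200 and the OR yields 0x0201, the 2-byte identity.  On a
   big-endian target the same expression yields 0x0102, i.e. a byte
   swap.  */
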
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand, and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    return NULL;

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}

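/* As an illustration of the kind of source pattern recognized here
   (little-endian target and unsigned 32-bit x assumed), a hand-written
   byte swap such as

     (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24)

   matches CMPXCHG and can be rewritten as a __builtin_bswap32 call,
   while reassembling four consecutive bytes in memory order matches
   CMPNOP and becomes a plain 32-bit load.  */
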
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE, which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the loads of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  g = gimple_build_assign (tgt, NOP_EXPR, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, 0);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16-bit bswap is a rotate expression.  Only 16-bit
     values are handled this way, as rotation of 2N-bit values by N bits
     is generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that
     value is 0x04030201.  */
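  /* (For the 16-bit case itself the rotate is the swap: 0x1234 rotated
     by 8 bits gives 0x3412.)  */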
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, 0);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or a
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Doing the gsi_prev
	     here makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

namespace {

/* Struct recording one operand for the store, which is either a constant,
   in which case VAL represents the constant and all the other fields are
   zero, or a memory load, in which case VAL represents the reference,
   BASE_ADDR is non-NULL and the other fields also reflect the memory
   load.  */

struct store_operand_info
{
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}

/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

struct store_immediate_info
{
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy or
     BIT_*_EXPR for logical bitwise operation.
     LROTATE_EXPR if it can only be bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			gimple *, unsigned int, enum tree_code,
			struct symbolic_number &, gimple *, bool,
			const store_operand_info &,
			const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
					    unsigned HOST_WIDE_INT bp,
					    unsigned HOST_WIDE_INT brs,
					    unsigned HOST_WIDE_INT bre,
					    gimple *st,
					    unsigned int ord,
					    enum tree_code rhscode,
					    struct symbolic_number &nr,
					    gimple *ins_stmtp,
					    bool bitnotp,
					    const store_operand_info &op0r,
					    const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
#if __cplusplus >= 201103L
    , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif

/* Struct representing a group of stores to contiguous memory locations.
   These are produced by the second phase (coalescing) and consumed in the
   third phase that outputs the widened stores.  */

struct merged_store_group
{
  unsigned HOST_WIDE_INT start;
  unsigned HOST_WIDE_INT width;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  /* The size of the allocated memory for val and mask.  */
  unsigned HOST_WIDE_INT buf_size;
  unsigned HOST_WIDE_INT align_base;
  poly_uint64 load_align_base[2];

  unsigned int align;
  unsigned int load_align[2];
  unsigned int first_order;
  unsigned int last_order;

  auto_vec<store_immediate_info *> stores;
  /* We record the first and last original statements in the sequence because
     we'll need their vuse/vdef and replacement position.  It's easier to keep
     track of them separately as 'stores' is reordered by apply_stores.  */
  gimple *last_stmt;
  gimple *first_stmt;
  unsigned char *val;
  unsigned char *mask;

  merged_store_group (store_immediate_info *);
  ~merged_store_group ();
  void merge_into (store_immediate_info *);
  void merge_overlapping (store_immediate_info *);
  bool apply_stores ();
private:
  void do_merge (store_immediate_info *);
};

/* Debug helper.  Dump LEN elements of byte array PTR to FD in hex.  */

static void
dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
{
  if (!fd)
    return;

  for (unsigned int i = 0; i < len; i++)
    fprintf (fd, "%x ", ptr[i]);
  fprintf (fd, "\n");
}

/* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
   00011111|11100000 << 2 = 01111111|10000000
    PTR[1]  | PTR[0]         PTR[1]  | PTR[0].  */

static void
shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
  unsigned char clear_mask = (~0U) << amnt;

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);

      ptr[i] <<= amnt;
      if (i != 0)
	{
	  ptr[i] &= clear_mask;
	  ptr[i] |= prev_carry_over;
	}
    }
}

/* Like shift_bytes_in_array but for big-endian.
   Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
   00011111|11100000 >> 2 = 00000111|11111000
    PTR[0]  | PTR[1]         PTR[0]  | PTR[1].  */

static void
shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
			    unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = ~(~0U << amnt);

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      carry_over = ptr[i] & carry_mask;

      carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
      ptr[i] >>= amnt;
      ptr[i] |= prev_carry_over;
    }
}

/* Clear out LEN bits starting from bit START in the byte array
   PTR.  This clears the bits to the *right* from START.
   START must be within [0, BITS_PER_UNIT) and counts starting from
   the least significant bit.  */

static void
clear_bit_region_be (unsigned char *ptr, unsigned int start,
		     unsigned int len)
{
  if (len == 0)
    return;
  /* Clear len bits to the right of start.  */
  else if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len));
      mask = mask << (start + 1U - len);
      ptr[0] &= ~mask;
    }
  else if (start != BITS_PER_UNIT - 1)
    {
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
			   len - (start % BITS_PER_UNIT) - 1);
    }
  else if (start == BITS_PER_UNIT - 1
	   && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
			     len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}

/* In the byte array PTR clear the bit region starting at bit
   START that is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */

static void
clear_bit_region (unsigned char *ptr, unsigned int start,
		  unsigned int len)
{
  /* Degenerate base case.  */
  if (len == 0)
    return;
  else if (start >= BITS_PER_UNIT)
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
  /* Second base case.  */
  else if ((start + len) <= BITS_PER_UNIT)
    {
      unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
      mask >>= BITS_PER_UNIT - (start + len);

      ptr[0] &= ~mask;

      return;
    }
  /* Clear most significant bits in a byte and proceed with the next byte.  */
  else if (start != 0)
    {
      clear_bit_region (ptr, start, BITS_PER_UNIT - start);
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
    }
  /* Whole bytes need to be cleared.  */
  else if (start == 0 && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
      memset (ptr, '\0', nbytes);
      /* Clear the remaining sub-byte region if there is one.  */
      if (len % BITS_PER_UNIT != 0)
	clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}

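/* For example, with a two-byte buffer { 0xff, 0xff },
   clear_bit_region (ptr, 4, 8) clears bits 4..11 counting from the
   least significant bit of ptr[0], leaving { 0x0f, 0xf0 }.  */
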
1593/* Write BITLEN bits of EXPR to the byte array PTR at
1594 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1595 Return true if the operation succeeded. */
1596
1597static bool
1598encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
b1c71535 1599 unsigned int total_bytes)
3d3e04ac 1600{
1601 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1602 tree tmp_int = expr;
a425d9af 1603 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1604 || (bitpos % BITS_PER_UNIT)
517be012 1605 || !int_mode_for_size (bitlen, 0).exists ());
3d3e04ac 1606
1607 if (!sub_byte_op_p)
63eabc9b 1608 return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0;
3d3e04ac 1609
1610 /* LITTLE-ENDIAN
1611 We are writing a non byte-sized quantity or at a position that is not
1612 at a byte boundary.
1613 |--------|--------|--------| ptr + first_byte
1614 ^ ^
1615 xxx xxxxxxxx xxx< bp>
1616 |______EXPR____|
1617
b1c71535 1618 First native_encode_expr EXPR into a temporary buffer and shift each
3d3e04ac 1619 byte in the buffer by 'bp' (carrying the bits over as necessary).
1620 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1621 <------bitlen---->< bp>
1622 Then we clear the destination bits:
1623 |---00000|00000000|000-----| ptr + first_byte
1624 <-------bitlen--->< bp>
1625
1626 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1627 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1628
1629 BIG-ENDIAN
1630 We are writing a non byte-sized quantity or at a position that is not
1631 at a byte boundary.
1632 ptr + first_byte |--------|--------|--------|
1633 ^ ^
1634 <bp >xxx xxxxxxxx xxx
1635 |_____EXPR_____|
1636
b1c71535 1637 First native_encode_expr EXPR into a temporary buffer and shift each
3d3e04ac 1638 byte in the buffer to the right by (carrying the bits over as necessary).
1639 We shift by as much as needed to align the most significant bit of EXPR
1640 with bitpos:
1641 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1642 <---bitlen----> <bp ><-----bitlen----->
1643 Then we clear the destination bits:
1644 ptr + first_byte |-----000||00000000||00000---|
1645 <bp ><-------bitlen----->
1646
1647 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1648 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1649 The awkwardness comes from the fact that bitpos is counted from the
1650 most significant bit of a byte. */
1651
d2401312 1652 /* We must be dealing with fixed-size data at this point, since the
1653 total size is also fixed. */
1654 fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
3d3e04ac 1655 /* Allocate an extra byte so that we have space to shift into. */
d2401312 1656 unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
3d3e04ac 1657 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
b1c71535 1658 memset (tmpbuf, '\0', byte_size);
3d3e04ac 1659 /* The store detection code should only have allowed constants that are
1660 accepted by native_encode_expr. */
63eabc9b 1661 if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
3d3e04ac 1662 gcc_unreachable ();
1663
1664 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1665 bytes to write. This means it can write more than
1666 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1667 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1668 bitlen and zero out the bits that are not relevant as well (that may
1669 contain a sign bit due to sign-extension). */
1670 unsigned int padding
1671 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
a425d9af 1672 /* On big-endian the padding is at the 'front' so just skip the initial
1673 bytes. */
1674 if (BYTES_BIG_ENDIAN)
1675 tmpbuf += padding;
1676
1677 byte_size -= padding;
1678
1679 if (bitlen % BITS_PER_UNIT != 0)
3d3e04ac 1680 {
5e922e43 1681 if (BYTES_BIG_ENDIAN)
a425d9af 1682 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1683 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1684 else
1685 clear_bit_region (tmpbuf, bitlen,
1686 byte_size * BITS_PER_UNIT - bitlen);
3d3e04ac 1687 }
a425d9af 1688	  /* Left shifting relies on the last byte being zero if bitlen is
 1689		     a multiple of BITS_PER_UNIT, which might not hold if there are
 1690		     padding bytes.  */
1691 else if (!BYTES_BIG_ENDIAN)
1692 tmpbuf[byte_size - 1] = '\0';
3d3e04ac 1693
1694 /* Clear the bit region in PTR where the bits from TMPBUF will be
b1c71535 1695 inserted into. */
3d3e04ac 1696 if (BYTES_BIG_ENDIAN)
1697 clear_bit_region_be (ptr + first_byte,
1698 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1699 else
1700 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1701
1702 int shift_amnt;
1703 int bitlen_mod = bitlen % BITS_PER_UNIT;
1704 int bitpos_mod = bitpos % BITS_PER_UNIT;
1705
1706 bool skip_byte = false;
1707 if (BYTES_BIG_ENDIAN)
1708 {
1709 /* BITPOS and BITLEN are exactly aligned and no shifting
1710 is necessary. */
1711 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1712 || (bitpos_mod == 0 && bitlen_mod == 0))
1713 shift_amnt = 0;
1714 /* |. . . . . . . .|
1715 <bp > <blen >.
1716 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1717 of the value until it aligns with 'bp' in the next byte over. */
1718 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1719 {
1720 shift_amnt = bitlen_mod + bitpos_mod;
1721 skip_byte = bitlen_mod != 0;
1722 }
1723 /* |. . . . . . . .|
1724 <----bp--->
1725 <---blen---->.
1726 Shift the value right within the same byte so it aligns with 'bp'. */
1727 else
1728 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1729 }
1730 else
1731 shift_amnt = bitpos % BITS_PER_UNIT;
1732
1733 /* Create the shifted version of EXPR. */
1734 if (!BYTES_BIG_ENDIAN)
b1c71535 1735 {
1736 shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
1737 if (shift_amnt == 0)
1738 byte_size--;
1739 }
3d3e04ac 1740 else
1741 {
1742 gcc_assert (BYTES_BIG_ENDIAN);
1743 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
 1744	      /* If shifting right forced us to move into the next byte, skip the
 1745		 now-empty byte.  */
1746 if (skip_byte)
1747 {
1748 tmpbuf++;
1749 byte_size--;
1750 }
1751 }
1752
1753 /* Insert the bits from TMPBUF. */
1754 for (unsigned int i = 0; i < byte_size; i++)
1755 ptr[first_byte + i] |= tmpbuf[i];
1756
1757 return true;
1758}
1759
1760/* Sorting function for store_immediate_info objects.
1761 Sorts them by bitposition. */
1762
1763static int
1764sort_by_bitpos (const void *x, const void *y)
1765{
1766 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1767 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1768
61d052e5 1769 if ((*tmp)->bitpos < (*tmp2)->bitpos)
3d3e04ac 1770 return -1;
1771 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1772 return 1;
61d052e5 1773 else
ca4982c2 1774	    /* If they are the same, fall back on the order field, which is
 1775		       guaranteed to be distinct.  */
1776 return (*tmp)->order - (*tmp2)->order;
3d3e04ac 1777}
1778
1779/* Sorting function for store_immediate_info objects.
1780 Sorts them by the order field. */
1781
1782static int
1783sort_by_order (const void *x, const void *y)
1784{
1785 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1786 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1787
1788 if ((*tmp)->order < (*tmp2)->order)
1789 return -1;
1790 else if ((*tmp)->order > (*tmp2)->order)
1791 return 1;
1792
1793 gcc_unreachable ();
1794}
1795
1796/* Initialize a merged_store_group object from a store_immediate_info
1797 object. */
1798
1799merged_store_group::merged_store_group (store_immediate_info *info)
1800{
1801 start = info->bitpos;
1802 width = info->bitsize;
902cb3b7 1803 bitregion_start = info->bitregion_start;
1804 bitregion_end = info->bitregion_end;
3d3e04ac 1805 /* VAL has memory allocated for it in apply_stores once the group
1806 width has been finalized. */
1807 val = NULL;
902cb3b7 1808 mask = NULL;
1809 unsigned HOST_WIDE_INT align_bitpos = 0;
1810 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1811 &align, &align_bitpos);
1812 align_base = start - align_bitpos;
9991d1d3 1813 for (int i = 0; i < 2; ++i)
1814 {
1815 store_operand_info &op = info->ops[i];
1816 if (op.base_addr == NULL_TREE)
1817 {
1818 load_align[i] = 0;
1819 load_align_base[i] = 0;
1820 }
1821 else
1822 {
1823 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1824 load_align_base[i] = op.bitpos - align_bitpos;
1825 }
1826 }
3d3e04ac 1827 stores.create (1);
1828 stores.safe_push (info);
1829 last_stmt = info->stmt;
1830 last_order = info->order;
1831 first_stmt = last_stmt;
1832 first_order = last_order;
1833 buf_size = 0;
1834}
1835
1836merged_store_group::~merged_store_group ()
1837{
1838 if (val)
1839 XDELETEVEC (val);
1840}
1841
902cb3b7 1842/* Helper method for merge_into and merge_overlapping to do
1843 the common part. */
3d3e04ac 1844void
902cb3b7 1845merged_store_group::do_merge (store_immediate_info *info)
3d3e04ac 1846{
902cb3b7 1847 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1848 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1849
1850 unsigned int this_align;
1851 unsigned HOST_WIDE_INT align_bitpos = 0;
1852 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1853 &this_align, &align_bitpos);
1854 if (this_align > align)
1855 {
1856 align = this_align;
1857 align_base = info->bitpos - align_bitpos;
1858 }
9991d1d3 1859 for (int i = 0; i < 2; ++i)
1860 {
1861 store_operand_info &op = info->ops[i];
1862 if (!op.base_addr)
1863 continue;
1864
1865 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1866 if (this_align > load_align[i])
1867 {
1868 load_align[i] = this_align;
1869 load_align_base[i] = op.bitpos - align_bitpos;
1870 }
1871 }
3d3e04ac 1872
3d3e04ac 1873 gimple *stmt = info->stmt;
1874 stores.safe_push (info);
1875 if (info->order > last_order)
1876 {
1877 last_order = info->order;
1878 last_stmt = stmt;
1879 }
1880 else if (info->order < first_order)
1881 {
1882 first_order = info->order;
1883 first_stmt = stmt;
1884 }
1885}
1886
902cb3b7 1887/* Merge a store recorded by INFO into this merged store.
 1888	   The store does not overlap with the existing recorded
 1889	   stores.  */
1890
1891void
1892merged_store_group::merge_into (store_immediate_info *info)
1893{
1894 unsigned HOST_WIDE_INT wid = info->bitsize;
1895 /* Make sure we're inserting in the position we think we're inserting. */
1896 gcc_assert (info->bitpos >= start + width
1897 && info->bitregion_start <= bitregion_end);
1898
1899 width += wid;
1900 do_merge (info);
1901}
1902
3d3e04ac 1903/* Merge a store described by INFO into this merged store.
 1904	   INFO overlaps in some way with the current store (i.e. it is not
 1905	   contiguous, the case handled by merged_store_group::merge_into).  */
1906
1907void
1908merged_store_group::merge_overlapping (store_immediate_info *info)
1909{
3d3e04ac 1910 /* If the store extends the size of the group, extend the width. */
902cb3b7 1911 if (info->bitpos + info->bitsize > start + width)
3d3e04ac 1912 width += info->bitpos + info->bitsize - (start + width);
1913
902cb3b7 1914 do_merge (info);
3d3e04ac 1915}
1916
1917/* Go through all the recorded stores in this group in program order and
1918 apply their values to the VAL byte array to create the final merged
1919 value. Return true if the operation succeeded. */
1920
1921bool
1922merged_store_group::apply_stores ()
1923{
902cb3b7 1924	  /* Punt if the bit region isn't byte-aligned, or if the group contains
 1925	     only a single store, in which case there is nothing to merge.  */
1926 if (bitregion_start % BITS_PER_UNIT != 0
1927 || bitregion_end % BITS_PER_UNIT != 0
3d3e04ac 1928 || stores.length () == 1)
1929 return false;
1930
1931 stores.qsort (sort_by_order);
902cb3b7 1932 store_immediate_info *info;
3d3e04ac 1933 unsigned int i;
1934 /* Create a buffer of a size that is 2 times the number of bytes we're
1935 storing. That way native_encode_expr can write power-of-2-sized
1936 chunks without overrunning. */
902cb3b7 1937 buf_size = 2 * ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
1938 val = XNEWVEC (unsigned char, 2 * buf_size);
1939 mask = val + buf_size;
1940 memset (val, 0, buf_size);
1941 memset (mask, ~0U, buf_size);
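  /* VAL and MASK share a single allocation: VAL occupies the first
     BUF_SIZE bytes and MASK the second.  Each store applied below ORs
     its encoded bits into VAL and clears the corresponding bits in
     MASK, so the bits still set in MASK afterwards mark exactly the
     padding bits that no store has written.  */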
3d3e04ac 1942
1943 FOR_EACH_VEC_ELT (stores, i, info)
1944 {
902cb3b7 1945 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
9991d1d3 1946 tree cst = NULL_TREE;
1947 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
1948 cst = info->ops[0].val;
1949 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
1950 cst = info->ops[1].val;
1951 bool ret = true;
1952 if (cst)
1953 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
1954 pos_in_buffer, buf_size);
1955 if (cst && dump_file && (dump_flags & TDF_DETAILS))
3d3e04ac 1956 {
1957 if (ret)
1958 {
1959 fprintf (dump_file, "After writing ");
9991d1d3 1960 print_generic_expr (dump_file, cst, 0);
3d3e04ac 1961 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
509ab8cd 1962 " at position %d the merged region contains:\n",
1963 info->bitsize, pos_in_buffer);
3d3e04ac 1964 dump_char_array (dump_file, val, buf_size);
1965 }
1966 else
1967 fprintf (dump_file, "Failed to merge stores\n");
509ab8cd 1968 }
3d3e04ac 1969 if (!ret)
1970 return false;
902cb3b7 1971 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
1972 if (BYTES_BIG_ENDIAN)
a5961a9d 1973 clear_bit_region_be (m, (BITS_PER_UNIT - 1
1974 - (pos_in_buffer % BITS_PER_UNIT)),
1975 info->bitsize);
902cb3b7 1976 else
1977 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
3d3e04ac 1978 }
509ab8cd 1979 stores.qsort (sort_by_bitpos);
3d3e04ac 1980 return true;
1981}
1982
1983/* Structure describing the store chain. */
1984
1985struct imm_store_chain_info
1986{
3a3ba7de 1987 /* Doubly-linked list that imposes an order on chain processing.
1988 PNXP (prev's next pointer) points to the head of a list, or to
1989 the next field in the previous chain in the list.
1990 See pass_store_merging::m_stores_head for more rationale. */
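  /* For example (illustrative): after inserting chains A and then B,
     the list is head -> B -> A with B->pnxp == &head and
     A->pnxp == &B->next, so the destructor can unlink any element in
     constant time.  */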
1991 imm_store_chain_info *next, **pnxp;
f85e7cb7 1992 tree base_addr;
902cb3b7 1993 auto_vec<store_immediate_info *> m_store_info;
3d3e04ac 1994 auto_vec<merged_store_group *> m_merged_store_groups;
1995
3a3ba7de 1996 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
1997 : next (inspt), pnxp (&inspt), base_addr (b_a)
1998 {
1999 inspt = this;
2000 if (next)
2001 {
2002 gcc_checking_assert (pnxp == next->pnxp);
2003 next->pnxp = &next;
2004 }
2005 }
2006 ~imm_store_chain_info ()
2007 {
2008 *pnxp = next;
2009 if (next)
2010 {
2011 gcc_checking_assert (&next == next->pnxp);
2012 next->pnxp = pnxp;
2013 }
2014 }
f85e7cb7 2015 bool terminate_and_process_chain ();
509ab8cd 2016 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
3d3e04ac 2017 bool coalesce_immediate_stores ();
f85e7cb7 2018 bool output_merged_store (merged_store_group *);
2019 bool output_merged_stores ();
3d3e04ac 2020};
2021
2022const pass_data pass_data_tree_store_merging = {
2023 GIMPLE_PASS, /* type */
2024 "store-merging", /* name */
2025 OPTGROUP_NONE, /* optinfo_flags */
2026 TV_GIMPLE_STORE_MERGING, /* tv_id */
2027 PROP_ssa, /* properties_required */
2028 0, /* properties_provided */
2029 0, /* properties_destroyed */
2030 0, /* todo_flags_start */
2031 TODO_update_ssa, /* todo_flags_finish */
2032};
2033
2034class pass_store_merging : public gimple_opt_pass
2035{
2036public:
2037 pass_store_merging (gcc::context *ctxt)
2d27e5c1 2038 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
3d3e04ac 2039 {
2040 }
2041
902cb3b7 2042 /* Pass not supported for PDP-endianness, nor for insane hosts
2043 or target character sizes where native_{encode,interpret}_expr
2044 doesn't work properly. */
3d3e04ac 2045 virtual bool
2046 gate (function *)
2047 {
902cb3b7 2048 return flag_store_merging
2049 && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN
2050 && CHAR_BIT == 8
2051 && BITS_PER_UNIT == 8;
3d3e04ac 2052 }
2053
2054 virtual unsigned int execute (function *);
2055
2056private:
2057 hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;
2058
3a3ba7de 2059 /* Form a doubly-linked stack of the elements of m_stores, so that
2060 we can iterate over them in a predictable way. Using this order
2061 avoids extraneous differences in the compiler output just because
2062 of tree pointer variations (e.g. different chains end up in
2063 different positions of m_stores, so they are handled in different
2064 orders, so they allocate or release SSA names in different
2065 orders, and when they get reused, subsequent passes end up
2066 getting different SSA names, which may ultimately change
2067 decisions when going out of SSA). */
2068 imm_store_chain_info *m_stores_head;
2069
9991d1d3 2070 void process_store (gimple *);
3d3e04ac 2071 bool terminate_and_process_all_chains ();
c35548ce 2072 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
f85e7cb7 2073 bool terminate_and_release_chain (imm_store_chain_info *);
3d3e04ac 2074}; // class pass_store_merging
2075
2076/* Terminate and process all recorded chains. Return true if any changes
2077 were made. */
2078
2079bool
2080pass_store_merging::terminate_and_process_all_chains ()
2081{
3d3e04ac 2082 bool ret = false;
3a3ba7de 2083 while (m_stores_head)
2084 ret |= terminate_and_release_chain (m_stores_head);
2085 gcc_assert (m_stores.elements () == 0);
2086 gcc_assert (m_stores_head == NULL);
3d3e04ac 2087
2088 return ret;
2089}
2090
c35548ce 2091/* Terminate all chains that are affected by the statement STMT.
 2092						   CHAIN_INFO, if non-NULL, is the chain we should skip during
 2093						   the checks.  */
3d3e04ac 2094
2095bool
4de7f8df 2096pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
f85e7cb7 2097 **chain_info,
3d3e04ac 2098 gimple *stmt)
2099{
2100 bool ret = false;
2101
2102 /* If the statement doesn't touch memory it can't alias. */
2103 if (!gimple_vuse (stmt))
2104 return false;
2105
9fead2ab 2106 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
c35548ce 2107 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
3d3e04ac 2108 {
c35548ce 2109 next = cur->next;
2110
2111 /* We already checked all the stores in chain_info and terminated the
2112 chain if necessary. Skip it here. */
2113 if (chain_info && *chain_info == cur)
2114 continue;
2115
9991d1d3 2116 store_immediate_info *info;
2117 unsigned int i;
c35548ce 2118 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
3d3e04ac 2119 {
9fead2ab 2120 tree lhs = gimple_assign_lhs (info->stmt);
2121 if (ref_maybe_used_by_stmt_p (stmt, lhs)
2122 || stmt_may_clobber_ref_p (stmt, lhs)
2123 || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
3d3e04ac 2124 {
9991d1d3 2125 if (dump_file && (dump_flags & TDF_DETAILS))
3d3e04ac 2126 {
9991d1d3 2127 fprintf (dump_file, "stmt causes chain termination:\n");
2128 print_gimple_stmt (dump_file, stmt, 0);
3d3e04ac 2129 }
c35548ce 2130 terminate_and_release_chain (cur);
9991d1d3 2131 ret = true;
2132 break;
3d3e04ac 2133 }
2134 }
2135 }
2136
3d3e04ac 2137 return ret;
2138}
2139
2140/* Helper function. Terminate the recorded chain storing to base object
 2141	   BASE.  Return true if merging and output were successful.  The m_stores
 2142	   entry is removed in any case after processing.  */
2143
2144bool
f85e7cb7 2145pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
3d3e04ac 2146{
f85e7cb7 2147 bool ret = chain_info->terminate_and_process_chain ();
2148 m_stores.remove (chain_info->base_addr);
2149 delete chain_info;
3d3e04ac 2150 return ret;
2151}
2152
9991d1d3 2153/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2154 may clobber REF. FIRST and LAST must be in the same basic block and
509ab8cd 2155	   have non-NULL vdef.  We want to be able to sink a load of REF across
2156 stores between FIRST and LAST, up to right before LAST. */
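/* As an illustration (hypothetical statements), given
     p[0] = x;	<-- FIRST
     q[0] = y;
     p[1] = z;	<-- LAST
   and REF == q[0], this returns true because the intervening store
   clobbers REF; for a REF that aliases none of the stores it returns
   false and a load of REF may be sunk right before LAST.  */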
9991d1d3 2157
2158bool
2159stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2160{
2161 ao_ref r;
2162 ao_ref_init (&r, ref);
2163 unsigned int count = 0;
2164 tree vop = gimple_vdef (last);
2165 gimple *stmt;
2166
2167 gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
2168 do
2169 {
2170 stmt = SSA_NAME_DEF_STMT (vop);
2171 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2172 return true;
509ab8cd 2173 if (gimple_store_p (stmt)
2174 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2175 return true;
9991d1d3 2176 /* Avoid quadratic compile time by bounding the number of checks
2177 we perform. */
2178 if (++count > MAX_STORE_ALIAS_CHECKS)
2179 return true;
2180 vop = gimple_vuse (stmt);
2181 }
2182 while (stmt != first);
2183 return false;
2184}
2185
2186/* Return true if INFO->ops[IDX] is mergeable with the
 2187	   corresponding loads already in the MERGED_STORE group.
2188 BASE_ADDR is the base address of the whole store group. */
2189
2190bool
2191compatible_load_p (merged_store_group *merged_store,
2192 store_immediate_info *info,
2193 tree base_addr, int idx)
2194{
2195 store_immediate_info *infof = merged_store->stores[0];
2196 if (!info->ops[idx].base_addr
e61263f2 2197 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2198 info->bitpos - infof->bitpos)
9991d1d3 2199 || !operand_equal_p (info->ops[idx].base_addr,
2200 infof->ops[idx].base_addr, 0))
2201 return false;
2202
2203 store_immediate_info *infol = merged_store->stores.last ();
2204 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2205 /* In this case all vuses should be the same, e.g.
2206 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2207 or
2208 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2209 and we can emit the coalesced load next to any of those loads. */
2210 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2211 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2212 return true;
2213
2214 /* Otherwise, at least for now require that the load has the same
2215 vuse as the store. See following examples. */
2216 if (gimple_vuse (info->stmt) != load_vuse)
2217 return false;
2218
2219 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2220 || (infof != infol
2221 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2222 return false;
2223
2224 /* If the load is from the same location as the store, already
2225 the construction of the immediate chain info guarantees no intervening
2226 stores, so no further checks are needed. Example:
2227 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
e61263f2 2228 if (known_eq (info->ops[idx].bitpos, info->bitpos)
9991d1d3 2229 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2230 return true;
2231
2232 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2233 of the stores in the group, or any other stores in between those.
2234 Previous calls to compatible_load_p ensured that for all the
2235 merged_store->stores IDX loads, no stmts starting with
2236 merged_store->first_stmt and ending right before merged_store->last_stmt
2237 clobbers those loads. */
2238 gimple *first = merged_store->first_stmt;
2239 gimple *last = merged_store->last_stmt;
2240 unsigned int i;
2241 store_immediate_info *infoc;
2242 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2243 comes before the so far first load, we'll be changing
2244 merged_store->first_stmt. In that case we need to give up if
 2245	     any of the earlier processed loads are clobbered by the stmts in the
 2246	     new range.  */
2247 if (info->order < merged_store->first_order)
2248 {
2249 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2250 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2251 return false;
2252 first = info->stmt;
2253 }
2254 /* Similarly, we could change merged_store->last_stmt, so ensure
2255 in that case no stmts in the new range clobber any of the earlier
2256 processed loads. */
2257 else if (info->order > merged_store->last_order)
2258 {
2259 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2260 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2261 return false;
2262 last = info->stmt;
2263 }
2264 /* And finally, we'd be adding a new load to the set, ensure it isn't
2265 clobbered in the new range. */
2266 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2267 return false;
2268
2269 /* Otherwise, we are looking for:
2270 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2271 or
2272 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2273 return true;
2274}
2275
509ab8cd 2276/* Add to the REFS vector all the refs loaded in order to compute VAL.  */
2277
2278void
2279gather_bswap_load_refs (vec<tree> *refs, tree val)
2280{
2281 if (TREE_CODE (val) != SSA_NAME)
2282 return;
2283
2284 gimple *stmt = SSA_NAME_DEF_STMT (val);
2285 if (!is_gimple_assign (stmt))
2286 return;
2287
2288 if (gimple_assign_load_p (stmt))
2289 {
2290 refs->safe_push (gimple_assign_rhs1 (stmt));
2291 return;
2292 }
2293
2294 switch (gimple_assign_rhs_class (stmt))
2295 {
2296 case GIMPLE_BINARY_RHS:
2297 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2298 /* FALLTHRU */
2299 case GIMPLE_UNARY_RHS:
2300 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2301 break;
2302 default:
2303 gcc_unreachable ();
2304 }
2305}
2306
 2307/* Return true if m_store_info[first] and at least one following store
 2308   form a group which stores a try_size-bit value that is byte swapped
 2309   from a memory load or from some other value, or is the identity of
 2310   some value.  This uses the bswap pass APIs.  */
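/* For instance (illustrative little-endian example), the group
     p[0] = x; p[1] = x >> 8; p[2] = x >> 16; p[3] = x >> 24;
   stores a 32-bit value unchanged (identity), whereas storing the
   bytes in the opposite order is recognized as a 32-bit byte swap.  */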
2311
2312bool
2313imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2314 unsigned int first,
2315 unsigned int try_size)
2316{
2317 unsigned int len = m_store_info.length (), last = first;
2318 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2319 if (width >= try_size)
2320 return false;
2321 for (unsigned int i = first + 1; i < len; ++i)
2322 {
2323 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2324 || m_store_info[i]->ins_stmt == NULL)
2325 return false;
2326 width += m_store_info[i]->bitsize;
2327 if (width >= try_size)
2328 {
2329 last = i;
2330 break;
2331 }
2332 }
2333 if (width != try_size)
2334 return false;
2335
2336 bool allow_unaligned
2337 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
2338 /* Punt if the combined store would not be aligned and we need alignment. */
2339 if (!allow_unaligned)
2340 {
2341 unsigned int align = merged_store->align;
2342 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2343 for (unsigned int i = first + 1; i <= last; ++i)
2344 {
2345 unsigned int this_align;
2346 unsigned HOST_WIDE_INT align_bitpos = 0;
2347 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2348 &this_align, &align_bitpos);
2349 if (this_align > align)
2350 {
2351 align = this_align;
2352 align_base = m_store_info[i]->bitpos - align_bitpos;
2353 }
2354 }
2355 unsigned HOST_WIDE_INT align_bitpos
2356 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2357 if (align_bitpos)
2358 align = least_bit_hwi (align_bitpos);
2359 if (align < try_size)
2360 return false;
2361 }
2362
2363 tree type;
2364 switch (try_size)
2365 {
2366 case 16: type = uint16_type_node; break;
2367 case 32: type = uint32_type_node; break;
2368 case 64: type = uint64_type_node; break;
2369 default: gcc_unreachable ();
2370 }
2371 struct symbolic_number n;
2372 gimple *ins_stmt = NULL;
2373 int vuse_store = -1;
2374 unsigned int first_order = merged_store->first_order;
2375 unsigned int last_order = merged_store->last_order;
2376 gimple *first_stmt = merged_store->first_stmt;
2377 gimple *last_stmt = merged_store->last_stmt;
2378 store_immediate_info *infof = m_store_info[first];
2379
2380 for (unsigned int i = first; i <= last; ++i)
2381 {
2382 store_immediate_info *info = m_store_info[i];
2383 struct symbolic_number this_n = info->n;
2384 this_n.type = type;
2385 if (!this_n.base_addr)
2386 this_n.range = try_size / BITS_PER_UNIT;
58cff6a2 2387 else
 2388	/* Update vuse in case it was changed by output_merged_stores.  */
2389 this_n.vuse = gimple_vuse (info->ins_stmt);
509ab8cd 2390 unsigned int bitpos = info->bitpos - infof->bitpos;
2391 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2392 BYTES_BIG_ENDIAN
2393 ? try_size - info->bitsize - bitpos
2394 : bitpos))
2395 return false;
1636105f 2396 if (this_n.base_addr && vuse_store)
509ab8cd 2397 {
2398 unsigned int j;
2399 for (j = first; j <= last; ++j)
2400 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2401 break;
2402 if (j > last)
2403 {
2404 if (vuse_store == 1)
2405 return false;
2406 vuse_store = 0;
2407 }
2408 }
2409 if (i == first)
2410 {
2411 n = this_n;
2412 ins_stmt = info->ins_stmt;
2413 }
2414 else
2415 {
2416 if (n.base_addr)
2417 {
2418 if (n.vuse != this_n.vuse)
2419 {
2420 if (vuse_store == 0)
2421 return false;
2422 vuse_store = 1;
2423 }
2424 if (info->order > last_order)
2425 {
2426 last_order = info->order;
2427 last_stmt = info->stmt;
2428 }
2429 else if (info->order < first_order)
2430 {
2431 first_order = info->order;
2432 first_stmt = info->stmt;
2433 }
2434 }
2435
2436 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2437 &this_n, &n);
2438 if (ins_stmt == NULL)
2439 return false;
2440 }
2441 }
2442
2443 uint64_t cmpxchg, cmpnop;
2444 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2445
 2446  /* A complete byte swap should make the symbolic number start with
 2447     the largest digit in the highest order byte.  An unchanged symbolic
 2448     number indicates a read with the same endianness as the target.  */
2449 if (n.n != cmpnop && n.n != cmpxchg)
2450 return false;
2451
2452 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2453 return false;
2454
2455 /* Don't handle memory copy this way if normal non-bswap processing
2456 would handle it too. */
2457 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2458 {
2459 unsigned int i;
2460 for (i = first; i <= last; ++i)
2461 if (m_store_info[i]->rhs_code != MEM_REF)
2462 break;
2463 if (i == last + 1)
2464 return false;
2465 }
2466
2467 if (n.n == cmpxchg)
2468 switch (try_size)
2469 {
2470 case 16:
2471 /* Will emit LROTATE_EXPR. */
2472 break;
2473 case 32:
2474 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2475 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2476 break;
2477 return false;
2478 case 64:
2479 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2480 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2481 break;
2482 return false;
2483 default:
2484 gcc_unreachable ();
2485 }
2486
2487 if (!allow_unaligned && n.base_addr)
2488 {
2489 unsigned int align = get_object_alignment (n.src);
2490 if (align < try_size)
2491 return false;
2492 }
2493
 2494  /* If each load has the vuse of the corresponding store, we need to
 2495     verify that the loads can be sunk right before the last store.  */
2496 if (vuse_store == 1)
2497 {
2498 auto_vec<tree, 64> refs;
2499 for (unsigned int i = first; i <= last; ++i)
2500 gather_bswap_load_refs (&refs,
2501 gimple_assign_rhs1 (m_store_info[i]->stmt));
2502
2503 unsigned int i;
2504 tree ref;
2505 FOR_EACH_VEC_ELT (refs, i, ref)
2506 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2507 return false;
2508 n.vuse = NULL_TREE;
2509 }
2510
2511 infof->n = n;
2512 infof->ins_stmt = ins_stmt;
2513 for (unsigned int i = first; i <= last; ++i)
2514 {
2515 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2516 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2517 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2518 if (i != first)
2519 merged_store->merge_into (m_store_info[i]);
2520 }
2521
2522 return true;
2523}
2524
3d3e04ac 2525/* Go through the candidate stores recorded in m_store_info and merge them
2526 into merged_store_group objects recorded into m_merged_store_groups
2527 representing the widened stores. Return true if coalescing was successful
 2528   and the number of widened stores is smaller than the original number
2529 of stores. */
2530
2531bool
2532imm_store_chain_info::coalesce_immediate_stores ()
2533{
2534 /* Anything less can't be processed. */
2535 if (m_store_info.length () < 2)
2536 return false;
2537
2538 if (dump_file && (dump_flags & TDF_DETAILS))
2539 fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n",
2540 m_store_info.length ());
2541
2542 store_immediate_info *info;
509ab8cd 2543 unsigned int i, ignore = 0;
3d3e04ac 2544
2545 /* Order the stores by the bitposition they write to. */
2546 m_store_info.qsort (sort_by_bitpos);
2547
2548 info = m_store_info[0];
2549 merged_store_group *merged_store = new merged_store_group (info);
2550
2551 FOR_EACH_VEC_ELT (m_store_info, i, info)
2552 {
2553 if (dump_file && (dump_flags & TDF_DETAILS))
2554 {
2555 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2556 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n",
2557 i, info->bitsize, info->bitpos);
1ffa4346 2558 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3d3e04ac 2559 fprintf (dump_file, "\n------------\n");
2560 }
2561
509ab8cd 2562 if (i <= ignore)
3d3e04ac 2563 continue;
2564
509ab8cd 2565 /* First try to handle group of stores like:
2566 p[0] = data >> 24;
2567 p[1] = data >> 16;
2568 p[2] = data >> 8;
2569 p[3] = data;
2570 using the bswap framework. */
2571 if (info->bitpos == merged_store->start + merged_store->width
2572 && merged_store->stores.length () == 1
2573 && merged_store->stores[0]->ins_stmt != NULL
2574 && info->ins_stmt != NULL)
2575 {
2576 unsigned int try_size;
2577 for (try_size = 64; try_size >= 16; try_size >>= 1)
2578 if (try_coalesce_bswap (merged_store, i - 1, try_size))
2579 break;
2580
2581 if (try_size >= 16)
2582 {
2583 ignore = i + merged_store->stores.length () - 1;
2584 m_merged_store_groups.safe_push (merged_store);
2585 if (ignore < m_store_info.length ())
2586 merged_store = new merged_store_group (m_store_info[ignore]);
2587 else
2588 merged_store = NULL;
2589 continue;
2590 }
2591 }
2592
3d3e04ac 2593 /* |---store 1---|
2594 |---store 2---|
509ab8cd 2595 Overlapping stores. */
2596 if (IN_RANGE (info->bitpos, merged_store->start,
3d3e04ac 2597 merged_store->start + merged_store->width - 1))
2598 {
9991d1d3 2599 /* Only allow overlapping stores of constants. */
2600 if (info->rhs_code == INTEGER_CST
2601 && merged_store->stores[0]->rhs_code == INTEGER_CST)
2602 {
2603 merged_store->merge_overlapping (info);
2604 continue;
2605 }
3d3e04ac 2606 }
9991d1d3 2607 /* |---store 1---||---store 2---|
2608 This store is consecutive to the previous one.
2609 Merge it into the current store group. There can be gaps in between
2610 the stores, but there can't be gaps in between bitregions. */
509ab8cd 2611 else if (info->rhs_code != LROTATE_EXPR
2612 && info->bitregion_start <= merged_store->bitregion_end
9deedf62 2613 && info->rhs_code == merged_store->stores[0]->rhs_code)
3d3e04ac 2614 {
9991d1d3 2615 store_immediate_info *infof = merged_store->stores[0];
2616
 2617	  /* All the rhs_code ops that take 2 operands are commutative;
 2618	     swap the operands if that could make them compatible.  */
2619 if (infof->ops[0].base_addr
2620 && infof->ops[1].base_addr
2621 && info->ops[0].base_addr
2622 && info->ops[1].base_addr
e61263f2 2623 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2624 info->bitpos - infof->bitpos)
9991d1d3 2625 && operand_equal_p (info->ops[1].base_addr,
2626 infof->ops[0].base_addr, 0))
aa0a1d29 2627 {
2628 std::swap (info->ops[0], info->ops[1]);
2629 info->ops_swapped_p = true;
2630 }
950ddbd7 2631 if ((infof->ops[0].base_addr
2632 ? compatible_load_p (merged_store, info, base_addr, 0)
2633 : !info->ops[0].base_addr)
2634 && (infof->ops[1].base_addr
2635 ? compatible_load_p (merged_store, info, base_addr, 1)
2636 : !info->ops[1].base_addr))
9991d1d3 2637 {
2638 merged_store->merge_into (info);
2639 continue;
2640 }
2641 }
3d3e04ac 2642
9991d1d3 2643 /* |---store 1---| <gap> |---store 2---|.
2644 Gap between stores or the rhs not compatible. Start a new group. */
3d3e04ac 2645
9991d1d3 2646 /* Try to apply all the stores recorded for the group to determine
2647 the bitpattern they write and discard it if that fails.
2648 This will also reject single-store groups. */
2649 if (!merged_store->apply_stores ())
2650 delete merged_store;
2651 else
2652 m_merged_store_groups.safe_push (merged_store);
3d3e04ac 2653
9991d1d3 2654 merged_store = new merged_store_group (info);
3d3e04ac 2655 }
2656
902cb3b7 2657 /* Record or discard the last store group. */
509ab8cd 2658 if (merged_store)
2659 {
2660 if (!merged_store->apply_stores ())
2661 delete merged_store;
2662 else
2663 m_merged_store_groups.safe_push (merged_store);
2664 }
3d3e04ac 2665
2666 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
2667 bool success
2668 = !m_merged_store_groups.is_empty ()
2669 && m_merged_store_groups.length () < m_store_info.length ();
2670
2671 if (success && dump_file)
2672 fprintf (dump_file, "Coalescing successful!\n"
902cb3b7 2673 "Merged into %u stores\n",
2674 m_merged_store_groups.length ());
3d3e04ac 2675
2676 return success;
2677}
2678
9991d1d3 2679/* Return the type to use for the merged stores or loads described by STMTS.
2680 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
2681 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
2682 of the MEM_REFs if any. */
3d3e04ac 2683
2684static tree
9991d1d3 2685get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
2686 unsigned short *cliquep, unsigned short *basep)
3d3e04ac 2687{
2688 gimple *stmt;
2689 unsigned int i;
9991d1d3 2690 tree type = NULL_TREE;
2691 tree ret = NULL_TREE;
2692 *cliquep = 0;
2693 *basep = 0;
3d3e04ac 2694
2695 FOR_EACH_VEC_ELT (stmts, i, stmt)
2696 {
9991d1d3 2697 tree ref = is_load ? gimple_assign_rhs1 (stmt)
2698 : gimple_assign_lhs (stmt);
2699 tree type1 = reference_alias_ptr_type (ref);
2700 tree base = get_base_address (ref);
3d3e04ac 2701
9991d1d3 2702 if (i == 0)
2703 {
2704 if (TREE_CODE (base) == MEM_REF)
2705 {
2706 *cliquep = MR_DEPENDENCE_CLIQUE (base);
2707 *basep = MR_DEPENDENCE_BASE (base);
2708 }
2709 ret = type = type1;
2710 continue;
2711 }
3d3e04ac 2712 if (!alias_ptr_types_compatible_p (type, type1))
9991d1d3 2713 ret = ptr_type_node;
2714 if (TREE_CODE (base) != MEM_REF
2715 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
2716 || *basep != MR_DEPENDENCE_BASE (base))
2717 {
2718 *cliquep = 0;
2719 *basep = 0;
2720 }
3d3e04ac 2721 }
9991d1d3 2722 return ret;
3d3e04ac 2723}
2724
2725/* Return the location_t information we can find among the statements
2726 in STMTS. */
2727
2728static location_t
9991d1d3 2729get_location_for_stmts (vec<gimple *> &stmts)
3d3e04ac 2730{
2731 gimple *stmt;
2732 unsigned int i;
2733
2734 FOR_EACH_VEC_ELT (stmts, i, stmt)
2735 if (gimple_has_location (stmt))
2736 return gimple_location (stmt);
2737
2738 return UNKNOWN_LOCATION;
2739}
2740
 2741/* Used to describe a store resulting from splitting a wide store into
 2742   smaller regularly-sized stores in split_group.  */
2743
2744struct split_store
2745{
2746 unsigned HOST_WIDE_INT bytepos;
2747 unsigned HOST_WIDE_INT size;
2748 unsigned HOST_WIDE_INT align;
9991d1d3 2749 auto_vec<store_immediate_info *> orig_stores;
902cb3b7 2750 /* True if there is a single orig stmt covering the whole split store. */
2751 bool orig;
3d3e04ac 2752 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
2753 unsigned HOST_WIDE_INT);
2754};
2755
2756/* Simple constructor. */
2757
2758split_store::split_store (unsigned HOST_WIDE_INT bp,
2759 unsigned HOST_WIDE_INT sz,
2760 unsigned HOST_WIDE_INT al)
902cb3b7 2761 : bytepos (bp), size (sz), align (al), orig (false)
3d3e04ac 2762{
9991d1d3 2763 orig_stores.create (0);
3d3e04ac 2764}
2765
9991d1d3 2766/* Record all stores in GROUP that write to the region starting at BITPOS
 2767   and of size BITSIZE.  Record infos for such statements in STORES if
2768 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
2769 if there is exactly one original store in the range. */
3d3e04ac 2770
902cb3b7 2771static store_immediate_info *
9991d1d3 2772find_constituent_stores (struct merged_store_group *group,
2773 vec<store_immediate_info *> *stores,
2774 unsigned int *first,
2775 unsigned HOST_WIDE_INT bitpos,
2776 unsigned HOST_WIDE_INT bitsize)
3d3e04ac 2777{
902cb3b7 2778 store_immediate_info *info, *ret = NULL;
3d3e04ac 2779 unsigned int i;
902cb3b7 2780 bool second = false;
2781 bool update_first = true;
3d3e04ac 2782 unsigned HOST_WIDE_INT end = bitpos + bitsize;
902cb3b7 2783 for (i = *first; group->stores.iterate (i, &info); ++i)
3d3e04ac 2784 {
2785 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
2786 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
902cb3b7 2787 if (stmt_end <= bitpos)
2788 {
 2789	  /* BITPOS passed to this function never decreases within the
2790 same split_group call, so optimize and don't scan info records
2791 which are known to end before or at BITPOS next time.
2792 Only do it if all stores before this one also pass this. */
2793 if (update_first)
2794 *first = i + 1;
2795 continue;
2796 }
2797 else
2798 update_first = false;
2799
3d3e04ac 2800 /* The stores in GROUP are ordered by bitposition so if we're past
902cb3b7 2801 the region for this group return early. */
2802 if (stmt_start >= end)
2803 return ret;
2804
9991d1d3 2805 if (stores)
902cb3b7 2806 {
9991d1d3 2807 stores->safe_push (info);
902cb3b7 2808 if (ret)
2809 {
2810 ret = NULL;
2811 second = true;
2812 }
2813 }
2814 else if (ret)
2815 return NULL;
2816 if (!second)
2817 ret = info;
3d3e04ac 2818 }
902cb3b7 2819 return ret;
3d3e04ac 2820}
2821
871a91ec 2822/* Return how many SSA_NAMEs that are used to compute the value stored
 2823   by INFO have multiple uses.  If any SSA_NAME has multiple uses, also
 2824   count the statements needed to compute it.  */
2825
2826static unsigned
2827count_multiple_uses (store_immediate_info *info)
2828{
2829 gimple *stmt = info->stmt;
2830 unsigned ret = 0;
2831 switch (info->rhs_code)
2832 {
2833 case INTEGER_CST:
2834 return 0;
2835 case BIT_AND_EXPR:
2836 case BIT_IOR_EXPR:
2837 case BIT_XOR_EXPR:
832a73b9 2838 if (info->bit_not_p)
2839 {
2840 if (!has_single_use (gimple_assign_rhs1 (stmt)))
2841 ret = 1; /* Fall through below to return
2842 the BIT_NOT_EXPR stmt and then
2843 BIT_{AND,IOR,XOR}_EXPR and anything it
2844 uses. */
2845 else
 2846	    /* After this, STMT is the BIT_NOT_EXPR.  */
2847 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2848 }
871a91ec 2849 if (!has_single_use (gimple_assign_rhs1 (stmt)))
2850 {
2851 ret += 1 + info->ops[0].bit_not_p;
2852 if (info->ops[1].base_addr)
2853 ret += 1 + info->ops[1].bit_not_p;
2854 return ret + 1;
2855 }
2856 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2857 /* stmt is now the BIT_*_EXPR. */
2858 if (!has_single_use (gimple_assign_rhs1 (stmt)))
aa0a1d29 2859 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
2860 else if (info->ops[info->ops_swapped_p].bit_not_p)
871a91ec 2861 {
2862 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2863 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
2864 ++ret;
2865 }
2866 if (info->ops[1].base_addr == NULL_TREE)
aa0a1d29 2867 {
2868 gcc_checking_assert (!info->ops_swapped_p);
2869 return ret;
2870 }
871a91ec 2871 if (!has_single_use (gimple_assign_rhs2 (stmt)))
aa0a1d29 2872 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
2873 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
871a91ec 2874 {
2875 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
2876 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
2877 ++ret;
2878 }
2879 return ret;
2880 case MEM_REF:
2881 if (!has_single_use (gimple_assign_rhs1 (stmt)))
2882 return 1 + info->ops[0].bit_not_p;
2883 else if (info->ops[0].bit_not_p)
2884 {
2885 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2886 if (!has_single_use (gimple_assign_rhs1 (stmt)))
2887 return 1;
2888 }
2889 return 0;
2890 default:
2891 gcc_unreachable ();
2892 }
2893}
2894
3d3e04ac 2895/* Split a merged store described by GROUP by populating the SPLIT_STORES
902cb3b7 2896 vector (if non-NULL) with split_store structs describing the byte offset
2897 (from the base), the bit size and alignment of each store as well as the
2898 original statements involved in each such split group.
3d3e04ac 2899 This is to separate the splitting strategy from the statement
2900 building/emission/linking done in output_merged_store.
902cb3b7 2901 Return number of new stores.
9991d1d3 2902 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
2903 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
902cb3b7 2904   If SPLIT_STORES is NULL, it is just a dry run to count the number of
2905 new stores. */
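/* For instance (illustrative, assuming MAX_STORE_BITSIZE >= 32,
   constant stores and no padding bytes): a group covering bytes
   [0, 6) of a 4-byte-aligned region with ALLOW_UNALIGNED_STORE false
   is split into a 4-byte store followed by a 2-byte store.  */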
3d3e04ac 2906
902cb3b7 2907static unsigned int
9991d1d3 2908split_group (merged_store_group *group, bool allow_unaligned_store,
2909 bool allow_unaligned_load,
871a91ec 2910 vec<struct split_store *> *split_stores,
2911 unsigned *total_orig,
2912 unsigned *total_new)
3d3e04ac 2913{
902cb3b7 2914 unsigned HOST_WIDE_INT pos = group->bitregion_start;
2915 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3d3e04ac 2916 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
902cb3b7 2917 unsigned HOST_WIDE_INT group_align = group->align;
2918 unsigned HOST_WIDE_INT align_base = group->align_base;
9991d1d3 2919 unsigned HOST_WIDE_INT group_load_align = group_align;
871a91ec 2920 bool any_orig = false;
3d3e04ac 2921
3d3e04ac 2922 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
2923
509ab8cd 2924 if (group->stores[0]->rhs_code == LROTATE_EXPR
2925 || group->stores[0]->rhs_code == NOP_EXPR)
2926 {
 2927      /* For the bswap framework, which uses sets of stores, all the
 2928	 checking has been done earlier in try_coalesce_bswap and the
 2929	 group needs to be emitted as a single store.  */
2930 if (total_orig)
2931 {
 2932	  /* Avoid the old/new stmt count heuristics.  It should always
 2933	     be beneficial.  */
2934 total_new[0] = 1;
2935 total_orig[0] = 2;
2936 }
2937
2938 if (split_stores)
2939 {
2940 unsigned HOST_WIDE_INT align_bitpos
2941 = (group->start - align_base) & (group_align - 1);
2942 unsigned HOST_WIDE_INT align = group_align;
2943 if (align_bitpos)
2944 align = least_bit_hwi (align_bitpos);
2945 bytepos = group->start / BITS_PER_UNIT;
2946 struct split_store *store
2947 = new split_store (bytepos, group->width, align);
2948 unsigned int first = 0;
2949 find_constituent_stores (group, &store->orig_stores,
2950 &first, group->start, group->width);
2951 split_stores->safe_push (store);
2952 }
2953
2954 return 1;
2955 }
2956
902cb3b7 2957 unsigned int ret = 0, first = 0;
3d3e04ac 2958 unsigned HOST_WIDE_INT try_pos = bytepos;
3d3e04ac 2959
871a91ec 2960 if (total_orig)
2961 {
2962 unsigned int i;
2963 store_immediate_info *info = group->stores[0];
2964
2965 total_new[0] = 0;
2966 total_orig[0] = 1; /* The orig store. */
2967 info = group->stores[0];
2968 if (info->ops[0].base_addr)
9deedf62 2969 total_orig[0]++;
871a91ec 2970 if (info->ops[1].base_addr)
9deedf62 2971 total_orig[0]++;
871a91ec 2972 switch (info->rhs_code)
2973 {
2974 case BIT_AND_EXPR:
2975 case BIT_IOR_EXPR:
2976 case BIT_XOR_EXPR:
2977 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
2978 break;
2979 default:
2980 break;
2981 }
2982 total_orig[0] *= group->stores.length ();
2983
2984 FOR_EACH_VEC_ELT (group->stores, i, info)
9deedf62 2985 {
2986 total_new[0] += count_multiple_uses (info);
2987 total_orig[0] += (info->bit_not_p
2988 + info->ops[0].bit_not_p
2989 + info->ops[1].bit_not_p);
2990 }
871a91ec 2991 }
2992
9991d1d3 2993 if (!allow_unaligned_load)
2994 for (int i = 0; i < 2; ++i)
2995 if (group->load_align[i])
2996 group_load_align = MIN (group_load_align, group->load_align[i]);
2997
3d3e04ac 2998 while (size > 0)
2999 {
9991d1d3 3000 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
902cb3b7 3001 && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
3002 {
3003 /* Skip padding bytes. */
3004 ++try_pos;
3005 size -= BITS_PER_UNIT;
3006 continue;
3007 }
3008
3d3e04ac 3009 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
902cb3b7 3010 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3011 unsigned HOST_WIDE_INT align_bitpos
3012 = (try_bitpos - align_base) & (group_align - 1);
3013 unsigned HOST_WIDE_INT align = group_align;
3014 if (align_bitpos)
3015 align = least_bit_hwi (align_bitpos);
9991d1d3 3016 if (!allow_unaligned_store)
902cb3b7 3017 try_size = MIN (try_size, align);
9991d1d3 3018 if (!allow_unaligned_load)
3019 {
 3020	  /* If we can't do, or don't want to do, unaligned loads in
 3021	     addition to unaligned stores, we need to take the load
 3022	     alignment into account as well.  */
3023 unsigned HOST_WIDE_INT load_align = group_load_align;
3024 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3025 if (align_bitpos)
3026 load_align = least_bit_hwi (align_bitpos);
3027 for (int i = 0; i < 2; ++i)
3028 if (group->load_align[i])
3029 {
e61263f2 3030 align_bitpos
3031 = known_alignment (try_bitpos
3032 - group->stores[0]->bitpos
3033 + group->stores[0]->ops[i].bitpos
3034 - group->load_align_base[i]);
3035 if (align_bitpos & (group_load_align - 1))
9991d1d3 3036 {
3037 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3038 load_align = MIN (load_align, a);
3039 }
3040 }
3041 try_size = MIN (try_size, load_align);
3042 }
902cb3b7 3043 store_immediate_info *info
9991d1d3 3044 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
902cb3b7 3045 if (info)
3046 {
3047 /* If there is just one original statement for the range, see if
3048 we can just reuse the original store which could be even larger
3049 than try_size. */
3050 unsigned HOST_WIDE_INT stmt_end
3051 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
9991d1d3 3052 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3053 stmt_end - try_bitpos);
902cb3b7 3054 if (info && info->bitpos >= try_bitpos)
3055 {
3056 try_size = stmt_end - try_bitpos;
3057 goto found;
3058 }
3059 }
3d3e04ac 3060
902cb3b7 3061 /* Approximate store bitsize for the case when there are no padding
3062 bits. */
3063 while (try_size > size)
3064 try_size /= 2;
3065 /* Now look for whole padding bytes at the end of that bitsize. */
3066 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3067 if (group->mask[try_pos - bytepos + nonmasked - 1]
3068 != (unsigned char) ~0U)
3069 break;
3070 if (nonmasked == 0)
3071 {
3072 /* If entire try_size range is padding, skip it. */
3073 try_pos += try_size / BITS_PER_UNIT;
3074 size -= try_size;
3075 continue;
3076 }
 3077      /* Otherwise try to decrease try_size if the second half, the last 3
 3078	 quarters etc. are padding.  */
3079 nonmasked *= BITS_PER_UNIT;
3080 while (nonmasked <= try_size / 2)
3081 try_size /= 2;
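      /* E.g. (illustrative): if try_size is 32 but only the lowest byte
	 is actually stored, nonmasked ends up as 8 and the halvings
	 above shrink try_size to 8.  */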
9991d1d3 3082 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
902cb3b7 3083 {
3084 /* Now look for whole padding bytes at the start of that bitsize. */
3085 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3086 for (masked = 0; masked < try_bytesize; ++masked)
3087 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
3088 break;
3089 masked *= BITS_PER_UNIT;
3090 gcc_assert (masked < try_size);
3091 if (masked >= try_size / 2)
3092 {
3093 while (masked >= try_size / 2)
3094 {
3095 try_size /= 2;
3096 try_pos += try_size / BITS_PER_UNIT;
3097 size -= try_size;
3098 masked -= try_size;
3099 }
3100 /* Need to recompute the alignment, so just retry at the new
3101 position. */
3102 continue;
3103 }
3104 }
3105
3106 found:
3107 ++ret;
3d3e04ac 3108
902cb3b7 3109 if (split_stores)
3110 {
3111 struct split_store *store
3112 = new split_store (try_pos, try_size, align);
9991d1d3 3113 info = find_constituent_stores (group, &store->orig_stores,
3114 &first, try_bitpos, try_size);
902cb3b7 3115 if (info
3116 && info->bitpos >= try_bitpos
3117 && info->bitpos + info->bitsize <= try_bitpos + try_size)
871a91ec 3118 {
3119 store->orig = true;
3120 any_orig = true;
3121 }
902cb3b7 3122 split_stores->safe_push (store);
3123 }
3124
3125 try_pos += try_size / BITS_PER_UNIT;
3d3e04ac 3126 size -= try_size;
3d3e04ac 3127 }
902cb3b7 3128
871a91ec 3129 if (total_orig)
3130 {
9deedf62 3131 unsigned int i;
3132 struct split_store *store;
871a91ec 3133 /* If we are reusing some original stores and any of the
3134 original SSA_NAMEs had multiple uses, we need to subtract
3135 those now before we add the new ones. */
3136 if (total_new[0] && any_orig)
3137 {
871a91ec 3138 FOR_EACH_VEC_ELT (*split_stores, i, store)
3139 if (store->orig)
3140 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3141 }
3142 total_new[0] += ret; /* The new store. */
3143 store_immediate_info *info = group->stores[0];
3144 if (info->ops[0].base_addr)
9deedf62 3145 total_new[0] += ret;
871a91ec 3146 if (info->ops[1].base_addr)
9deedf62 3147 total_new[0] += ret;
871a91ec 3148 switch (info->rhs_code)
3149 {
3150 case BIT_AND_EXPR:
3151 case BIT_IOR_EXPR:
3152 case BIT_XOR_EXPR:
3153 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3154 break;
3155 default:
3156 break;
3157 }
9deedf62 3158 FOR_EACH_VEC_ELT (*split_stores, i, store)
3159 {
3160 unsigned int j;
3161 bool bit_not_p[3] = { false, false, false };
3162 /* If all orig_stores have certain bit_not_p set, then
3163 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3164 If some orig_stores have certain bit_not_p set, then
3165 we'd use a BIT_XOR_EXPR with a mask and need to account for
3166 it. */
3167 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3168 {
3169 if (info->ops[0].bit_not_p)
3170 bit_not_p[0] = true;
3171 if (info->ops[1].bit_not_p)
3172 bit_not_p[1] = true;
3173 if (info->bit_not_p)
3174 bit_not_p[2] = true;
3175 }
3176 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3177 }
3178
871a91ec 3179 }
3180
902cb3b7 3181 return ret;
3d3e04ac 3182}
3183
9deedf62 3184/* Return the operation through which the operand IDX (if < 2) or
3185 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3186 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
3187 the bits should be xored with mask. */
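/* For example (illustrative): if a split store covers two original
   stores that both have the relevant bit_not_p set, BIT_NOT_EXPR is
   returned and the whole value is inverted; if only one of them has
   it set, BIT_XOR_EXPR is returned together with a MASK whose bits
   are set exactly over that store's region.  */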
3188
3189static enum tree_code
3190invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3191{
3192 unsigned int i;
3193 store_immediate_info *info;
3194 unsigned int cnt = 0;
3195 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3196 {
3197 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3198 if (bit_not_p)
3199 ++cnt;
3200 }
3201 mask = NULL_TREE;
3202 if (cnt == 0)
3203 return NOP_EXPR;
3204 if (cnt == split_store->orig_stores.length ())
3205 return BIT_NOT_EXPR;
3206
3207 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3208 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3209 unsigned char *buf
3210 = XALLOCAVEC (unsigned char, buf_size);
3211 memset (buf, ~0U, buf_size);
3212 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3213 {
3214 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3215 if (!bit_not_p)
3216 continue;
3217 /* Clear regions with bit_not_p and invert afterwards, rather than
3218 clear regions with !bit_not_p, so that gaps in between stores aren't
3219 set in the mask. */
3220 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3221 unsigned int pos_in_buffer = 0;
3222 if (info->bitpos < try_bitpos)
3223 {
3224 gcc_assert (info->bitpos + bitsize > try_bitpos);
3225 bitsize -= (try_bitpos - info->bitpos);
3226 }
3227 else
3228 pos_in_buffer = info->bitpos - try_bitpos;
3229 if (pos_in_buffer + bitsize > split_store->size)
3230 bitsize = split_store->size - pos_in_buffer;
3231 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3232 if (BYTES_BIG_ENDIAN)
3233 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3234 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3235 else
3236 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3237 }
3238 for (unsigned int i = 0; i < buf_size; ++i)
3239 buf[i] = ~buf[i];
3240 mask = native_interpret_expr (int_type, buf, buf_size);
3241 return BIT_XOR_EXPR;
3242}
3243
3d3e04ac 3244/* Given a merged store group GROUP, output the widened version of it.
3245 The store chain is against the base object BASE.
3246 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3247 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3248 Make sure that the number of statements output is less than the number of
3249 original statements. If a better sequence is possible emit it and
3250 return true. */
3251
3252bool
f85e7cb7 3253imm_store_chain_info::output_merged_store (merged_store_group *group)
3d3e04ac 3254{
902cb3b7 3255 unsigned HOST_WIDE_INT start_byte_pos
3256 = group->bitregion_start / BITS_PER_UNIT;
3d3e04ac 3257
3258 unsigned int orig_num_stmts = group->stores.length ();
3259 if (orig_num_stmts < 2)
3260 return false;
3261
902cb3b7 3262 auto_vec<struct split_store *, 32> split_stores;
3d3e04ac 3263 split_stores.create (0);
9991d1d3 3264 bool allow_unaligned_store
902cb3b7 3265 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
9991d1d3 3266 bool allow_unaligned_load = allow_unaligned_store;
3267 if (allow_unaligned_store)
902cb3b7 3268 {
3269 /* If unaligned stores are allowed, see how many stores we'd emit
 3270	 for unaligned and how many for aligned stores, and only use
 3271	 unaligned stores if that results in fewer stores.  */
9991d1d3 3272 unsigned aligned_cnt
871a91ec 3273 = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
9991d1d3 3274 unsigned unaligned_cnt
871a91ec 3275 = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
902cb3b7 3276 if (aligned_cnt <= unaligned_cnt)
9991d1d3 3277 allow_unaligned_store = false;
902cb3b7 3278 }
871a91ec 3279 unsigned total_orig, total_new;
9991d1d3 3280 split_group (group, allow_unaligned_store, allow_unaligned_load,
871a91ec 3281 &split_stores, &total_orig, &total_new);
902cb3b7 3282
3283 if (split_stores.length () >= orig_num_stmts)
3284 {
3285 /* We didn't manage to reduce the number of statements. Bail out. */
3286 if (dump_file && (dump_flags & TDF_DETAILS))
871a91ec 3287 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3288 " Not profitable to emit new sequence.\n",
3289 orig_num_stmts);
902cb3b7 3290 return false;
3291 }
871a91ec 3292 if (total_orig <= total_new)
3293 {
3294 /* If number of estimated new statements is above estimated original
3295 statements, bail out too. */
3296 if (dump_file && (dump_flags & TDF_DETAILS))
3297 fprintf (dump_file, "Estimated number of original stmts (%u)"
3298 " not larger than estimated number of new"
3299 " stmts (%u).\n",
3300 total_orig, total_new);
509ab8cd 3301 return false;
871a91ec 3302 }
3d3e04ac 3303
3304 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
3305 gimple_seq seq = NULL;
3d3e04ac 3306 tree last_vdef, new_vuse;
3307 last_vdef = gimple_vdef (group->last_stmt);
3308 new_vuse = gimple_vuse (group->last_stmt);
509ab8cd 3309 tree bswap_res = NULL_TREE;
3310
3311 if (group->stores[0]->rhs_code == LROTATE_EXPR
3312 || group->stores[0]->rhs_code == NOP_EXPR)
3313 {
3314 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3315 gimple *ins_stmt = group->stores[0]->ins_stmt;
3316 struct symbolic_number *n = &group->stores[0]->n;
3317 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3318
3319 switch (n->range)
3320 {
3321 case 16:
3322 load_type = bswap_type = uint16_type_node;
3323 break;
3324 case 32:
3325 load_type = uint32_type_node;
3326 if (bswap)
3327 {
3328 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3329 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3330 }
3331 break;
3332 case 64:
3333 load_type = uint64_type_node;
3334 if (bswap)
3335 {
3336 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3337 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3338 }
3339 break;
3340 default:
3341 gcc_unreachable ();
3342 }
3343
 3344      /* If each load has the vuse of the corresponding store,
 3345	 we've checked the aliasing already in try_coalesce_bswap and
 3346	 want to sink the load into SEQ, so we need to use NEW_VUSE
 3347	 on the load.  */
58cff6a2 3348 if (n->base_addr)
509ab8cd 3349 {
58cff6a2 3350 if (n->vuse == NULL)
3351 {
3352 n->vuse = new_vuse;
3353 ins_stmt = NULL;
3354 }
3355 else
 3356	    /* Update vuse in case it was changed by output_merged_stores.  */
3357 n->vuse = gimple_vuse (ins_stmt);
509ab8cd 3358 }
3359 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3360 bswap_type, load_type, n, bswap);
3361 gcc_assert (bswap_res);
3362 }
3d3e04ac 3363
3364 gimple *stmt = NULL;
3d3e04ac 3365 split_store *split_store;
3366 unsigned int i;
9991d1d3 3367 auto_vec<gimple *, 32> orig_stmts;
509ab8cd 3368 gimple_seq this_seq;
3369 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
427223f1 3370 is_gimple_mem_ref_addr, NULL_TREE);
509ab8cd 3371 gimple_seq_add_seq_without_update (&seq, this_seq);
9991d1d3 3372
3373 tree load_addr[2] = { NULL_TREE, NULL_TREE };
3374 gimple_seq load_seq[2] = { NULL, NULL };
3375 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
3376 for (int j = 0; j < 2; ++j)
3377 {
3378 store_operand_info &op = group->stores[0]->ops[j];
3379 if (op.base_addr == NULL_TREE)
3380 continue;
3381
3382 store_immediate_info *infol = group->stores.last ();
3383 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
3384 {
	  /* We can't pick the location randomly; while we've verified
	     all the loads have the same vuse, they can still be in
	     different basic blocks and we need to pick the one from
	     the last bb:
3388 int x = q[0];
3389 if (x == N) return;
3390 int y = q[1];
3391 p[0] = x;
3392 p[1] = y;
3393 otherwise if we put the wider load at the q[0] load, we might
3394 segfault if q[1] is not mapped. */
3395 basic_block bb = gimple_bb (op.stmt);
3396 gimple *ostmt = op.stmt;
3397 store_immediate_info *info;
3398 FOR_EACH_VEC_ELT (group->stores, i, info)
3399 {
3400 gimple *tstmt = info->ops[j].stmt;
3401 basic_block tbb = gimple_bb (tstmt);
3402 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
3403 {
3404 ostmt = tstmt;
3405 bb = tbb;
3406 }
3407 }
3408 load_gsi[j] = gsi_for_stmt (ostmt);
9991d1d3 3409 load_addr[j]
3410 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3411 &load_seq[j], is_gimple_mem_ref_addr,
3412 NULL_TREE);
3413 }
3414 else if (operand_equal_p (base_addr, op.base_addr, 0))
3415 load_addr[j] = addr;
3416 else
ad3e5b2f 3417 {
ad3e5b2f 3418 load_addr[j]
3419 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3420 &this_seq, is_gimple_mem_ref_addr,
3421 NULL_TREE);
3422 gimple_seq_add_seq_without_update (&seq, this_seq);
3423 }
9991d1d3 3424 }
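  /* E.g. (illustrative) for a chain like
       p[0] = q[0] & r[0];
       p[1] = q[1] & r[1];
     the loop above computes load_addr[0] from q's address and
     load_addr[1] from r's address, reusing addr for an operand whose
     base equals the store's base.  */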
3425
3d3e04ac 3426 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3427 {
3428 unsigned HOST_WIDE_INT try_size = split_store->size;
3429 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
3430 unsigned HOST_WIDE_INT align = split_store->align;
902cb3b7 3431 tree dest, src;
3432 location_t loc;
3433 if (split_store->orig)
3434 {
3435 /* If there is just a single constituent store which covers
3436 the whole area, just reuse the lhs and rhs. */
9991d1d3 3437 gimple *orig_stmt = split_store->orig_stores[0]->stmt;
3438 dest = gimple_assign_lhs (orig_stmt);
3439 src = gimple_assign_rhs1 (orig_stmt);
3440 loc = gimple_location (orig_stmt);
902cb3b7 3441 }
3442 else
3443 {
9991d1d3 3444 store_immediate_info *info;
3445 unsigned short clique, base;
3446 unsigned int k;
3447 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3448 orig_stmts.safe_push (info->stmt);
902cb3b7 3449 tree offset_type
9991d1d3 3450 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
3451 loc = get_location_for_stmts (orig_stmts);
3452 orig_stmts.truncate (0);
902cb3b7 3453
3454 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
3455 int_type = build_aligned_type (int_type, align);
3456 dest = fold_build2 (MEM_REF, int_type, addr,
3457 build_int_cst (offset_type, try_pos));
9991d1d3 3458 if (TREE_CODE (dest) == MEM_REF)
3459 {
3460 MR_DEPENDENCE_CLIQUE (dest) = clique;
3461 MR_DEPENDENCE_BASE (dest) = base;
3462 }
3463
509ab8cd 3464 tree mask = integer_zero_node;
3465 if (!bswap_res)
3466 mask = native_interpret_expr (int_type,
3467 group->mask + try_pos
3468 - start_byte_pos,
3469 group->buf_size);
9991d1d3 3470
3471 tree ops[2];
3472 for (int j = 0;
3473 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
3474 ++j)
3475 {
3476 store_operand_info &op = split_store->orig_stores[0]->ops[j];
509ab8cd 3477 if (bswap_res)
3478 ops[j] = bswap_res;
3479 else if (op.base_addr)
9991d1d3 3480 {
3481 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3482 orig_stmts.safe_push (info->ops[j].stmt);
3483
3484 offset_type = get_alias_type_for_stmts (orig_stmts, true,
3485 &clique, &base);
3486 location_t load_loc = get_location_for_stmts (orig_stmts);
3487 orig_stmts.truncate (0);
3488
3489 unsigned HOST_WIDE_INT load_align = group->load_align[j];
3490 unsigned HOST_WIDE_INT align_bitpos
e61263f2 3491 = known_alignment (try_pos * BITS_PER_UNIT
3492 - split_store->orig_stores[0]->bitpos
3493 + op.bitpos);
3494 if (align_bitpos & (load_align - 1))
9991d1d3 3495 load_align = least_bit_hwi (align_bitpos);
3496
3497 tree load_int_type
3498 = build_nonstandard_integer_type (try_size, UNSIGNED);
3499 load_int_type
3500 = build_aligned_type (load_int_type, load_align);
3501
e61263f2 3502 poly_uint64 load_pos
3503 = exact_div (try_pos * BITS_PER_UNIT
3504 - split_store->orig_stores[0]->bitpos
3505 + op.bitpos,
3506 BITS_PER_UNIT);
9991d1d3 3507 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
3508 build_int_cst (offset_type, load_pos));
3509 if (TREE_CODE (ops[j]) == MEM_REF)
3510 {
3511 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
3512 MR_DEPENDENCE_BASE (ops[j]) = base;
3513 }
3514 if (!integer_zerop (mask))
		  /* The load might load some bits (that will be masked
		     off later on) uninitialized; avoid -W*uninitialized
		     warnings in that case.  */
3518 TREE_NO_WARNING (ops[j]) = 1;
3519
3520 stmt = gimple_build_assign (make_ssa_name (int_type),
3521 ops[j]);
3522 gimple_set_location (stmt, load_loc);
3523 if (gsi_bb (load_gsi[j]))
3524 {
3525 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
3526 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
3527 }
3528 else
3529 {
3530 gimple_set_vuse (stmt, new_vuse);
3531 gimple_seq_add_stmt_without_update (&seq, stmt);
3532 }
3533 ops[j] = gimple_assign_lhs (stmt);
9deedf62 3534 tree xor_mask;
3535 enum tree_code inv_op
3536 = invert_op (split_store, j, int_type, xor_mask);
3537 if (inv_op != NOP_EXPR)
c35548ce 3538 {
3539 stmt = gimple_build_assign (make_ssa_name (int_type),
9deedf62 3540 inv_op, ops[j], xor_mask);
c35548ce 3541 gimple_set_location (stmt, load_loc);
3542 ops[j] = gimple_assign_lhs (stmt);
3543
3544 if (gsi_bb (load_gsi[j]))
3545 gimple_seq_add_stmt_without_update (&load_seq[j],
3546 stmt);
3547 else
3548 gimple_seq_add_stmt_without_update (&seq, stmt);
3549 }
9991d1d3 3550 }
3551 else
3552 ops[j] = native_interpret_expr (int_type,
3553 group->val + try_pos
3554 - start_byte_pos,
3555 group->buf_size);
3556 }
3557
3558 switch (split_store->orig_stores[0]->rhs_code)
3559 {
3560 case BIT_AND_EXPR:
3561 case BIT_IOR_EXPR:
3562 case BIT_XOR_EXPR:
3563 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3564 {
3565 tree rhs1 = gimple_assign_rhs1 (info->stmt);
3566 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
3567 }
3568 location_t bit_loc;
3569 bit_loc = get_location_for_stmts (orig_stmts);
3570 orig_stmts.truncate (0);
3571
3572 stmt
3573 = gimple_build_assign (make_ssa_name (int_type),
3574 split_store->orig_stores[0]->rhs_code,
3575 ops[0], ops[1]);
3576 gimple_set_location (stmt, bit_loc);
3577 /* If there is just one load and there is a separate
3578 load_seq[0], emit the bitwise op right after it. */
3579 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
3580 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      /* Otherwise, if at least one load is in seq, we need to
		 emit the bitwise op right before the store.  If there
		 are two loads and they are emitted somewhere else, it
		 would be better to emit the bitwise op as early as
		 possible; we don't track where that would be possible
		 right now though.  */
3587 else
3588 gimple_seq_add_stmt_without_update (&seq, stmt);
3589 src = gimple_assign_lhs (stmt);
9deedf62 3590 tree xor_mask;
3591 enum tree_code inv_op;
3592 inv_op = invert_op (split_store, 2, int_type, xor_mask);
3593 if (inv_op != NOP_EXPR)
832a73b9 3594 {
3595 stmt = gimple_build_assign (make_ssa_name (int_type),
9deedf62 3596 inv_op, src, xor_mask);
832a73b9 3597 gimple_set_location (stmt, bit_loc);
3598 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
3599 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
3600 else
3601 gimple_seq_add_stmt_without_update (&seq, stmt);
3602 src = gimple_assign_lhs (stmt);
3603 }
9991d1d3 3604 break;
509ab8cd 3605 case LROTATE_EXPR:
3606 case NOP_EXPR:
3607 src = ops[0];
3608 if (!is_gimple_val (src))
3609 {
3610 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
3611 src);
3612 gimple_seq_add_stmt_without_update (&seq, stmt);
3613 src = gimple_assign_lhs (stmt);
3614 }
3615 if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
3616 {
3617 stmt = gimple_build_assign (make_ssa_name (int_type),
3618 NOP_EXPR, src);
3619 gimple_seq_add_stmt_without_update (&seq, stmt);
3620 src = gimple_assign_lhs (stmt);
3621 }
3622 break;
9991d1d3 3623 default:
3624 src = ops[0];
3625 break;
3626 }
3627
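	  /* Sketch of the read-modify-write sequence built below when
	     mask is nonzero (mask has 1-bits at positions no original
	     store wrote, which must therefore be preserved):
	       tem = *dest;          load the existing destination bits
	       tem = tem & mask;     keep only the bits to be preserved
	       src = src & ~mask;    clear those bits in the new value
	       src = tem | src;      combine them
	     src is then stored by the common code after this block.  */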
902cb3b7 3628 if (!integer_zerop (mask))
3629 {
3630 tree tem = make_ssa_name (int_type);
3631 tree load_src = unshare_expr (dest);
	      /* The load might load some or all bits uninitialized;
		 avoid -W*uninitialized warnings in that case.
		 As an optimization, if all the bits are provably
		 uninitialized (no stores at all yet, or the previous
		 store was a CLOBBER), it would be nice to optimize away
		 the load and replace it e.g. with 0.  */
3638 TREE_NO_WARNING (load_src) = 1;
3639 stmt = gimple_build_assign (tem, load_src);
3640 gimple_set_location (stmt, loc);
3641 gimple_set_vuse (stmt, new_vuse);
3642 gimple_seq_add_stmt_without_update (&seq, stmt);
3643
3644 /* FIXME: If there is a single chunk of zero bits in mask,
3645 perhaps use BIT_INSERT_EXPR instead? */
3646 stmt = gimple_build_assign (make_ssa_name (int_type),
3647 BIT_AND_EXPR, tem, mask);
3648 gimple_set_location (stmt, loc);
3649 gimple_seq_add_stmt_without_update (&seq, stmt);
3650 tem = gimple_assign_lhs (stmt);
3651
9991d1d3 3652 if (TREE_CODE (src) == INTEGER_CST)
3653 src = wide_int_to_tree (int_type,
3654 wi::bit_and_not (wi::to_wide (src),
3655 wi::to_wide (mask)));
3656 else
3657 {
3658 tree nmask
3659 = wide_int_to_tree (int_type,
3660 wi::bit_not (wi::to_wide (mask)));
3661 stmt = gimple_build_assign (make_ssa_name (int_type),
3662 BIT_AND_EXPR, src, nmask);
3663 gimple_set_location (stmt, loc);
3664 gimple_seq_add_stmt_without_update (&seq, stmt);
3665 src = gimple_assign_lhs (stmt);
3666 }
902cb3b7 3667 stmt = gimple_build_assign (make_ssa_name (int_type),
3668 BIT_IOR_EXPR, tem, src);
3669 gimple_set_location (stmt, loc);
3670 gimple_seq_add_stmt_without_update (&seq, stmt);
3671 src = gimple_assign_lhs (stmt);
3672 }
3673 }
3d3e04ac 3674
3675 stmt = gimple_build_assign (dest, src);
3676 gimple_set_location (stmt, loc);
3677 gimple_set_vuse (stmt, new_vuse);
3678 gimple_seq_add_stmt_without_update (&seq, stmt);
3679
3d3e04ac 3680 tree new_vdef;
3681 if (i < split_stores.length () - 1)
902cb3b7 3682 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
3d3e04ac 3683 else
3684 new_vdef = last_vdef;
3685
3686 gimple_set_vdef (stmt, new_vdef);
3687 SSA_NAME_DEF_STMT (new_vdef) = stmt;
3688 new_vuse = new_vdef;
3689 }
3690
3691 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3692 delete split_store;
3693
3d3e04ac 3694 gcc_assert (seq);
3695 if (dump_file)
3696 {
3697 fprintf (dump_file,
3698 "New sequence of %u stmts to replace old one of %u stmts\n",
902cb3b7 3699 split_stores.length (), orig_num_stmts);
3d3e04ac 3700 if (dump_flags & TDF_DETAILS)
3701 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
3702 }
3703 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
9991d1d3 3704 for (int j = 0; j < 2; ++j)
3705 if (load_seq[j])
3706 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
3d3e04ac 3707
3708 return true;
3709}
3710
3711/* Process the merged_store_group objects created in the coalescing phase.
3712 The stores are all against the base object BASE.
3713 Try to output the widened stores and delete the original statements if
3714 successful. Return true iff any changes were made. */
3715
3716bool
f85e7cb7 3717imm_store_chain_info::output_merged_stores ()
3d3e04ac 3718{
3719 unsigned int i;
3720 merged_store_group *merged_store;
3721 bool ret = false;
3722 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
3723 {
f85e7cb7 3724 if (output_merged_store (merged_store))
3d3e04ac 3725 {
3726 unsigned int j;
3727 store_immediate_info *store;
3728 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
3729 {
3730 gimple *stmt = store->stmt;
3731 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3732 gsi_remove (&gsi, true);
3733 if (stmt != merged_store->last_stmt)
3734 {
3735 unlink_stmt_vdef (stmt);
3736 release_defs (stmt);
3737 }
3738 }
3739 ret = true;
3740 }
3741 }
3742 if (ret && dump_file)
3743 fprintf (dump_file, "Merging successful!\n");
3744
3745 return ret;
3746}
3747
3748/* Coalesce the store_immediate_info objects recorded against the base object
3749 BASE in the first phase and output them.
3750 Delete the allocated structures.
3751 Return true if any changes were made. */
3752
3753bool
f85e7cb7 3754imm_store_chain_info::terminate_and_process_chain ()
3d3e04ac 3755{
3756 /* Process store chain. */
3757 bool ret = false;
3758 if (m_store_info.length () > 1)
3759 {
3760 ret = coalesce_immediate_stores ();
3761 if (ret)
f85e7cb7 3762 ret = output_merged_stores ();
3d3e04ac 3763 }
3764
3765 /* Delete all the entries we allocated ourselves. */
3766 store_immediate_info *info;
3767 unsigned int i;
3768 FOR_EACH_VEC_ELT (m_store_info, i, info)
3769 delete info;
3770
3771 merged_store_group *merged_info;
3772 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
3773 delete merged_info;
3774
3775 return ret;
3776}
3777
3778/* Return true iff LHS is a destination potentially interesting for
3779 store merging. In practice these are the codes that get_inner_reference
3780 can process. */
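/* For example (illustrative): destinations like s.f, a[i], *p and
   bit-field members have TREE_CODEs in the list below (COMPONENT_REF,
   ARRAY_REF, MEM_REF, BIT_FIELD_REF), while a store to a plain
   variable DECL matches none of them and is rejected.  */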
3781
3782static bool
3783lhs_valid_for_store_merging_p (tree lhs)
3784{
3785 tree_code code = TREE_CODE (lhs);
3786
3787 if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
3788 || code == COMPONENT_REF || code == BIT_FIELD_REF)
3789 return true;
3790
3791 return false;
3792}
3793
3794/* Return true if the tree RHS is a constant we want to consider
3795 during store merging. In practice accept all codes that
3796 native_encode_expr accepts. */
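/* E.g. (illustrative) an INTEGER_CST like 123 or a REAL_CST qualifies,
   provided its mode has a compile-time constant size, since
   native_encode_expr can then produce its byte representation.  */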
3797
3798static bool
3799rhs_valid_for_store_merging_p (tree rhs)
3800{
52acb7ae 3801 unsigned HOST_WIDE_INT size;
3802 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
3803 && native_encode_expr (rhs, NULL, size) != 0);
3d3e04ac 3804}
3805
9991d1d3 3806/* If MEM is a memory reference usable for store merging (either as
3807 store destination or for loads), return the non-NULL base_addr
3808 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL, but *PBITSIZE is still valid even in that
   case.  */
3811
3812static tree
e61263f2 3813mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
3814 poly_uint64 *pbitpos,
3815 poly_uint64 *pbitregion_start,
3816 poly_uint64 *pbitregion_end)
9991d1d3 3817{
e61263f2 3818 poly_int64 bitsize, bitpos;
3819 poly_uint64 bitregion_start = 0, bitregion_end = 0;
9991d1d3 3820 machine_mode mode;
3821 int unsignedp = 0, reversep = 0, volatilep = 0;
3822 tree offset;
3823 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
3824 &unsignedp, &reversep, &volatilep);
3825 *pbitsize = bitsize;
e61263f2 3826 if (known_eq (bitsize, 0))
9991d1d3 3827 return NULL_TREE;
3828
3829 if (TREE_CODE (mem) == COMPONENT_REF
3830 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
3831 {
3832 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
e61263f2 3833 if (maybe_ne (bitregion_end, 0U))
3834 bitregion_end += 1;
9991d1d3 3835 }
3836
3837 if (reversep)
3838 return NULL_TREE;
3839
3840 /* We do not want to rewrite TARGET_MEM_REFs. */
3841 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
3842 return NULL_TREE;
3843 /* In some cases get_inner_reference may return a
3844 MEM_REF [ptr + byteoffset]. For the purposes of this pass
3845 canonicalize the base_addr to MEM_REF [ptr] and take
3846 byteoffset into account in the bitpos. This occurs in
3847 PR 23684 and this way we can catch more chains. */
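  /* For instance (illustrative), a base of MEM_REF [ptr + 2B] with
     bitpos 8 is canonicalized to base MEM_REF [ptr] with bitpos 24,
     so stores at different constant offsets from the same pointer can
     join a single chain.  */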
3848 else if (TREE_CODE (base_addr) == MEM_REF)
3849 {
e61263f2 3850 poly_offset_int byte_off = mem_ref_offset (base_addr);
3851 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
9991d1d3 3852 bit_off += bitpos;
e61263f2 3853 if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
9991d1d3 3854 {
e61263f2 3855 if (maybe_ne (bitregion_end, 0U))
9991d1d3 3856 {
3857 bit_off = byte_off << LOG2_BITS_PER_UNIT;
3858 bit_off += bitregion_start;
e61263f2 3859 if (bit_off.to_uhwi (&bitregion_start))
9991d1d3 3860 {
9991d1d3 3861 bit_off = byte_off << LOG2_BITS_PER_UNIT;
3862 bit_off += bitregion_end;
e61263f2 3863 if (!bit_off.to_uhwi (&bitregion_end))
9991d1d3 3864 bitregion_end = 0;
3865 }
3866 else
3867 bitregion_end = 0;
3868 }
3869 }
3870 else
3871 return NULL_TREE;
3872 base_addr = TREE_OPERAND (base_addr, 0);
3873 }
3874 /* get_inner_reference returns the base object, get at its
3875 address now. */
3876 else
3877 {
e61263f2 3878 if (maybe_lt (bitpos, 0))
9991d1d3 3879 return NULL_TREE;
3880 base_addr = build_fold_addr_expr (base_addr);
3881 }
3882
e61263f2 3883 if (known_eq (bitregion_end, 0U))
9991d1d3 3884 {
e61263f2 3885 bitregion_start = round_down_to_byte_boundary (bitpos);
3886 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
9991d1d3 3887 }
3888
3889 if (offset != NULL_TREE)
3890 {
3891 /* If the access is variable offset then a base decl has to be
3892 address-taken to be able to emit pointer-based stores to it.
3893 ??? We might be able to get away with re-using the original
3894 base up to the first variable part and then wrapping that inside
3895 a BIT_FIELD_REF. */
3896 tree base = get_base_address (base_addr);
3897 if (! base
3898 || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
3899 return NULL_TREE;
3900
3901 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
3902 base_addr, offset);
3903 }
3904
3905 *pbitsize = bitsize;
3906 *pbitpos = bitpos;
3907 *pbitregion_start = bitregion_start;
3908 *pbitregion_end = bitregion_end;
3909 return base_addr;
3910}
3911
3912/* Return true if STMT is a load that can be used for store merging.
3913 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
3914 BITREGION_END are properties of the corresponding store. */
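/* For instance (illustrative), given the store  p[1] = ~q[1];  with
   GIMPLE  _1 = q[1];  _2 = ~_1;  the BIT_NOT_EXPR case below recurses
   on _1's definition, fills *OP from the q[1] load and toggles
   op->bit_not_p.  */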
3915
3916static bool
3917handled_load (gimple *stmt, store_operand_info *op,
e61263f2 3918 poly_uint64 bitsize, poly_uint64 bitpos,
3919 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
9991d1d3 3920{
c35548ce 3921 if (!is_gimple_assign (stmt))
9991d1d3 3922 return false;
c35548ce 3923 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
3924 {
3925 tree rhs1 = gimple_assign_rhs1 (stmt);
3926 if (TREE_CODE (rhs1) == SSA_NAME
c35548ce 3927 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
3928 bitregion_start, bitregion_end))
3929 {
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should
	     have been optimized earlier, but if allowed here, would
	     confuse the multiple uses counting.  */
3933 if (op->bit_not_p)
3934 return false;
c35548ce 3935 op->bit_not_p = !op->bit_not_p;
3936 return true;
3937 }
3938 return false;
3939 }
3940 if (gimple_vuse (stmt)
3941 && gimple_assign_load_p (stmt)
9991d1d3 3942 && !stmt_can_throw_internal (stmt)
3943 && !gimple_has_volatile_ops (stmt))
3944 {
3945 tree mem = gimple_assign_rhs1 (stmt);
3946 op->base_addr
3947 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
3948 &op->bitregion_start,
3949 &op->bitregion_end);
3950 if (op->base_addr != NULL_TREE
e61263f2 3951 && known_eq (op->bitsize, bitsize)
3952 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
3953 && known_ge (op->bitpos - op->bitregion_start,
3954 bitpos - bitregion_start)
3955 && known_ge (op->bitregion_end - op->bitpos,
3956 bitregion_end - bitpos))
9991d1d3 3957 {
3958 op->stmt = stmt;
3959 op->val = mem;
c35548ce 3960 op->bit_not_p = false;
9991d1d3 3961 return true;
3962 }
3963 }
3964 return false;
3965}
3966
3967/* Record the store STMT for store merging optimization if it can be
3968 optimized. */
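/* E.g. (illustrative) for  s.a = 1; s.b = 2;  the first store starts a
   new chain keyed on s's address and the second one is appended to it;
   a store that cannot be described this way terminates the aliasing
   chains instead.  */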
3969
3970void
3971pass_store_merging::process_store (gimple *stmt)
3972{
3973 tree lhs = gimple_assign_lhs (stmt);
3974 tree rhs = gimple_assign_rhs1 (stmt);
e61263f2 3975 poly_uint64 bitsize, bitpos;
3976 poly_uint64 bitregion_start, bitregion_end;
9991d1d3 3977 tree base_addr
3978 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
3979 &bitregion_start, &bitregion_end);
e61263f2 3980 if (known_eq (bitsize, 0U))
9991d1d3 3981 return;
3982
3983 bool invalid = (base_addr == NULL_TREE
e61263f2 3984 || (maybe_gt (bitsize,
3985 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
3986 && (TREE_CODE (rhs) != INTEGER_CST)));
9991d1d3 3987 enum tree_code rhs_code = ERROR_MARK;
832a73b9 3988 bool bit_not_p = false;
509ab8cd 3989 struct symbolic_number n;
3990 gimple *ins_stmt = NULL;
9991d1d3 3991 store_operand_info ops[2];
3992 if (invalid)
3993 ;
3994 else if (rhs_valid_for_store_merging_p (rhs))
3995 {
3996 rhs_code = INTEGER_CST;
3997 ops[0].val = rhs;
3998 }
871a91ec 3999 else if (TREE_CODE (rhs) != SSA_NAME)
9991d1d3 4000 invalid = true;
4001 else
4002 {
4003 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4004 if (!is_gimple_assign (def_stmt))
4005 invalid = true;
4006 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4007 bitregion_start, bitregion_end))
4008 rhs_code = MEM_REF;
832a73b9 4009 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4010 {
4011 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4012 if (TREE_CODE (rhs1) == SSA_NAME
4013 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4014 {
4015 bit_not_p = true;
4016 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4017 }
4018 }
4019 if (rhs_code == ERROR_MARK && !invalid)
9991d1d3 4020 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4021 {
4022 case BIT_AND_EXPR:
4023 case BIT_IOR_EXPR:
4024 case BIT_XOR_EXPR:
4025 tree rhs1, rhs2;
4026 rhs1 = gimple_assign_rhs1 (def_stmt);
4027 rhs2 = gimple_assign_rhs2 (def_stmt);
4028 invalid = true;
871a91ec 4029 if (TREE_CODE (rhs1) != SSA_NAME)
9991d1d3 4030 break;
4031 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4032 if (!is_gimple_assign (def_stmt1)
4033 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4034 bitregion_start, bitregion_end))
4035 break;
4036 if (rhs_valid_for_store_merging_p (rhs2))
4037 ops[1].val = rhs2;
871a91ec 4038 else if (TREE_CODE (rhs2) != SSA_NAME)
9991d1d3 4039 break;
4040 else
4041 {
4042 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4043 if (!is_gimple_assign (def_stmt2))
4044 break;
4045 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4046 bitregion_start, bitregion_end))
4047 break;
4048 }
4049 invalid = false;
4050 break;
4051 default:
4052 invalid = true;
4053 break;
4054 }
e61263f2 4055 unsigned HOST_WIDE_INT const_bitsize;
4056 if (bitsize.is_constant (&const_bitsize)
4057 && multiple_p (const_bitsize, BITS_PER_UNIT)
4058 && multiple_p (bitpos, BITS_PER_UNIT)
4059 && const_bitsize <= 64
509ab8cd 4060 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
4061 {
4062 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4063 if (ins_stmt)
4064 {
4065 uint64_t nn = n.n;
4066 for (unsigned HOST_WIDE_INT i = 0;
e61263f2 4067 i < const_bitsize;
4068 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
509ab8cd 4069 if ((nn & MARKER_MASK) == 0
4070 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4071 {
4072 ins_stmt = NULL;
4073 break;
4074 }
4075 if (ins_stmt)
4076 {
4077 if (invalid)
4078 {
4079 rhs_code = LROTATE_EXPR;
4080 ops[0].base_addr = NULL_TREE;
4081 ops[1].base_addr = NULL_TREE;
4082 }
4083 invalid = false;
4084 }
4085 }
4086 }
9991d1d3 4087 }
4088
e61263f2 4089 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4090 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4091 if (invalid
4092 || !bitsize.is_constant (&const_bitsize)
4093 || !bitpos.is_constant (&const_bitpos)
4094 || !bitregion_start.is_constant (&const_bitregion_start)
4095 || !bitregion_end.is_constant (&const_bitregion_end))
9991d1d3 4096 {
c35548ce 4097 terminate_all_aliasing_chains (NULL, stmt);
9991d1d3 4098 return;
4099 }
4100
509ab8cd 4101 if (!ins_stmt)
4102 memset (&n, 0, sizeof (n));
4103
c35548ce 4104 struct imm_store_chain_info **chain_info = NULL;
4105 if (base_addr)
4106 chain_info = m_stores.get (base_addr);
4107
9991d1d3 4108 store_immediate_info *info;
4109 if (chain_info)
4110 {
4111 unsigned int ord = (*chain_info)->m_store_info.length ();
e61263f2 4112 info = new store_immediate_info (const_bitsize, const_bitpos,
4113 const_bitregion_start,
4114 const_bitregion_end,
4115 stmt, ord, rhs_code, n, ins_stmt,
832a73b9 4116 bit_not_p, ops[0], ops[1]);
9991d1d3 4117 if (dump_file && (dump_flags & TDF_DETAILS))
4118 {
4119 fprintf (dump_file, "Recording immediate store from stmt:\n");
4120 print_gimple_stmt (dump_file, stmt, 0);
4121 }
4122 (*chain_info)->m_store_info.safe_push (info);
c35548ce 4123 terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain, terminate
	 and process the chain now.  */
4126 if ((*chain_info)->m_store_info.length ()
4127 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
4128 {
4129 if (dump_file && (dump_flags & TDF_DETAILS))
4130 fprintf (dump_file,
4131 "Reached maximum number of statements to merge:\n");
4132 terminate_and_release_chain (*chain_info);
4133 }
4134 return;
4135 }
4136
4137 /* Store aliases any existing chain? */
c35548ce 4138 terminate_all_aliasing_chains (NULL, stmt);
9991d1d3 4139 /* Start a new chain. */
4140 struct imm_store_chain_info *new_chain
4141 = new imm_store_chain_info (m_stores_head, base_addr);
e61263f2 4142 info = new store_immediate_info (const_bitsize, const_bitpos,
4143 const_bitregion_start,
4144 const_bitregion_end,
4145 stmt, 0, rhs_code, n, ins_stmt,
832a73b9 4146 bit_not_p, ops[0], ops[1]);
9991d1d3 4147 new_chain->m_store_info.safe_push (info);
4148 m_stores.put (base_addr, new_chain);
4149 if (dump_file && (dump_flags & TDF_DETAILS))
4150 {
4151 fprintf (dump_file, "Starting new chain with statement:\n");
4152 print_gimple_stmt (dump_file, stmt, 0);
4153 fprintf (dump_file, "The base object is:\n");
4154 print_generic_expr (dump_file, base_addr);
4155 fprintf (dump_file, "\n");
4156 }
4157}
4158
/* Entry point for the pass.  Go over each basic block recording chains
   of immediate stores.  Upon encountering a terminating statement (one
   that aliases or clobbers a recorded chain, as handled by
   terminate_all_aliasing_chains), process the recorded stores and emit
   the widened variants.  */
3d3e04ac 4163
4164unsigned int
4165pass_store_merging::execute (function *fun)
4166{
4167 basic_block bb;
4168 hash_set<gimple *> orig_stmts;
4169
509ab8cd 4170 calculate_dominance_info (CDI_DOMINATORS);
4171
3d3e04ac 4172 FOR_EACH_BB_FN (bb, fun)
4173 {
4174 gimple_stmt_iterator gsi;
4175 unsigned HOST_WIDE_INT num_statements = 0;
      /* Count the non-debug statements, stopping at two; basic blocks
	 with fewer than two such statements have nothing to merge.  */
4179 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4180 {
4181 if (is_gimple_debug (gsi_stmt (gsi)))
4182 continue;
4183
63eabc9b 4184 if (++num_statements >= 2)
3d3e04ac 4185 break;
4186 }
4187
4188 if (num_statements < 2)
4189 continue;
4190
4191 if (dump_file && (dump_flags & TDF_DETAILS))
4192 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
4193
4194 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4195 {
4196 gimple *stmt = gsi_stmt (gsi);
4197
3a3ba7de 4198 if (is_gimple_debug (stmt))
4199 continue;
4200
3d3e04ac 4201 if (gimple_has_volatile_ops (stmt))
4202 {
4203 /* Terminate all chains. */
4204 if (dump_file && (dump_flags & TDF_DETAILS))
4205 fprintf (dump_file, "Volatile access terminates "
4206 "all chains\n");
4207 terminate_and_process_all_chains ();
4208 continue;
4209 }
4210
3d3e04ac 4211 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
4212 && !stmt_can_throw_internal (stmt)
4213 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
9991d1d3 4214 process_store (stmt);
4215 else
4216 terminate_all_aliasing_chains (NULL, stmt);
3d3e04ac 4217 }
4218 terminate_and_process_all_chains ();
4219 }
4220 return 0;
4221}
4222
4223} // anon namespace
4224
4225/* Construct and return a store merging pass object. */
4226
4227gimple_opt_pass *
4228make_pass_store_merging (gcc::context *ctxt)
4229{
4230 return new pass_store_merging (ctxt);
4231}
3d9a2fb3 4232
4233#if CHECKING_P
4234
4235namespace selftest {
4236
4237/* Selftests for store merging helpers. */
4238
4239/* Assert that all elements of the byte arrays X and Y, both of length N
4240 are equal. */
4241
4242static void
4243verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
4244{
4245 for (unsigned int i = 0; i < n; i++)
4246 {
4247 if (x[i] != y[i])
4248 {
4249 fprintf (stderr, "Arrays do not match. X:\n");
4250 dump_char_array (stderr, x, n);
4251 fprintf (stderr, "Y:\n");
4252 dump_char_array (stderr, y, n);
4253 }
4254 ASSERT_EQ (x[i], y[i]);
4255 }
4256}
4257
/* Test shift_bytes_in_array, checking that it carries bits across
   byte boundaries correctly.  */
4260
4261static void
4262verify_shift_bytes_in_array (void)
4263{
4264 /* byte 1 | byte 0
4265 00011111 | 11100000. */
4266 unsigned char orig[2] = { 0xe0, 0x1f };
4267 unsigned char in[2];
4268 memcpy (in, orig, sizeof orig);
4269
4270 unsigned char expected[2] = { 0x80, 0x7f };
4271 shift_bytes_in_array (in, sizeof (in), 2);
4272 verify_array_eq (in, expected, sizeof (in));
4273
4274 memcpy (in, orig, sizeof orig);
4275 memcpy (expected, orig, sizeof orig);
4276 /* Check that shifting by zero doesn't change anything. */
4277 shift_bytes_in_array (in, sizeof (in), 0);
4278 verify_array_eq (in, expected, sizeof (in));
4280}
4281
/* Test shift_bytes_in_array_right, checking that it carries bits
   across byte boundaries correctly.  */
4284
4285static void
4286verify_shift_bytes_in_array_right (void)
4287{
  /* byte 0   | byte 1
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
4291 unsigned char in[2];
4292 memcpy (in, orig, sizeof orig);
4293 unsigned char expected[2] = { 0x07, 0xf8};
4294 shift_bytes_in_array_right (in, sizeof (in), 2);
4295 verify_array_eq (in, expected, sizeof (in));
4296
4297 memcpy (in, orig, sizeof orig);
4298 memcpy (expected, orig, sizeof orig);
4299 /* Check that shifting by zero doesn't change anything. */
4300 shift_bytes_in_array_right (in, sizeof (in), 0);
4301 verify_array_eq (in, expected, sizeof (in));
4302}
4303
/* Test clear_bit_region, checking that it clears exactly the bits
   asked and nothing more.  */
4306
4307static void
4308verify_clear_bit_region (void)
4309{
4310 /* Start with all bits set and test clearing various patterns in them. */
4311 unsigned char orig[3] = { 0xff, 0xff, 0xff};
4312 unsigned char in[3];
4313 unsigned char expected[3];
4314 memcpy (in, orig, sizeof in);
4315
4316 /* Check zeroing out all the bits. */
4317 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
4318 expected[0] = expected[1] = expected[2] = 0;
4319 verify_array_eq (in, expected, sizeof in);
4320
4321 memcpy (in, orig, sizeof in);
4322 /* Leave the first and last bits intact. */
4323 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
4324 expected[0] = 0x1;
4325 expected[1] = 0;
4326 expected[2] = 0x80;
4327 verify_array_eq (in, expected, sizeof in);
4328}
4329
/* Test clear_bit_region_be, checking that it clears exactly the bits
   asked and nothing more.  */
4332
4333static void
4334verify_clear_bit_region_be (void)
4335{
4336 /* Start with all bits set and test clearing various patterns in them. */
4337 unsigned char orig[3] = { 0xff, 0xff, 0xff};
4338 unsigned char in[3];
4339 unsigned char expected[3];
4340 memcpy (in, orig, sizeof in);
4341
4342 /* Check zeroing out all the bits. */
4343 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
4344 expected[0] = expected[1] = expected[2] = 0;
4345 verify_array_eq (in, expected, sizeof in);
4346
4347 memcpy (in, orig, sizeof in);
4348 /* Leave the first and last bits intact. */
4349 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
4350 expected[0] = 0x80;
4351 expected[1] = 0;
4352 expected[2] = 0x1;
4353 verify_array_eq (in, expected, sizeof in);
4354}
4355
4356
4357/* Run all of the selftests within this file. */
4358
4359void
4360store_merging_c_tests (void)
4361{
4362 verify_shift_bytes_in_array ();
4363 verify_shift_bytes_in_array_right ();
4364 verify_clear_bit_region ();
4365 verify_clear_bit_region_be ();
4366}
4367
4368} // namespace selftest
4369#endif /* CHECKING_P. */