1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
24
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
33
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
41
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
55
56 The algorithm is applied to each basic block in three phases:
57
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in store_operand_info struct.
74
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
83
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
94
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that correspond to it. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
103
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
110
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
120
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
125
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
129
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
136
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
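
/* As a concrete illustration (not part of the pass itself), source code such
   as the following hypothetical function produces exactly the kind of
   adjacent narrow constant stores described above, which phase 3) can
   replace with one wider store on targets that allow it:

     #include <stdint.h>

     void
     set_header (uint8_t *p)       // hypothetical example, not GCC code
     {
       p[0] = 0x11;                // [p     ] := imm1
       p[1] = 0x22;                // [p + 1B] := imm2
       p[2] = 0x33;                // [p + 2B] := imm3
       p[3] = 0x44;                // [p + 3B] := imm4
     }

   On a little-endian target the four byte stores can become a single 32-bit
   store of 0x44332211 at p; on a big-endian target, of 0x11223344.  */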
140
141 #include "config.h"
142 #include "system.h"
143 #include "coretypes.h"
144 #include "backend.h"
145 #include "tree.h"
146 #include "gimple.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
150 #include "ssa.h"
151 #include "gimple-pretty-print.h"
152 #include "alias.h"
153 #include "fold-const.h"
154 #include "params.h"
155 #include "print-tree.h"
156 #include "tree-hash-traits.h"
157 #include "gimple-iterator.h"
158 #include "gimplify.h"
159 #include "gimple-fold.h"
160 #include "stor-layout.h"
161 #include "timevar.h"
162 #include "tree-cfg.h"
163 #include "tree-eh.h"
164 #include "target.h"
165 #include "gimplify-me.h"
166 #include "rtl.h"
167 #include "expr.h" /* For get_bit_range. */
168 #include "optabs-tree.h"
169 #include "selftest.h"
170
171 /* The maximum size (in bits) of the stores this pass should generate. */
172 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
173 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
174
175 /* Limit to bound the number of aliasing checks for loads with the same
176 vuse as the corresponding store. */
177 #define MAX_STORE_ALIAS_CHECKS 64
178
179 namespace {
180
181 struct bswap_stat
182 {
183 /* Number of hand-written 16-bit nop / bswaps found. */
184 int found_16bit;
185
186 /* Number of hand-written 32-bit nop / bswaps found. */
187 int found_32bit;
188
189 /* Number of hand-written 64-bit nop / bswaps found. */
190 int found_64bit;
191 } nop_stats, bswap_stats;
192
193 /* A symbolic number structure is used to detect byte permutation and selection
194 patterns of a source. To achieve that, its field N contains an artificial
195 number consisting of BITS_PER_MARKER sized markers tracking where each
196 byte comes from in the source:
197
198 0 - target byte has the value 0
199 FF - target byte has an unknown value (eg. due to sign extension)
200 1..size - marker value is the byte index in the source (0 for lsb).
201
202 To detect permutations on memory sources (arrays and structures), a symbolic
203 number also records:
204 - a base address BASE_ADDR and an OFFSET giving the address of the source;
205 - a range which gives the difference between the highest and lowest accessed
206 memory location to make such a symbolic number;
207 - the address SRC of the source element of lowest address as a convenience
208 to easily get BASE_ADDR + offset + lowest bytepos;
209 - the number of expressions N_OPS bitwise ORed together, as an
210 approximate cost of the computation.
211
212 Note 1: the range is different from size as size reflects the size of the
213 type of the current expression. For instance, for an array char a[],
214 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
215 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
216 time a range of 1.
217
218 Note 2: for non-memory sources, range holds the same value as size.
219
220 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
221
222 struct symbolic_number {
223 uint64_t n;
224 tree type;
225 tree base_addr;
226 tree offset;
227 poly_int64_pod bytepos;
228 tree src;
229 tree alias_set;
230 tree vuse;
231 unsigned HOST_WIDE_INT range;
232 int n_ops;
233 };
234
235 #define BITS_PER_MARKER 8
236 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
237 #define MARKER_BYTE_UNKNOWN MARKER_MASK
238 #define HEAD_MARKER(n, size) \
239 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
240
241 /* The number which the find_bswap_or_nop_1 result should match in
242 order to have a nop. The number is masked according to the size of
243 the symbolic number before using it. */
244 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
245 (uint64_t)0x08070605 << 32 | 0x04030201)
246
247 /* The number which the find_bswap_or_nop_1 result should match in
248 order to have a byte swap. The number is masked according to the
249 size of the symbolic number before using it. */
250 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
251 (uint64_t)0x01020304 << 32 | 0x05060708)
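
/* As an illustration, a 4-byte symbolic number masked from CMPNOP is
   0x04030201: the lowest marker says the result's lsb comes from source
   byte 1 (the lsb), and so on, i.e. the identity.  A hand-written byte swap
   such as the hypothetical function below instead ends up with markers
   matching CMPXCHG shifted for a 4-byte size, i.e. 0x01020304, which is what
   allows the bswap pass to replace it by __builtin_bswap32:

     #include <stdint.h>

     static uint32_t
     my_bswap32 (uint32_t x)       // hypothetical example, not GCC code
     {
       return ((x & 0x000000ffu) << 24)    // source byte 1 -> result byte 4
              | ((x & 0x0000ff00u) << 8)   // source byte 2 -> result byte 3
              | ((x & 0x00ff0000u) >> 8)   // source byte 3 -> result byte 2
              | ((x & 0xff000000u) >> 24); // source byte 4 -> result byte 1
     }
*/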
252
253 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
254 number N. Return false if the requested operation is not permitted
255 on a symbolic number. */
256
257 inline bool
258 do_shift_rotate (enum tree_code code,
259 struct symbolic_number *n,
260 int count)
261 {
262 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
263 unsigned head_marker;
264
265 if (count < 0
266 || count >= TYPE_PRECISION (n->type)
267 || count % BITS_PER_UNIT != 0)
268 return false;
269 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
270
271 /* Zero out the extra bits of N in order to avoid them being shifted
272 into the significant bits. */
273 if (size < 64 / BITS_PER_MARKER)
274 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
275
276 switch (code)
277 {
278 case LSHIFT_EXPR:
279 n->n <<= count;
280 break;
281 case RSHIFT_EXPR:
282 head_marker = HEAD_MARKER (n->n, size);
283 n->n >>= count;
284 /* Arithmetic shift of signed type: result is dependent on the value. */
285 if (!TYPE_UNSIGNED (n->type) && head_marker)
286 for (i = 0; i < count / BITS_PER_MARKER; i++)
287 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
288 << ((size - 1 - i) * BITS_PER_MARKER);
289 break;
290 case LROTATE_EXPR:
291 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
292 break;
293 case RROTATE_EXPR:
294 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
295 break;
296 default:
297 return false;
298 }
299 /* Zero unused bits for size. */
300 if (size < 64 / BITS_PER_MARKER)
301 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
302 return true;
303 }
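
/* A worked illustration of the marker arithmetic above, assuming a 4-byte
   symbolic number that starts out as the identity 0x04030201:
   - LSHIFT_EXPR by 8 bits shifts the markers left by one marker slot and
     gives 0x03020100: the new low byte is known to be zero and every other
     result byte now comes from one byte lower in the source.
   - RSHIFT_EXPR by 8 bits gives 0x00040302 for an unsigned type; for a
     signed type whose head marker is nonzero, the vacated high byte is set
     to MARKER_BYTE_UNKNOWN (0xff) instead, since an arithmetic shift
     replicates the sign bit.  */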
304
305 /* Perform sanity checking for the symbolic number N and the gimple
306 statement STMT. */
307
308 inline bool
309 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
310 {
311 tree lhs_type;
312
313 lhs_type = gimple_expr_type (stmt);
314
315 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
316 return false;
317
318 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
319 return false;
320
321 return true;
322 }
323
324 /* Initialize the symbolic number N for the bswap pass from the base element
325 SRC manipulated by the bitwise OR expression. */
326
327 bool
328 init_symbolic_number (struct symbolic_number *n, tree src)
329 {
330 int size;
331
332 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
333 return false;
334
335 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
336 n->src = src;
337
338 /* Set up the symbolic number N by setting each byte to a value between 1 and
339 the byte size of rhs1. The highest order byte is set to the byte size and the
340 lowest order byte to 1. */
341 n->type = TREE_TYPE (src);
342 size = TYPE_PRECISION (n->type);
343 if (size % BITS_PER_UNIT != 0)
344 return false;
345 size /= BITS_PER_UNIT;
346 if (size > 64 / BITS_PER_MARKER)
347 return false;
348 n->range = size;
349 n->n = CMPNOP;
350 n->n_ops = 1;
351
352 if (size < 64 / BITS_PER_MARKER)
353 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
354
355 return true;
356 }
357
358 /* Check if STMT might be a byte swap or a nop from a memory source and return
359 the answer. If so, REF is that memory source and the base of the memory area
360 accessed and the offset of the access from that base are recorded in N. */
361
362 bool
363 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
364 {
365 /* Leaf node is an array or component ref. Memorize its base and
366 offset from base to compare to other such leaf node. */
367 poly_int64 bitsize, bitpos, bytepos;
368 machine_mode mode;
369 int unsignedp, reversep, volatilep;
370 tree offset, base_addr;
371
372 /* Not prepared to handle PDP endian. */
373 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
374 return false;
375
376 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
377 return false;
378
379 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
380 &unsignedp, &reversep, &volatilep);
381
382 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
383 /* Do not rewrite TARGET_MEM_REF. */
384 return false;
385 else if (TREE_CODE (base_addr) == MEM_REF)
386 {
387 poly_offset_int bit_offset = 0;
388 tree off = TREE_OPERAND (base_addr, 1);
389
390 if (!integer_zerop (off))
391 {
392 poly_offset_int boff = mem_ref_offset (base_addr);
393 boff <<= LOG2_BITS_PER_UNIT;
394 bit_offset += boff;
395 }
396
397 base_addr = TREE_OPERAND (base_addr, 0);
398
399 /* Avoid returning a negative bitpos as this may wreak havoc later. */
400 if (maybe_lt (bit_offset, 0))
401 {
402 tree byte_offset = wide_int_to_tree
403 (sizetype, bits_to_bytes_round_down (bit_offset));
404 bit_offset = num_trailing_bits (bit_offset);
405 if (offset)
406 offset = size_binop (PLUS_EXPR, offset, byte_offset);
407 else
408 offset = byte_offset;
409 }
410
411 bitpos += bit_offset.force_shwi ();
412 }
413 else
414 base_addr = build_fold_addr_expr (base_addr);
415
416 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
417 return false;
418 if (!multiple_p (bitsize, BITS_PER_UNIT))
419 return false;
420 if (reversep)
421 return false;
422
423 if (!init_symbolic_number (n, ref))
424 return false;
425 n->base_addr = base_addr;
426 n->offset = offset;
427 n->bytepos = bytepos;
428 n->alias_set = reference_alias_ptr_type (ref);
429 n->vuse = gimple_vuse (stmt);
430 return true;
431 }
432
433 /* Compute the symbolic number N representing the result of a bitwise OR on the
434 two symbolic numbers N1 and N2 whose source statements are respectively
435 SOURCE_STMT1 and SOURCE_STMT2. */
436
437 gimple *
438 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
439 gimple *source_stmt2, struct symbolic_number *n2,
440 struct symbolic_number *n)
441 {
442 int i, size;
443 uint64_t mask;
444 gimple *source_stmt;
445 struct symbolic_number *n_start;
446
447 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
448 if (TREE_CODE (rhs1) == BIT_FIELD_REF
449 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
450 rhs1 = TREE_OPERAND (rhs1, 0);
451 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
452 if (TREE_CODE (rhs2) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
454 rhs2 = TREE_OPERAND (rhs2, 0);
455
456 /* Sources are different, cancel bswap if they are not memory locations with
457 the same base (array, structure, ...). */
458 if (rhs1 != rhs2)
459 {
460 uint64_t inc;
461 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
462 struct symbolic_number *toinc_n_ptr, *n_end;
463 basic_block bb1, bb2;
464
465 if (!n1->base_addr || !n2->base_addr
466 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
467 return NULL;
468
469 if (!n1->offset != !n2->offset
470 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
471 return NULL;
472
473 start1 = 0;
474 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
475 return NULL;
476
477 if (start1 < start2)
478 {
479 n_start = n1;
480 start_sub = start2 - start1;
481 }
482 else
483 {
484 n_start = n2;
485 start_sub = start1 - start2;
486 }
487
488 bb1 = gimple_bb (source_stmt1);
489 bb2 = gimple_bb (source_stmt2);
490 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
491 source_stmt = source_stmt1;
492 else
493 source_stmt = source_stmt2;
494
495 /* Find the highest address at which a load is performed and
496 compute related info. */
497 end1 = start1 + (n1->range - 1);
498 end2 = start2 + (n2->range - 1);
499 if (end1 < end2)
500 {
501 end = end2;
502 end_sub = end2 - end1;
503 }
504 else
505 {
506 end = end1;
507 end_sub = end1 - end2;
508 }
509 n_end = (end2 > end1) ? n2 : n1;
510
511 /* Find symbolic number whose lsb is the most significant. */
512 if (BYTES_BIG_ENDIAN)
513 toinc_n_ptr = (n_end == n1) ? n2 : n1;
514 else
515 toinc_n_ptr = (n_start == n1) ? n2 : n1;
516
517 n->range = end - MIN (start1, start2) + 1;
518
519 /* Check that the range of memory covered can be represented by
520 a symbolic number. */
521 if (n->range > 64 / BITS_PER_MARKER)
522 return NULL;
523
524 /* Reinterpret byte marks in symbolic number holding the value of
525 bigger weight according to target endianness. */
526 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
527 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
528 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
529 {
530 unsigned marker
531 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
532 if (marker && marker != MARKER_BYTE_UNKNOWN)
533 toinc_n_ptr->n += inc;
534 }
535 }
536 else
537 {
538 n->range = n1->range;
539 n_start = n1;
540 source_stmt = source_stmt1;
541 }
542
543 if (!n1->alias_set
544 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
545 n->alias_set = n1->alias_set;
546 else
547 n->alias_set = ptr_type_node;
548 n->vuse = n_start->vuse;
549 n->base_addr = n_start->base_addr;
550 n->offset = n_start->offset;
551 n->src = n_start->src;
552 n->bytepos = n_start->bytepos;
553 n->type = n_start->type;
554 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
555
556 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
557 {
558 uint64_t masked1, masked2;
559
560 masked1 = n1->n & mask;
561 masked2 = n2->n & mask;
562 if (masked1 && masked2 && masked1 != masked2)
563 return NULL;
564 }
565 n->n = n1->n | n2->n;
566 n->n_ops = n1->n_ops + n2->n_ops;
567
568 return source_stmt;
569 }
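
/* A worked example of the merge above (illustration only), assuming little
   endian: for (uint32_t) p[0] | ((uint32_t) p[1] << 8) the first operand's
   symbolic number has markers 0x01 and bytepos 0, while the shifted second
   operand has markers 0x0100 and bytepos 1.  Here start_sub is 1, so the
   single valid marker of the second number is incremented to 2, giving
   0x0200; ORing the two yields 0x0201 with a range of 2 bytes, which
   find_bswap_or_nop later classifies as a 16-bit load in little-endian
   order (a nop load on a little-endian target).  */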
570
571 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
572 the operation given by the rhs of STMT on the result. If the operation
573 could successfully be executed the function returns a gimple stmt whose
574 rhs's first tree is the expression of the source operand; otherwise it
575 returns NULL. */
576
577 gimple *
578 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
579 {
580 enum tree_code code;
581 tree rhs1, rhs2 = NULL;
582 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
583 enum gimple_rhs_class rhs_class;
584
585 if (!limit || !is_gimple_assign (stmt))
586 return NULL;
587
588 rhs1 = gimple_assign_rhs1 (stmt);
589
590 if (find_bswap_or_nop_load (stmt, rhs1, n))
591 return stmt;
592
593 /* Handle BIT_FIELD_REF. */
594 if (TREE_CODE (rhs1) == BIT_FIELD_REF
595 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
596 {
597 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
598 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
599 if (bitpos % BITS_PER_UNIT == 0
600 && bitsize % BITS_PER_UNIT == 0
601 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
602 {
603 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
604 if (BYTES_BIG_ENDIAN)
605 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
606
607 /* Shift. */
608 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
609 return NULL;
610
611 /* Mask. */
612 uint64_t mask = 0;
613 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
614 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
615 i++, tmp <<= BITS_PER_UNIT)
616 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
617 n->n &= mask;
618
619 /* Convert. */
620 n->type = TREE_TYPE (rhs1);
621 if (!n->base_addr)
622 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
623
624 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
625 }
626
627 return NULL;
628 }
629
630 if (TREE_CODE (rhs1) != SSA_NAME)
631 return NULL;
632
633 code = gimple_assign_rhs_code (stmt);
634 rhs_class = gimple_assign_rhs_class (stmt);
635 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
636
637 if (rhs_class == GIMPLE_BINARY_RHS)
638 rhs2 = gimple_assign_rhs2 (stmt);
639
640 /* Handle unary rhs and binary rhs with integer constants as second
641 operand. */
642
643 if (rhs_class == GIMPLE_UNARY_RHS
644 || (rhs_class == GIMPLE_BINARY_RHS
645 && TREE_CODE (rhs2) == INTEGER_CST))
646 {
647 if (code != BIT_AND_EXPR
648 && code != LSHIFT_EXPR
649 && code != RSHIFT_EXPR
650 && code != LROTATE_EXPR
651 && code != RROTATE_EXPR
652 && !CONVERT_EXPR_CODE_P (code))
653 return NULL;
654
655 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
656
657 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
658 we have to initialize the symbolic number. */
659 if (!source_stmt1)
660 {
661 if (gimple_assign_load_p (stmt)
662 || !init_symbolic_number (n, rhs1))
663 return NULL;
664 source_stmt1 = stmt;
665 }
666
667 switch (code)
668 {
669 case BIT_AND_EXPR:
670 {
671 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
672 uint64_t val = int_cst_value (rhs2), mask = 0;
673 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
674
675 /* Only constants masking full bytes are allowed. */
676 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
677 if ((val & tmp) != 0 && (val & tmp) != tmp)
678 return NULL;
679 else if (val & tmp)
680 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
681
682 n->n &= mask;
683 }
684 break;
685 case LSHIFT_EXPR:
686 case RSHIFT_EXPR:
687 case LROTATE_EXPR:
688 case RROTATE_EXPR:
689 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
690 return NULL;
691 break;
692 CASE_CONVERT:
693 {
694 int i, type_size, old_type_size;
695 tree type;
696
697 type = gimple_expr_type (stmt);
698 type_size = TYPE_PRECISION (type);
699 if (type_size % BITS_PER_UNIT != 0)
700 return NULL;
701 type_size /= BITS_PER_UNIT;
702 if (type_size > 64 / BITS_PER_MARKER)
703 return NULL;
704
705 /* Sign extension: result is dependent on the value. */
706 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
707 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
708 && HEAD_MARKER (n->n, old_type_size))
709 for (i = 0; i < type_size - old_type_size; i++)
710 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
711 << ((type_size - 1 - i) * BITS_PER_MARKER);
712
713 if (type_size < 64 / BITS_PER_MARKER)
714 {
715 /* If STMT casts to a smaller type mask out the bits not
716 belonging to the target type. */
717 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
718 }
719 n->type = type;
720 if (!n->base_addr)
721 n->range = type_size;
722 }
723 break;
724 default:
725 return NULL;
726 };
727 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
728 }
729
730 /* Handle binary rhs. */
731
732 if (rhs_class == GIMPLE_BINARY_RHS)
733 {
734 struct symbolic_number n1, n2;
735 gimple *source_stmt, *source_stmt2;
736
737 if (code != BIT_IOR_EXPR)
738 return NULL;
739
740 if (TREE_CODE (rhs2) != SSA_NAME)
741 return NULL;
742
743 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
744
745 switch (code)
746 {
747 case BIT_IOR_EXPR:
748 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
749
750 if (!source_stmt1)
751 return NULL;
752
753 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
754
755 if (!source_stmt2)
756 return NULL;
757
758 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
759 return NULL;
760
761 if (n1.vuse != n2.vuse)
762 return NULL;
763
764 source_stmt
765 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
766
767 if (!source_stmt)
768 return NULL;
769
770 if (!verify_symbolic_number_p (n, stmt))
771 return NULL;
772
773 break;
774 default:
775 return NULL;
776 }
777 return source_stmt;
778 }
779 return NULL;
780 }
781
782 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
783 *CMPXCHG, *CMPNOP and adjust *N. */
784
785 void
786 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
787 uint64_t *cmpnop)
788 {
789 unsigned rsize;
790 uint64_t tmpn, mask;
791
792 /* The number which the find_bswap_or_nop_1 result should match in order
793 to have a full byte swap. The number is shifted to the right
794 according to the size of the symbolic number before using it. */
795 *cmpxchg = CMPXCHG;
796 *cmpnop = CMPNOP;
797
798 /* Find real size of result (highest non-zero byte). */
799 if (n->base_addr)
800 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
801 else
802 rsize = n->range;
803
804 /* Zero out the bits corresponding to untouched bytes in original gimple
805 expression. */
806 if (n->range < (int) sizeof (int64_t))
807 {
808 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
809 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
810 *cmpnop &= mask;
811 }
812
813 /* Zero out the bits corresponding to unused bytes in the result of the
814 gimple expression. */
815 if (rsize < n->range)
816 {
817 if (BYTES_BIG_ENDIAN)
818 {
819 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
820 *cmpxchg &= mask;
821 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
822 }
823 else
824 {
825 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
826 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
827 *cmpnop &= mask;
828 }
829 n->range = rsize;
830 }
831
832 n->range *= BITS_PER_UNIT;
833 }
834
835 /* Check if STMT completes a bswap implementation or a read in a given
836 endianness consisting of ORs, SHIFTs and ANDs and set *BSWAP
837 accordingly. It also sets N to represent the kind of operations
838 performed: size of the resulting expression and whether it works on
839 a memory source, and if so its alias set and vuse. Finally, the
840 function returns a stmt whose rhs's first tree is the source
841 expression. */
842
843 gimple *
844 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
845 {
846 /* The last parameter determines the depth search limit. It usually
847 correlates directly to the number n of bytes to be touched. We
848 increase that number by log2(n) + 1 here in order to also
849 cover signed -> unsigned conversions of the src operand as can be seen
850 in libgcc, and for the initial shift/and operation of the src operand. */
851 int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
852 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
853 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
854
855 if (!ins_stmt)
856 return NULL;
857
858 uint64_t cmpxchg, cmpnop;
859 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
860
861 /* A complete byte swap should make the symbolic number start with
862 the largest digit in the highest order byte. An unchanged symbolic
863 number indicates a read with the same endianness as the target. */
864 if (n->n == cmpnop)
865 *bswap = false;
866 else if (n->n == cmpxchg)
867 *bswap = true;
868 else
869 return NULL;
870
871 /* Useless bit manipulation performed by code. */
872 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
873 return NULL;
874
875 return ins_stmt;
876 }
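
/* For illustration, this is the kind of hand-written sequence the detection
   above is aimed at (a hypothetical example, not GCC code):

     #include <stdint.h>

     static uint32_t
     read_le32 (const uint8_t *p)
     {
       return (uint32_t) p[0]
              | ((uint32_t) p[1] << 8)
              | ((uint32_t) p[2] << 16)
              | ((uint32_t) p[3] << 24);
     }

   find_bswap_or_nop on the final BIT_IOR_EXPR computes a symbolic number
   covering the four bytes at p; on a little-endian target it matches CMPNOP
   (*BSWAP = false, a plain 32-bit load), while on a big-endian target it
   matches CMPXCHG (*BSWAP = true, a 32-bit load followed by a byte swap).  */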
877
878 const pass_data pass_data_optimize_bswap =
879 {
880 GIMPLE_PASS, /* type */
881 "bswap", /* name */
882 OPTGROUP_NONE, /* optinfo_flags */
883 TV_NONE, /* tv_id */
884 PROP_ssa, /* properties_required */
885 0, /* properties_provided */
886 0, /* properties_destroyed */
887 0, /* todo_flags_start */
888 0, /* todo_flags_finish */
889 };
890
891 class pass_optimize_bswap : public gimple_opt_pass
892 {
893 public:
894 pass_optimize_bswap (gcc::context *ctxt)
895 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
896 {}
897
898 /* opt_pass methods: */
899 virtual bool gate (function *)
900 {
901 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
902 }
903
904 virtual unsigned int execute (function *);
905
906 }; // class pass_optimize_bswap
907
908 /* Perform the bswap optimization: replace the expression computed in the rhs
909 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
910 bswap, load or load + bswap expression.
911 Which of these alternatives replaces the rhs is given by N->base_addr (non
912 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
913 load to perform are also given in N while the builtin bswap invocation is
914 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
915 load statements involved to construct the rhs in gsi_stmt (GSI) and
916 N->range gives the size of the rhs expression for maintaining some
917 statistics.
918
919 Note that if the replacement involves a load and if gsi_stmt (GSI) is
920 non-NULL, that stmt is moved just after INS_STMT to do the load with the
921 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
922
923 tree
924 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
925 tree bswap_type, tree load_type, struct symbolic_number *n,
926 bool bswap)
927 {
928 tree src, tmp, tgt = NULL_TREE;
929 gimple *bswap_stmt;
930
931 gimple *cur_stmt = gsi_stmt (gsi);
932 src = n->src;
933 if (cur_stmt)
934 tgt = gimple_assign_lhs (cur_stmt);
935
936 /* Need to load the value from memory first. */
937 if (n->base_addr)
938 {
939 gimple_stmt_iterator gsi_ins = gsi;
940 if (ins_stmt)
941 gsi_ins = gsi_for_stmt (ins_stmt);
942 tree addr_expr, addr_tmp, val_expr, val_tmp;
943 tree load_offset_ptr, aligned_load_type;
944 gimple *load_stmt;
945 unsigned align = get_object_alignment (src);
946 poly_int64 load_offset = 0;
947
948 if (cur_stmt)
949 {
950 basic_block ins_bb = gimple_bb (ins_stmt);
951 basic_block cur_bb = gimple_bb (cur_stmt);
952 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
953 return NULL_TREE;
954
955 /* Move cur_stmt just before one of the loads of the original
956 sequence to ensure it has the same VUSE. See PR61517 for what could
957 go wrong. */
958 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
959 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
960 gsi_move_before (&gsi, &gsi_ins);
961 gsi = gsi_for_stmt (cur_stmt);
962 }
963 else
964 gsi = gsi_ins;
965
966 /* Compute address to load from and cast according to the size
967 of the load. */
968 addr_expr = build_fold_addr_expr (src);
969 if (is_gimple_mem_ref_addr (addr_expr))
970 addr_tmp = unshare_expr (addr_expr);
971 else
972 {
973 addr_tmp = unshare_expr (n->base_addr);
974 if (!is_gimple_mem_ref_addr (addr_tmp))
975 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
976 is_gimple_mem_ref_addr,
977 NULL_TREE, true,
978 GSI_SAME_STMT);
979 load_offset = n->bytepos;
980 if (n->offset)
981 {
982 tree off
983 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
984 true, NULL_TREE, true,
985 GSI_SAME_STMT);
986 gimple *stmt
987 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
988 POINTER_PLUS_EXPR, addr_tmp, off);
989 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
990 addr_tmp = gimple_assign_lhs (stmt);
991 }
992 }
993
994 /* Perform the load. */
995 aligned_load_type = load_type;
996 if (align < TYPE_ALIGN (load_type))
997 aligned_load_type = build_aligned_type (load_type, align);
998 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
999 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1000 load_offset_ptr);
1001
1002 if (!bswap)
1003 {
1004 if (n->range == 16)
1005 nop_stats.found_16bit++;
1006 else if (n->range == 32)
1007 nop_stats.found_32bit++;
1008 else
1009 {
1010 gcc_assert (n->range == 64);
1011 nop_stats.found_64bit++;
1012 }
1013
1014 /* Convert the result of load if necessary. */
1015 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1016 {
1017 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1018 "load_dst");
1019 load_stmt = gimple_build_assign (val_tmp, val_expr);
1020 gimple_set_vuse (load_stmt, n->vuse);
1021 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1022 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
1023 update_stmt (cur_stmt);
1024 }
1025 else if (cur_stmt)
1026 {
1027 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1028 gimple_set_vuse (cur_stmt, n->vuse);
1029 update_stmt (cur_stmt);
1030 }
1031 else
1032 {
1033 tgt = make_ssa_name (load_type);
1034 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1035 gimple_set_vuse (cur_stmt, n->vuse);
1036 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1037 }
1038
1039 if (dump_file)
1040 {
1041 fprintf (dump_file,
1042 "%d bit load in target endianness found at: ",
1043 (int) n->range);
1044 print_gimple_stmt (dump_file, cur_stmt, 0);
1045 }
1046 return tgt;
1047 }
1048 else
1049 {
1050 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1051 load_stmt = gimple_build_assign (val_tmp, val_expr);
1052 gimple_set_vuse (load_stmt, n->vuse);
1053 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1054 }
1055 src = val_tmp;
1056 }
1057 else if (!bswap)
1058 {
1059 gimple *g = NULL;
1060 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1061 {
1062 if (!is_gimple_val (src))
1063 return NULL_TREE;
1064 g = gimple_build_assign (tgt, NOP_EXPR, src);
1065 }
1066 else if (cur_stmt)
1067 g = gimple_build_assign (tgt, src);
1068 else
1069 tgt = src;
1070 if (n->range == 16)
1071 nop_stats.found_16bit++;
1072 else if (n->range == 32)
1073 nop_stats.found_32bit++;
1074 else
1075 {
1076 gcc_assert (n->range == 64);
1077 nop_stats.found_64bit++;
1078 }
1079 if (dump_file)
1080 {
1081 fprintf (dump_file,
1082 "%d bit reshuffle in target endianness found at: ",
1083 (int) n->range);
1084 if (cur_stmt)
1085 print_gimple_stmt (dump_file, cur_stmt, 0);
1086 else
1087 {
1088 print_generic_expr (dump_file, tgt, TDF_NONE);
1089 fprintf (dump_file, "\n");
1090 }
1091 }
1092 if (cur_stmt)
1093 gsi_replace (&gsi, g, true);
1094 return tgt;
1095 }
1096 else if (TREE_CODE (src) == BIT_FIELD_REF)
1097 src = TREE_OPERAND (src, 0);
1098
1099 if (n->range == 16)
1100 bswap_stats.found_16bit++;
1101 else if (n->range == 32)
1102 bswap_stats.found_32bit++;
1103 else
1104 {
1105 gcc_assert (n->range == 64);
1106 bswap_stats.found_64bit++;
1107 }
1108
1109 tmp = src;
1110
1111 /* Convert the src expression if necessary. */
1112 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1113 {
1114 gimple *convert_stmt;
1115
1116 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1117 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1118 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1119 }
1120
1121 /* The canonical form for a 16-bit bswap is a rotate expression. Only 16-bit
1122 values are handled this way, as a rotation of a 2N-bit value by N bits is
1123 generally not equivalent to a bswap. Consider for instance 0x01020304 r>> 16,
1124 which gives 0x03040102 while a bswap for that value is 0x04030201. */
1125 if (bswap && n->range == 16)
1126 {
1127 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1128 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1129 bswap_stmt = gimple_build_assign (NULL, src);
1130 }
1131 else
1132 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1133
1134 if (tgt == NULL_TREE)
1135 tgt = make_ssa_name (bswap_type);
1136 tmp = tgt;
1137
1138 /* Convert the result if necessary. */
1139 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1140 {
1141 gimple *convert_stmt;
1142
1143 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1144 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
1145 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1146 }
1147
1148 gimple_set_lhs (bswap_stmt, tmp);
1149
1150 if (dump_file)
1151 {
1152 fprintf (dump_file, "%d bit bswap implementation found at: ",
1153 (int) n->range);
1154 if (cur_stmt)
1155 print_gimple_stmt (dump_file, cur_stmt, 0);
1156 else
1157 {
1158 print_generic_expr (dump_file, tgt, TDF_NONE);
1159 fprintf (dump_file, "\n");
1160 }
1161 }
1162
1163 if (cur_stmt)
1164 {
1165 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1166 gsi_remove (&gsi, true);
1167 }
1168 else
1169 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1170 return tgt;
1171 }
1172
1173 /* Find manual byte swap implementations as well as loads in a given
1174 endianness. Byte swaps are turned into a bswap builtin invocation
1175 while endian loads are converted to a bswap builtin invocation or a
1176 simple load according to the target endianness. */
1177
1178 unsigned int
1179 pass_optimize_bswap::execute (function *fun)
1180 {
1181 basic_block bb;
1182 bool bswap32_p, bswap64_p;
1183 bool changed = false;
1184 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1185
1186 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1187 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1188 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1189 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1190 || (bswap32_p && word_mode == SImode)));
1191
1192 /* Determine the argument type of the builtins. The code later on
1193 assumes that the return and argument type are the same. */
1194 if (bswap32_p)
1195 {
1196 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1197 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1198 }
1199
1200 if (bswap64_p)
1201 {
1202 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1203 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1204 }
1205
1206 memset (&nop_stats, 0, sizeof (nop_stats));
1207 memset (&bswap_stats, 0, sizeof (bswap_stats));
1208 calculate_dominance_info (CDI_DOMINATORS);
1209
1210 FOR_EACH_BB_FN (bb, fun)
1211 {
1212 gimple_stmt_iterator gsi;
1213
1214 /* We do a reverse scan for bswap patterns to make sure we get the
1215 widest match. As bswap pattern matching doesn't handle previously
1216 inserted smaller bswap replacements as sub-patterns, the wider
1217 variant wouldn't be detected. */
1218 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1219 {
1220 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1221 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1222 enum tree_code code;
1223 struct symbolic_number n;
1224 bool bswap;
1225
1226 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1227 might be moved to a different basic block by bswap_replace and gsi
1228 must not point to it in that case. Moving the gsi_prev
1229 there makes sure that gsi points to the statement preceding
1230 cur_stmt while still making sure that all statements are
1231 considered in this basic block. */
1232 gsi_prev (&gsi);
1233
1234 if (!is_gimple_assign (cur_stmt))
1235 continue;
1236
1237 code = gimple_assign_rhs_code (cur_stmt);
1238 switch (code)
1239 {
1240 case LROTATE_EXPR:
1241 case RROTATE_EXPR:
1242 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1243 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1244 % BITS_PER_UNIT)
1245 continue;
1246 /* Fall through. */
1247 case BIT_IOR_EXPR:
1248 break;
1249 default:
1250 continue;
1251 }
1252
1253 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1254
1255 if (!ins_stmt)
1256 continue;
1257
1258 switch (n.range)
1259 {
1260 case 16:
1261 /* Already in canonical form, nothing to do. */
1262 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1263 continue;
1264 load_type = bswap_type = uint16_type_node;
1265 break;
1266 case 32:
1267 load_type = uint32_type_node;
1268 if (bswap32_p)
1269 {
1270 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1271 bswap_type = bswap32_type;
1272 }
1273 break;
1274 case 64:
1275 load_type = uint64_type_node;
1276 if (bswap64_p)
1277 {
1278 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1279 bswap_type = bswap64_type;
1280 }
1281 break;
1282 default:
1283 continue;
1284 }
1285
1286 if (bswap && !fndecl && n.range != 16)
1287 continue;
1288
1289 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1290 bswap_type, load_type, &n, bswap))
1291 changed = true;
1292 }
1293 }
1294
1295 statistics_counter_event (fun, "16-bit nop implementations found",
1296 nop_stats.found_16bit);
1297 statistics_counter_event (fun, "32-bit nop implementations found",
1298 nop_stats.found_32bit);
1299 statistics_counter_event (fun, "64-bit nop implementations found",
1300 nop_stats.found_64bit);
1301 statistics_counter_event (fun, "16-bit bswap implementations found",
1302 bswap_stats.found_16bit);
1303 statistics_counter_event (fun, "32-bit bswap implementations found",
1304 bswap_stats.found_32bit);
1305 statistics_counter_event (fun, "64-bit bswap implementations found",
1306 bswap_stats.found_64bit);
1307
1308 return (changed ? TODO_update_ssa : 0);
1309 }
1310
1311 } // anon namespace
1312
1313 gimple_opt_pass *
1314 make_pass_optimize_bswap (gcc::context *ctxt)
1315 {
1316 return new pass_optimize_bswap (ctxt);
1317 }
1318
1319 namespace {
1320
1321 /* Struct recording one operand for the store, which is either a constant,
1322 then VAL represents the constant and all the other fields are zero, or
1323 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1324 and the other fields also reflect the memory load, or an SSA name, then
1325 VAL represents the SSA name and all the other fields are zero. */
1326
1327 class store_operand_info
1328 {
1329 public:
1330 tree val;
1331 tree base_addr;
1332 poly_uint64 bitsize;
1333 poly_uint64 bitpos;
1334 poly_uint64 bitregion_start;
1335 poly_uint64 bitregion_end;
1336 gimple *stmt;
1337 bool bit_not_p;
1338 store_operand_info ();
1339 };
1340
1341 store_operand_info::store_operand_info ()
1342 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1343 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1344 {
1345 }
1346
1347 /* Struct recording the information about a single store of an immediate
1348 to memory. These are created in the first phase and coalesced into
1349 merged_store_group objects in the second phase. */
1350
1351 class store_immediate_info
1352 {
1353 public:
1354 unsigned HOST_WIDE_INT bitsize;
1355 unsigned HOST_WIDE_INT bitpos;
1356 unsigned HOST_WIDE_INT bitregion_start;
1357 /* This is one past the last bit of the bit region. */
1358 unsigned HOST_WIDE_INT bitregion_end;
1359 gimple *stmt;
1360 unsigned int order;
1361 /* INTEGER_CST for constant stores, MEM_REF for memory copy,
1362 BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
1363 for bit insertion.
1364 LROTATE_EXPR if it can be only bswap optimized and
1365 ops are not really meaningful.
1366 NOP_EXPR if bswap optimization detected identity, ops
1367 are not meaningful. */
1368 enum tree_code rhs_code;
1369 /* Two fields for bswap optimization purposes. */
1370 struct symbolic_number n;
1371 gimple *ins_stmt;
1372 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1373 bool bit_not_p;
1374 /* True if ops have been swapped and thus ops[1] represents
1375 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1376 bool ops_swapped_p;
1377 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1378 just the first one. */
1379 store_operand_info ops[2];
1380 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1381 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1382 gimple *, unsigned int, enum tree_code,
1383 struct symbolic_number &, gimple *, bool,
1384 const store_operand_info &,
1385 const store_operand_info &);
1386 };
1387
1388 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1389 unsigned HOST_WIDE_INT bp,
1390 unsigned HOST_WIDE_INT brs,
1391 unsigned HOST_WIDE_INT bre,
1392 gimple *st,
1393 unsigned int ord,
1394 enum tree_code rhscode,
1395 struct symbolic_number &nr,
1396 gimple *ins_stmtp,
1397 bool bitnotp,
1398 const store_operand_info &op0r,
1399 const store_operand_info &op1r)
1400 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1401 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1402 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
1403 #if __cplusplus >= 201103L
1404 , ops { op0r, op1r }
1405 {
1406 }
1407 #else
1408 {
1409 ops[0] = op0r;
1410 ops[1] = op1r;
1411 }
1412 #endif
1413
1414 /* Struct representing a group of stores to contiguous memory locations.
1415 These are produced by the second phase (coalescing) and consumed in the
1416 third phase that outputs the widened stores. */
1417
1418 class merged_store_group
1419 {
1420 public:
1421 unsigned HOST_WIDE_INT start;
1422 unsigned HOST_WIDE_INT width;
1423 unsigned HOST_WIDE_INT bitregion_start;
1424 unsigned HOST_WIDE_INT bitregion_end;
1425 /* The size of the allocated memory for val and mask. */
1426 unsigned HOST_WIDE_INT buf_size;
1427 unsigned HOST_WIDE_INT align_base;
1428 poly_uint64 load_align_base[2];
1429
1430 unsigned int align;
1431 unsigned int load_align[2];
1432 unsigned int first_order;
1433 unsigned int last_order;
1434 bool bit_insertion;
1435 bool only_constants;
1436 unsigned int first_nonmergeable_order;
1437
1438 auto_vec<store_immediate_info *> stores;
1439 /* We record the first and last original statements in the sequence because
1440 we'll need their vuse/vdef and replacement position. It's easier to keep
1441 track of them separately as 'stores' is reordered by apply_stores. */
1442 gimple *last_stmt;
1443 gimple *first_stmt;
1444 unsigned char *val;
1445 unsigned char *mask;
1446
1447 merged_store_group (store_immediate_info *);
1448 ~merged_store_group ();
1449 bool can_be_merged_into (store_immediate_info *);
1450 void merge_into (store_immediate_info *);
1451 void merge_overlapping (store_immediate_info *);
1452 bool apply_stores ();
1453 private:
1454 void do_merge (store_immediate_info *);
1455 };
1456
1457 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1458
1459 static void
1460 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1461 {
1462 if (!fd)
1463 return;
1464
1465 for (unsigned int i = 0; i < len; i++)
1466 fprintf (fd, "%02x ", ptr[i]);
1467 fprintf (fd, "\n");
1468 }
1469
1470 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
1471 bits between adjacent elements. AMNT should be within
1472 [0, BITS_PER_UNIT).
1473 Example, AMNT = 2:
1474 00011111|11100000 << 2 = 01111111|10000000
1475 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
1476
1477 static void
1478 shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt)
1479 {
1480 if (amnt == 0)
1481 return;
1482
1483 unsigned char carry_over = 0U;
1484 unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
1485 unsigned char clear_mask = (~0U) << amnt;
1486
1487 for (unsigned int i = 0; i < sz; i++)
1488 {
1489 unsigned prev_carry_over = carry_over;
1490 carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);
1491
1492 ptr[i] <<= amnt;
1493 if (i != 0)
1494 {
1495 ptr[i] &= clear_mask;
1496 ptr[i] |= prev_carry_over;
1497 }
1498 }
1499 }
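
/* A minimal self-contained check of the behaviour documented above,
   mirroring the AMNT = 2 example (an illustration only; the function is
   static, so such a check would have to live in this translation unit):

     #include <assert.h>

     static void
     shift_bytes_in_array_demo (void)
     {
       unsigned char buf[2] = { 0xe0, 0x1f };  // PTR[0] = 0xe0, PTR[1] = 0x1f
       shift_bytes_in_array (buf, 2, 2);
       // Expect 01111111|10000000, i.e. PTR[1] = 0x7f, PTR[0] = 0x80.
       assert (buf[0] == 0x80 && buf[1] == 0x7f);
     }
*/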
1500
1501 /* Like shift_bytes_in_array but for big-endian.
1502 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
1503 bits between adjacent elements. AMNT should be within
1504 [0, BITS_PER_UNIT).
1505 Example, AMNT = 2:
1506 00011111|11100000 >> 2 = 00000111|11111000
1507 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
1508
1509 static void
1510 shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
1511 unsigned int amnt)
1512 {
1513 if (amnt == 0)
1514 return;
1515
1516 unsigned char carry_over = 0U;
1517 unsigned char carry_mask = ~(~0U << amnt);
1518
1519 for (unsigned int i = 0; i < sz; i++)
1520 {
1521 unsigned prev_carry_over = carry_over;
1522 carry_over = ptr[i] & carry_mask;
1523
1524 carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
1525 ptr[i] >>= amnt;
1526 ptr[i] |= prev_carry_over;
1527 }
1528 }
1529
1530 /* Clear out LEN bits starting from bit START in the byte array
1531 PTR. This clears the bits to the *right* from START.
1532 START must be within [0, BITS_PER_UNIT) and counts starting from
1533 the least significant bit. */
1534
1535 static void
1536 clear_bit_region_be (unsigned char *ptr, unsigned int start,
1537 unsigned int len)
1538 {
1539 if (len == 0)
1540 return;
1541 /* Clear len bits to the right of start. */
1542 else if (len <= start + 1)
1543 {
1544 unsigned char mask = (~(~0U << len));
1545 mask = mask << (start + 1U - len);
1546 ptr[0] &= ~mask;
1547 }
1548 else if (start != BITS_PER_UNIT - 1)
1549 {
1550 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1551 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1552 len - (start % BITS_PER_UNIT) - 1);
1553 }
1554 else if (start == BITS_PER_UNIT - 1
1555 && len > BITS_PER_UNIT)
1556 {
1557 unsigned int nbytes = len / BITS_PER_UNIT;
1558 memset (ptr, 0, nbytes);
1559 if (len % BITS_PER_UNIT != 0)
1560 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1561 len % BITS_PER_UNIT);
1562 }
1563 else
1564 gcc_unreachable ();
1565 }
1566
1567 /* In the byte array PTR clear the bit region starting at bit
1568 START that is LEN bits wide.
1569 For regions spanning multiple bytes do this recursively until we reach
1570 zero LEN or a region contained within a single byte. */
1571
1572 static void
1573 clear_bit_region (unsigned char *ptr, unsigned int start,
1574 unsigned int len)
1575 {
1576 /* Degenerate base case. */
1577 if (len == 0)
1578 return;
1579 else if (start >= BITS_PER_UNIT)
1580 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1581 /* Second base case. */
1582 else if ((start + len) <= BITS_PER_UNIT)
1583 {
1584 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1585 mask >>= BITS_PER_UNIT - (start + len);
1586
1587 ptr[0] &= ~mask;
1588
1589 return;
1590 }
1591 /* Clear most significant bits in a byte and proceed with the next byte. */
1592 else if (start != 0)
1593 {
1594 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1595 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1596 }
1597 /* Whole bytes need to be cleared. */
1598 else if (start == 0 && len > BITS_PER_UNIT)
1599 {
1600 unsigned int nbytes = len / BITS_PER_UNIT;
1601 /* We could recurse on each byte but we clear whole bytes, so a simple
1602 memset will do. */
1603 memset (ptr, '\0', nbytes);
1604 /* Clear the remaining sub-byte region if there is one. */
1605 if (len % BITS_PER_UNIT != 0)
1606 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1607 }
1608 else
1609 gcc_unreachable ();
1610 }
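
/* A minimal illustration of the recursion above (not part of the pass; it
   assumes access to the static function):

     #include <assert.h>

     static void
     clear_bit_region_demo (void)
     {
       unsigned char buf[2] = { 0xff, 0xff };
       // Clear 8 bits starting at bit 4 of the region: the high nibble of
       // byte 0 and the low nibble of byte 1.
       clear_bit_region (buf, 4, 8);
       assert (buf[0] == 0x0f && buf[1] == 0xf0);
     }
*/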
1611
1612 /* Write BITLEN bits of EXPR to the byte array PTR at
1613 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1614 Return true if the operation succeeded. */
1615
1616 static bool
1617 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
1618 unsigned int total_bytes)
1619 {
1620 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1621 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1622 || (bitpos % BITS_PER_UNIT)
1623 || !int_mode_for_size (bitlen, 0).exists ());
1624 bool empty_ctor_p
1625 = (TREE_CODE (expr) == CONSTRUCTOR
1626 && CONSTRUCTOR_NELTS (expr) == 0
1627 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1628 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1629
1630 if (!sub_byte_op_p)
1631 {
1632 if (first_byte >= total_bytes)
1633 return false;
1634 total_bytes -= first_byte;
1635 if (empty_ctor_p)
1636 {
1637 unsigned HOST_WIDE_INT rhs_bytes
1638 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1639 if (rhs_bytes > total_bytes)
1640 return false;
1641 memset (ptr + first_byte, '\0', rhs_bytes);
1642 return true;
1643 }
1644 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1645 }
1646
1647 /* LITTLE-ENDIAN
1648 We are writing a non byte-sized quantity or at a position that is not
1649 at a byte boundary.
1650 |--------|--------|--------| ptr + first_byte
1651 ^ ^
1652 xxx xxxxxxxx xxx< bp>
1653 |______EXPR____|
1654
1655 First native_encode_expr EXPR into a temporary buffer and shift each
1656 byte in the buffer by 'bp' (carrying the bits over as necessary).
1657 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1658 <------bitlen---->< bp>
1659 Then we clear the destination bits:
1660 |---00000|00000000|000-----| ptr + first_byte
1661 <-------bitlen--->< bp>
1662
1663 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1664 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1665
1666 BIG-ENDIAN
1667 We are writing a non byte-sized quantity or at a position that is not
1668 at a byte boundary.
1669 ptr + first_byte |--------|--------|--------|
1670 ^ ^
1671 <bp >xxx xxxxxxxx xxx
1672 |_____EXPR_____|
1673
1674 First native_encode_expr EXPR into a temporary buffer and shift each
1675 byte in the buffer to the right (carrying the bits over as necessary).
1676 We shift by as much as needed to align the most significant bit of EXPR
1677 with bitpos:
1678 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1679 <---bitlen----> <bp ><-----bitlen----->
1680 Then we clear the destination bits:
1681 ptr + first_byte |-----000||00000000||00000---|
1682 <bp ><-------bitlen----->
1683
1684 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1685 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1686 The awkwardness comes from the fact that bitpos is counted from the
1687 most significant bit of a byte. */
1688
1689 /* We must be dealing with fixed-size data at this point, since the
1690 total size is also fixed. */
1691 unsigned int byte_size;
1692 if (empty_ctor_p)
1693 {
1694 unsigned HOST_WIDE_INT rhs_bytes
1695 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1696 if (rhs_bytes > total_bytes)
1697 return false;
1698 byte_size = rhs_bytes;
1699 }
1700 else
1701 {
1702 fixed_size_mode mode
1703 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
1704 byte_size = GET_MODE_SIZE (mode);
1705 }
1706 /* Allocate an extra byte so that we have space to shift into. */
1707 byte_size++;
1708 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
1709 memset (tmpbuf, '\0', byte_size);
1710 /* The store detection code should only have allowed constants that are
1711 accepted by native_encode_expr or empty ctors. */
1712 if (!empty_ctor_p
1713 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
1714 gcc_unreachable ();
1715
1716 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1717 bytes to write. This means it can write more than
1718 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1719 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1720 bitlen and zero out the bits that are not relevant as well (that may
1721 contain a sign bit due to sign-extension). */
1722 unsigned int padding
1723 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
1724 /* On big-endian the padding is at the 'front' so just skip the initial
1725 bytes. */
1726 if (BYTES_BIG_ENDIAN)
1727 tmpbuf += padding;
1728
1729 byte_size -= padding;
1730
1731 if (bitlen % BITS_PER_UNIT != 0)
1732 {
1733 if (BYTES_BIG_ENDIAN)
1734 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1735 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1736 else
1737 clear_bit_region (tmpbuf, bitlen,
1738 byte_size * BITS_PER_UNIT - bitlen);
1739 }
1740 /* Left shifting relies on the last byte being clear if bitlen is
1741 a multiple of BITS_PER_UNIT, which might not be clear if
1742 there are padding bytes. */
1743 else if (!BYTES_BIG_ENDIAN)
1744 tmpbuf[byte_size - 1] = '\0';
1745
1746 /* Clear the bit region in PTR where the bits from TMPBUF will be
1747 inserted into. */
1748 if (BYTES_BIG_ENDIAN)
1749 clear_bit_region_be (ptr + first_byte,
1750 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1751 else
1752 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1753
1754 int shift_amnt;
1755 int bitlen_mod = bitlen % BITS_PER_UNIT;
1756 int bitpos_mod = bitpos % BITS_PER_UNIT;
1757
1758 bool skip_byte = false;
1759 if (BYTES_BIG_ENDIAN)
1760 {
1761 /* BITPOS and BITLEN are exactly aligned and no shifting
1762 is necessary. */
1763 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1764 || (bitpos_mod == 0 && bitlen_mod == 0))
1765 shift_amnt = 0;
1766 /* |. . . . . . . .|
1767 <bp > <blen >.
1768 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1769 of the value until it aligns with 'bp' in the next byte over. */
1770 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1771 {
1772 shift_amnt = bitlen_mod + bitpos_mod;
1773 skip_byte = bitlen_mod != 0;
1774 }
1775 /* |. . . . . . . .|
1776 <----bp--->
1777 <---blen---->.
1778 Shift the value right within the same byte so it aligns with 'bp'. */
1779 else
1780 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1781 }
1782 else
1783 shift_amnt = bitpos % BITS_PER_UNIT;
1784
1785 /* Create the shifted version of EXPR. */
1786 if (!BYTES_BIG_ENDIAN)
1787 {
1788 shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
1789 if (shift_amnt == 0)
1790 byte_size--;
1791 }
1792 else
1793 {
1794 gcc_assert (BYTES_BIG_ENDIAN);
1795 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1796 /* If shifting right forced us to move into the next byte skip the now
1797 empty byte. */
1798 if (skip_byte)
1799 {
1800 tmpbuf++;
1801 byte_size--;
1802 }
1803 }
1804
1805 /* Insert the bits from TMPBUF. */
1806 for (unsigned int i = 0; i < byte_size; i++)
1807 ptr[first_byte + i] |= tmpbuf[i];
1808
1809 return true;
1810 }
1811
1812 /* Sorting function for store_immediate_info objects.
1813 Sorts them by bitposition. */
1814
1815 static int
1816 sort_by_bitpos (const void *x, const void *y)
1817 {
1818 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1819 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1820
1821 if ((*tmp)->bitpos < (*tmp2)->bitpos)
1822 return -1;
1823 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1824 return 1;
1825 else
1826 /* If they are the same, use the order field, which is guaranteed
1827 to be different. */
1828 return (*tmp)->order - (*tmp2)->order;
1829 }
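/* Illustrative sketch (hypothetical statements, not from a testcase): two
   byte stores that hit the same bit position, e.g.
     p[0] = 1;   // order 0
     p[0] = 2;   // order 1
   compare equal on bitpos, so the ORDER field decides and the later store
   sorts after the earlier one, keeping the qsort result deterministic.  */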
1830
1831 /* Sorting function for store_immediate_info objects.
1832 Sorts them by the order field. */
1833
1834 static int
1835 sort_by_order (const void *x, const void *y)
1836 {
1837 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1838 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1839
1840 if ((*tmp)->order < (*tmp2)->order)
1841 return -1;
1842 else if ((*tmp)->order > (*tmp2)->order)
1843 return 1;
1844
1845 gcc_unreachable ();
1846 }
1847
1848 /* Initialize a merged_store_group object from a store_immediate_info
1849 object. */
1850
1851 merged_store_group::merged_store_group (store_immediate_info *info)
1852 {
1853 start = info->bitpos;
1854 width = info->bitsize;
1855 bitregion_start = info->bitregion_start;
1856 bitregion_end = info->bitregion_end;
1857 /* VAL has memory allocated for it in apply_stores once the group
1858 width has been finalized. */
1859 val = NULL;
1860 mask = NULL;
1861 bit_insertion = false;
1862 only_constants = info->rhs_code == INTEGER_CST;
1863 first_nonmergeable_order = ~0U;
1864 unsigned HOST_WIDE_INT align_bitpos = 0;
1865 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1866 &align, &align_bitpos);
1867 align_base = start - align_bitpos;
1868 for (int i = 0; i < 2; ++i)
1869 {
1870 store_operand_info &op = info->ops[i];
1871 if (op.base_addr == NULL_TREE)
1872 {
1873 load_align[i] = 0;
1874 load_align_base[i] = 0;
1875 }
1876 else
1877 {
1878 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
1879 load_align_base[i] = op.bitpos - align_bitpos;
1880 }
1881 }
1882 stores.create (1);
1883 stores.safe_push (info);
1884 last_stmt = info->stmt;
1885 last_order = info->order;
1886 first_stmt = last_stmt;
1887 first_order = last_order;
1888 buf_size = 0;
1889 }
1890
1891 merged_store_group::~merged_store_group ()
1892 {
1893 if (val)
1894 XDELETEVEC (val);
1895 }
1896
1897 /* Return true if the store described by INFO can be merged into the group. */
1898
1899 bool
1900 merged_store_group::can_be_merged_into (store_immediate_info *info)
1901 {
1902 /* Do not merge bswap patterns. */
1903 if (info->rhs_code == LROTATE_EXPR)
1904 return false;
1905
1906 /* The canonical case. */
1907 if (info->rhs_code == stores[0]->rhs_code)
1908 return true;
1909
1910 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1911 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
1912 return true;
1913
1914 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
1915 return true;
1916
1917 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
1918 if (info->rhs_code == MEM_REF
1919 && (stores[0]->rhs_code == INTEGER_CST
1920 || stores[0]->rhs_code == BIT_INSERT_EXPR)
1921 && info->bitregion_start == stores[0]->bitregion_start
1922 && info->bitregion_end == stores[0]->bitregion_end)
1923 return true;
1924
1925 if (stores[0]->rhs_code == MEM_REF
1926 && (info->rhs_code == INTEGER_CST
1927 || info->rhs_code == BIT_INSERT_EXPR)
1928 && info->bitregion_start == stores[0]->bitregion_start
1929 && info->bitregion_end == stores[0]->bitregion_end)
1930 return true;
1931
1932 return false;
1933 }
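/* An illustrative example (hypothetical GIMPLE, not taken from a testcase):
   a group started by
     s.i = 123;          // INTEGER_CST
   can accept a following bit-field store recorded as BIT_INSERT_EXPR, and a
   MEM_REF (memory-copy) store can join a constant group only when both cover
   the same bit region, while a store belonging to a recognized bswap
   sequence (rhs_code LROTATE_EXPR) is never merged here.  */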
1934
1935 /* Helper method for merge_into and merge_overlapping to do
1936 the common part. */
1937
1938 void
1939 merged_store_group::do_merge (store_immediate_info *info)
1940 {
1941 bitregion_start = MIN (bitregion_start, info->bitregion_start);
1942 bitregion_end = MAX (bitregion_end, info->bitregion_end);
1943
1944 unsigned int this_align;
1945 unsigned HOST_WIDE_INT align_bitpos = 0;
1946 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
1947 &this_align, &align_bitpos);
1948 if (this_align > align)
1949 {
1950 align = this_align;
1951 align_base = info->bitpos - align_bitpos;
1952 }
1953 for (int i = 0; i < 2; ++i)
1954 {
1955 store_operand_info &op = info->ops[i];
1956 if (!op.base_addr)
1957 continue;
1958
1959 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
1960 if (this_align > load_align[i])
1961 {
1962 load_align[i] = this_align;
1963 load_align_base[i] = op.bitpos - align_bitpos;
1964 }
1965 }
1966
1967 gimple *stmt = info->stmt;
1968 stores.safe_push (info);
1969 if (info->order > last_order)
1970 {
1971 last_order = info->order;
1972 last_stmt = stmt;
1973 }
1974 else if (info->order < first_order)
1975 {
1976 first_order = info->order;
1977 first_stmt = stmt;
1978 }
1979 if (info->rhs_code != INTEGER_CST)
1980 only_constants = false;
1981 }
1982
1983 /* Merge a store recorded by INFO into this merged store.
1984 The store is not overlapping with the existing recorded
1985 stores. */
1986
1987 void
1988 merged_store_group::merge_into (store_immediate_info *info)
1989 {
1990 /* Make sure we're inserting in the position we think we're inserting. */
1991 gcc_assert (info->bitpos >= start + width
1992 && info->bitregion_start <= bitregion_end);
1993
1994 width = info->bitpos + info->bitsize - start;
1995 do_merge (info);
1996 }
1997
1998 /* Merge a store described by INFO into this merged store.
1999 INFO overlaps in some way with the current store (i.e. it's not contiguous,
2000 which is the case handled by merged_store_group::merge_into). */
2001
2002 void
2003 merged_store_group::merge_overlapping (store_immediate_info *info)
2004 {
2005 /* If the store extends the size of the group, extend the width. */
2006 if (info->bitpos + info->bitsize > start + width)
2007 width = info->bitpos + info->bitsize - start;
2008
2009 do_merge (info);
2010 }
2011
2012 /* Go through all the recorded stores in this group in program order and
2013 apply their values to the VAL byte array to create the final merged
2014 value. Return true if the operation succeeded. */
2015
2016 bool
2017 merged_store_group::apply_stores ()
2018 {
2019 /* Make sure we have more than one store in the group, otherwise we cannot
2020 merge anything. */
2021 if (bitregion_start % BITS_PER_UNIT != 0
2022 || bitregion_end % BITS_PER_UNIT != 0
2023 || stores.length () == 1)
2024 return false;
2025
2026 stores.qsort (sort_by_order);
2027 store_immediate_info *info;
2028 unsigned int i;
2029 /* Create a power-of-2-sized buffer for native_encode_expr. */
2030 buf_size = 1 << ceil_log2 ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
2031 val = XNEWVEC (unsigned char, 2 * buf_size);
2032 mask = val + buf_size;
2033 memset (val, 0, buf_size);
2034 memset (mask, ~0U, buf_size);
2035
2036 FOR_EACH_VEC_ELT (stores, i, info)
2037 {
2038 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
2039 tree cst;
2040 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2041 cst = info->ops[0].val;
2042 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2043 cst = info->ops[1].val;
2044 else
2045 cst = NULL_TREE;
2046 bool ret = true;
2047 if (cst)
2048 {
2049 if (info->rhs_code == BIT_INSERT_EXPR)
2050 bit_insertion = true;
2051 else
2052 ret = encode_tree_to_bitpos (cst, val, info->bitsize,
2053 pos_in_buffer, buf_size);
2054 }
2055 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2056 if (BYTES_BIG_ENDIAN)
2057 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2058 - (pos_in_buffer % BITS_PER_UNIT)),
2059 info->bitsize);
2060 else
2061 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2062 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2063 {
2064 if (ret)
2065 {
2066 fputs ("After writing ", dump_file);
2067 print_generic_expr (dump_file, cst, TDF_NONE);
2068 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2069 " at position %d\n", info->bitsize, pos_in_buffer);
2070 fputs (" the merged value contains ", dump_file);
2071 dump_char_array (dump_file, val, buf_size);
2072 fputs (" the merged mask contains ", dump_file);
2073 dump_char_array (dump_file, mask, buf_size);
2074 if (bit_insertion)
2075 fputs (" bit insertion is required\n", dump_file);
2076 }
2077 else
2078 fprintf (dump_file, "Failed to merge stores\n");
2079 }
2080 if (!ret)
2081 return false;
2082 }
2083 stores.qsort (sort_by_bitpos);
2084 return true;
2085 }
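/* A small worked example (assuming a little-endian target and a 4-byte
   bit region starting at the base of the group):
     p[0] = 0x12;
     p[1] = 0x34;
   After apply_stores, VAL is { 0x12, 0x34, 0x00, 0x00 } and MASK is
   { 0x00, 0x00, 0xff, 0xff }, i.e. MASK keeps ~0 only for the bytes that
   no recorded store touches, which split_group later treats as padding.  */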
2086
2087 /* Structure describing the store chain. */
2088
2089 class imm_store_chain_info
2090 {
2091 public:
2092 /* Doubly-linked list that imposes an order on chain processing.
2093 PNXP (prev's next pointer) points to the head of a list, or to
2094 the next field in the previous chain in the list.
2095 See pass_store_merging::m_stores_head for more rationale. */
2096 imm_store_chain_info *next, **pnxp;
2097 tree base_addr;
2098 auto_vec<store_immediate_info *> m_store_info;
2099 auto_vec<merged_store_group *> m_merged_store_groups;
2100
2101 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2102 : next (inspt), pnxp (&inspt), base_addr (b_a)
2103 {
2104 inspt = this;
2105 if (next)
2106 {
2107 gcc_checking_assert (pnxp == next->pnxp);
2108 next->pnxp = &next;
2109 }
2110 }
2111 ~imm_store_chain_info ()
2112 {
2113 *pnxp = next;
2114 if (next)
2115 {
2116 gcc_checking_assert (&next == next->pnxp);
2117 next->pnxp = pnxp;
2118 }
2119 }
2120 bool terminate_and_process_chain ();
2121 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
2122 bool coalesce_immediate_stores ();
2123 bool output_merged_store (merged_store_group *);
2124 bool output_merged_stores ();
2125 };
2126
2127 const pass_data pass_data_tree_store_merging = {
2128 GIMPLE_PASS, /* type */
2129 "store-merging", /* name */
2130 OPTGROUP_NONE, /* optinfo_flags */
2131 TV_GIMPLE_STORE_MERGING, /* tv_id */
2132 PROP_ssa, /* properties_required */
2133 0, /* properties_provided */
2134 0, /* properties_destroyed */
2135 0, /* todo_flags_start */
2136 TODO_update_ssa, /* todo_flags_finish */
2137 };
2138
2139 class pass_store_merging : public gimple_opt_pass
2140 {
2141 public:
2142 pass_store_merging (gcc::context *ctxt)
2143 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
2144 {
2145 }
2146
2147 /* Pass not supported for PDP-endian, nor for insane hosts or
2148 target character sizes where native_{encode,interpret}_expr
2149 doesn't work properly. */
2150 virtual bool
2151 gate (function *)
2152 {
2153 return flag_store_merging
2154 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2155 && CHAR_BIT == 8
2156 && BITS_PER_UNIT == 8;
2157 }
2158
2159 virtual unsigned int execute (function *);
2160
2161 private:
2162 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
2163
2164 /* Form a doubly-linked stack of the elements of m_stores, so that
2165 we can iterate over them in a predictable way. Using this order
2166 avoids extraneous differences in the compiler output just because
2167 of tree pointer variations (e.g. different chains end up in
2168 different positions of m_stores, so they are handled in different
2169 orders, so they allocate or release SSA names in different
2170 orders, and when they get reused, subsequent passes end up
2171 getting different SSA names, which may ultimately change
2172 decisions when going out of SSA). */
2173 imm_store_chain_info *m_stores_head;
2174
2175 void process_store (gimple *);
2176 bool terminate_and_process_all_chains ();
2177 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
2178 bool terminate_and_release_chain (imm_store_chain_info *);
2179 }; // class pass_store_merging
2180
2181 /* Terminate and process all recorded chains. Return true if any changes
2182 were made. */
2183
2184 bool
2185 pass_store_merging::terminate_and_process_all_chains ()
2186 {
2187 bool ret = false;
2188 while (m_stores_head)
2189 ret |= terminate_and_release_chain (m_stores_head);
2190 gcc_assert (m_stores.is_empty ());
2191 gcc_assert (m_stores_head == NULL);
2192
2193 return ret;
2194 }
2195
2196 /* Terminate all chains that are affected by the statement STMT.
2197 CHAIN_INFO, if non-NULL, is the chain whose stores we should
2198 skip in these checks. */
2199
2200 bool
2201 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
2202 **chain_info,
2203 gimple *stmt)
2204 {
2205 bool ret = false;
2206
2207 /* If the statement doesn't touch memory it can't alias. */
2208 if (!gimple_vuse (stmt))
2209 return false;
2210
2211 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2212 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
2213 {
2214 next = cur->next;
2215
2216 /* We already checked all the stores in chain_info and terminated the
2217 chain if necessary. Skip it here. */
2218 if (chain_info && *chain_info == cur)
2219 continue;
2220
2221 store_immediate_info *info;
2222 unsigned int i;
2223 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
2224 {
2225 tree lhs = gimple_assign_lhs (info->stmt);
2226 if (ref_maybe_used_by_stmt_p (stmt, lhs)
2227 || stmt_may_clobber_ref_p (stmt, lhs)
2228 || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
2229 {
2230 if (dump_file && (dump_flags & TDF_DETAILS))
2231 {
2232 fprintf (dump_file, "stmt causes chain termination:\n");
2233 print_gimple_stmt (dump_file, stmt, 0);
2234 }
2235 terminate_and_release_chain (cur);
2236 ret = true;
2237 break;
2238 }
2239 }
2240 }
2241
2242 return ret;
2243 }
2244
2245 /* Helper function. Terminate the recorded chain CHAIN_INFO storing to its
2246 base object. Return true if the merging and output were successful. The m_stores
2247 entry is removed after the processing in any case. */
2248
2249 bool
2250 pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
2251 {
2252 bool ret = chain_info->terminate_and_process_chain ();
2253 m_stores.remove (chain_info->base_addr);
2254 delete chain_info;
2255 return ret;
2256 }
2257
2258 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2259 may clobber REF. FIRST and LAST must be in the same basic block and
2260 have non-NULL vdef. We want to be able to sink a load of REF across
2261 stores between FIRST and LAST, up to right before LAST. */
2262
2263 bool
2264 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2265 {
2266 ao_ref r;
2267 ao_ref_init (&r, ref);
2268 unsigned int count = 0;
2269 tree vop = gimple_vdef (last);
2270 gimple *stmt;
2271
2272 gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
2273 do
2274 {
2275 stmt = SSA_NAME_DEF_STMT (vop);
2276 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2277 return true;
2278 if (gimple_store_p (stmt)
2279 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2280 return true;
2281 /* Avoid quadratic compile time by bounding the number of checks
2282 we perform. */
2283 if (++count > MAX_STORE_ALIAS_CHECKS)
2284 return true;
2285 vop = gimple_vuse (stmt);
2286 }
2287 while (stmt != first);
2288 return false;
2289 }
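/* Illustrative use (hypothetical statements): for
     _1 = q[0];     // load of REF
     p[0] = _1;     // FIRST
     p[1] = _2;
     p[2] = _3;     // LAST
   the walk starts from LAST's virtual operand and follows the vuse chain
   backwards towards FIRST, checking at each step whether the statement may
   clobber q[0]; it also gives up conservatively after
   MAX_STORE_ALIAS_CHECKS statements to bound compile time.  */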
2290
2291 /* Return true if INFO->ops[IDX] is mergeable with the
2292 corresponding loads already in MERGED_STORE group.
2293 BASE_ADDR is the base address of the whole store group. */
2294
2295 bool
2296 compatible_load_p (merged_store_group *merged_store,
2297 store_immediate_info *info,
2298 tree base_addr, int idx)
2299 {
2300 store_immediate_info *infof = merged_store->stores[0];
2301 if (!info->ops[idx].base_addr
2302 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2303 info->bitpos - infof->bitpos)
2304 || !operand_equal_p (info->ops[idx].base_addr,
2305 infof->ops[idx].base_addr, 0))
2306 return false;
2307
2308 store_immediate_info *infol = merged_store->stores.last ();
2309 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2310 /* In this case all vuses should be the same, e.g.
2311 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2312 or
2313 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2314 and we can emit the coalesced load next to any of those loads. */
2315 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2316 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2317 return true;
2318
2319 /* Otherwise, at least for now require that the load has the same
2320 vuse as the store. See following examples. */
2321 if (gimple_vuse (info->stmt) != load_vuse)
2322 return false;
2323
2324 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2325 || (infof != infol
2326 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2327 return false;
2328
2329 /* If the load is from the same location as the store, already
2330 the construction of the immediate chain info guarantees no intervening
2331 stores, so no further checks are needed. Example:
2332 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2333 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2334 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2335 return true;
2336
2337 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2338 of the stores in the group, or any other stores in between those.
2339 Previous calls to compatible_load_p ensured that for all the
2340 merged_store->stores IDX loads, no stmts starting with
2341 merged_store->first_stmt and ending right before merged_store->last_stmt
2342 clobber those loads. */
2343 gimple *first = merged_store->first_stmt;
2344 gimple *last = merged_store->last_stmt;
2345 unsigned int i;
2346 store_immediate_info *infoc;
2347 /* The stores are sorted by increasing store bitpos, so if the info->stmt
2348 store comes before the so-far-first statement of the group, we'll be changing
2349 merged_store->first_stmt. In that case we need to give up if
2350 any of the stmts in the new range may clobber any of the earlier
2351 processed loads. */
2352 if (info->order < merged_store->first_order)
2353 {
2354 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2355 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2356 return false;
2357 first = info->stmt;
2358 }
2359 /* Similarly, we could change merged_store->last_stmt, so ensure
2360 in that case no stmts in the new range clobber any of the earlier
2361 processed loads. */
2362 else if (info->order > merged_store->last_order)
2363 {
2364 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2365 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2366 return false;
2367 last = info->stmt;
2368 }
2369 /* And finally, we'd be adding a new load to the set, ensure it isn't
2370 clobbered in the new range. */
2371 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2372 return false;
2373
2374 /* Otherwise, we are looking for:
2375 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2376 or
2377 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2378 return true;
2379 }
2380
2381 /* Add all refs loaded to compute VAL to REFS vector. */
2382
2383 void
2384 gather_bswap_load_refs (vec<tree> *refs, tree val)
2385 {
2386 if (TREE_CODE (val) != SSA_NAME)
2387 return;
2388
2389 gimple *stmt = SSA_NAME_DEF_STMT (val);
2390 if (!is_gimple_assign (stmt))
2391 return;
2392
2393 if (gimple_assign_load_p (stmt))
2394 {
2395 refs->safe_push (gimple_assign_rhs1 (stmt));
2396 return;
2397 }
2398
2399 switch (gimple_assign_rhs_class (stmt))
2400 {
2401 case GIMPLE_BINARY_RHS:
2402 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2403 /* FALLTHRU */
2404 case GIMPLE_UNARY_RHS:
2405 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2406 break;
2407 default:
2408 gcc_unreachable ();
2409 }
2410 }
2411
2412 /* Check if there are any stores in M_STORE_INFO after index I
2413 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2414 a potential group ending at END and have their order
2415 smaller than LAST_ORDER. RHS_CODE is the kind of store in the
2416 group. Return true if there are no such stores.
2417 Consider:
2418 MEM[(long long int *)p_28] = 0;
2419 MEM[(long long int *)p_28 + 8B] = 0;
2420 MEM[(long long int *)p_28 + 16B] = 0;
2421 MEM[(long long int *)p_28 + 24B] = 0;
2422 _129 = (int) _130;
2423 MEM[(int *)p_28 + 8B] = _129;
2424 MEM[(int *)p_28].a = -1;
2425 We already have
2426 MEM[(long long int *)p_28] = 0;
2427 MEM[(int *)p_28].a = -1;
2428 stmts in the current group and need to consider if it is safe to
2429 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2430 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2431 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2432 into the group and merging of those 3 stores is successful, merged
2433 stmts will be emitted at the latest store from that group, i.e.
2434 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2435 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2436 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2437 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2438 into the group. That way it will be its own store group and will
2439 not be touched. If RHS_CODE is INTEGER_CST and there are overlapping
2440 INTEGER_CST stores, those are mergeable using merge_overlapping,
2441 so don't return false for those. */
2442
2443 static bool
2444 check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2445 enum tree_code rhs_code, unsigned int last_order,
2446 unsigned HOST_WIDE_INT end)
2447 {
2448 unsigned int len = m_store_info.length ();
2449 for (++i; i < len; ++i)
2450 {
2451 store_immediate_info *info = m_store_info[i];
2452 if (info->bitpos >= end)
2453 break;
2454 if (info->order < last_order
2455 && (rhs_code != INTEGER_CST || info->rhs_code != INTEGER_CST))
2456 return false;
2457 }
2458 return true;
2459 }
2460
2461 /* Return true if m_store_info[first] and at least one following store
2462 form a group which stores a try_size-bit value that is byte swapped
2463 from a memory load or some other value, or is the identity of some value.
2464 This uses the bswap pass APIs. */
2465
2466 bool
2467 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2468 unsigned int first,
2469 unsigned int try_size)
2470 {
2471 unsigned int len = m_store_info.length (), last = first;
2472 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2473 if (width >= try_size)
2474 return false;
2475 for (unsigned int i = first + 1; i < len; ++i)
2476 {
2477 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2478 || m_store_info[i]->ins_stmt == NULL)
2479 return false;
2480 width += m_store_info[i]->bitsize;
2481 if (width >= try_size)
2482 {
2483 last = i;
2484 break;
2485 }
2486 }
2487 if (width != try_size)
2488 return false;
2489
2490 bool allow_unaligned
2491 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
2492 /* Punt if the combined store would not be aligned and we need alignment. */
2493 if (!allow_unaligned)
2494 {
2495 unsigned int align = merged_store->align;
2496 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2497 for (unsigned int i = first + 1; i <= last; ++i)
2498 {
2499 unsigned int this_align;
2500 unsigned HOST_WIDE_INT align_bitpos = 0;
2501 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2502 &this_align, &align_bitpos);
2503 if (this_align > align)
2504 {
2505 align = this_align;
2506 align_base = m_store_info[i]->bitpos - align_bitpos;
2507 }
2508 }
2509 unsigned HOST_WIDE_INT align_bitpos
2510 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2511 if (align_bitpos)
2512 align = least_bit_hwi (align_bitpos);
2513 if (align < try_size)
2514 return false;
2515 }
2516
2517 tree type;
2518 switch (try_size)
2519 {
2520 case 16: type = uint16_type_node; break;
2521 case 32: type = uint32_type_node; break;
2522 case 64: type = uint64_type_node; break;
2523 default: gcc_unreachable ();
2524 }
2525 struct symbolic_number n;
2526 gimple *ins_stmt = NULL;
2527 int vuse_store = -1;
2528 unsigned int first_order = merged_store->first_order;
2529 unsigned int last_order = merged_store->last_order;
2530 gimple *first_stmt = merged_store->first_stmt;
2531 gimple *last_stmt = merged_store->last_stmt;
2532 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2533 store_immediate_info *infof = m_store_info[first];
2534
2535 for (unsigned int i = first; i <= last; ++i)
2536 {
2537 store_immediate_info *info = m_store_info[i];
2538 struct symbolic_number this_n = info->n;
2539 this_n.type = type;
2540 if (!this_n.base_addr)
2541 this_n.range = try_size / BITS_PER_UNIT;
2542 else
2543 /* Update vuse in case it has been changed by output_merged_stores. */
2544 this_n.vuse = gimple_vuse (info->ins_stmt);
2545 unsigned int bitpos = info->bitpos - infof->bitpos;
2546 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2547 BYTES_BIG_ENDIAN
2548 ? try_size - info->bitsize - bitpos
2549 : bitpos))
2550 return false;
2551 if (this_n.base_addr && vuse_store)
2552 {
2553 unsigned int j;
2554 for (j = first; j <= last; ++j)
2555 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2556 break;
2557 if (j > last)
2558 {
2559 if (vuse_store == 1)
2560 return false;
2561 vuse_store = 0;
2562 }
2563 }
2564 if (i == first)
2565 {
2566 n = this_n;
2567 ins_stmt = info->ins_stmt;
2568 }
2569 else
2570 {
2571 if (n.base_addr && n.vuse != this_n.vuse)
2572 {
2573 if (vuse_store == 0)
2574 return false;
2575 vuse_store = 1;
2576 }
2577 if (info->order > last_order)
2578 {
2579 last_order = info->order;
2580 last_stmt = info->stmt;
2581 }
2582 else if (info->order < first_order)
2583 {
2584 first_order = info->order;
2585 first_stmt = info->stmt;
2586 }
2587 end = MAX (end, info->bitpos + info->bitsize);
2588
2589 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2590 &this_n, &n);
2591 if (ins_stmt == NULL)
2592 return false;
2593 }
2594 }
2595
2596 uint64_t cmpxchg, cmpnop;
2597 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2598
2599 /* A complete byte swap should make the symbolic number start with
2600 the largest digit in the highest-order byte. An unchanged symbolic
2601 number indicates a read with the same endianness as the target. */
2602 if (n.n != cmpnop && n.n != cmpxchg)
2603 return false;
2604
2605 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2606 return false;
2607
2608 if (!check_no_overlap (m_store_info, last, LROTATE_EXPR, last_order, end))
2609 return false;
2610
2611 /* Don't handle memory copy this way if normal non-bswap processing
2612 would handle it too. */
2613 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2614 {
2615 unsigned int i;
2616 for (i = first; i <= last; ++i)
2617 if (m_store_info[i]->rhs_code != MEM_REF)
2618 break;
2619 if (i == last + 1)
2620 return false;
2621 }
2622
2623 if (n.n == cmpxchg)
2624 switch (try_size)
2625 {
2626 case 16:
2627 /* Will emit LROTATE_EXPR. */
2628 break;
2629 case 32:
2630 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2631 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2632 break;
2633 return false;
2634 case 64:
2635 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2636 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2637 break;
2638 return false;
2639 default:
2640 gcc_unreachable ();
2641 }
2642
2643 if (!allow_unaligned && n.base_addr)
2644 {
2645 unsigned int align = get_object_alignment (n.src);
2646 if (align < try_size)
2647 return false;
2648 }
2649
2650 /* If each load has vuse of the corresponding store, need to verify
2651 the loads can be sunk right before the last store. */
2652 if (vuse_store == 1)
2653 {
2654 auto_vec<tree, 64> refs;
2655 for (unsigned int i = first; i <= last; ++i)
2656 gather_bswap_load_refs (&refs,
2657 gimple_assign_rhs1 (m_store_info[i]->stmt));
2658
2659 unsigned int i;
2660 tree ref;
2661 FOR_EACH_VEC_ELT (refs, i, ref)
2662 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2663 return false;
2664 n.vuse = NULL_TREE;
2665 }
2666
2667 infof->n = n;
2668 infof->ins_stmt = ins_stmt;
2669 for (unsigned int i = first; i <= last; ++i)
2670 {
2671 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2672 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2673 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2674 if (i != first)
2675 merged_store->merge_into (m_store_info[i]);
2676 }
2677
2678 return true;
2679 }
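/* A hedged illustration of the outcome: for four consecutive byte stores
     p[0] = x >> 24;  p[1] = x >> 16;  p[2] = x >> 8;  p[3] = x;
   on a little-endian target the group is tagged with LROTATE_EXPR above and
   is later emitted roughly as
     *(uint32_t *) p = __builtin_bswap32 (x);
   whereas the mirror-image sequence would be recognized as an identity
   (NOP_EXPR) and become a plain 32-bit store of x.  */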
2680
2681 /* Go through the candidate stores recorded in m_store_info and merge them
2682 into merged_store_group objects recorded into m_merged_store_groups
2683 representing the widened stores. Return true if coalescing was successful
2684 and the number of widened stores is fewer than the original number
2685 of stores. */
2686
2687 bool
2688 imm_store_chain_info::coalesce_immediate_stores ()
2689 {
2690 /* Anything less can't be processed. */
2691 if (m_store_info.length () < 2)
2692 return false;
2693
2694 if (dump_file && (dump_flags & TDF_DETAILS))
2695 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
2696 m_store_info.length ());
2697
2698 store_immediate_info *info;
2699 unsigned int i, ignore = 0;
2700
2701 /* Order the stores by the bitposition they write to. */
2702 m_store_info.qsort (sort_by_bitpos);
2703
2704 info = m_store_info[0];
2705 merged_store_group *merged_store = new merged_store_group (info);
2706 if (dump_file && (dump_flags & TDF_DETAILS))
2707 fputs ("New store group\n", dump_file);
2708
2709 FOR_EACH_VEC_ELT (m_store_info, i, info)
2710 {
2711 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2712
2713 if (i <= ignore)
2714 goto done;
2715
2716 /* First try to handle a group of stores like:
2717 p[0] = data >> 24;
2718 p[1] = data >> 16;
2719 p[2] = data >> 8;
2720 p[3] = data;
2721 using the bswap framework. */
2722 if (info->bitpos == merged_store->start + merged_store->width
2723 && merged_store->stores.length () == 1
2724 && merged_store->stores[0]->ins_stmt != NULL
2725 && info->ins_stmt != NULL)
2726 {
2727 unsigned int try_size;
2728 for (try_size = 64; try_size >= 16; try_size >>= 1)
2729 if (try_coalesce_bswap (merged_store, i - 1, try_size))
2730 break;
2731
2732 if (try_size >= 16)
2733 {
2734 ignore = i + merged_store->stores.length () - 1;
2735 m_merged_store_groups.safe_push (merged_store);
2736 if (ignore < m_store_info.length ())
2737 merged_store = new merged_store_group (m_store_info[ignore]);
2738 else
2739 merged_store = NULL;
2740 goto done;
2741 }
2742 }
2743
2744 new_bitregion_start
2745 = MIN (merged_store->bitregion_start, info->bitregion_start);
2746 new_bitregion_end
2747 = MAX (merged_store->bitregion_end, info->bitregion_end);
2748
2749 if (info->order >= merged_store->first_nonmergeable_order
2750 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
2751 > (unsigned) PARAM_VALUE (PARAM_STORE_MERGING_MAX_SIZE)))
2752 ;
2753
2754 /* |---store 1---|
2755 |---store 2---|
2756 Overlapping stores. */
2757 else if (IN_RANGE (info->bitpos, merged_store->start,
2758 merged_store->start + merged_store->width - 1))
2759 {
2760 /* Only allow overlapping stores of constants. */
2761 if (info->rhs_code == INTEGER_CST && merged_store->only_constants)
2762 {
2763 unsigned int last_order
2764 = MAX (merged_store->last_order, info->order);
2765 unsigned HOST_WIDE_INT end
2766 = MAX (merged_store->start + merged_store->width,
2767 info->bitpos + info->bitsize);
2768 if (check_no_overlap (m_store_info, i, INTEGER_CST,
2769 last_order, end))
2770 {
2771 /* check_no_overlap call above made sure there are no
2772 overlapping stores with non-INTEGER_CST rhs_code
2773 in between the first and last of the stores we've
2774 just merged. If there are any INTEGER_CST rhs_code
2775 stores in between, we need to merge_overlapping them
2776 even if in the sort_by_bitpos order there are other
2777 overlapping stores in between. Keep those stores as is.
2778 Example:
2779 MEM[(int *)p_28] = 0;
2780 MEM[(char *)p_28 + 3B] = 1;
2781 MEM[(char *)p_28 + 1B] = 2;
2782 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2783 We can't merge the zero store with the store of two and
2784 not merge anything else, because the store of one is
2785 in the original order in between those two, but in
2786 sort_by_bitpos order it comes after the last store that
2787 we can't merge with them. We can merge the first 3 stores
2788 and keep the last store as is though. */
2789 unsigned int len = m_store_info.length ();
2790 unsigned int try_order = last_order;
2791 unsigned int first_nonmergeable_order;
2792 unsigned int k;
2793 bool last_iter = false;
2794 int attempts = 0;
2795 do
2796 {
2797 unsigned int max_order = 0;
2798 unsigned first_nonmergeable_int_order = ~0U;
2799 unsigned HOST_WIDE_INT this_end = end;
2800 k = i;
2801 first_nonmergeable_order = ~0U;
2802 for (unsigned int j = i + 1; j < len; ++j)
2803 {
2804 store_immediate_info *info2 = m_store_info[j];
2805 if (info2->bitpos >= this_end)
2806 break;
2807 if (info2->order < try_order)
2808 {
2809 if (info2->rhs_code != INTEGER_CST)
2810 {
2811 /* Normally check_no_overlap makes sure this
2812 doesn't happen, but if end grows below,
2813 then we need to process more stores than
2814 check_no_overlap verified. Example:
2815 MEM[(int *)p_5] = 0;
2816 MEM[(short *)p_5 + 3B] = 1;
2817 MEM[(char *)p_5 + 4B] = _9;
2818 MEM[(char *)p_5 + 2B] = 2; */
2819 k = 0;
2820 break;
2821 }
2822 k = j;
2823 this_end = MAX (this_end,
2824 info2->bitpos + info2->bitsize);
2825 }
2826 else if (info2->rhs_code == INTEGER_CST
2827 && !last_iter)
2828 {
2829 max_order = MAX (max_order, info2->order + 1);
2830 first_nonmergeable_int_order
2831 = MIN (first_nonmergeable_int_order,
2832 info2->order);
2833 }
2834 else
2835 first_nonmergeable_order
2836 = MIN (first_nonmergeable_order, info2->order);
2837 }
2838 if (k == 0)
2839 {
2840 if (last_order == try_order)
2841 break;
2842 /* If this failed, but only because we grew
2843 try_order, retry with the last working one,
2844 so that we merge at least something. */
2845 try_order = last_order;
2846 last_iter = true;
2847 continue;
2848 }
2849 last_order = try_order;
2850 /* Retry with a larger try_order to see if we could
2851 merge some further INTEGER_CST stores. */
2852 if (max_order
2853 && (first_nonmergeable_int_order
2854 < first_nonmergeable_order))
2855 {
2856 try_order = MIN (max_order,
2857 first_nonmergeable_order);
2858 try_order
2859 = MIN (try_order,
2860 merged_store->first_nonmergeable_order);
2861 if (try_order > last_order && ++attempts < 16)
2862 continue;
2863 }
2864 first_nonmergeable_order
2865 = MIN (first_nonmergeable_order,
2866 first_nonmergeable_int_order);
2867 end = this_end;
2868 break;
2869 }
2870 while (1);
2871
2872 if (k != 0)
2873 {
2874 merged_store->merge_overlapping (info);
2875
2876 merged_store->first_nonmergeable_order
2877 = MIN (merged_store->first_nonmergeable_order,
2878 first_nonmergeable_order);
2879
2880 for (unsigned int j = i + 1; j <= k; j++)
2881 {
2882 store_immediate_info *info2 = m_store_info[j];
2883 gcc_assert (info2->bitpos < end);
2884 if (info2->order < last_order)
2885 {
2886 gcc_assert (info2->rhs_code == INTEGER_CST);
2887 if (info != info2)
2888 merged_store->merge_overlapping (info2);
2889 }
2890 /* Other stores are kept and not merged in any
2891 way. */
2892 }
2893 ignore = k;
2894 goto done;
2895 }
2896 }
2897 }
2898 }
2899 /* |---store 1---||---store 2---|
2900 This store is consecutive to the previous one.
2901 Merge it into the current store group. There can be gaps in between
2902 the stores, but there can't be gaps in between bitregions. */
2903 else if (info->bitregion_start <= merged_store->bitregion_end
2904 && merged_store->can_be_merged_into (info))
2905 {
2906 store_immediate_info *infof = merged_store->stores[0];
2907
2908 /* All the rhs_code ops that take 2 operands are commutative;
2909 swap the operands if that could make the operands compatible. */
2910 if (infof->ops[0].base_addr
2911 && infof->ops[1].base_addr
2912 && info->ops[0].base_addr
2913 && info->ops[1].base_addr
2914 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
2915 info->bitpos - infof->bitpos)
2916 && operand_equal_p (info->ops[1].base_addr,
2917 infof->ops[0].base_addr, 0))
2918 {
2919 std::swap (info->ops[0], info->ops[1]);
2920 info->ops_swapped_p = true;
2921 }
2922 if (check_no_overlap (m_store_info, i, info->rhs_code,
2923 MAX (merged_store->last_order, info->order),
2924 MAX (merged_store->start + merged_store->width,
2925 info->bitpos + info->bitsize)))
2926 {
2927 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
2928 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
2929 {
2930 info->rhs_code = BIT_INSERT_EXPR;
2931 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
2932 info->ops[0].base_addr = NULL_TREE;
2933 }
2934 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
2935 {
2936 store_immediate_info *infoj;
2937 unsigned int j;
2938 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
2939 {
2940 infoj->rhs_code = BIT_INSERT_EXPR;
2941 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
2942 infoj->ops[0].base_addr = NULL_TREE;
2943 }
2944 }
2945 if ((infof->ops[0].base_addr
2946 ? compatible_load_p (merged_store, info, base_addr, 0)
2947 : !info->ops[0].base_addr)
2948 && (infof->ops[1].base_addr
2949 ? compatible_load_p (merged_store, info, base_addr, 1)
2950 : !info->ops[1].base_addr))
2951 {
2952 merged_store->merge_into (info);
2953 goto done;
2954 }
2955 }
2956 }
2957
2958 /* |---store 1---| <gap> |---store 2---|.
2959 Gap between stores or the rhs not compatible. Start a new group. */
2960
2961 /* Try to apply all the stores recorded for the group to determine
2962 the bitpattern they write and discard it if that fails.
2963 This will also reject single-store groups. */
2964 if (merged_store->apply_stores ())
2965 m_merged_store_groups.safe_push (merged_store);
2966 else
2967 delete merged_store;
2968
2969 merged_store = new merged_store_group (info);
2970 if (dump_file && (dump_flags & TDF_DETAILS))
2971 fputs ("New store group\n", dump_file);
2972
2973 done:
2974 if (dump_file && (dump_flags & TDF_DETAILS))
2975 {
2976 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2977 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
2978 i, info->bitsize, info->bitpos);
2979 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
2980 fputc ('\n', dump_file);
2981 }
2982 }
2983
2984 /* Record or discard the last store group. */
2985 if (merged_store)
2986 {
2987 if (merged_store->apply_stores ())
2988 m_merged_store_groups.safe_push (merged_store);
2989 else
2990 delete merged_store;
2991 }
2992
2993 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
2994
2995 bool success
2996 = !m_merged_store_groups.is_empty ()
2997 && m_merged_store_groups.length () < m_store_info.length ();
2998
2999 if (success && dump_file)
3000 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
3001 m_merged_store_groups.length ());
3002
3003 return success;
3004 }
3005
3006 /* Return the type to use for the merged stores or loads described by STMTS.
3007 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3008 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3009 of the MEM_REFs if any. */
3010
3011 static tree
3012 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3013 unsigned short *cliquep, unsigned short *basep)
3014 {
3015 gimple *stmt;
3016 unsigned int i;
3017 tree type = NULL_TREE;
3018 tree ret = NULL_TREE;
3019 *cliquep = 0;
3020 *basep = 0;
3021
3022 FOR_EACH_VEC_ELT (stmts, i, stmt)
3023 {
3024 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3025 : gimple_assign_lhs (stmt);
3026 tree type1 = reference_alias_ptr_type (ref);
3027 tree base = get_base_address (ref);
3028
3029 if (i == 0)
3030 {
3031 if (TREE_CODE (base) == MEM_REF)
3032 {
3033 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3034 *basep = MR_DEPENDENCE_BASE (base);
3035 }
3036 ret = type = type1;
3037 continue;
3038 }
3039 if (!alias_ptr_types_compatible_p (type, type1))
3040 ret = ptr_type_node;
3041 if (TREE_CODE (base) != MEM_REF
3042 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3043 || *basep != MR_DEPENDENCE_BASE (base))
3044 {
3045 *cliquep = 0;
3046 *basep = 0;
3047 }
3048 }
3049 return ret;
3050 }
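/* For instance, if every statement in STMTS accesses memory through the
   same "int *" alias pointer type, that type is returned and the merged
   access keeps the precise alias set; as soon as the types disagree,
   ptr_type_node is returned so the widened access conservatively uses
   alias set 0.  */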
3051
3052 /* Return the location_t information we can find among the statements
3053 in STMTS. */
3054
3055 static location_t
3056 get_location_for_stmts (vec<gimple *> &stmts)
3057 {
3058 gimple *stmt;
3059 unsigned int i;
3060
3061 FOR_EACH_VEC_ELT (stmts, i, stmt)
3062 if (gimple_has_location (stmt))
3063 return gimple_location (stmt);
3064
3065 return UNKNOWN_LOCATION;
3066 }
3067
3068 /* Used to describe a store resulting from splitting a wide store into
3069 smaller regularly-sized stores in split_group. */
3070
3071 class split_store
3072 {
3073 public:
3074 unsigned HOST_WIDE_INT bytepos;
3075 unsigned HOST_WIDE_INT size;
3076 unsigned HOST_WIDE_INT align;
3077 auto_vec<store_immediate_info *> orig_stores;
3078 /* True if there is a single orig stmt covering the whole split store. */
3079 bool orig;
3080 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3081 unsigned HOST_WIDE_INT);
3082 };
3083
3084 /* Simple constructor. */
3085
3086 split_store::split_store (unsigned HOST_WIDE_INT bp,
3087 unsigned HOST_WIDE_INT sz,
3088 unsigned HOST_WIDE_INT al)
3089 : bytepos (bp), size (sz), align (al), orig (false)
3090 {
3091 orig_stores.create (0);
3092 }
3093
3094 /* Record all stores in GROUP that write to the region starting at BITPOS and
3095 of size BITSIZE. Record infos for such statements in STORES if
3096 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3097 if there is exactly one original store in the range. */
3098
3099 static store_immediate_info *
3100 find_constituent_stores (class merged_store_group *group,
3101 vec<store_immediate_info *> *stores,
3102 unsigned int *first,
3103 unsigned HOST_WIDE_INT bitpos,
3104 unsigned HOST_WIDE_INT bitsize)
3105 {
3106 store_immediate_info *info, *ret = NULL;
3107 unsigned int i;
3108 bool second = false;
3109 bool update_first = true;
3110 unsigned HOST_WIDE_INT end = bitpos + bitsize;
3111 for (i = *first; group->stores.iterate (i, &info); ++i)
3112 {
3113 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3114 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
3115 if (stmt_end <= bitpos)
3116 {
3117 /* BITPOS passed to this function never decreases from within the
3118 same split_group call, so optimize and don't scan info records
3119 which are known to end before or at BITPOS next time.
3120 Only do it if all stores before this one also pass this. */
3121 if (update_first)
3122 *first = i + 1;
3123 continue;
3124 }
3125 else
3126 update_first = false;
3127
3128 /* The stores in GROUP are ordered by bitposition so if we're past
3129 the region for this group return early. */
3130 if (stmt_start >= end)
3131 return ret;
3132
3133 if (stores)
3134 {
3135 stores->safe_push (info);
3136 if (ret)
3137 {
3138 ret = NULL;
3139 second = true;
3140 }
3141 }
3142 else if (ret)
3143 return NULL;
3144 if (!second)
3145 ret = info;
3146 }
3147 return ret;
3148 }
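/* A short usage sketch (hypothetical numbers): for a GROUP containing two
   stores covering bits [0, 16) and [16, 32), a query for the range [0, 16)
   returns the first store, while a query for [0, 32) returns NULL because
   more than one original store is involved (with STORES non-NULL both infos
   are still pushed onto the vector).  */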
3149
3150 /* Return how many SSA_NAMEs used to compute the value to store in the INFO
3151 store have multiple uses. If any SSA_NAME has multiple uses, also
3152 count statements needed to compute it. */
3153
3154 static unsigned
3155 count_multiple_uses (store_immediate_info *info)
3156 {
3157 gimple *stmt = info->stmt;
3158 unsigned ret = 0;
3159 switch (info->rhs_code)
3160 {
3161 case INTEGER_CST:
3162 return 0;
3163 case BIT_AND_EXPR:
3164 case BIT_IOR_EXPR:
3165 case BIT_XOR_EXPR:
3166 if (info->bit_not_p)
3167 {
3168 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3169 ret = 1; /* Fall through below to return
3170 the BIT_NOT_EXPR stmt and then
3171 BIT_{AND,IOR,XOR}_EXPR and anything it
3172 uses. */
3173 else
3174 /* After this, stmt is the BIT_NOT_EXPR. */
3175 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3176 }
3177 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3178 {
3179 ret += 1 + info->ops[0].bit_not_p;
3180 if (info->ops[1].base_addr)
3181 ret += 1 + info->ops[1].bit_not_p;
3182 return ret + 1;
3183 }
3184 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3185 /* stmt is now the BIT_*_EXPR. */
3186 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3187 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3188 else if (info->ops[info->ops_swapped_p].bit_not_p)
3189 {
3190 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3191 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3192 ++ret;
3193 }
3194 if (info->ops[1].base_addr == NULL_TREE)
3195 {
3196 gcc_checking_assert (!info->ops_swapped_p);
3197 return ret;
3198 }
3199 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3200 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3201 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3202 {
3203 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3204 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3205 ++ret;
3206 }
3207 return ret;
3208 case MEM_REF:
3209 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3210 return 1 + info->ops[0].bit_not_p;
3211 else if (info->ops[0].bit_not_p)
3212 {
3213 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3214 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3215 return 1;
3216 }
3217 return 0;
3218 case BIT_INSERT_EXPR:
3219 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3220 default:
3221 gcc_unreachable ();
3222 }
3223 }
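/* A hypothetical example for the BIT_XOR_EXPR case: given
     _1 = s.a;
     _2 = _1 ^ 15;
     t.a = _2;
   where _1 has further uses elsewhere, the function returns 1 because the
   load of s.a must be kept even after the merged store is emitted, so one
   extra statement has to be counted against the transformation.  */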
3224
3225 /* Split a merged store described by GROUP by populating the SPLIT_STORES
3226 vector (if non-NULL) with split_store structs describing the byte offset
3227 (from the base), the bit size and alignment of each store as well as the
3228 original statements involved in each such split group.
3229 This is to separate the splitting strategy from the statement
3230 building/emission/linking done in output_merged_store.
3231 Return number of new stores.
3232 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3233 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3234 BZERO_FIRST may be true only when the first store covers the whole group
3235 and clears it; if BZERO_FIRST is true, keep that first store in the set
3236 unmodified and emit further stores for the overrides only.
3237 If SPLIT_STORES is NULL, it is just a dry run to count number of
3238 new stores. */
3239
3240 static unsigned int
3241 split_group (merged_store_group *group, bool allow_unaligned_store,
3242 bool allow_unaligned_load, bool bzero_first,
3243 vec<split_store *> *split_stores,
3244 unsigned *total_orig,
3245 unsigned *total_new)
3246 {
3247 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3248 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3249 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
3250 unsigned HOST_WIDE_INT group_align = group->align;
3251 unsigned HOST_WIDE_INT align_base = group->align_base;
3252 unsigned HOST_WIDE_INT group_load_align = group_align;
3253 bool any_orig = false;
3254
3255 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3256
3257 if (group->stores[0]->rhs_code == LROTATE_EXPR
3258 || group->stores[0]->rhs_code == NOP_EXPR)
3259 {
3260 gcc_assert (!bzero_first);
3261 /* For bswap framework using sets of stores, all the checking
3262 has been done earlier in try_coalesce_bswap and needs to be
3263 emitted as a single store. */
3264 if (total_orig)
3265 {
3266 /* Avoid the old/new stmt count heuristics. It should
3267 always be beneficial. */
3268 total_new[0] = 1;
3269 total_orig[0] = 2;
3270 }
3271
3272 if (split_stores)
3273 {
3274 unsigned HOST_WIDE_INT align_bitpos
3275 = (group->start - align_base) & (group_align - 1);
3276 unsigned HOST_WIDE_INT align = group_align;
3277 if (align_bitpos)
3278 align = least_bit_hwi (align_bitpos);
3279 bytepos = group->start / BITS_PER_UNIT;
3280 split_store *store
3281 = new split_store (bytepos, group->width, align);
3282 unsigned int first = 0;
3283 find_constituent_stores (group, &store->orig_stores,
3284 &first, group->start, group->width);
3285 split_stores->safe_push (store);
3286 }
3287
3288 return 1;
3289 }
3290
3291 unsigned int ret = 0, first = 0;
3292 unsigned HOST_WIDE_INT try_pos = bytepos;
3293
3294 if (total_orig)
3295 {
3296 unsigned int i;
3297 store_immediate_info *info = group->stores[0];
3298
3299 total_new[0] = 0;
3300 total_orig[0] = 1; /* The orig store. */
3301 info = group->stores[0];
3302 if (info->ops[0].base_addr)
3303 total_orig[0]++;
3304 if (info->ops[1].base_addr)
3305 total_orig[0]++;
3306 switch (info->rhs_code)
3307 {
3308 case BIT_AND_EXPR:
3309 case BIT_IOR_EXPR:
3310 case BIT_XOR_EXPR:
3311 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3312 break;
3313 default:
3314 break;
3315 }
3316 total_orig[0] *= group->stores.length ();
3317
3318 FOR_EACH_VEC_ELT (group->stores, i, info)
3319 {
3320 total_new[0] += count_multiple_uses (info);
3321 total_orig[0] += (info->bit_not_p
3322 + info->ops[0].bit_not_p
3323 + info->ops[1].bit_not_p);
3324 }
3325 }
3326
3327 if (!allow_unaligned_load)
3328 for (int i = 0; i < 2; ++i)
3329 if (group->load_align[i])
3330 group_load_align = MIN (group_load_align, group->load_align[i]);
3331
3332 if (bzero_first)
3333 {
3334 first = 1;
3335 ret = 1;
3336 if (split_stores)
3337 {
3338 split_store *store
3339 = new split_store (bytepos, group->stores[0]->bitsize, align_base);
3340 store->orig_stores.safe_push (group->stores[0]);
3341 store->orig = true;
3342 any_orig = true;
3343 split_stores->safe_push (store);
3344 }
3345 }
3346
3347 while (size > 0)
3348 {
3349 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3350 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3351 || (bzero_first && group->val[try_pos - bytepos] == 0)))
3352 {
3353 /* Skip padding bytes. */
3354 ++try_pos;
3355 size -= BITS_PER_UNIT;
3356 continue;
3357 }
3358
3359 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3360 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3361 unsigned HOST_WIDE_INT align_bitpos
3362 = (try_bitpos - align_base) & (group_align - 1);
3363 unsigned HOST_WIDE_INT align = group_align;
3364 if (align_bitpos)
3365 align = least_bit_hwi (align_bitpos);
3366 if (!allow_unaligned_store)
3367 try_size = MIN (try_size, align);
3368 if (!allow_unaligned_load)
3369 {
3370 /* If we can't do or don't want to do unaligned stores
3371 as well as loads, we need to take the loads into account
3372 as well. */
3373 unsigned HOST_WIDE_INT load_align = group_load_align;
3374 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3375 if (align_bitpos)
3376 load_align = least_bit_hwi (align_bitpos);
3377 for (int i = 0; i < 2; ++i)
3378 if (group->load_align[i])
3379 {
3380 align_bitpos
3381 = known_alignment (try_bitpos
3382 - group->stores[0]->bitpos
3383 + group->stores[0]->ops[i].bitpos
3384 - group->load_align_base[i]);
3385 if (align_bitpos & (group_load_align - 1))
3386 {
3387 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3388 load_align = MIN (load_align, a);
3389 }
3390 }
3391 try_size = MIN (try_size, load_align);
3392 }
3393 store_immediate_info *info
3394 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3395 if (info)
3396 {
3397 /* If there is just one original statement for the range, see if
3398 we can just reuse the original store which could be even larger
3399 than try_size. */
3400 unsigned HOST_WIDE_INT stmt_end
3401 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3402 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3403 stmt_end - try_bitpos);
3404 if (info && info->bitpos >= try_bitpos)
3405 {
3406 try_size = stmt_end - try_bitpos;
3407 goto found;
3408 }
3409 }
3410
3411 /* Approximate store bitsize for the case when there are no padding
3412 bits. */
3413 while (try_size > size)
3414 try_size /= 2;
3415 /* Now look for whole padding bytes at the end of that bitsize. */
3416 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3417 if (group->mask[try_pos - bytepos + nonmasked - 1]
3418 != (unsigned char) ~0U
3419 && (!bzero_first
3420 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
3421 break;
3422 if (nonmasked == 0)
3423 {
3424 /* If entire try_size range is padding, skip it. */
3425 try_pos += try_size / BITS_PER_UNIT;
3426 size -= try_size;
3427 continue;
3428 }
3429 /* Otherwise try to decrease try_size if the second half, the last 3
3430 quarters etc. are padding. */
3431 nonmasked *= BITS_PER_UNIT;
3432 while (nonmasked <= try_size / 2)
3433 try_size /= 2;
3434 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3435 {
3436 /* Now look for whole padding bytes at the start of that bitsize. */
3437 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3438 for (masked = 0; masked < try_bytesize; ++masked)
3439 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3440 && (!bzero_first
3441 || group->val[try_pos - bytepos + masked] != 0))
3442 break;
3443 masked *= BITS_PER_UNIT;
3444 gcc_assert (masked < try_size);
3445 if (masked >= try_size / 2)
3446 {
3447 while (masked >= try_size / 2)
3448 {
3449 try_size /= 2;
3450 try_pos += try_size / BITS_PER_UNIT;
3451 size -= try_size;
3452 masked -= try_size;
3453 }
3454 /* Need to recompute the alignment, so just retry at the new
3455 position. */
3456 continue;
3457 }
3458 }
3459
3460 found:
3461 ++ret;
3462
3463 if (split_stores)
3464 {
3465 split_store *store
3466 = new split_store (try_pos, try_size, align);
3467 info = find_constituent_stores (group, &store->orig_stores,
3468 &first, try_bitpos, try_size);
3469 if (info
3470 && info->bitpos >= try_bitpos
3471 && info->bitpos + info->bitsize <= try_bitpos + try_size)
3472 {
3473 store->orig = true;
3474 any_orig = true;
3475 }
3476 split_stores->safe_push (store);
3477 }
3478
3479 try_pos += try_size / BITS_PER_UNIT;
3480 size -= try_size;
3481 }
3482
3483 if (total_orig)
3484 {
3485 unsigned int i;
3486 split_store *store;
3487 /* If we are reusing some original stores and any of the
3488 original SSA_NAMEs had multiple uses, we need to subtract
3489 those now before we add the new ones. */
3490 if (total_new[0] && any_orig)
3491 {
3492 FOR_EACH_VEC_ELT (*split_stores, i, store)
3493 if (store->orig)
3494 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3495 }
3496 total_new[0] += ret; /* The new store. */
3497 store_immediate_info *info = group->stores[0];
3498 if (info->ops[0].base_addr)
3499 total_new[0] += ret;
3500 if (info->ops[1].base_addr)
3501 total_new[0] += ret;
3502 switch (info->rhs_code)
3503 {
3504 case BIT_AND_EXPR:
3505 case BIT_IOR_EXPR:
3506 case BIT_XOR_EXPR:
3507 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3508 break;
3509 default:
3510 break;
3511 }
3512 FOR_EACH_VEC_ELT (*split_stores, i, store)
3513 {
3514 unsigned int j;
3515 bool bit_not_p[3] = { false, false, false };
3516 /* If all orig_stores have a certain bit_not_p flag set, then
3517 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3518 If only some orig_stores have that flag set, then we'd use
3519 a BIT_XOR_EXPR with a mask and need to account for that
3520 instead. */
3521 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3522 {
3523 if (info->ops[0].bit_not_p)
3524 bit_not_p[0] = true;
3525 if (info->ops[1].bit_not_p)
3526 bit_not_p[1] = true;
3527 if (info->bit_not_p)
3528 bit_not_p[2] = true;
3529 }
3530 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3531 }
3532
3533 }
3534
3535 return ret;
3536 }
3537
3538 /* Return the operation through which the operand IDX (if < 2) or
3539 the result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3540 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
3541 the bits should be xored with MASK. */
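/* Illustrative sketch only (hypothetical GIMPLE, not taken from real
   input): if a 16-bit split store is fed by two byte stores

     _1 = ~c_2(D);
     MEM[p_3(D)] = _1;
     _4 = ~d_5(D);
     MEM[p_3(D) + 1B] = _4;

   then every constituent store is inverted and BIT_NOT_EXPR is returned,
   so the merged value is inverted once as a whole.  If only one of the
   two byte stores used ~, BIT_XOR_EXPR is returned instead and MASK is
   set to a constant with all bits of that byte set (0xff00 or 0x00ff,
   depending on which byte it is and on endianness), so that only those
   bits get flipped.  */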
3542
3543 static enum tree_code
3544 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3545 {
3546 unsigned int i;
3547 store_immediate_info *info;
3548 unsigned int cnt = 0;
3549 bool any_paddings = false;
3550 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3551 {
3552 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3553 if (bit_not_p)
3554 {
3555 ++cnt;
3556 tree lhs = gimple_assign_lhs (info->stmt);
3557 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3558 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3559 any_paddings = true;
3560 }
3561 }
3562 mask = NULL_TREE;
3563 if (cnt == 0)
3564 return NOP_EXPR;
3565 if (cnt == split_store->orig_stores.length () && !any_paddings)
3566 return BIT_NOT_EXPR;
3567
3568 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3569 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3570 unsigned char *buf
3571 = XALLOCAVEC (unsigned char, buf_size);
3572 memset (buf, ~0U, buf_size);
3573 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3574 {
3575 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3576 if (!bit_not_p)
3577 continue;
3578 /* Clear the regions with bit_not_p set and invert afterwards, rather
3579 than clearing the regions with !bit_not_p, so that gaps in between
3580 the stores don't end up set in the mask. */
3581 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3582 unsigned HOST_WIDE_INT prec = bitsize;
3583 unsigned int pos_in_buffer = 0;
3584 if (any_paddings)
3585 {
3586 tree lhs = gimple_assign_lhs (info->stmt);
3587 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3588 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3589 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3590 }
3591 if (info->bitpos < try_bitpos)
3592 {
3593 gcc_assert (info->bitpos + bitsize > try_bitpos);
3594 if (!BYTES_BIG_ENDIAN)
3595 {
3596 if (prec <= try_bitpos - info->bitpos)
3597 continue;
3598 prec -= try_bitpos - info->bitpos;
3599 }
3600 bitsize -= try_bitpos - info->bitpos;
3601 if (BYTES_BIG_ENDIAN && prec > bitsize)
3602 prec = bitsize;
3603 }
3604 else
3605 pos_in_buffer = info->bitpos - try_bitpos;
3606 if (prec < bitsize)
3607 {
3608 /* If this is a bool inversion, invert just the least significant
3609 prec bits rather than all bits of it. */
3610 if (BYTES_BIG_ENDIAN)
3611 {
3612 pos_in_buffer += bitsize - prec;
3613 if (pos_in_buffer >= split_store->size)
3614 continue;
3615 }
3616 bitsize = prec;
3617 }
3618 if (pos_in_buffer + bitsize > split_store->size)
3619 bitsize = split_store->size - pos_in_buffer;
3620 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3621 if (BYTES_BIG_ENDIAN)
3622 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3623 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3624 else
3625 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3626 }
3627 for (unsigned int i = 0; i < buf_size; ++i)
3628 buf[i] = ~buf[i];
3629 mask = native_interpret_expr (int_type, buf, buf_size);
3630 return BIT_XOR_EXPR;
3631 }
3632
3633 /* Given a merged store group GROUP, output the widened version of it.
3634 The store chain is against the base object BASE.
3635 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
3636 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3637 Make sure that the number of statements output is smaller than the
3638 number of original statements. If a better sequence is possible, emit
3639 it and return true. */
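/* As a hedged, simplified illustration (the source below is hypothetical
   and not part of the pass): given

     struct S { unsigned char a, b, c, d; };

     void
     set (struct S *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   a successful merge replaces the four byte stores with a single store of
   a 32-bit integer through a suitably aligned MEM_REF, e.g. the constant
   0x04030201 on a little-endian target.  */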
3640
3641 bool
3642 imm_store_chain_info::output_merged_store (merged_store_group *group)
3643 {
3644 split_store *split_store;
3645 unsigned int i;
3646 unsigned HOST_WIDE_INT start_byte_pos
3647 = group->bitregion_start / BITS_PER_UNIT;
3648
3649 unsigned int orig_num_stmts = group->stores.length ();
3650 if (orig_num_stmts < 2)
3651 return false;
3652
3653 auto_vec<class split_store *, 32> split_stores;
3654 bool allow_unaligned_store
3655 = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
3656 bool allow_unaligned_load = allow_unaligned_store;
3657 bool bzero_first = false;
3658 if (group->stores[0]->rhs_code == INTEGER_CST
3659 && TREE_CODE (gimple_assign_rhs1 (group->stores[0]->stmt)) == CONSTRUCTOR
3660 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (group->stores[0]->stmt)) == 0
3661 && group->start == group->stores[0]->bitpos
3662 && group->width == group->stores[0]->bitsize
3663 && (group->start % BITS_PER_UNIT) == 0
3664 && (group->width % BITS_PER_UNIT) == 0)
3665 bzero_first = true;
3666 if (allow_unaligned_store || bzero_first)
3667 {
3668 /* If unaligned stores are allowed, see how many stores we'd emit
3669 for unaligned stores and how many for aligned stores, and only
3670 use unaligned stores if that results in fewer stores than aligned.
3671 Similarly, if the first store clears the whole region, check whether
3672 expanding it together with the subsequent stores is cheaper than
3673 expanding the clear first followed by the merged further stores. */
3674 unsigned cnt[4] = { ~0, ~0, ~0, ~0 };
3675 int pass_min = 0;
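	  /* The pass index encodes the two choices being costed: bit 0
	     selects unaligned stores, bit 1 selects the bzero-first
	     strategy.  The cheapest surviving combination is kept in
	     pass_min and decides the flags used below.  */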
3676 for (int pass = 0; pass < 4; ++pass)
3677 {
3678 if (!allow_unaligned_store && (pass & 1) != 0)
3679 continue;
3680 if (!bzero_first && (pass & 2) != 0)
3681 continue;
3682 cnt[pass] = split_group (group, (pass & 1) != 0,
3683 allow_unaligned_load, (pass & 2) != 0,
3684 NULL, NULL, NULL);
3685 if (cnt[pass] < cnt[pass_min])
3686 pass_min = pass;
3687 }
3688 if ((pass_min & 1) == 0)
3689 allow_unaligned_store = false;
3690 if ((pass_min & 2) == 0)
3691 bzero_first = false;
3692 }
3693 unsigned total_orig, total_new;
3694 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
3695 &split_stores, &total_orig, &total_new);
3696
3697 if (split_stores.length () >= orig_num_stmts)
3698 {
3699 /* We didn't manage to reduce the number of statements. Bail out. */
3700 if (dump_file && (dump_flags & TDF_DETAILS))
3701 fprintf (dump_file, "Exceeded original number of stmts (%u)."
3702 " Not profitable to emit new sequence.\n",
3703 orig_num_stmts);
3704 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3705 delete split_store;
3706 return false;
3707 }
3708 if (total_orig <= total_new)
3709 {
3710 /* If the estimated number of new statements is not below the estimated
3711 number of original statements, bail out too. */
3712 if (dump_file && (dump_flags & TDF_DETAILS))
3713 fprintf (dump_file, "Estimated number of original stmts (%u)"
3714 " not larger than estimated number of new"
3715 " stmts (%u).\n",
3716 total_orig, total_new);
3717 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3718 delete split_store;
3719 return false;
3720 }
3721
3722 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
3723 gimple_seq seq = NULL;
3724 tree last_vdef, new_vuse;
3725 last_vdef = gimple_vdef (group->last_stmt);
3726 new_vuse = gimple_vuse (group->last_stmt);
3727 tree bswap_res = NULL_TREE;
3728
3729 if (group->stores[0]->rhs_code == LROTATE_EXPR
3730 || group->stores[0]->rhs_code == NOP_EXPR)
3731 {
3732 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
3733 gimple *ins_stmt = group->stores[0]->ins_stmt;
3734 struct symbolic_number *n = &group->stores[0]->n;
3735 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
3736
3737 switch (n->range)
3738 {
3739 case 16:
3740 load_type = bswap_type = uint16_type_node;
3741 break;
3742 case 32:
3743 load_type = uint32_type_node;
3744 if (bswap)
3745 {
3746 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
3747 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3748 }
3749 break;
3750 case 64:
3751 load_type = uint64_type_node;
3752 if (bswap)
3753 {
3754 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
3755 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
3756 }
3757 break;
3758 default:
3759 gcc_unreachable ();
3760 }
3761
3762 /* If the loads each have the vuse of the corresponding store,
3763 we've already checked the aliasing in try_coalesce_bswap and
3764 we want to sink the needed load into seq. So we need to use
3765 new_vuse on the load. */
3766 if (n->base_addr)
3767 {
3768 if (n->vuse == NULL)
3769 {
3770 n->vuse = new_vuse;
3771 ins_stmt = NULL;
3772 }
3773 else
3774 /* Update the vuse in case it has been changed by output_merged_stores. */
3775 n->vuse = gimple_vuse (ins_stmt);
3776 }
3777 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
3778 bswap_type, load_type, n, bswap);
3779 gcc_assert (bswap_res);
3780 }
3781
3782 gimple *stmt = NULL;
3783 auto_vec<gimple *, 32> orig_stmts;
3784 gimple_seq this_seq;
3785 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
3786 is_gimple_mem_ref_addr, NULL_TREE);
3787 gimple_seq_add_seq_without_update (&seq, this_seq);
3788
3789 tree load_addr[2] = { NULL_TREE, NULL_TREE };
3790 gimple_seq load_seq[2] = { NULL, NULL };
3791 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
3792 for (int j = 0; j < 2; ++j)
3793 {
3794 store_operand_info &op = group->stores[0]->ops[j];
3795 if (op.base_addr == NULL_TREE)
3796 continue;
3797
3798 store_immediate_info *infol = group->stores.last ();
3799 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
3800 {
3801 /* We can't pick the location randomly; while we've verified
3802 all the loads have the same vuse, they can still be in different
3803 basic blocks and we need to pick the one from the last bb:
3804 int x = q[0];
3805 if (x == N) return;
3806 int y = q[1];
3807 p[0] = x;
3808 p[1] = y;
3809 otherwise if we put the wider load at the q[0] load, we might
3810 segfault if q[1] is not mapped. */
3811 basic_block bb = gimple_bb (op.stmt);
3812 gimple *ostmt = op.stmt;
3813 store_immediate_info *info;
3814 FOR_EACH_VEC_ELT (group->stores, i, info)
3815 {
3816 gimple *tstmt = info->ops[j].stmt;
3817 basic_block tbb = gimple_bb (tstmt);
3818 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
3819 {
3820 ostmt = tstmt;
3821 bb = tbb;
3822 }
3823 }
3824 load_gsi[j] = gsi_for_stmt (ostmt);
3825 load_addr[j]
3826 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3827 &load_seq[j], is_gimple_mem_ref_addr,
3828 NULL_TREE);
3829 }
3830 else if (operand_equal_p (base_addr, op.base_addr, 0))
3831 load_addr[j] = addr;
3832 else
3833 {
3834 load_addr[j]
3835 = force_gimple_operand_1 (unshare_expr (op.base_addr),
3836 &this_seq, is_gimple_mem_ref_addr,
3837 NULL_TREE);
3838 gimple_seq_add_seq_without_update (&seq, this_seq);
3839 }
3840 }
3841
3842 FOR_EACH_VEC_ELT (split_stores, i, split_store)
3843 {
3844 unsigned HOST_WIDE_INT try_size = split_store->size;
3845 unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
3846 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3847 unsigned HOST_WIDE_INT align = split_store->align;
3848 tree dest, src;
3849 location_t loc;
3850 if (split_store->orig)
3851 {
3852 /* If there is just a single constituent store which covers
3853 the whole area, just reuse the lhs and rhs. */
3854 gimple *orig_stmt = split_store->orig_stores[0]->stmt;
3855 dest = gimple_assign_lhs (orig_stmt);
3856 src = gimple_assign_rhs1 (orig_stmt);
3857 loc = gimple_location (orig_stmt);
3858 }
3859 else
3860 {
3861 store_immediate_info *info;
3862 unsigned short clique, base;
3863 unsigned int k;
3864 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3865 orig_stmts.safe_push (info->stmt);
3866 tree offset_type
3867 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
3868 loc = get_location_for_stmts (orig_stmts);
3869 orig_stmts.truncate (0);
3870
3871 tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
3872 int_type = build_aligned_type (int_type, align);
3873 dest = fold_build2 (MEM_REF, int_type, addr,
3874 build_int_cst (offset_type, try_pos));
3875 if (TREE_CODE (dest) == MEM_REF)
3876 {
3877 MR_DEPENDENCE_CLIQUE (dest) = clique;
3878 MR_DEPENDENCE_BASE (dest) = base;
3879 }
3880
3881 tree mask;
3882 if (bswap_res)
3883 mask = integer_zero_node;
3884 else
3885 mask = native_interpret_expr (int_type,
3886 group->mask + try_pos
3887 - start_byte_pos,
3888 group->buf_size);
3889
3890 tree ops[2];
3891 for (int j = 0;
3892 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
3893 ++j)
3894 {
3895 store_operand_info &op = split_store->orig_stores[0]->ops[j];
3896 if (bswap_res)
3897 ops[j] = bswap_res;
3898 else if (op.base_addr)
3899 {
3900 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3901 orig_stmts.safe_push (info->ops[j].stmt);
3902
3903 offset_type = get_alias_type_for_stmts (orig_stmts, true,
3904 &clique, &base);
3905 location_t load_loc = get_location_for_stmts (orig_stmts);
3906 orig_stmts.truncate (0);
3907
3908 unsigned HOST_WIDE_INT load_align = group->load_align[j];
3909 unsigned HOST_WIDE_INT align_bitpos
3910 = known_alignment (try_bitpos
3911 - split_store->orig_stores[0]->bitpos
3912 + op.bitpos);
3913 if (align_bitpos & (load_align - 1))
3914 load_align = least_bit_hwi (align_bitpos);
3915
3916 tree load_int_type
3917 = build_nonstandard_integer_type (try_size, UNSIGNED);
3918 load_int_type
3919 = build_aligned_type (load_int_type, load_align);
3920
3921 poly_uint64 load_pos
3922 = exact_div (try_bitpos
3923 - split_store->orig_stores[0]->bitpos
3924 + op.bitpos,
3925 BITS_PER_UNIT);
3926 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
3927 build_int_cst (offset_type, load_pos));
3928 if (TREE_CODE (ops[j]) == MEM_REF)
3929 {
3930 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
3931 MR_DEPENDENCE_BASE (ops[j]) = base;
3932 }
3933 if (!integer_zerop (mask))
3934 /* The load might load some bits (that will be masked off
3935 later on) uninitialized; avoid -W*uninitialized
3936 warnings in that case. */
3937 TREE_NO_WARNING (ops[j]) = 1;
3938
3939 stmt = gimple_build_assign (make_ssa_name (int_type),
3940 ops[j]);
3941 gimple_set_location (stmt, load_loc);
3942 if (gsi_bb (load_gsi[j]))
3943 {
3944 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
3945 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
3946 }
3947 else
3948 {
3949 gimple_set_vuse (stmt, new_vuse);
3950 gimple_seq_add_stmt_without_update (&seq, stmt);
3951 }
3952 ops[j] = gimple_assign_lhs (stmt);
3953 tree xor_mask;
3954 enum tree_code inv_op
3955 = invert_op (split_store, j, int_type, xor_mask);
3956 if (inv_op != NOP_EXPR)
3957 {
3958 stmt = gimple_build_assign (make_ssa_name (int_type),
3959 inv_op, ops[j], xor_mask);
3960 gimple_set_location (stmt, load_loc);
3961 ops[j] = gimple_assign_lhs (stmt);
3962
3963 if (gsi_bb (load_gsi[j]))
3964 gimple_seq_add_stmt_without_update (&load_seq[j],
3965 stmt);
3966 else
3967 gimple_seq_add_stmt_without_update (&seq, stmt);
3968 }
3969 }
3970 else
3971 ops[j] = native_interpret_expr (int_type,
3972 group->val + try_pos
3973 - start_byte_pos,
3974 group->buf_size);
3975 }
3976
3977 switch (split_store->orig_stores[0]->rhs_code)
3978 {
3979 case BIT_AND_EXPR:
3980 case BIT_IOR_EXPR:
3981 case BIT_XOR_EXPR:
3982 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
3983 {
3984 tree rhs1 = gimple_assign_rhs1 (info->stmt);
3985 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
3986 }
3987 location_t bit_loc;
3988 bit_loc = get_location_for_stmts (orig_stmts);
3989 orig_stmts.truncate (0);
3990
3991 stmt
3992 = gimple_build_assign (make_ssa_name (int_type),
3993 split_store->orig_stores[0]->rhs_code,
3994 ops[0], ops[1]);
3995 gimple_set_location (stmt, bit_loc);
3996 /* If there is just one load and there is a separate
3997 load_seq[0], emit the bitwise op right after it. */
3998 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
3999 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4000 /* Otherwise, if at least one load is in seq, we need to
4001 emit the bitwise op right before the store. If there
4002 are two loads and they are emitted somewhere else, it would
4003 be better to emit the bitwise op as early as possible;
4004 we don't track where that would be possible right now
4005 though. */
4006 else
4007 gimple_seq_add_stmt_without_update (&seq, stmt);
4008 src = gimple_assign_lhs (stmt);
4009 tree xor_mask;
4010 enum tree_code inv_op;
4011 inv_op = invert_op (split_store, 2, int_type, xor_mask);
4012 if (inv_op != NOP_EXPR)
4013 {
4014 stmt = gimple_build_assign (make_ssa_name (int_type),
4015 inv_op, src, xor_mask);
4016 gimple_set_location (stmt, bit_loc);
4017 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4018 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4019 else
4020 gimple_seq_add_stmt_without_update (&seq, stmt);
4021 src = gimple_assign_lhs (stmt);
4022 }
4023 break;
4024 case LROTATE_EXPR:
4025 case NOP_EXPR:
4026 src = ops[0];
4027 if (!is_gimple_val (src))
4028 {
4029 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4030 src);
4031 gimple_seq_add_stmt_without_update (&seq, stmt);
4032 src = gimple_assign_lhs (stmt);
4033 }
4034 if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
4035 {
4036 stmt = gimple_build_assign (make_ssa_name (int_type),
4037 NOP_EXPR, src);
4038 gimple_seq_add_stmt_without_update (&seq, stmt);
4039 src = gimple_assign_lhs (stmt);
4040 }
4041 inv_op = invert_op (split_store, 2, int_type, xor_mask);
4042 if (inv_op != NOP_EXPR)
4043 {
4044 stmt = gimple_build_assign (make_ssa_name (int_type),
4045 inv_op, src, xor_mask);
4046 gimple_set_location (stmt, loc);
4047 gimple_seq_add_stmt_without_update (&seq, stmt);
4048 src = gimple_assign_lhs (stmt);
4049 }
4050 break;
4051 default:
4052 src = ops[0];
4053 break;
4054 }
4055
4056 /* If bit insertion is required, we use the source as an accumulator
4057 into which the successive bit-field values are manually inserted.
4058 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
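	/* Rough example (hypothetical values): inserting a 3-bit field
	   value V at bit offset 4 of an 8-bit split store on a
	   little-endian target becomes approximately
	     tem = V & 7;		mask to the field width
	     tem = tem << 4;		shift into position
	     src = src | tem;		ior into the accumulator
	   where any of the steps is skipped when it is a no-op.  */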
4059 if (group->bit_insertion)
4060 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4061 if (info->rhs_code == BIT_INSERT_EXPR
4062 && info->bitpos < try_bitpos + try_size
4063 && info->bitpos + info->bitsize > try_bitpos)
4064 {
4065 /* Mask, truncate, convert to final type, shift and ior into
4066 the accumulator. Note that every step can be a no-op. */
4067 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4068 const HOST_WIDE_INT end_gap
4069 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4070 tree tem = info->ops[0].val;
4071 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4072 {
4073 tree bitfield_type
4074 = build_nonstandard_integer_type (info->bitsize,
4075 UNSIGNED);
4076 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4077 }
4078 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
4079 {
4080 const unsigned HOST_WIDE_INT imask
4081 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4082 tem = gimple_build (&seq, loc,
4083 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4084 build_int_cst (TREE_TYPE (tem),
4085 imask));
4086 }
4087 const HOST_WIDE_INT shift
4088 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4089 if (shift < 0)
4090 tem = gimple_build (&seq, loc,
4091 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4092 build_int_cst (NULL_TREE, -shift));
4093 tem = gimple_convert (&seq, loc, int_type, tem);
4094 if (shift > 0)
4095 tem = gimple_build (&seq, loc,
4096 LSHIFT_EXPR, int_type, tem,
4097 build_int_cst (NULL_TREE, shift));
4098 src = gimple_build (&seq, loc,
4099 BIT_IOR_EXPR, int_type, tem, src);
4100 }
4101
4102 if (!integer_zerop (mask))
4103 {
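	  /* Read-modify-write: load the destination, keep the bits the
	     original stores do not write (those are set in MASK), clear
	     the same bits in SRC and IOR the two parts together.  */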
4104 tree tem = make_ssa_name (int_type);
4105 tree load_src = unshare_expr (dest);
4106 /* The load might load some or all bits uninitialized;
4107 avoid -W*uninitialized warnings in that case.
4108 As an optimization, if all the bits were provably
4109 uninitialized (no stores at all yet, or the previous
4110 store a CLOBBER), we could optimize away the load and
4111 replace it e.g. with 0. */
4112 TREE_NO_WARNING (load_src) = 1;
4113 stmt = gimple_build_assign (tem, load_src);
4114 gimple_set_location (stmt, loc);
4115 gimple_set_vuse (stmt, new_vuse);
4116 gimple_seq_add_stmt_without_update (&seq, stmt);
4117
4118 /* FIXME: If there is a single chunk of zero bits in mask,
4119 perhaps use BIT_INSERT_EXPR instead? */
4120 stmt = gimple_build_assign (make_ssa_name (int_type),
4121 BIT_AND_EXPR, tem, mask);
4122 gimple_set_location (stmt, loc);
4123 gimple_seq_add_stmt_without_update (&seq, stmt);
4124 tem = gimple_assign_lhs (stmt);
4125
4126 if (TREE_CODE (src) == INTEGER_CST)
4127 src = wide_int_to_tree (int_type,
4128 wi::bit_and_not (wi::to_wide (src),
4129 wi::to_wide (mask)));
4130 else
4131 {
4132 tree nmask
4133 = wide_int_to_tree (int_type,
4134 wi::bit_not (wi::to_wide (mask)));
4135 stmt = gimple_build_assign (make_ssa_name (int_type),
4136 BIT_AND_EXPR, src, nmask);
4137 gimple_set_location (stmt, loc);
4138 gimple_seq_add_stmt_without_update (&seq, stmt);
4139 src = gimple_assign_lhs (stmt);
4140 }
4141 stmt = gimple_build_assign (make_ssa_name (int_type),
4142 BIT_IOR_EXPR, tem, src);
4143 gimple_set_location (stmt, loc);
4144 gimple_seq_add_stmt_without_update (&seq, stmt);
4145 src = gimple_assign_lhs (stmt);
4146 }
4147 }
4148
4149 stmt = gimple_build_assign (dest, src);
4150 gimple_set_location (stmt, loc);
4151 gimple_set_vuse (stmt, new_vuse);
4152 gimple_seq_add_stmt_without_update (&seq, stmt);
4153
4154 tree new_vdef;
4155 if (i < split_stores.length () - 1)
4156 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4157 else
4158 new_vdef = last_vdef;
4159
4160 gimple_set_vdef (stmt, new_vdef);
4161 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4162 new_vuse = new_vdef;
4163 }
4164
4165 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4166 delete split_store;
4167
4168 gcc_assert (seq);
4169 if (dump_file)
4170 {
4171 fprintf (dump_file,
4172 "New sequence of %u stores to replace old one of %u stores\n",
4173 split_stores.length (), orig_num_stmts);
4174 if (dump_flags & TDF_DETAILS)
4175 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4176 }
4177 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4178 for (int j = 0; j < 2; ++j)
4179 if (load_seq[j])
4180 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4181
4182 return true;
4183 }
4184
4185 /* Process the merged_store_group objects created in the coalescing phase.
4186 The stores are all against the base object BASE.
4187 Try to output the widened stores and delete the original statements if
4188 successful. Return true iff any changes were made. */
4189
4190 bool
4191 imm_store_chain_info::output_merged_stores ()
4192 {
4193 unsigned int i;
4194 merged_store_group *merged_store;
4195 bool ret = false;
4196 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4197 {
4198 if (output_merged_store (merged_store))
4199 {
4200 unsigned int j;
4201 store_immediate_info *store;
4202 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4203 {
4204 gimple *stmt = store->stmt;
4205 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4206 gsi_remove (&gsi, true);
4207 if (stmt != merged_store->last_stmt)
4208 {
4209 unlink_stmt_vdef (stmt);
4210 release_defs (stmt);
4211 }
4212 }
4213 ret = true;
4214 }
4215 }
4216 if (ret && dump_file)
4217 fprintf (dump_file, "Merging successful!\n");
4218
4219 return ret;
4220 }
4221
4222 /* Coalesce the store_immediate_info objects recorded against the base object
4223 BASE in the first phase and output them.
4224 Delete the allocated structures.
4225 Return true if any changes were made. */
4226
4227 bool
4228 imm_store_chain_info::terminate_and_process_chain ()
4229 {
4230 /* Process store chain. */
4231 bool ret = false;
4232 if (m_store_info.length () > 1)
4233 {
4234 ret = coalesce_immediate_stores ();
4235 if (ret)
4236 ret = output_merged_stores ();
4237 }
4238
4239 /* Delete all the entries we allocated ourselves. */
4240 store_immediate_info *info;
4241 unsigned int i;
4242 FOR_EACH_VEC_ELT (m_store_info, i, info)
4243 delete info;
4244
4245 merged_store_group *merged_info;
4246 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4247 delete merged_info;
4248
4249 return ret;
4250 }
4251
4252 /* Return true iff LHS is a destination potentially interesting for
4253 store merging. In practice these are the codes that get_inner_reference
4254 can process. */
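/* For example, destinations such as x.f, p->f, a[i], *p and plain DECLs
   are considered, while e.g. a TARGET_MEM_REF or VIEW_CONVERT_EXPR
   destination is not.  */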
4255
4256 static bool
4257 lhs_valid_for_store_merging_p (tree lhs)
4258 {
4259 tree_code code = TREE_CODE (lhs);
4260
4261 if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
4262 || code == COMPONENT_REF || code == BIT_FIELD_REF
4263 || DECL_P (lhs))
4264 return true;
4265
4266 return false;
4267 }
4268
4269 /* Return true if the tree RHS is a constant we want to consider
4270 during store merging. In practice we accept all codes that
4271 native_encode_expr accepts. */
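/* E.g. INTEGER_CSTs, REAL_CSTs and VECTOR_CSTs of a fixed-size mode are
   accepted, as is the empty CONSTRUCTOR "= {}" used as a zero
   initializer; a CLOBBER or a CONSTRUCTOR with elements is rejected.  */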
4272
4273 static bool
4274 rhs_valid_for_store_merging_p (tree rhs)
4275 {
4276 unsigned HOST_WIDE_INT size;
4277 if (TREE_CODE (rhs) == CONSTRUCTOR
4278 && !TREE_CLOBBER_P (rhs)
4279 && CONSTRUCTOR_NELTS (rhs) == 0
4280 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4281 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4282 return true;
4283 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4284 && native_encode_expr (rhs, NULL, size) != 0);
4285 }
4286
4287 /* If MEM is a memory reference usable for store merging (either as
4288 a store destination or for loads), return the non-NULL base_addr
4289 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4290 Otherwise return NULL; *PBITSIZE is still set even in that
4291 case. */
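/* A rough example: for a store to t.b with
     struct T { int a; int b; } t;
   this returns &t with *PBITSIZE = 32, *PBITPOS = 32 and a bit region
   spanning bytes 4..7 of t (assuming 32-bit int, 8-bit bytes and no
   padding).  */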
4292
4293 static tree
4294 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4295 poly_uint64 *pbitpos,
4296 poly_uint64 *pbitregion_start,
4297 poly_uint64 *pbitregion_end)
4298 {
4299 poly_int64 bitsize, bitpos;
4300 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4301 machine_mode mode;
4302 int unsignedp = 0, reversep = 0, volatilep = 0;
4303 tree offset;
4304 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4305 &unsignedp, &reversep, &volatilep);
4306 *pbitsize = bitsize;
4307 if (known_eq (bitsize, 0))
4308 return NULL_TREE;
4309
4310 if (TREE_CODE (mem) == COMPONENT_REF
4311 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4312 {
4313 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4314 if (maybe_ne (bitregion_end, 0U))
4315 bitregion_end += 1;
4316 }
4317
4318 if (reversep)
4319 return NULL_TREE;
4320
4321 /* We do not want to rewrite TARGET_MEM_REFs. */
4322 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4323 return NULL_TREE;
4324 /* In some cases get_inner_reference may return a
4325 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4326 canonicalize the base_addr to MEM_REF [ptr] and take
4327 byteoffset into account in the bitpos. This occurs in
4328 PR 23684 and this way we can catch more chains. */
4329 else if (TREE_CODE (base_addr) == MEM_REF)
4330 {
4331 poly_offset_int byte_off = mem_ref_offset (base_addr);
4332 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4333 bit_off += bitpos;
4334 if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
4335 {
4336 if (maybe_ne (bitregion_end, 0U))
4337 {
4338 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4339 bit_off += bitregion_start;
4340 if (bit_off.to_uhwi (&bitregion_start))
4341 {
4342 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4343 bit_off += bitregion_end;
4344 if (!bit_off.to_uhwi (&bitregion_end))
4345 bitregion_end = 0;
4346 }
4347 else
4348 bitregion_end = 0;
4349 }
4350 }
4351 else
4352 return NULL_TREE;
4353 base_addr = TREE_OPERAND (base_addr, 0);
4354 }
4355 /* get_inner_reference returned the base object; take its
4356 address now. */
4357 else
4358 {
4359 if (maybe_lt (bitpos, 0))
4360 return NULL_TREE;
4361 base_addr = build_fold_addr_expr (base_addr);
4362 }
4363
4364 if (known_eq (bitregion_end, 0U))
4365 {
4366 bitregion_start = round_down_to_byte_boundary (bitpos);
4367 bitregion_end = bitpos;
4368 bitregion_end = round_up_to_byte_boundary (bitregion_end + bitsize);
4369 }
4370
4371 if (offset != NULL_TREE)
4372 {
4373 /* If the access has a variable offset then a base decl has to be
4374 address-taken to be able to emit pointer-based stores to it.
4375 ??? We might be able to get away with re-using the original
4376 base up to the first variable part and then wrapping that inside
4377 a BIT_FIELD_REF. */
4378 tree base = get_base_address (base_addr);
4379 if (! base
4380 || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
4381 return NULL_TREE;
4382
4383 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4384 base_addr, offset);
4385 }
4386
4387 *pbitsize = bitsize;
4388 *pbitpos = bitpos;
4389 *pbitregion_start = bitregion_start;
4390 *pbitregion_end = bitregion_end;
4391 return base_addr;
4392 }
4393
4394 /* Return true if STMT is a load that can be used for store merging.
4395 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4396 BITREGION_END are properties of the corresponding store. */
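/* For instance (hypothetical GIMPLE), in
     _1 = MEM[(char *)q_2(D) + 4B];
     MEM[(char *)p_3(D) + 4B] = _1;
   the statement defining _1 is a handled load: *OP records q_2 as the
   base address together with the load's bit size, position and region.
   A BIT_NOT_EXPR of such a load is handled as well by toggling
   OP->bit_not_p.  */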
4397
4398 static bool
4399 handled_load (gimple *stmt, store_operand_info *op,
4400 poly_uint64 bitsize, poly_uint64 bitpos,
4401 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4402 {
4403 if (!is_gimple_assign (stmt))
4404 return false;
4405 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4406 {
4407 tree rhs1 = gimple_assign_rhs1 (stmt);
4408 if (TREE_CODE (rhs1) == SSA_NAME
4409 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4410 bitregion_start, bitregion_end))
4411 {
4412 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4413 been optimized earlier, but if allowed here, would confuse the
4414 multiple uses counting. */
4415 if (op->bit_not_p)
4416 return false;
4417 op->bit_not_p = !op->bit_not_p;
4418 return true;
4419 }
4420 return false;
4421 }
4422 if (gimple_vuse (stmt)
4423 && gimple_assign_load_p (stmt)
4424 && !stmt_can_throw_internal (cfun, stmt)
4425 && !gimple_has_volatile_ops (stmt))
4426 {
4427 tree mem = gimple_assign_rhs1 (stmt);
4428 op->base_addr
4429 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4430 &op->bitregion_start,
4431 &op->bitregion_end);
4432 if (op->base_addr != NULL_TREE
4433 && known_eq (op->bitsize, bitsize)
4434 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4435 && known_ge (op->bitpos - op->bitregion_start,
4436 bitpos - bitregion_start)
4437 && known_ge (op->bitregion_end - op->bitpos,
4438 bitregion_end - bitpos))
4439 {
4440 op->stmt = stmt;
4441 op->val = mem;
4442 op->bit_not_p = false;
4443 return true;
4444 }
4445 }
4446 return false;
4447 }
4448
4449 /* Record the store STMT for store merging optimization if it can be
4450 optimized. */
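/* Hedged examples of what gets recorded (all statements hypothetical):

     MEM[p_1] = 42;		rhs_code INTEGER_CST, ops[0].val = 42
     _2 = MEM[q_3];
     MEM[p_1] = _2;		rhs_code MEM_REF, ops[0] describes the load
     _4 = MEM[q_3];
     _5 = _4 | 7;
     MEM[p_1] = _5;		rhs_code BIT_IOR_EXPR, ops[0] the load,
				ops[1].val the constant 7

   Stores that cannot be described this way either terminate the affected
   chains or, for small non-byte-aligned cases, are recorded as
   BIT_INSERT_EXPR stores.  */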
4451
4452 void
4453 pass_store_merging::process_store (gimple *stmt)
4454 {
4455 tree lhs = gimple_assign_lhs (stmt);
4456 tree rhs = gimple_assign_rhs1 (stmt);
4457 poly_uint64 bitsize, bitpos;
4458 poly_uint64 bitregion_start, bitregion_end;
4459 tree base_addr
4460 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4461 &bitregion_start, &bitregion_end);
4462 if (known_eq (bitsize, 0U))
4463 return;
4464
4465 bool invalid = (base_addr == NULL_TREE
4466 || (maybe_gt (bitsize,
4467 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4468 && TREE_CODE (rhs) != INTEGER_CST
4469 && (TREE_CODE (rhs) != CONSTRUCTOR
4470 || CONSTRUCTOR_NELTS (rhs) != 0)));
4471 enum tree_code rhs_code = ERROR_MARK;
4472 bool bit_not_p = false;
4473 struct symbolic_number n;
4474 gimple *ins_stmt = NULL;
4475 store_operand_info ops[2];
4476 if (invalid)
4477 ;
4478 else if (rhs_valid_for_store_merging_p (rhs))
4479 {
4480 rhs_code = INTEGER_CST;
4481 ops[0].val = rhs;
4482 }
4483 else if (TREE_CODE (rhs) != SSA_NAME)
4484 invalid = true;
4485 else
4486 {
4487 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4488 if (!is_gimple_assign (def_stmt))
4489 invalid = true;
4490 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4491 bitregion_start, bitregion_end))
4492 rhs_code = MEM_REF;
4493 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4494 {
4495 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4496 if (TREE_CODE (rhs1) == SSA_NAME
4497 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4498 {
4499 bit_not_p = true;
4500 def_stmt = SSA_NAME_DEF_STMT (rhs1);
4501 }
4502 }
4503
4504 if (rhs_code == ERROR_MARK && !invalid)
4505 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4506 {
4507 case BIT_AND_EXPR:
4508 case BIT_IOR_EXPR:
4509 case BIT_XOR_EXPR:
4510 tree rhs1, rhs2;
4511 rhs1 = gimple_assign_rhs1 (def_stmt);
4512 rhs2 = gimple_assign_rhs2 (def_stmt);
4513 invalid = true;
4514 if (TREE_CODE (rhs1) != SSA_NAME)
4515 break;
4516 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4517 if (!is_gimple_assign (def_stmt1)
4518 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4519 bitregion_start, bitregion_end))
4520 break;
4521 if (rhs_valid_for_store_merging_p (rhs2))
4522 ops[1].val = rhs2;
4523 else if (TREE_CODE (rhs2) != SSA_NAME)
4524 break;
4525 else
4526 {
4527 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4528 if (!is_gimple_assign (def_stmt2))
4529 break;
4530 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4531 bitregion_start, bitregion_end))
4532 break;
4533 }
4534 invalid = false;
4535 break;
4536 default:
4537 invalid = true;
4538 break;
4539 }
4540
4541 unsigned HOST_WIDE_INT const_bitsize;
4542 if (bitsize.is_constant (&const_bitsize)
4543 && (const_bitsize % BITS_PER_UNIT) == 0
4544 && const_bitsize <= 64
4545 && multiple_p (bitpos, BITS_PER_UNIT))
4546 {
4547 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4548 if (ins_stmt)
4549 {
4550 uint64_t nn = n.n;
4551 for (unsigned HOST_WIDE_INT i = 0;
4552 i < const_bitsize;
4553 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4554 if ((nn & MARKER_MASK) == 0
4555 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4556 {
4557 ins_stmt = NULL;
4558 break;
4559 }
4560 if (ins_stmt)
4561 {
4562 if (invalid)
4563 {
4564 rhs_code = LROTATE_EXPR;
4565 ops[0].base_addr = NULL_TREE;
4566 ops[1].base_addr = NULL_TREE;
4567 }
4568 invalid = false;
4569 }
4570 }
4571 }
4572
4573 if (invalid
4574 && bitsize.is_constant (&const_bitsize)
4575 && ((const_bitsize % BITS_PER_UNIT) != 0
4576 || !multiple_p (bitpos, BITS_PER_UNIT))
4577 && const_bitsize <= 64)
4578 {
4579 /* Bypass a conversion to the bit-field type. */
4580 if (!bit_not_p
4581 && is_gimple_assign (def_stmt)
4582 && CONVERT_EXPR_CODE_P (rhs_code))
4583 {
4584 tree rhs1 = gimple_assign_rhs1 (def_stmt);
4585 if (TREE_CODE (rhs1) == SSA_NAME
4586 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
4587 rhs = rhs1;
4588 }
4589 rhs_code = BIT_INSERT_EXPR;
4590 bit_not_p = false;
4591 ops[0].val = rhs;
4592 ops[0].base_addr = NULL_TREE;
4593 ops[1].base_addr = NULL_TREE;
4594 invalid = false;
4595 }
4596 }
4597
4598 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4599 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4600 if (invalid
4601 || !bitsize.is_constant (&const_bitsize)
4602 || !bitpos.is_constant (&const_bitpos)
4603 || !bitregion_start.is_constant (&const_bitregion_start)
4604 || !bitregion_end.is_constant (&const_bitregion_end))
4605 {
4606 terminate_all_aliasing_chains (NULL, stmt);
4607 return;
4608 }
4609
4610 if (!ins_stmt)
4611 memset (&n, 0, sizeof (n));
4612
4613 class imm_store_chain_info **chain_info = NULL;
4614 if (base_addr)
4615 chain_info = m_stores.get (base_addr);
4616
4617 store_immediate_info *info;
4618 if (chain_info)
4619 {
4620 unsigned int ord = (*chain_info)->m_store_info.length ();
4621 info = new store_immediate_info (const_bitsize, const_bitpos,
4622 const_bitregion_start,
4623 const_bitregion_end,
4624 stmt, ord, rhs_code, n, ins_stmt,
4625 bit_not_p, ops[0], ops[1]);
4626 if (dump_file && (dump_flags & TDF_DETAILS))
4627 {
4628 fprintf (dump_file, "Recording immediate store from stmt:\n");
4629 print_gimple_stmt (dump_file, stmt, 0);
4630 }
4631 (*chain_info)->m_store_info.safe_push (info);
4632 terminate_all_aliasing_chains (chain_info, stmt);
4633 /* If we reach the limit of stores to merge in a chain, terminate and
4634 process the chain now. */
4635 if ((*chain_info)->m_store_info.length ()
4636 == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
4637 {
4638 if (dump_file && (dump_flags & TDF_DETAILS))
4639 fprintf (dump_file,
4640 "Reached maximum number of statements to merge:\n");
4641 terminate_and_release_chain (*chain_info);
4642 }
4643 return;
4644 }
4645
4646 /* Does the store alias any existing chain? */
4647 terminate_all_aliasing_chains (NULL, stmt);
4648 /* Start a new chain. */
4649 class imm_store_chain_info *new_chain
4650 = new imm_store_chain_info (m_stores_head, base_addr);
4651 info = new store_immediate_info (const_bitsize, const_bitpos,
4652 const_bitregion_start,
4653 const_bitregion_end,
4654 stmt, 0, rhs_code, n, ins_stmt,
4655 bit_not_p, ops[0], ops[1]);
4656 new_chain->m_store_info.safe_push (info);
4657 m_stores.put (base_addr, new_chain);
4658 if (dump_file && (dump_flags & TDF_DETAILS))
4659 {
4660 fprintf (dump_file, "Starting new chain with statement:\n");
4661 print_gimple_stmt (dump_file, stmt, 0);
4662 fprintf (dump_file, "The base object is:\n");
4663 print_generic_expr (dump_file, base_addr);
4664 fprintf (dump_file, "\n");
4665 }
4666 }
4667
4668 /* Entry point for the pass. Go over each basic block recording chains of
4669 immediate stores. Upon encountering a terminating statement (as defined
4670 by stmt_terminates_chain_p) process the recorded stores and emit the widened
4671 variants. */
4672
4673 unsigned int
4674 pass_store_merging::execute (function *fun)
4675 {
4676 basic_block bb;
4677 hash_set<gimple *> orig_stmts;
4678
4679 calculate_dominance_info (CDI_DOMINATORS);
4680
4681 FOR_EACH_BB_FN (bb, fun)
4682 {
4683 gimple_stmt_iterator gsi;
4684 unsigned HOST_WIDE_INT num_statements = 0;
4685 /* First do a quick check whether the basic block contains at
4686 least two non-debug statements; a single statement has nothing
4687 to be merged with. */
4688 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4689 {
4690 if (is_gimple_debug (gsi_stmt (gsi)))
4691 continue;
4692
4693 if (++num_statements >= 2)
4694 break;
4695 }
4696
4697 if (num_statements < 2)
4698 continue;
4699
4700 if (dump_file && (dump_flags & TDF_DETAILS))
4701 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
4702
4703 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4704 {
4705 gimple *stmt = gsi_stmt (gsi);
4706
4707 if (is_gimple_debug (stmt))
4708 continue;
4709
4710 if (gimple_has_volatile_ops (stmt))
4711 {
4712 /* Terminate all chains. */
4713 if (dump_file && (dump_flags & TDF_DETAILS))
4714 fprintf (dump_file, "Volatile access terminates "
4715 "all chains\n");
4716 terminate_and_process_all_chains ();
4717 continue;
4718 }
4719
4720 if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
4721 && !stmt_can_throw_internal (cfun, stmt)
4722 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
4723 process_store (stmt);
4724 else
4725 terminate_all_aliasing_chains (NULL, stmt);
4726 }
4727 terminate_and_process_all_chains ();
4728 }
4729 return 0;
4730 }
4731
4732 } // anon namespace
4733
4734 /* Construct and return a store merging pass object. */
4735
4736 gimple_opt_pass *
4737 make_pass_store_merging (gcc::context *ctxt)
4738 {
4739 return new pass_store_merging (ctxt);
4740 }
4741
4742 #if CHECKING_P
4743
4744 namespace selftest {
4745
4746 /* Selftests for store merging helpers. */
4747
4748 /* Assert that all elements of the byte arrays X and Y, both of length N,
4749 are equal. */
4750
4751 static void
4752 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
4753 {
4754 for (unsigned int i = 0; i < n; i++)
4755 {
4756 if (x[i] != y[i])
4757 {
4758 fprintf (stderr, "Arrays do not match. X:\n");
4759 dump_char_array (stderr, x, n);
4760 fprintf (stderr, "Y:\n");
4761 dump_char_array (stderr, y, n);
4762 }
4763 ASSERT_EQ (x[i], y[i]);
4764 }
4765 }
4766
4767 /* Test shift_bytes_in_array and check that it carries bits across
4768 bytes correctly. */
4769
4770 static void
4771 verify_shift_bytes_in_array (void)
4772 {
4773 /* byte 1 | byte 0
4774 00011111 | 11100000. */
4775 unsigned char orig[2] = { 0xe0, 0x1f };
4776 unsigned char in[2];
4777 memcpy (in, orig, sizeof orig);
4778
4779 unsigned char expected[2] = { 0x80, 0x7f };
4780 shift_bytes_in_array (in, sizeof (in), 2);
4781 verify_array_eq (in, expected, sizeof (in));
4782
4783 memcpy (in, orig, sizeof orig);
4784 memcpy (expected, orig, sizeof orig);
4785 /* Check that shifting by zero doesn't change anything. */
4786 shift_bytes_in_array (in, sizeof (in), 0);
4787 verify_array_eq (in, expected, sizeof (in));
4788
4789 }
4790
4791 /* Test shift_bytes_in_array_right and check that it carries bits across
4792 bytes correctly. */
4793
4794 static void
4795 verify_shift_bytes_in_array_right (void)
4796 {
4797 /* byte 1 | byte 0
4798 00011111 | 11100000. */
4799 unsigned char orig[2] = { 0x1f, 0xe0};
4800 unsigned char in[2];
4801 memcpy (in, orig, sizeof orig);
4802 unsigned char expected[2] = { 0x07, 0xf8};
4803 shift_bytes_in_array_right (in, sizeof (in), 2);
4804 verify_array_eq (in, expected, sizeof (in));
4805
4806 memcpy (in, orig, sizeof orig);
4807 memcpy (expected, orig, sizeof orig);
4808 /* Check that shifting by zero doesn't change anything. */
4809 shift_bytes_in_array_right (in, sizeof (in), 0);
4810 verify_array_eq (in, expected, sizeof (in));
4811 }
4812
4813 /* Test that clear_bit_region clears exactly the bits asked for and
4814 nothing more. */
4815
4816 static void
4817 verify_clear_bit_region (void)
4818 {
4819 /* Start with all bits set and test clearing various patterns in them. */
4820 unsigned char orig[3] = { 0xff, 0xff, 0xff};
4821 unsigned char in[3];
4822 unsigned char expected[3];
4823 memcpy (in, orig, sizeof in);
4824
4825 /* Check zeroing out all the bits. */
4826 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
4827 expected[0] = expected[1] = expected[2] = 0;
4828 verify_array_eq (in, expected, sizeof in);
4829
4830 memcpy (in, orig, sizeof in);
4831 /* Leave the first and last bits intact. */
4832 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
4833 expected[0] = 0x1;
4834 expected[1] = 0;
4835 expected[2] = 0x80;
4836 verify_array_eq (in, expected, sizeof in);
4837 }
4838
4839 /* Test that clear_bit_region_be clears exactly the bits asked for and
4840 nothing more. */
4841
4842 static void
4843 verify_clear_bit_region_be (void)
4844 {
4845 /* Start with all bits set and test clearing various patterns in them. */
4846 unsigned char orig[3] = { 0xff, 0xff, 0xff};
4847 unsigned char in[3];
4848 unsigned char expected[3];
4849 memcpy (in, orig, sizeof in);
4850
4851 /* Check zeroing out all the bits. */
4852 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
4853 expected[0] = expected[1] = expected[2] = 0;
4854 verify_array_eq (in, expected, sizeof in);
4855
4856 memcpy (in, orig, sizeof in);
4857 /* Leave the first and last bits intact. */
4858 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
4859 expected[0] = 0x80;
4860 expected[1] = 0;
4861 expected[2] = 0x1;
4862 verify_array_eq (in, expected, sizeof in);
4863 }
4864
4865
4866 /* Run all of the selftests within this file. */
4867
4868 void
4869 store_merging_c_tests (void)
4870 {
4871 verify_shift_bytes_in_array ();
4872 verify_shift_bytes_in_array_right ();
4873 verify_clear_bit_region ();
4874 verify_clear_bit_region_be ();
4875 }
4876
4877 } // namespace selftest
4878 #endif /* CHECKING_P. */