/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in a store_operand_info struct.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
   objects are not adjacent, a new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
  p |LE|BE|
  ---------
  0 |34|12|
  1 |12|34|
  2 |78|56|
  3 |56|78|
  4 |ab|ab|
  5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //    val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     //   (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; //   (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //    val & 0x00000000ffff;  */
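
/* For illustration (not part of the pass itself), the simplest case handled
   by store merging looks roughly like the following at the source level,
   assuming the four fields occupy consecutive bytes:

     struct s { unsigned char a, b, c, d; };

     void
     f (struct s *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   If the target allows a 4-byte store at the required alignment, the four
   byte stores are replaced by a single 32-bit store of the concatenated
   constant, e.g. 0x04030201 on a little-endian target.  */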
143 #include "coretypes.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
151 #include "gimple-pretty-print.h"
154 #include "print-tree.h"
155 #include "tree-hash-traits.h"
156 #include "gimple-iterator.h"
157 #include "gimplify.h"
158 #include "gimple-fold.h"
159 #include "stor-layout.h"
162 #include "cfgcleanup.h"
163 #include "tree-cfg.h"
167 #include "gimplify-me.h"
169 #include "expr.h" /* For get_bit_range. */
170 #include "optabs-tree.h"
172 #include "selftest.h"
/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0       - target byte has the value 0
   FF      - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
	   type of the current expression.  For instance, for an array char a[],
	   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4
	   while (short) a[0] | ((short) a[0] << 1) would still have a size of 2
	   but this time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
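
/* For illustration: adjusted to a 4-byte symbolic number, CMPNOP becomes
   0x04030201 (a value that is simply loaded in target order) and CMPXCHG
   becomes 0x01020304 (a full byte swap); for a 2-byte symbolic number the
   corresponding values are 0x0201 and 0x0102.  */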

/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
                 struct symbolic_number *n,
                 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
        for (i = 0; i < count / BITS_PER_MARKER; i++)
          n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}

/* Check if STMT might be a byte swap or a nop from a memory source and returns
   the answer.  If so, REF is that memory source and the base of the memory area
   accessed and the offset of the access from that base are recorded in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                                   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
        {
          poly_offset_int boff = mem_ref_offset (base_addr);
          boff <<= LOG2_BITS_PER_UNIT;
          bit_offset += boff;
        }

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
        {
          tree byte_offset = wide_int_to_tree
            (sizetype, bits_to_bytes_round_down (bit_offset));
          bit_offset = num_trailing_bits (bit_offset);
          if (offset)
            offset = size_binop (PLUS_EXPR, offset, byte_offset);
          else
            offset = byte_offset;
        }

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */
442 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
443 gimple
*source_stmt2
, struct symbolic_number
*n2
,
444 struct symbolic_number
*n
)
449 struct symbolic_number
*n_start
;
451 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
452 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
454 rhs1
= TREE_OPERAND (rhs1
, 0);
455 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
456 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
458 rhs2
= TREE_OPERAND (rhs2
, 0);
  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
465 HOST_WIDE_INT start1
, start2
, start_sub
, end_sub
, end1
, end2
, end
;
466 struct symbolic_number
*toinc_n_ptr
, *n_end
;
467 basic_block bb1
, bb2
;
469 if (!n1
->base_addr
|| !n2
->base_addr
470 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
473 if (!n1
->offset
!= !n2
->offset
474 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
478 if (!(n2
->bytepos
- n1
->bytepos
).is_constant (&start2
))
484 start_sub
= start2
- start1
;
489 start_sub
= start1
- start2
;
492 bb1
= gimple_bb (source_stmt1
);
493 bb2
= gimple_bb (source_stmt2
);
494 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
495 source_stmt
= source_stmt1
;
497 source_stmt
= source_stmt2
;
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1
= start1
+ (n1
->range
- 1);
502 end2
= start2
+ (n2
->range
- 1);
506 end_sub
= end2
- end1
;
511 end_sub
= end1
- end2
;
513 n_end
= (end2
> end1
) ? n2
: n1
;
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN
)
517 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
519 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
521 n
->range
= end
- MIN (start1
, start2
) + 1;
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n
->range
> 64 / BITS_PER_MARKER
)
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
531 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
532 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
535 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
536 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
537 toinc_n_ptr
->n
+= inc
;
542 n
->range
= n1
->range
;
544 source_stmt
= source_stmt1
;
548 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
549 n
->alias_set
= n1
->alias_set
;
551 n
->alias_set
= ptr_type_node
;
552 n
->vuse
= n_start
->vuse
;
553 n
->base_addr
= n_start
->base_addr
;
554 n
->offset
= n_start
->offset
;
555 n
->src
= n_start
->src
;
556 n
->bytepos
= n_start
->bytepos
;
557 n
->type
= n_start
->type
;
558 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
560 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
562 uint64_t masked1
, masked2
;
564 masked1
= n1
->n
& mask
;
565 masked2
= n2
->n
& mask
;
566 if (masked1
&& masked2
&& masked1
!= masked2
)
569 n
->n
= n1
->n
| n2
->n
;
570 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */
582 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
585 tree rhs1
, rhs2
= NULL
;
586 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
587 enum gimple_rhs_class rhs_class
;
589 if (!limit
|| !is_gimple_assign (stmt
))
592 rhs1
= gimple_assign_rhs1 (stmt
);
594 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
601 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
602 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
603 if (bitpos
% BITS_PER_UNIT
== 0
604 && bitsize
% BITS_PER_UNIT
== 0
605 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
607 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
608 if (BYTES_BIG_ENDIAN
)
609 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
612 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
617 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
618 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
619 i
++, tmp
<<= BITS_PER_UNIT
)
620 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
624 n
->type
= TREE_TYPE (rhs1
);
626 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
628 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
634 if (TREE_CODE (rhs1
) != SSA_NAME
)
637 code
= gimple_assign_rhs_code (stmt
);
638 rhs_class
= gimple_assign_rhs_class (stmt
);
639 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
641 if (rhs_class
== GIMPLE_BINARY_RHS
)
642 rhs2
= gimple_assign_rhs2 (stmt
);
  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */
647 if (rhs_class
== GIMPLE_UNARY_RHS
648 || (rhs_class
== GIMPLE_BINARY_RHS
649 && TREE_CODE (rhs2
) == INTEGER_CST
))
651 if (code
!= BIT_AND_EXPR
652 && code
!= LSHIFT_EXPR
653 && code
!= RSHIFT_EXPR
654 && code
!= LROTATE_EXPR
655 && code
!= RROTATE_EXPR
656 && !CONVERT_EXPR_CODE_P (code
))
659 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
661 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
662 we have to initialize the symbolic number. */
665 if (gimple_assign_load_p (stmt
)
666 || !init_symbolic_number (n
, rhs1
))
675 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
676 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
677 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
679 /* Only constants masking full bytes are allowed. */
680 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
681 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
684 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
693 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
698 int i
, type_size
, old_type_size
;
701 type
= gimple_expr_type (stmt
);
702 type_size
= TYPE_PRECISION (type
);
703 if (type_size
% BITS_PER_UNIT
!= 0)
705 type_size
/= BITS_PER_UNIT
;
706 if (type_size
> 64 / BITS_PER_MARKER
)
709 /* Sign extension: result is dependent on the value. */
710 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
711 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
712 && HEAD_MARKER (n
->n
, old_type_size
))
713 for (i
= 0; i
< type_size
- old_type_size
; i
++)
714 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
715 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
717 if (type_size
< 64 / BITS_PER_MARKER
)
719 /* If STMT casts to a smaller type mask out the bits not
720 belonging to the target type. */
721 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
725 n
->range
= type_size
;
731 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
734 /* Handle binary rhs. */
736 if (rhs_class
== GIMPLE_BINARY_RHS
)
738 struct symbolic_number n1
, n2
;
739 gimple
*source_stmt
, *source_stmt2
;
741 if (code
!= BIT_IOR_EXPR
)
744 if (TREE_CODE (rhs2
) != SSA_NAME
)
747 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
752 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
757 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
762 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
765 if (n1
.vuse
!= n2
.vuse
)
769 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
774 if (!verify_symbolic_number_p (n
, stmt
))
786 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
787 *CMPXCHG, *CMPNOP and adjust *N. */
790 find_bswap_or_nop_finalize (struct symbolic_number
*n
, uint64_t *cmpxchg
,
796 /* The number which the find_bswap_or_nop_1 result should match in order
797 to have a full byte swap. The number is shifted to the right
798 according to the size of the symbolic number before using it. */
802 /* Find real size of result (highest non-zero byte). */
804 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
810 if (n
->range
< (int) sizeof (int64_t))
812 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
813 *cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
817 /* Zero out the bits corresponding to unused bytes in the result of the
818 gimple expression. */
819 if (rsize
< n
->range
)
821 if (BYTES_BIG_ENDIAN
)
823 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
825 *cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
829 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
830 *cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
836 n
->range
*= BITS_PER_UNIT
;
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */
848 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
850 /* The last parameter determines the depth search limit. It usually
851 correlates directly to the number n of bytes to be touched. We
852 increase that number by 2 * (log2(n) + 1) here in order to also
853 cover signed -> unsigned conversions of the src operand as can be seen
854 in libgcc, and for initial shift/and operation of the src operand. */
855 int limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
856 limit
+= 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
));
857 gimple
*ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
862 uint64_t cmpxchg
, cmpnop
;
863 find_bswap_or_nop_finalize (n
, &cmpxchg
, &cmpnop
);
  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
870 else if (n
->n
== cmpxchg
)
875 /* Useless bit manipulation performed by code. */
876 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
882 const pass_data pass_data_optimize_bswap
=
884 GIMPLE_PASS
, /* type */
886 OPTGROUP_NONE
, /* optinfo_flags */
888 PROP_ssa
, /* properties_required */
889 0, /* properties_provided */
890 0, /* properties_destroyed */
891 0, /* todo_flags_start */
892 0, /* todo_flags_finish */
895 class pass_optimize_bswap
: public gimple_opt_pass
898 pass_optimize_bswap (gcc::context
*ctxt
)
899 : gimple_opt_pass (pass_data_optimize_bswap
, ctxt
)
902 /* opt_pass methods: */
903 virtual bool gate (function
*)
905 return flag_expensive_optimizations
&& optimize
&& BITS_PER_UNIT
== 8;
908 virtual unsigned int execute (function
*);
910 }; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replace the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */
928 bswap_replace (gimple_stmt_iterator gsi
, gimple
*ins_stmt
, tree fndecl
,
929 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
932 tree src
, tmp
, tgt
= NULL_TREE
;
935 gimple
*cur_stmt
= gsi_stmt (gsi
);
938 tgt
= gimple_assign_lhs (cur_stmt
);
940 /* Need to load the value from memory first. */
943 gimple_stmt_iterator gsi_ins
= gsi
;
945 gsi_ins
= gsi_for_stmt (ins_stmt
);
946 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
947 tree load_offset_ptr
, aligned_load_type
;
949 unsigned align
= get_object_alignment (src
);
950 poly_int64 load_offset
= 0;
954 basic_block ins_bb
= gimple_bb (ins_stmt
);
955 basic_block cur_bb
= gimple_bb (cur_stmt
);
956 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
	  /* Move cur_stmt just before one of the loads of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
962 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
963 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
964 gsi_move_before (&gsi
, &gsi_ins
);
965 gsi
= gsi_for_stmt (cur_stmt
);
      /* Compute address to load from and cast according to the size
	 of load_type.  */
972 addr_expr
= build_fold_addr_expr (src
);
973 if (is_gimple_mem_ref_addr (addr_expr
))
974 addr_tmp
= unshare_expr (addr_expr
);
977 addr_tmp
= unshare_expr (n
->base_addr
);
978 if (!is_gimple_mem_ref_addr (addr_tmp
))
979 addr_tmp
= force_gimple_operand_gsi_1 (&gsi
, addr_tmp
,
980 is_gimple_mem_ref_addr
,
983 load_offset
= n
->bytepos
;
987 = force_gimple_operand_gsi (&gsi
, unshare_expr (n
->offset
),
988 true, NULL_TREE
, true,
991 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp
)),
992 POINTER_PLUS_EXPR
, addr_tmp
, off
);
993 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
994 addr_tmp
= gimple_assign_lhs (stmt
);
998 /* Perform the load. */
999 aligned_load_type
= load_type
;
1000 if (align
< TYPE_ALIGN (load_type
))
1001 aligned_load_type
= build_aligned_type (load_type
, align
);
1002 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
1003 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
1009 nop_stats
.found_16bit
++;
1010 else if (n
->range
== 32)
1011 nop_stats
.found_32bit
++;
1014 gcc_assert (n
->range
== 64);
1015 nop_stats
.found_64bit
++;
1018 /* Convert the result of load if necessary. */
1019 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
1021 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
1023 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1024 gimple_set_vuse (load_stmt
, n
->vuse
);
1025 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1026 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
1027 update_stmt (cur_stmt
);
1031 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
1032 gimple_set_vuse (cur_stmt
, n
->vuse
);
1033 update_stmt (cur_stmt
);
1037 tgt
= make_ssa_name (load_type
);
1038 cur_stmt
= gimple_build_assign (tgt
, MEM_REF
, val_expr
);
1039 gimple_set_vuse (cur_stmt
, n
->vuse
);
1040 gsi_insert_before (&gsi
, cur_stmt
, GSI_SAME_STMT
);
1046 "%d bit load in target endianness found at: ",
1048 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1054 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
1055 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1056 gimple_set_vuse (load_stmt
, n
->vuse
);
1057 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1064 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
1066 if (!is_gimple_val (src
))
1068 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
1071 g
= gimple_build_assign (tgt
, src
);
1075 nop_stats
.found_16bit
++;
1076 else if (n
->range
== 32)
1077 nop_stats
.found_32bit
++;
1080 gcc_assert (n
->range
== 64);
1081 nop_stats
.found_64bit
++;
1086 "%d bit reshuffle in target endianness found at: ",
1089 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1092 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1093 fprintf (dump_file
, "\n");
1097 gsi_replace (&gsi
, g
, true);
1100 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
1101 src
= TREE_OPERAND (src
, 0);
1104 bswap_stats
.found_16bit
++;
1105 else if (n
->range
== 32)
1106 bswap_stats
.found_32bit
++;
1109 gcc_assert (n
->range
== 64);
1110 bswap_stats
.found_64bit
++;
1115 /* Convert the src expression if necessary. */
1116 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
1118 gimple
*convert_stmt
;
1120 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1121 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
1122 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are considered, as rotation of 2N-bit values by N bits is
     generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
1129 if (bswap
&& n
->range
== 16)
1131 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
1132 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
1133 bswap_stmt
= gimple_build_assign (NULL
, src
);
1136 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
1138 if (tgt
== NULL_TREE
)
1139 tgt
= make_ssa_name (bswap_type
);
1142 /* Convert the result if necessary. */
1143 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
1145 gimple
*convert_stmt
;
1147 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
1148 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
1149 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1152 gimple_set_lhs (bswap_stmt
, tmp
);
1156 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1159 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1162 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1163 fprintf (dump_file
, "\n");
1169 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1170 gsi_remove (&gsi
, true);
1173 gsi_insert_before (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   a simple load according to the target endianness.  */
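
/* For illustration, a hand-written 32-bit byte swap of the kind this pass
   is meant to recognize (given a suitable bswap optab it is rewritten into
   a single __builtin_bswap32 call):

     uint32_t
     swap32 (uint32_t x)
     {
       return ((x & 0x000000ffu) << 24)
              | ((x & 0x0000ff00u) << 8)
              | ((x & 0x00ff0000u) >> 8)
              | ((x & 0xff000000u) >> 24);
     }  */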
1183 pass_optimize_bswap::execute (function
*fun
)
1186 bool bswap32_p
, bswap64_p
;
1187 bool changed
= false;
1188 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1190 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1191 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1192 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1193 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1194 || (bswap32_p
&& word_mode
== SImode
)));
1196 /* Determine the argument type of the builtins. The code later on
1197 assumes that the return and argument type are the same. */
1200 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1201 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1206 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1207 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1210 memset (&nop_stats
, 0, sizeof (nop_stats
));
1211 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1212 calculate_dominance_info (CDI_DOMINATORS
);
1214 FOR_EACH_BB_FN (bb
, fun
)
1216 gimple_stmt_iterator gsi
;
1218 /* We do a reverse scan for bswap patterns to make sure we get the
1219 widest match. As bswap pattern matching doesn't handle previously
1220 inserted smaller bswap replacements as sub-patterns, the wider
1221 variant wouldn't be detected. */
1222 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
1224 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
1225 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
1226 enum tree_code code
;
1227 struct symbolic_number n
;
	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Doing the gsi_prev
	     here makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
1238 if (!is_gimple_assign (cur_stmt
))
1241 code
= gimple_assign_rhs_code (cur_stmt
);
1246 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
1247 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
1257 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
1265 /* Already in canonical form, nothing to do. */
1266 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
1268 load_type
= bswap_type
= uint16_type_node
;
1271 load_type
= uint32_type_node
;
1274 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1275 bswap_type
= bswap32_type
;
1279 load_type
= uint64_type_node
;
1282 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1283 bswap_type
= bswap64_type
;
1290 if (bswap
&& !fndecl
&& n
.range
!= 16)
1293 if (bswap_replace (gsi_for_stmt (cur_stmt
), ins_stmt
, fndecl
,
1294 bswap_type
, load_type
, &n
, bswap
))
1299 statistics_counter_event (fun
, "16-bit nop implementations found",
1300 nop_stats
.found_16bit
);
1301 statistics_counter_event (fun
, "32-bit nop implementations found",
1302 nop_stats
.found_32bit
);
1303 statistics_counter_event (fun
, "64-bit nop implementations found",
1304 nop_stats
.found_64bit
);
1305 statistics_counter_event (fun
, "16-bit bswap implementations found",
1306 bswap_stats
.found_16bit
);
1307 statistics_counter_event (fun
, "32-bit bswap implementations found",
1308 bswap_stats
.found_32bit
);
1309 statistics_counter_event (fun
, "64-bit bswap implementations found",
1310 bswap_stats
.found_64bit
);
1312 return (changed
? TODO_update_ssa
: 0);
1318 make_pass_optimize_bswap (gcc::context
*ctxt
)
1320 return new pass_optimize_bswap (ctxt
);
/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */
1331 class store_operand_info
1336 poly_uint64 bitsize
;
1338 poly_uint64 bitregion_start
;
1339 poly_uint64 bitregion_end
;
1342 store_operand_info ();
1345 store_operand_info::store_operand_info ()
1346 : val (NULL_TREE
), base_addr (NULL_TREE
), bitsize (0), bitpos (0),
1347 bitregion_start (0), bitregion_end (0), stmt (NULL
), bit_not_p (false)
1351 /* Struct recording the information about a single store of an immediate
1352 to memory. These are created in the first phase and coalesced into
1353 merged_store_group objects in the second phase. */
1355 class store_immediate_info
1358 unsigned HOST_WIDE_INT bitsize
;
1359 unsigned HOST_WIDE_INT bitpos
;
1360 unsigned HOST_WIDE_INT bitregion_start
;
1361 /* This is one past the last bit of the bit region. */
1362 unsigned HOST_WIDE_INT bitregion_end
;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy,
     BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
     for bit insertion.
     LROTATE_EXPR if it can be only bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
1372 enum tree_code rhs_code
;
1373 /* Two fields for bswap optimization purposes. */
1374 struct symbolic_number n
;
1376 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1378 /* True if ops have been swapped and thus ops[1] represents
1379 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1381 /* The index number of the landing pad, or 0 if there is none. */
1383 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1384 just the first one. */
1385 store_operand_info ops
[2];
1386 store_immediate_info (unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1387 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1388 gimple
*, unsigned int, enum tree_code
,
1389 struct symbolic_number
&, gimple
*, bool, int,
1390 const store_operand_info
&,
1391 const store_operand_info
&);
1394 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs
,
1395 unsigned HOST_WIDE_INT bp
,
1396 unsigned HOST_WIDE_INT brs
,
1397 unsigned HOST_WIDE_INT bre
,
1400 enum tree_code rhscode
,
1401 struct symbolic_number
&nr
,
1405 const store_operand_info
&op0r
,
1406 const store_operand_info
&op1r
)
1407 : bitsize (bs
), bitpos (bp
), bitregion_start (brs
), bitregion_end (bre
),
1408 stmt (st
), order (ord
), rhs_code (rhscode
), n (nr
),
1409 ins_stmt (ins_stmtp
), bit_not_p (bitnotp
), ops_swapped_p (false),
1411 #if __cplusplus >= 201103L
1412 , ops
{ op0r
, op1r
}
1422 /* Struct representing a group of stores to contiguous memory locations.
1423 These are produced by the second phase (coalescing) and consumed in the
1424 third phase that outputs the widened stores. */
1426 class merged_store_group
1429 unsigned HOST_WIDE_INT start
;
1430 unsigned HOST_WIDE_INT width
;
1431 unsigned HOST_WIDE_INT bitregion_start
;
1432 unsigned HOST_WIDE_INT bitregion_end
;
1433 /* The size of the allocated memory for val and mask. */
1434 unsigned HOST_WIDE_INT buf_size
;
1435 unsigned HOST_WIDE_INT align_base
;
1436 poly_uint64 load_align_base
[2];
1439 unsigned int load_align
[2];
1440 unsigned int first_order
;
1441 unsigned int last_order
;
1443 bool only_constants
;
1444 unsigned int first_nonmergeable_order
;
1447 auto_vec
<store_immediate_info
*> stores
;
1448 /* We record the first and last original statements in the sequence because
1449 we'll need their vuse/vdef and replacement position. It's easier to keep
1450 track of them separately as 'stores' is reordered by apply_stores. */
1454 unsigned char *mask
;
1456 merged_store_group (store_immediate_info
*);
1457 ~merged_store_group ();
1458 bool can_be_merged_into (store_immediate_info
*);
1459 void merge_into (store_immediate_info
*);
1460 void merge_overlapping (store_immediate_info
*);
1461 bool apply_stores ();
1463 void do_merge (store_immediate_info
*);
1466 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1469 dump_char_array (FILE *fd
, unsigned char *ptr
, unsigned int len
)
1474 for (unsigned int i
= 0; i
< len
; i
++)
1475 fprintf (fd
, "%02x ", ptr
[i
]);
1479 /* Clear out LEN bits starting from bit START in the byte array
1480 PTR. This clears the bits to the *right* from START.
1481 START must be within [0, BITS_PER_UNIT) and counts starting from
1482 the least significant bit. */
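/* For instance, with BITS_PER_UNIT == 8, clearing START == 6, LEN == 3 in a
   byte holding 0xff zeroes bits 6, 5 and 4 and leaves 0x8f.  */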
1485 clear_bit_region_be (unsigned char *ptr
, unsigned int start
,
1490 /* Clear len bits to the right of start. */
1491 else if (len
<= start
+ 1)
1493 unsigned char mask
= (~(~0U << len
));
1494 mask
= mask
<< (start
+ 1U - len
);
1497 else if (start
!= BITS_PER_UNIT
- 1)
1499 clear_bit_region_be (ptr
, start
, (start
% BITS_PER_UNIT
) + 1);
1500 clear_bit_region_be (ptr
+ 1, BITS_PER_UNIT
- 1,
1501 len
- (start
% BITS_PER_UNIT
) - 1);
1503 else if (start
== BITS_PER_UNIT
- 1
1504 && len
> BITS_PER_UNIT
)
1506 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1507 memset (ptr
, 0, nbytes
);
1508 if (len
% BITS_PER_UNIT
!= 0)
1509 clear_bit_region_be (ptr
+ nbytes
, BITS_PER_UNIT
- 1,
1510 len
% BITS_PER_UNIT
);
/* In the byte array PTR clear the bit region starting at bit
   START and LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */
1522 clear_bit_region (unsigned char *ptr
, unsigned int start
,
1525 /* Degenerate base case. */
1528 else if (start
>= BITS_PER_UNIT
)
1529 clear_bit_region (ptr
+ 1, start
- BITS_PER_UNIT
, len
);
1530 /* Second base case. */
1531 else if ((start
+ len
) <= BITS_PER_UNIT
)
1533 unsigned char mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- len
);
1534 mask
>>= BITS_PER_UNIT
- (start
+ len
);
1540 /* Clear most significant bits in a byte and proceed with the next byte. */
1541 else if (start
!= 0)
1543 clear_bit_region (ptr
, start
, BITS_PER_UNIT
- start
);
1544 clear_bit_region (ptr
+ 1, 0, len
- (BITS_PER_UNIT
- start
));
1546 /* Whole bytes need to be cleared. */
1547 else if (start
== 0 && len
> BITS_PER_UNIT
)
1549 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
1552 memset (ptr
, '\0', nbytes
);
1553 /* Clear the remaining sub-byte region if there is one. */
1554 if (len
% BITS_PER_UNIT
!= 0)
1555 clear_bit_region (ptr
+ nbytes
, 0, len
% BITS_PER_UNIT
);
1561 /* Write BITLEN bits of EXPR to the byte array PTR at
1562 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1563 Return true if the operation succeeded. */
1566 encode_tree_to_bitpos (tree expr
, unsigned char *ptr
, int bitlen
, int bitpos
,
1567 unsigned int total_bytes
)
1569 unsigned int first_byte
= bitpos
/ BITS_PER_UNIT
;
1570 bool sub_byte_op_p
= ((bitlen
% BITS_PER_UNIT
)
1571 || (bitpos
% BITS_PER_UNIT
)
1572 || !int_mode_for_size (bitlen
, 0).exists ());
1574 = (TREE_CODE (expr
) == CONSTRUCTOR
1575 && CONSTRUCTOR_NELTS (expr
) == 0
1576 && TYPE_SIZE_UNIT (TREE_TYPE (expr
))
1577 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr
))));
1581 if (first_byte
>= total_bytes
)
1583 total_bytes
-= first_byte
;
1586 unsigned HOST_WIDE_INT rhs_bytes
1587 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1588 if (rhs_bytes
> total_bytes
)
1590 memset (ptr
+ first_byte
, '\0', rhs_bytes
);
1593 return native_encode_expr (expr
, ptr
+ first_byte
, total_bytes
) != 0;
1597 We are writing a non byte-sized quantity or at a position that is not
1599 |--------|--------|--------| ptr + first_byte
1601 xxx xxxxxxxx xxx< bp>
1604 First native_encode_expr EXPR into a temporary buffer and shift each
1605 byte in the buffer by 'bp' (carrying the bits over as necessary).
1606 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1607 <------bitlen---->< bp>
1608 Then we clear the destination bits:
1609 |---00000|00000000|000-----| ptr + first_byte
1610 <-------bitlen--->< bp>
1612 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1613 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1616 We are writing a non byte-sized quantity or at a position that is not
1618 ptr + first_byte |--------|--------|--------|
1620 <bp >xxx xxxxxxxx xxx
1623 First native_encode_expr EXPR into a temporary buffer and shift each
1624 byte in the buffer to the right by (carrying the bits over as necessary).
1625 We shift by as much as needed to align the most significant bit of EXPR
1627 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1628 <---bitlen----> <bp ><-----bitlen----->
1629 Then we clear the destination bits:
1630 ptr + first_byte |-----000||00000000||00000---|
1631 <bp ><-------bitlen----->
1633 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1634 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1635 The awkwardness comes from the fact that bitpos is counted from the
1636 most significant bit of a byte. */
1638 /* We must be dealing with fixed-size data at this point, since the
1639 total size is also fixed. */
1640 unsigned int byte_size
;
1643 unsigned HOST_WIDE_INT rhs_bytes
1644 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1645 if (rhs_bytes
> total_bytes
)
1647 byte_size
= rhs_bytes
;
1651 fixed_size_mode mode
1652 = as_a
<fixed_size_mode
> (TYPE_MODE (TREE_TYPE (expr
)));
1653 byte_size
= GET_MODE_SIZE (mode
);
  /* Allocate an extra byte so that we have space to shift into.  */
  byte_size++;
  unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
  memset (tmpbuf, '\0', byte_size);
);
1659 /* The store detection code should only have allowed constants that are
1660 accepted by native_encode_expr or empty ctors. */
1662 && native_encode_expr (expr
, tmpbuf
, byte_size
- 1) == 0)
1665 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1666 bytes to write. This means it can write more than
1667 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1668 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1669 bitlen and zero out the bits that are not relevant as well (that may
1670 contain a sign bit due to sign-extension). */
1671 unsigned int padding
1672 = byte_size
- ROUND_UP (bitlen
, BITS_PER_UNIT
) / BITS_PER_UNIT
- 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
1675 if (BYTES_BIG_ENDIAN
)
1678 byte_size
-= padding
;
1680 if (bitlen
% BITS_PER_UNIT
!= 0)
1682 if (BYTES_BIG_ENDIAN
)
1683 clear_bit_region_be (tmpbuf
, BITS_PER_UNIT
- 1,
1684 BITS_PER_UNIT
- (bitlen
% BITS_PER_UNIT
));
1686 clear_bit_region (tmpbuf
, bitlen
,
1687 byte_size
* BITS_PER_UNIT
- bitlen
);
1689 /* Left shifting relies on the last byte being clear if bitlen is
1690 a multiple of BITS_PER_UNIT, which might not be clear if
1691 there are padding bytes. */
1692 else if (!BYTES_BIG_ENDIAN
)
1693 tmpbuf
[byte_size
- 1] = '\0';
  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted into.  */
1697 if (BYTES_BIG_ENDIAN
)
1698 clear_bit_region_be (ptr
+ first_byte
,
1699 BITS_PER_UNIT
- 1 - (bitpos
% BITS_PER_UNIT
), bitlen
);
1701 clear_bit_region (ptr
+ first_byte
, bitpos
% BITS_PER_UNIT
, bitlen
);
1704 int bitlen_mod
= bitlen
% BITS_PER_UNIT
;
1705 int bitpos_mod
= bitpos
% BITS_PER_UNIT
;
1707 bool skip_byte
= false;
1708 if (BYTES_BIG_ENDIAN
)
      /* BITPOS and BITLEN are exactly aligned and no shifting
	 is necessary.  */
1712 if (bitpos_mod
+ bitlen_mod
== BITS_PER_UNIT
1713 || (bitpos_mod
== 0 && bitlen_mod
== 0))
1715 /* |. . . . . . . .|
1717 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1718 of the value until it aligns with 'bp' in the next byte over. */
1719 else if (bitpos_mod
+ bitlen_mod
< BITS_PER_UNIT
)
1721 shift_amnt
= bitlen_mod
+ bitpos_mod
;
1722 skip_byte
= bitlen_mod
!= 0;
1724 /* |. . . . . . . .|
1727 Shift the value right within the same byte so it aligns with 'bp'. */
1729 shift_amnt
= bitlen_mod
+ bitpos_mod
- BITS_PER_UNIT
;
1732 shift_amnt
= bitpos
% BITS_PER_UNIT
;
1734 /* Create the shifted version of EXPR. */
1735 if (!BYTES_BIG_ENDIAN
)
1737 shift_bytes_in_array_left (tmpbuf
, byte_size
, shift_amnt
);
1738 if (shift_amnt
== 0)
1743 gcc_assert (BYTES_BIG_ENDIAN
);
1744 shift_bytes_in_array_right (tmpbuf
, byte_size
, shift_amnt
);
      /* If shifting right forced us to move into the next byte skip the now
	 empty byte.  */
1754 /* Insert the bits from TMPBUF. */
1755 for (unsigned int i
= 0; i
< byte_size
; i
++)
1756 ptr
[first_byte
+ i
] |= tmpbuf
[i
];
1761 /* Sorting function for store_immediate_info objects.
1762 Sorts them by bitposition. */
1765 sort_by_bitpos (const void *x
, const void *y
)
1767 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1768 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1770 if ((*tmp
)->bitpos
< (*tmp2
)->bitpos
)
1772 else if ((*tmp
)->bitpos
> (*tmp2
)->bitpos
)
1775 /* If they are the same let's use the order which is guaranteed to
1777 return (*tmp
)->order
- (*tmp2
)->order
;
1780 /* Sorting function for store_immediate_info objects.
1781 Sorts them by the order field. */
1784 sort_by_order (const void *x
, const void *y
)
1786 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1787 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1789 if ((*tmp
)->order
< (*tmp2
)->order
)
1791 else if ((*tmp
)->order
> (*tmp2
)->order
)
/* Initialize a merged_store_group object from a store_immediate_info
   object.  */
1800 merged_store_group::merged_store_group (store_immediate_info
*info
)
1802 start
= info
->bitpos
;
1803 width
= info
->bitsize
;
1804 bitregion_start
= info
->bitregion_start
;
1805 bitregion_end
= info
->bitregion_end
;
1806 /* VAL has memory allocated for it in apply_stores once the group
1807 width has been finalized. */
1810 bit_insertion
= false;
1811 only_constants
= info
->rhs_code
== INTEGER_CST
;
1812 first_nonmergeable_order
= ~0U;
1813 lp_nr
= info
->lp_nr
;
1814 unsigned HOST_WIDE_INT align_bitpos
= 0;
1815 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1816 &align
, &align_bitpos
);
1817 align_base
= start
- align_bitpos
;
1818 for (int i
= 0; i
< 2; ++i
)
1820 store_operand_info
&op
= info
->ops
[i
];
1821 if (op
.base_addr
== NULL_TREE
)
1824 load_align_base
[i
] = 0;
1828 get_object_alignment_1 (op
.val
, &load_align
[i
], &align_bitpos
);
1829 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1833 stores
.safe_push (info
);
1834 last_stmt
= info
->stmt
;
1835 last_order
= info
->order
;
1836 first_stmt
= last_stmt
;
1837 first_order
= last_order
;
1841 merged_store_group::~merged_store_group ()
1847 /* Return true if the store described by INFO can be merged into the group. */
1850 merged_store_group::can_be_merged_into (store_immediate_info
*info
)
1852 /* Do not merge bswap patterns. */
1853 if (info
->rhs_code
== LROTATE_EXPR
)
1856 if (info
->lp_nr
!= lp_nr
)
1859 /* The canonical case. */
1860 if (info
->rhs_code
== stores
[0]->rhs_code
)
1863 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1864 if (info
->rhs_code
== BIT_INSERT_EXPR
&& stores
[0]->rhs_code
== INTEGER_CST
)
1867 if (stores
[0]->rhs_code
== BIT_INSERT_EXPR
&& info
->rhs_code
== INTEGER_CST
)
1870 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
1871 only for small regions since this can generate a lot of instructions. */
1872 if (info
->rhs_code
== MEM_REF
1873 && (stores
[0]->rhs_code
== INTEGER_CST
1874 || stores
[0]->rhs_code
== BIT_INSERT_EXPR
)
1875 && info
->bitregion_start
== stores
[0]->bitregion_start
1876 && info
->bitregion_end
== stores
[0]->bitregion_end
1877 && info
->bitregion_end
- info
->bitregion_start
<= MAX_FIXED_MODE_SIZE
)
1880 if (stores
[0]->rhs_code
== MEM_REF
1881 && (info
->rhs_code
== INTEGER_CST
1882 || info
->rhs_code
== BIT_INSERT_EXPR
)
1883 && info
->bitregion_start
== stores
[0]->bitregion_start
1884 && info
->bitregion_end
== stores
[0]->bitregion_end
1885 && info
->bitregion_end
- info
->bitregion_start
<= MAX_FIXED_MODE_SIZE
)
/* Helper method for merge_into and merge_overlapping to do
   the common part.  */
1895 merged_store_group::do_merge (store_immediate_info
*info
)
1897 bitregion_start
= MIN (bitregion_start
, info
->bitregion_start
);
1898 bitregion_end
= MAX (bitregion_end
, info
->bitregion_end
);
1900 unsigned int this_align
;
1901 unsigned HOST_WIDE_INT align_bitpos
= 0;
1902 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1903 &this_align
, &align_bitpos
);
1904 if (this_align
> align
)
1907 align_base
= info
->bitpos
- align_bitpos
;
1909 for (int i
= 0; i
< 2; ++i
)
1911 store_operand_info
&op
= info
->ops
[i
];
1915 get_object_alignment_1 (op
.val
, &this_align
, &align_bitpos
);
1916 if (this_align
> load_align
[i
])
1918 load_align
[i
] = this_align
;
1919 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1923 gimple
*stmt
= info
->stmt
;
1924 stores
.safe_push (info
);
1925 if (info
->order
> last_order
)
1927 last_order
= info
->order
;
1930 else if (info
->order
< first_order
)
1932 first_order
= info
->order
;
1935 if (info
->rhs_code
!= INTEGER_CST
)
1936 only_constants
= false;
/* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores already in the group.  */
1944 merged_store_group::merge_into (store_immediate_info
*info
)
1946 /* Make sure we're inserting in the position we think we're inserting. */
1947 gcc_assert (info
->bitpos
>= start
+ width
1948 && info
->bitregion_start
<= bitregion_end
);
1950 width
= info
->bitpos
+ info
->bitsize
- start
;
1954 /* Merge a store described by INFO into this merged store.
1955 INFO overlaps in some way with the current store (i.e. it's not contiguous
1956 which is handled by merged_store_group::merge_into). */
1959 merged_store_group::merge_overlapping (store_immediate_info
*info
)
1961 /* If the store extends the size of the group, extend the width. */
1962 if (info
->bitpos
+ info
->bitsize
> start
+ width
)
1963 width
= info
->bitpos
+ info
->bitsize
- start
;
1968 /* Go through all the recorded stores in this group in program order and
1969 apply their values to the VAL byte array to create the final merged
1970 value. Return true if the operation succeeded. */
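/* For example, a group covering bytes [0, 3] of the region that records the
   stores [p] := 0x12 and [p + 2B] := 0x3456 would, on a little-endian
   target, end up with VAL == { 0x12, 0x00, 0x56, 0x34 } and with the MASK
   bits for bytes 0, 2 and 3 cleared.  */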
1973 merged_store_group::apply_stores ()
  /* Make sure we have more than one store in the group, otherwise we cannot
     merge anything.  */
1977 if (bitregion_start
% BITS_PER_UNIT
!= 0
1978 || bitregion_end
% BITS_PER_UNIT
!= 0
1979 || stores
.length () == 1)
1982 stores
.qsort (sort_by_order
);
1983 store_immediate_info
*info
;
1985 /* Create a power-of-2-sized buffer for native_encode_expr. */
1986 buf_size
= 1 << ceil_log2 ((bitregion_end
- bitregion_start
) / BITS_PER_UNIT
);
1987 val
= XNEWVEC (unsigned char, 2 * buf_size
);
1988 mask
= val
+ buf_size
;
1989 memset (val
, 0, buf_size
);
1990 memset (mask
, ~0U, buf_size
);
1992 FOR_EACH_VEC_ELT (stores
, i
, info
)
1994 unsigned int pos_in_buffer
= info
->bitpos
- bitregion_start
;
1996 if (info
->ops
[0].val
&& info
->ops
[0].base_addr
== NULL_TREE
)
1997 cst
= info
->ops
[0].val
;
1998 else if (info
->ops
[1].val
&& info
->ops
[1].base_addr
== NULL_TREE
)
1999 cst
= info
->ops
[1].val
;
2005 if (info
->rhs_code
== BIT_INSERT_EXPR
)
2006 bit_insertion
= true;
2008 ret
= encode_tree_to_bitpos (cst
, val
, info
->bitsize
,
2009 pos_in_buffer
, buf_size
);
2011 unsigned char *m
= mask
+ (pos_in_buffer
/ BITS_PER_UNIT
);
2012 if (BYTES_BIG_ENDIAN
)
2013 clear_bit_region_be (m
, (BITS_PER_UNIT
- 1
2014 - (pos_in_buffer
% BITS_PER_UNIT
)),
2017 clear_bit_region (m
, pos_in_buffer
% BITS_PER_UNIT
, info
->bitsize
);
2018 if (cst
&& dump_file
&& (dump_flags
& TDF_DETAILS
))
2022 fputs ("After writing ", dump_file
);
2023 print_generic_expr (dump_file
, cst
, TDF_NONE
);
2024 fprintf (dump_file
, " of size " HOST_WIDE_INT_PRINT_DEC
2025 " at position %d\n", info
->bitsize
, pos_in_buffer
);
2026 fputs (" the merged value contains ", dump_file
);
2027 dump_char_array (dump_file
, val
, buf_size
);
2028 fputs (" the merged mask contains ", dump_file
);
2029 dump_char_array (dump_file
, mask
, buf_size
);
2031 fputs (" bit insertion is required\n", dump_file
);
2034 fprintf (dump_file
, "Failed to merge stores\n");
2039 stores
.qsort (sort_by_bitpos
);
2043 /* Structure describing the store chain. */
2045 class imm_store_chain_info
2048 /* Doubly-linked list that imposes an order on chain processing.
2049 PNXP (prev's next pointer) points to the head of a list, or to
2050 the next field in the previous chain in the list.
2051 See pass_store_merging::m_stores_head for more rationale. */
2052 imm_store_chain_info
*next
, **pnxp
;
2054 auto_vec
<store_immediate_info
*> m_store_info
;
2055 auto_vec
<merged_store_group
*> m_merged_store_groups
;
2057 imm_store_chain_info (imm_store_chain_info
*&inspt
, tree b_a
)
2058 : next (inspt
), pnxp (&inspt
), base_addr (b_a
)
2063 gcc_checking_assert (pnxp
== next
->pnxp
);
2067 ~imm_store_chain_info ()
2072 gcc_checking_assert (&next
== next
->pnxp
);
2076 bool terminate_and_process_chain ();
2077 bool try_coalesce_bswap (merged_store_group
*, unsigned int, unsigned int);
2078 bool coalesce_immediate_stores ();
2079 bool output_merged_store (merged_store_group
*);
2080 bool output_merged_stores ();
const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,	/* properties_required */
  0,		/* properties_provided */
  0,		/* properties_destroyed */
  0,		/* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endian, nor for insane hosts or
     target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
	   && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	   && CHAR_BIT == 8
	   && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  bool process_store (gimple *);
  bool terminate_and_process_chain (imm_store_chain_info *);
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_process_all_chains ();
}; // class pass_store_merging
/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_process_chain (m_stores_head);
  gcc_assert (m_stores.is_empty ());
  return ret;
}
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  Return true if any changes were made.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  ao_ref store_lhs_ref;
  ao_ref_init (&store_lhs_ref, store_lhs);
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  ao_ref lhs_ref;
	  ao_ref_init (&lhs_ref, lhs);
	  if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
	      || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
	      || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
						   &lhs_ref, false)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      ret |= terminate_and_process_chain (cur);
	      break;
	    }
	}
    }

  return ret;
}
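/* For illustration (hypothetical example): in

     p_1->a = 1;
     foo ();          <-- has a vuse and may touch *p_1
     p_1->b = 2;

   the call may read or clobber the recorded store to p_1->a, so
   terminate_all_aliasing_chains processes and drops the chain for p_1
   before the second store is recorded, rather than merging across the
   call.  */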
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must have non-NULL vdef.  We want to
   be able to sink load of REF across stores between FIRST and LAST, up
   to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  /* Return true conservatively if the basic blocks are different.  */
  if (gimple_bb (first) != gimple_bb (last))
    return true;

  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      if (gimple_store_p (stmt)
	  && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);

  return false;
}
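/* For illustration (hypothetical example): when merging

     _1 = q_2->x; p_3->x = _1; p_3->y = 5;

   the load of q_2->x has to be re-emitted next to the single widened store,
   i.e. sunk across the stores in between; stmts_may_clobber_ref_p walks the
   virtual def-use chain from LAST back to FIRST and conservatively answers
   true if any statement in between might clobber q_2->x, or if the walk
   would exceed MAX_STORE_ALIAS_CHECKS.  */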
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
		   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads clobber with the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}
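/* For illustration (hypothetical example): if the value finally stored is

     _2 = *a_1;
     _4 = *b_3;
     _5 = (unsigned int) _4;
     _6 = _2 | _5;

   then gather_bswap_load_refs (&refs, _6) pushes both *a_1 and *b_3 onto
   REFS by recursing through the binary and unary assignments feeding _6.  */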
/* Check if there are any stores in M_STORE_INFO after index I
   (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
   a potential group ending with END that have their order
   smaller than LAST_ORDER.  ALL_INTEGER_CST_P is true if
   all the stores already merged and the one under consideration
   have rhs_code of INTEGER_CST.  Return true if there are no such stores.
   Consider:
     MEM[(long long int *)p_28] = 0;
     MEM[(long long int *)p_28 + 8B] = 0;
     MEM[(long long int *)p_28 + 16B] = 0;
     MEM[(long long int *)p_28 + 24B] = 0;
     MEM[(int *)p_28 + 8B] = _129;
     MEM[(int *)p_28].a = -1;
   We already have
     MEM[(long long int *)p_28] = 0;
     MEM[(int *)p_28].a = -1;
   stmts in the current group and need to consider if it is safe to
   add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
   There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
   store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
   into the group and merging of those 3 stores is successful, merged
   stmts will be emitted at the latest store from that group, i.e.
   LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
   The MEM[(int *)p_28 + 8B] = _129; store that originally follows
   the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
   so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
   into the group.  That way it will be its own store group and will
   not be touched.  If ALL_INTEGER_CST_P and there are overlapping
   INTEGER_CST stores, those are mergeable using merge_overlapping,
   so don't return false for those.  */

static bool
check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
		  bool all_integer_cst_p, unsigned int last_order,
		  unsigned HOST_WIDE_INT end)
{
  unsigned int len = m_store_info.length ();
  for (++i; i < len; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      if (info->bitpos >= end)
	break;
      if (info->order < last_order
	  && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
	return false;
    }
  return true;
}
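/* Illustrative user-level example of the pattern try_coalesce_bswap below
   can catch (made-up function, not from a testcase):

     void
     store_be32 (unsigned char *p, unsigned int x)
     {
       p[0] = x >> 24;
       p[1] = x >> 16;
       p[2] = x >> 8;
       p[3] = x;
     }

   On a little-endian target the four byte stores together store a
   byte-swapped X, so the group can be emitted as a __builtin_bswap32
   followed by a single 4-byte store when the target supports it.  */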
/* Return true if m_store_info[first] and at least one following store
   form a group which stores a try_size-bit value that is byte swapped
   from a memory load or some other value, or is the identity of some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
					  unsigned int first,
					  unsigned int try_size)
{
2434 unsigned int len
= m_store_info
.length (), last
= first
;
2435 unsigned HOST_WIDE_INT width
= m_store_info
[first
]->bitsize
;
2436 if (width
>= try_size
)
2438 for (unsigned int i
= first
+ 1; i
< len
; ++i
)
2440 if (m_store_info
[i
]->bitpos
!= m_store_info
[first
]->bitpos
+ width
2441 || m_store_info
[i
]->lp_nr
!= merged_store
->lp_nr
2442 || m_store_info
[i
]->ins_stmt
== NULL
)
2444 width
+= m_store_info
[i
]->bitsize
;
2445 if (width
>= try_size
)
2451 if (width
!= try_size
)
2454 bool allow_unaligned
2455 = !STRICT_ALIGNMENT
&& param_store_merging_allow_unaligned
;
2456 /* Punt if the combined store would not be aligned and we need alignment. */
2457 if (!allow_unaligned
)
2459 unsigned int align
= merged_store
->align
;
2460 unsigned HOST_WIDE_INT align_base
= merged_store
->align_base
;
2461 for (unsigned int i
= first
+ 1; i
<= last
; ++i
)
2463 unsigned int this_align
;
2464 unsigned HOST_WIDE_INT align_bitpos
= 0;
2465 get_object_alignment_1 (gimple_assign_lhs (m_store_info
[i
]->stmt
),
2466 &this_align
, &align_bitpos
);
2467 if (this_align
> align
)
2470 align_base
= m_store_info
[i
]->bitpos
- align_bitpos
;
2473 unsigned HOST_WIDE_INT align_bitpos
2474 = (m_store_info
[first
]->bitpos
- align_base
) & (align
- 1);
2476 align
= least_bit_hwi (align_bitpos
);
2477 if (align
< try_size
)
2484 case 16: type
= uint16_type_node
; break;
2485 case 32: type
= uint32_type_node
; break;
2486 case 64: type
= uint64_type_node
; break;
2487 default: gcc_unreachable ();
2489 struct symbolic_number n
;
2490 gimple
*ins_stmt
= NULL
;
2491 int vuse_store
= -1;
2492 unsigned int first_order
= merged_store
->first_order
;
2493 unsigned int last_order
= merged_store
->last_order
;
2494 gimple
*first_stmt
= merged_store
->first_stmt
;
2495 gimple
*last_stmt
= merged_store
->last_stmt
;
2496 unsigned HOST_WIDE_INT end
= merged_store
->start
+ merged_store
->width
;
2497 store_immediate_info
*infof
= m_store_info
[first
];
2499 for (unsigned int i
= first
; i
<= last
; ++i
)
2501 store_immediate_info
*info
= m_store_info
[i
];
2502 struct symbolic_number this_n
= info
->n
;
2504 if (!this_n
.base_addr
)
2505 this_n
.range
= try_size
/ BITS_PER_UNIT
;
2507 /* Update vuse in case it has changed by output_merged_stores. */
2508 this_n
.vuse
= gimple_vuse (info
->ins_stmt
);
2509 unsigned int bitpos
= info
->bitpos
- infof
->bitpos
;
2510 if (!do_shift_rotate (LSHIFT_EXPR
, &this_n
,
2512 ? try_size
- info
->bitsize
- bitpos
2515 if (this_n
.base_addr
&& vuse_store
)
2518 for (j
= first
; j
<= last
; ++j
)
2519 if (this_n
.vuse
== gimple_vuse (m_store_info
[j
]->stmt
))
2523 if (vuse_store
== 1)
2531 ins_stmt
= info
->ins_stmt
;
2535 if (n
.base_addr
&& n
.vuse
!= this_n
.vuse
)
2537 if (vuse_store
== 0)
2541 if (info
->order
> last_order
)
2543 last_order
= info
->order
;
2544 last_stmt
= info
->stmt
;
2546 else if (info
->order
< first_order
)
2548 first_order
= info
->order
;
2549 first_stmt
= info
->stmt
;
2551 end
= MAX (end
, info
->bitpos
+ info
->bitsize
);
2553 ins_stmt
= perform_symbolic_merge (ins_stmt
, &n
, info
->ins_stmt
,
2555 if (ins_stmt
== NULL
)
2560 uint64_t cmpxchg
, cmpnop
;
2561 find_bswap_or_nop_finalize (&n
, &cmpxchg
, &cmpnop
);
2563 /* A complete byte swap should make the symbolic number to start with
2564 the largest digit in the highest order byte. Unchanged symbolic
2565 number indicates a read with same endianness as target architecture. */
2566 if (n
.n
!= cmpnop
&& n
.n
!= cmpxchg
)
2569 if (n
.base_addr
== NULL_TREE
&& !is_gimple_val (n
.src
))
2572 if (!check_no_overlap (m_store_info
, last
, false, last_order
, end
))
2575 /* Don't handle memory copy this way if normal non-bswap processing
2576 would handle it too. */
2577 if (n
.n
== cmpnop
&& (unsigned) n
.n_ops
== last
- first
+ 1)
2580 for (i
= first
; i
<= last
; ++i
)
2581 if (m_store_info
[i
]->rhs_code
!= MEM_REF
)
2591 /* Will emit LROTATE_EXPR. */
2594 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
2595 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
)
2599 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
2600 && optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
)
2607 if (!allow_unaligned
&& n
.base_addr
)
2609 unsigned int align
= get_object_alignment (n
.src
);
2610 if (align
< try_size
)
2614 /* If each load has vuse of the corresponding store, need to verify
2615 the loads can be sunk right before the last store. */
2616 if (vuse_store
== 1)
2618 auto_vec
<tree
, 64> refs
;
2619 for (unsigned int i
= first
; i
<= last
; ++i
)
2620 gather_bswap_load_refs (&refs
,
2621 gimple_assign_rhs1 (m_store_info
[i
]->stmt
));
2625 FOR_EACH_VEC_ELT (refs
, i
, ref
)
2626 if (stmts_may_clobber_ref_p (first_stmt
, last_stmt
, ref
))
2632 infof
->ins_stmt
= ins_stmt
;
2633 for (unsigned int i
= first
; i
<= last
; ++i
)
2635 m_store_info
[i
]->rhs_code
= n
.n
== cmpxchg
? LROTATE_EXPR
: NOP_EXPR
;
2636 m_store_info
[i
]->ops
[0].base_addr
= NULL_TREE
;
2637 m_store_info
[i
]->ops
[1].base_addr
= NULL_TREE
;
2639 merged_store
->merge_into (m_store_info
[i
]);
/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of statements.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
2654 /* Anything less can't be processed. */
2655 if (m_store_info
.length () < 2)
2658 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2659 fprintf (dump_file
, "Attempting to coalesce %u stores in chain\n",
2660 m_store_info
.length ());
2662 store_immediate_info
*info
;
2663 unsigned int i
, ignore
= 0;
2665 /* Order the stores by the bitposition they write to. */
2666 m_store_info
.qsort (sort_by_bitpos
);
2668 info
= m_store_info
[0];
2669 merged_store_group
*merged_store
= new merged_store_group (info
);
2670 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2671 fputs ("New store group\n", dump_file
);
2673 FOR_EACH_VEC_ELT (m_store_info
, i
, info
)
2675 unsigned HOST_WIDE_INT new_bitregion_start
, new_bitregion_end
;
2680 /* First try to handle group of stores like:
2685 using the bswap framework. */
2686 if (info
->bitpos
== merged_store
->start
+ merged_store
->width
2687 && merged_store
->stores
.length () == 1
2688 && merged_store
->stores
[0]->ins_stmt
!= NULL
2689 && info
->lp_nr
== merged_store
->lp_nr
2690 && info
->ins_stmt
!= NULL
)
2692 unsigned int try_size
;
2693 for (try_size
= 64; try_size
>= 16; try_size
>>= 1)
2694 if (try_coalesce_bswap (merged_store
, i
- 1, try_size
))
2699 ignore
= i
+ merged_store
->stores
.length () - 1;
2700 m_merged_store_groups
.safe_push (merged_store
);
2701 if (ignore
< m_store_info
.length ())
2702 merged_store
= new merged_store_group (m_store_info
[ignore
]);
2704 merged_store
= NULL
;
2710 = MIN (merged_store
->bitregion_start
, info
->bitregion_start
);
2712 = MAX (merged_store
->bitregion_end
, info
->bitregion_end
);
2714 if (info
->order
>= merged_store
->first_nonmergeable_order
2715 || (((new_bitregion_end
- new_bitregion_start
+ 1) / BITS_PER_UNIT
)
2716 > (unsigned) param_store_merging_max_size
))
2721 Overlapping stores. */
2722 else if (IN_RANGE (info
->bitpos
, merged_store
->start
,
2723 merged_store
->start
+ merged_store
->width
- 1)
2724 /* |---store 1---||---store 2---|
2725 Handle also the consecutive INTEGER_CST stores case here,
2726 as we have here the code to deal with overlaps. */
2727 || (info
->bitregion_start
<= merged_store
->bitregion_end
2728 && info
->rhs_code
== INTEGER_CST
2729 && merged_store
->only_constants
2730 && merged_store
->can_be_merged_into (info
)))
2732 /* Only allow overlapping stores of constants. */
2733 if (info
->rhs_code
== INTEGER_CST
2734 && merged_store
->only_constants
2735 && info
->lp_nr
== merged_store
->lp_nr
)
2737 unsigned int last_order
2738 = MAX (merged_store
->last_order
, info
->order
);
2739 unsigned HOST_WIDE_INT end
2740 = MAX (merged_store
->start
+ merged_store
->width
,
2741 info
->bitpos
+ info
->bitsize
);
2742 if (check_no_overlap (m_store_info
, i
, true, last_order
, end
))
2744 /* check_no_overlap call above made sure there are no
2745 overlapping stores with non-INTEGER_CST rhs_code
2746 in between the first and last of the stores we've
2747 just merged. If there are any INTEGER_CST rhs_code
2748 stores in between, we need to merge_overlapping them
2749 even if in the sort_by_bitpos order there are other
2750 overlapping stores in between. Keep those stores as is.
2752 MEM[(int *)p_28] = 0;
2753 MEM[(char *)p_28 + 3B] = 1;
2754 MEM[(char *)p_28 + 1B] = 2;
2755 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2756 We can't merge the zero store with the store of two and
2757 not merge anything else, because the store of one is
2758 in the original order in between those two, but in
2759 store_by_bitpos order it comes after the last store that
2760 we can't merge with them. We can merge the first 3 stores
2761 and keep the last store as is though. */
2762 unsigned int len
= m_store_info
.length ();
2763 unsigned int try_order
= last_order
;
2764 unsigned int first_nonmergeable_order
;
2766 bool last_iter
= false;
2770 unsigned int max_order
= 0;
2771 unsigned first_nonmergeable_int_order
= ~0U;
2772 unsigned HOST_WIDE_INT this_end
= end
;
2774 first_nonmergeable_order
= ~0U;
2775 for (unsigned int j
= i
+ 1; j
< len
; ++j
)
2777 store_immediate_info
*info2
= m_store_info
[j
];
2778 if (info2
->bitpos
>= this_end
)
2780 if (info2
->order
< try_order
)
2782 if (info2
->rhs_code
!= INTEGER_CST
2783 || info2
->lp_nr
!= merged_store
->lp_nr
)
2785 /* Normally check_no_overlap makes sure this
2786 doesn't happen, but if end grows below,
2787 then we need to process more stores than
2788 check_no_overlap verified. Example:
2789 MEM[(int *)p_5] = 0;
2790 MEM[(short *)p_5 + 3B] = 1;
2791 MEM[(char *)p_5 + 4B] = _9;
2792 MEM[(char *)p_5 + 2B] = 2; */
2797 this_end
= MAX (this_end
,
2798 info2
->bitpos
+ info2
->bitsize
);
2800 else if (info2
->rhs_code
== INTEGER_CST
2801 && info2
->lp_nr
== merged_store
->lp_nr
2804 max_order
= MAX (max_order
, info2
->order
+ 1);
2805 first_nonmergeable_int_order
2806 = MIN (first_nonmergeable_int_order
,
2810 first_nonmergeable_order
2811 = MIN (first_nonmergeable_order
, info2
->order
);
2815 if (last_order
== try_order
)
2817 /* If this failed, but only because we grew
2818 try_order, retry with the last working one,
2819 so that we merge at least something. */
2820 try_order
= last_order
;
2824 last_order
= try_order
;
2825 /* Retry with a larger try_order to see if we could
2826 merge some further INTEGER_CST stores. */
2828 && (first_nonmergeable_int_order
2829 < first_nonmergeable_order
))
2831 try_order
= MIN (max_order
,
2832 first_nonmergeable_order
);
2835 merged_store
->first_nonmergeable_order
);
2836 if (try_order
> last_order
&& ++attempts
< 16)
2839 first_nonmergeable_order
2840 = MIN (first_nonmergeable_order
,
2841 first_nonmergeable_int_order
);
2849 merged_store
->merge_overlapping (info
);
2851 merged_store
->first_nonmergeable_order
2852 = MIN (merged_store
->first_nonmergeable_order
,
2853 first_nonmergeable_order
);
2855 for (unsigned int j
= i
+ 1; j
<= k
; j
++)
2857 store_immediate_info
*info2
= m_store_info
[j
];
2858 gcc_assert (info2
->bitpos
< end
);
2859 if (info2
->order
< last_order
)
2861 gcc_assert (info2
->rhs_code
== INTEGER_CST
);
2863 merged_store
->merge_overlapping (info2
);
2865 /* Other stores are kept and not merged in any
2874 /* |---store 1---||---store 2---|
2875 This store is consecutive to the previous one.
2876 Merge it into the current store group. There can be gaps in between
2877 the stores, but there can't be gaps in between bitregions. */
2878 else if (info
->bitregion_start
<= merged_store
->bitregion_end
2879 && merged_store
->can_be_merged_into (info
))
2881 store_immediate_info
*infof
= merged_store
->stores
[0];
2883 /* All the rhs_code ops that take 2 operands are commutative,
2884 swap the operands if it could make the operands compatible. */
2885 if (infof
->ops
[0].base_addr
2886 && infof
->ops
[1].base_addr
2887 && info
->ops
[0].base_addr
2888 && info
->ops
[1].base_addr
2889 && known_eq (info
->ops
[1].bitpos
- infof
->ops
[0].bitpos
,
2890 info
->bitpos
- infof
->bitpos
)
2891 && operand_equal_p (info
->ops
[1].base_addr
,
2892 infof
->ops
[0].base_addr
, 0))
2894 std::swap (info
->ops
[0], info
->ops
[1]);
2895 info
->ops_swapped_p
= true;
2897 if (check_no_overlap (m_store_info
, i
, false,
2898 MAX (merged_store
->last_order
, info
->order
),
2899 MAX (merged_store
->start
+ merged_store
->width
,
2900 info
->bitpos
+ info
->bitsize
)))
2902 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
2903 if (info
->rhs_code
== MEM_REF
&& infof
->rhs_code
!= MEM_REF
)
2905 info
->rhs_code
= BIT_INSERT_EXPR
;
2906 info
->ops
[0].val
= gimple_assign_rhs1 (info
->stmt
);
2907 info
->ops
[0].base_addr
= NULL_TREE
;
2909 else if (infof
->rhs_code
== MEM_REF
&& info
->rhs_code
!= MEM_REF
)
2911 store_immediate_info
*infoj
;
2913 FOR_EACH_VEC_ELT (merged_store
->stores
, j
, infoj
)
2915 infoj
->rhs_code
= BIT_INSERT_EXPR
;
2916 infoj
->ops
[0].val
= gimple_assign_rhs1 (infoj
->stmt
);
2917 infoj
->ops
[0].base_addr
= NULL_TREE
;
2920 if ((infof
->ops
[0].base_addr
2921 ? compatible_load_p (merged_store
, info
, base_addr
, 0)
2922 : !info
->ops
[0].base_addr
)
2923 && (infof
->ops
[1].base_addr
2924 ? compatible_load_p (merged_store
, info
, base_addr
, 1)
2925 : !info
->ops
[1].base_addr
))
2927 merged_store
->merge_into (info
);
2933 /* |---store 1---| <gap> |---store 2---|.
2934 Gap between stores or the rhs not compatible. Start a new group. */
2936 /* Try to apply all the stores recorded for the group to determine
2937 the bitpattern they write and discard it if that fails.
2938 This will also reject single-store groups. */
2939 if (merged_store
->apply_stores ())
2940 m_merged_store_groups
.safe_push (merged_store
);
2942 delete merged_store
;
2944 merged_store
= new merged_store_group (info
);
2945 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2946 fputs ("New store group\n", dump_file
);
2949 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2951 fprintf (dump_file
, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2952 " bitpos:" HOST_WIDE_INT_PRINT_DEC
" val:",
2953 i
, info
->bitsize
, info
->bitpos
);
2954 print_generic_expr (dump_file
, gimple_assign_rhs1 (info
->stmt
));
2955 fputc ('\n', dump_file
);
2959 /* Record or discard the last store group. */
2962 if (merged_store
->apply_stores ())
2963 m_merged_store_groups
.safe_push (merged_store
);
2965 delete merged_store
;
2968 gcc_assert (m_merged_store_groups
.length () <= m_store_info
.length ());
2971 = !m_merged_store_groups
.is_empty ()
2972 && m_merged_store_groups
.length () < m_store_info
.length ();
2974 if (success
&& dump_file
)
2975 fprintf (dump_file
, "Coalescing successful!\nMerged into %u stores\n",
2976 m_merged_store_groups
.length ());
2981 /* Return the type to use for the merged stores or loads described by STMTS.
2982 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
2983 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
2984 of the MEM_REFs if any. */
static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
2992 tree type
= NULL_TREE
;
2993 tree ret
= NULL_TREE
;
2997 FOR_EACH_VEC_ELT (stmts
, i
, stmt
)
2999 tree ref
= is_load
? gimple_assign_rhs1 (stmt
)
3000 : gimple_assign_lhs (stmt
);
3001 tree type1
= reference_alias_ptr_type (ref
);
3002 tree base
= get_base_address (ref
);
3006 if (TREE_CODE (base
) == MEM_REF
)
3008 *cliquep
= MR_DEPENDENCE_CLIQUE (base
);
3009 *basep
= MR_DEPENDENCE_BASE (base
);
3014 if (!alias_ptr_types_compatible_p (type
, type1
))
3015 ret
= ptr_type_node
;
3016 if (TREE_CODE (base
) != MEM_REF
3017 || *cliquep
!= MR_DEPENDENCE_CLIQUE (base
)
3018 || *basep
!= MR_DEPENDENCE_BASE (base
))
/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
3036 FOR_EACH_VEC_ELT (stmts
, i
, stmt
)
3037 if (gimple_has_location (stmt
))
3038 return gimple_location (stmt
);
3040 return UNKNOWN_LOCATION
;
/* Used to describe a store resulting from splitting a wide store into smaller
   regularly-sized stores in split_group.  */

class split_store
{
public:
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
3069 /* Record all stores in GROUP that write to the region starting at BITPOS and
3070 is of size BITSIZE. Record infos for such statements in STORES if
3071 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3072 if there is exactly one original store in the range (in that case ignore
3073 clobber stmts, unless there are only clobber stmts). */
static store_immediate_info *
find_constituent_stores (class merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
3082 store_immediate_info
*info
, *ret
= NULL
;
3084 bool second
= false;
3085 bool update_first
= true;
3086 unsigned HOST_WIDE_INT end
= bitpos
+ bitsize
;
3087 for (i
= *first
; group
->stores
.iterate (i
, &info
); ++i
)
3089 unsigned HOST_WIDE_INT stmt_start
= info
->bitpos
;
3090 unsigned HOST_WIDE_INT stmt_end
= stmt_start
+ info
->bitsize
;
3091 if (stmt_end
<= bitpos
)
3093 /* BITPOS passed to this function never decreases from within the
3094 same split_group call, so optimize and don't scan info records
3095 which are known to end before or at BITPOS next time.
3096 Only do it if all stores before this one also pass this. */
3102 update_first
= false;
3104 /* The stores in GROUP are ordered by bitposition so if we're past
3105 the region for this group return early. */
3106 if (stmt_start
>= end
)
3109 if (gimple_clobber_p (info
->stmt
))
3112 stores
->safe_push (info
);
3119 stores
->safe_push (info
);
3120 if (ret
&& !gimple_clobber_p (ret
->stmt
))
3126 else if (ret
&& !gimple_clobber_p (ret
->stmt
))
3134 /* Return how many SSA_NAMEs used to compute value to store in the INFO
3135 store have multiple uses. If any SSA_NAME has multiple uses, also
3136 count statements needed to compute it. */
static unsigned
count_multiple_uses (store_immediate_info *info)
{
3141 gimple
*stmt
= info
->stmt
;
3143 switch (info
->rhs_code
)
3150 if (info
->bit_not_p
)
3152 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3153 ret
= 1; /* Fall through below to return
3154 the BIT_NOT_EXPR stmt and then
3155 BIT_{AND,IOR,XOR}_EXPR and anything it
3158 /* stmt is after this the BIT_NOT_EXPR. */
3159 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3161 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3163 ret
+= 1 + info
->ops
[0].bit_not_p
;
3164 if (info
->ops
[1].base_addr
)
3165 ret
+= 1 + info
->ops
[1].bit_not_p
;
3168 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3169 /* stmt is now the BIT_*_EXPR. */
3170 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3171 ret
+= 1 + info
->ops
[info
->ops_swapped_p
].bit_not_p
;
3172 else if (info
->ops
[info
->ops_swapped_p
].bit_not_p
)
3174 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3175 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
3178 if (info
->ops
[1].base_addr
== NULL_TREE
)
3180 gcc_checking_assert (!info
->ops_swapped_p
);
3183 if (!has_single_use (gimple_assign_rhs2 (stmt
)))
3184 ret
+= 1 + info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
;
3185 else if (info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
)
3187 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt
));
3188 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
3193 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3194 return 1 + info
->ops
[0].bit_not_p
;
3195 else if (info
->ops
[0].bit_not_p
)
3197 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3198 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3202 case BIT_INSERT_EXPR
:
3203 return has_single_use (gimple_assign_rhs1 (stmt
)) ? 0 : 1;
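/* Illustrative sketch of the splitting done by split_group below
   (hypothetical example, not from a testcase): a merged group covering
   6 bytes starting at a 4-byte aligned address is typically described by
   two split_store records, a 4-byte piece at byte offset 0 and a 2-byte
   piece at byte offset 4; bytes whose mask says they were never written by
   the group (padding) can be skipped instead of getting a store of their
   own.  */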
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   BZERO_FIRST may be true only when the first store covers the whole group
   and clears it; if BZERO_FIRST is true, keep that first store in the set
   unmodified and emit further stores for the overrides only.
   If SPLIT_STORES is NULL, it is just a dry run to count the number of
   new stores.  */
static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
	     bool allow_unaligned_load, bool bzero_first,
	     vec<split_store *> *split_stores,
	     unsigned *total_orig,
	     unsigned *total_new)
{
= group
->bitregion_start
;
3232 unsigned HOST_WIDE_INT size
= group
->bitregion_end
- pos
;
3233 unsigned HOST_WIDE_INT bytepos
= pos
/ BITS_PER_UNIT
;
3234 unsigned HOST_WIDE_INT group_align
= group
->align
;
3235 unsigned HOST_WIDE_INT align_base
= group
->align_base
;
3236 unsigned HOST_WIDE_INT group_load_align
= group_align
;
3237 bool any_orig
= false;
3239 gcc_assert ((size
% BITS_PER_UNIT
== 0) && (pos
% BITS_PER_UNIT
== 0));
3241 if (group
->stores
[0]->rhs_code
== LROTATE_EXPR
3242 || group
->stores
[0]->rhs_code
== NOP_EXPR
)
3244 gcc_assert (!bzero_first
);
3245 /* For bswap framework using sets of stores, all the checking
3246 has been done earlier in try_coalesce_bswap and needs to be
3247 emitted as a single store. */
3250 /* Avoid the old/new stmt count heuristics. It should be
3251 always beneficial. */
3258 unsigned HOST_WIDE_INT align_bitpos
3259 = (group
->start
- align_base
) & (group_align
- 1);
3260 unsigned HOST_WIDE_INT align
= group_align
;
3262 align
= least_bit_hwi (align_bitpos
);
3263 bytepos
= group
->start
/ BITS_PER_UNIT
;
3265 = new split_store (bytepos
, group
->width
, align
);
3266 unsigned int first
= 0;
3267 find_constituent_stores (group
, &store
->orig_stores
,
3268 &first
, group
->start
, group
->width
);
3269 split_stores
->safe_push (store
);
3275 unsigned int ret
= 0, first
= 0;
3276 unsigned HOST_WIDE_INT try_pos
= bytepos
;
3281 store_immediate_info
*info
= group
->stores
[0];
3284 total_orig
[0] = 1; /* The orig store. */
3285 info
= group
->stores
[0];
3286 if (info
->ops
[0].base_addr
)
3288 if (info
->ops
[1].base_addr
)
3290 switch (info
->rhs_code
)
3295 total_orig
[0]++; /* The orig BIT_*_EXPR stmt. */
3300 total_orig
[0] *= group
->stores
.length ();
3302 FOR_EACH_VEC_ELT (group
->stores
, i
, info
)
3304 total_new
[0] += count_multiple_uses (info
);
3305 total_orig
[0] += (info
->bit_not_p
3306 + info
->ops
[0].bit_not_p
3307 + info
->ops
[1].bit_not_p
);
3311 if (!allow_unaligned_load
)
3312 for (int i
= 0; i
< 2; ++i
)
3313 if (group
->load_align
[i
])
3314 group_load_align
= MIN (group_load_align
, group
->load_align
[i
]);
3318 store_immediate_info
*gstore
;
3319 FOR_EACH_VEC_ELT (group
->stores
, first
, gstore
)
3320 if (!gimple_clobber_p (gstore
->stmt
))
3327 = new split_store (bytepos
, gstore
->bitsize
, align_base
);
3328 store
->orig_stores
.safe_push (gstore
);
3331 split_stores
->safe_push (store
);
3337 if ((allow_unaligned_store
|| group_align
<= BITS_PER_UNIT
)
3338 && (group
->mask
[try_pos
- bytepos
] == (unsigned char) ~0U
3339 || (bzero_first
&& group
->val
[try_pos
- bytepos
] == 0)))
3341 /* Skip padding bytes. */
3343 size
-= BITS_PER_UNIT
;
3347 unsigned HOST_WIDE_INT try_bitpos
= try_pos
* BITS_PER_UNIT
;
3348 unsigned int try_size
= MAX_STORE_BITSIZE
, nonmasked
;
3349 unsigned HOST_WIDE_INT align_bitpos
3350 = (try_bitpos
- align_base
) & (group_align
- 1);
3351 unsigned HOST_WIDE_INT align
= group_align
;
3352 bool found_orig
= false;
3354 align
= least_bit_hwi (align_bitpos
);
3355 if (!allow_unaligned_store
)
3356 try_size
= MIN (try_size
, align
);
3357 if (!allow_unaligned_load
)
3359 /* If we can't do or don't want to do unaligned stores
3360 as well as loads, we need to take the loads into account
3362 unsigned HOST_WIDE_INT load_align
= group_load_align
;
3363 align_bitpos
= (try_bitpos
- align_base
) & (load_align
- 1);
3365 load_align
= least_bit_hwi (align_bitpos
);
3366 for (int i
= 0; i
< 2; ++i
)
3367 if (group
->load_align
[i
])
3370 = known_alignment (try_bitpos
3371 - group
->stores
[0]->bitpos
3372 + group
->stores
[0]->ops
[i
].bitpos
3373 - group
->load_align_base
[i
]);
3374 if (align_bitpos
& (group_load_align
- 1))
3376 unsigned HOST_WIDE_INT a
= least_bit_hwi (align_bitpos
);
3377 load_align
= MIN (load_align
, a
);
3380 try_size
= MIN (try_size
, load_align
);
3382 store_immediate_info
*info
3383 = find_constituent_stores (group
, NULL
, &first
, try_bitpos
, try_size
);
3384 if (info
&& !gimple_clobber_p (info
->stmt
))
3386 /* If there is just one original statement for the range, see if
3387 we can just reuse the original store which could be even larger
3389 unsigned HOST_WIDE_INT stmt_end
3390 = ROUND_UP (info
->bitpos
+ info
->bitsize
, BITS_PER_UNIT
);
3391 info
= find_constituent_stores (group
, NULL
, &first
, try_bitpos
,
3392 stmt_end
- try_bitpos
);
3393 if (info
&& info
->bitpos
>= try_bitpos
)
3395 store_immediate_info
*info2
= NULL
;
3396 unsigned int first_copy
= first
;
3397 if (info
->bitpos
> try_bitpos
3398 && stmt_end
- try_bitpos
<= try_size
)
3400 info2
= find_constituent_stores (group
, NULL
, &first_copy
,
3402 info
->bitpos
- try_bitpos
);
3403 gcc_assert (info2
== NULL
|| gimple_clobber_p (info2
->stmt
));
3405 if (info2
== NULL
&& stmt_end
- try_bitpos
< try_size
)
3407 info2
= find_constituent_stores (group
, NULL
, &first_copy
,
3409 (try_bitpos
+ try_size
)
3411 gcc_assert (info2
== NULL
|| gimple_clobber_p (info2
->stmt
));
3415 try_size
= stmt_end
- try_bitpos
;
3422 /* Approximate store bitsize for the case when there are no padding
3424 while (try_size
> size
)
3426 /* Now look for whole padding bytes at the end of that bitsize. */
3427 for (nonmasked
= try_size
/ BITS_PER_UNIT
; nonmasked
> 0; --nonmasked
)
3428 if (group
->mask
[try_pos
- bytepos
+ nonmasked
- 1]
3429 != (unsigned char) ~0U
3431 || group
->val
[try_pos
- bytepos
+ nonmasked
- 1] != 0))
3433 if (nonmasked
== 0 || (info
&& gimple_clobber_p (info
->stmt
)))
3435 /* If entire try_size range is padding, skip it. */
3436 try_pos
+= try_size
/ BITS_PER_UNIT
;
3440 /* Otherwise try to decrease try_size if second half, last 3 quarters
3441 etc. are padding. */
3442 nonmasked
*= BITS_PER_UNIT
;
3443 while (nonmasked
<= try_size
/ 2)
3445 if (!allow_unaligned_store
&& group_align
> BITS_PER_UNIT
)
3447 /* Now look for whole padding bytes at the start of that bitsize. */
3448 unsigned int try_bytesize
= try_size
/ BITS_PER_UNIT
, masked
;
3449 for (masked
= 0; masked
< try_bytesize
; ++masked
)
3450 if (group
->mask
[try_pos
- bytepos
+ masked
] != (unsigned char) ~0U
3452 || group
->val
[try_pos
- bytepos
+ masked
] != 0))
3454 masked
*= BITS_PER_UNIT
;
3455 gcc_assert (masked
< try_size
);
3456 if (masked
>= try_size
/ 2)
3458 while (masked
>= try_size
/ 2)
3461 try_pos
+= try_size
/ BITS_PER_UNIT
;
3465 /* Need to recompute the alignment, so just retry at the new
3477 = new split_store (try_pos
, try_size
, align
);
3478 info
= find_constituent_stores (group
, &store
->orig_stores
,
3479 &first
, try_bitpos
, try_size
);
3481 && !gimple_clobber_p (info
->stmt
)
3482 && info
->bitpos
>= try_bitpos
3483 && info
->bitpos
+ info
->bitsize
<= try_bitpos
+ try_size
3484 && (store
->orig_stores
.length () == 1
3486 || (info
->bitpos
== try_bitpos
3487 && (info
->bitpos
+ info
->bitsize
3488 == try_bitpos
+ try_size
))))
3493 split_stores
->safe_push (store
);
3496 try_pos
+= try_size
/ BITS_PER_UNIT
;
3504 /* If we are reusing some original stores and any of the
3505 original SSA_NAMEs had multiple uses, we need to subtract
3506 those now before we add the new ones. */
3507 if (total_new
[0] && any_orig
)
3509 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
3511 total_new
[0] -= count_multiple_uses (store
->orig_stores
[0]);
3513 total_new
[0] += ret
; /* The new store. */
3514 store_immediate_info
*info
= group
->stores
[0];
3515 if (info
->ops
[0].base_addr
)
3516 total_new
[0] += ret
;
3517 if (info
->ops
[1].base_addr
)
3518 total_new
[0] += ret
;
3519 switch (info
->rhs_code
)
3524 total_new
[0] += ret
; /* The new BIT_*_EXPR stmt. */
3529 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
3532 bool bit_not_p
[3] = { false, false, false };
3533 /* If all orig_stores have certain bit_not_p set, then
3534 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3535 If some orig_stores have certain bit_not_p set, then
3536 we'd use a BIT_XOR_EXPR with a mask and need to account for
3538 FOR_EACH_VEC_ELT (store
->orig_stores
, j
, info
)
3540 if (info
->ops
[0].bit_not_p
)
3541 bit_not_p
[0] = true;
3542 if (info
->ops
[1].bit_not_p
)
3543 bit_not_p
[1] = true;
3544 if (info
->bit_not_p
)
3545 bit_not_p
[2] = true;
3547 total_new
[0] += bit_not_p
[0] + bit_not_p
[1] + bit_not_p
[2];
3555 /* Return the operation through which the operand IDX (if < 2) or
3556 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
3557 is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
3558 the bits should be xored with mask. */
static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
3564 store_immediate_info
*info
;
3565 unsigned int cnt
= 0;
3566 bool any_paddings
= false;
3567 FOR_EACH_VEC_ELT (split_store
->orig_stores
, i
, info
)
3569 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
3573 tree lhs
= gimple_assign_lhs (info
->stmt
);
3574 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3575 && TYPE_PRECISION (TREE_TYPE (lhs
)) < info
->bitsize
)
3576 any_paddings
= true;
3582 if (cnt
== split_store
->orig_stores
.length () && !any_paddings
)
3583 return BIT_NOT_EXPR
;
3585 unsigned HOST_WIDE_INT try_bitpos
= split_store
->bytepos
* BITS_PER_UNIT
;
3586 unsigned buf_size
= split_store
->size
/ BITS_PER_UNIT
;
3588 = XALLOCAVEC (unsigned char, buf_size
);
3589 memset (buf
, ~0U, buf_size
);
3590 FOR_EACH_VEC_ELT (split_store
->orig_stores
, i
, info
)
3592 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
3595 /* Clear regions with bit_not_p and invert afterwards, rather than
3596 clear regions with !bit_not_p, so that gaps in between stores aren't
3598 unsigned HOST_WIDE_INT bitsize
= info
->bitsize
;
3599 unsigned HOST_WIDE_INT prec
= bitsize
;
3600 unsigned int pos_in_buffer
= 0;
3603 tree lhs
= gimple_assign_lhs (info
->stmt
);
3604 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3605 && TYPE_PRECISION (TREE_TYPE (lhs
)) < bitsize
)
3606 prec
= TYPE_PRECISION (TREE_TYPE (lhs
));
3608 if (info
->bitpos
< try_bitpos
)
3610 gcc_assert (info
->bitpos
+ bitsize
> try_bitpos
);
3611 if (!BYTES_BIG_ENDIAN
)
3613 if (prec
<= try_bitpos
- info
->bitpos
)
3615 prec
-= try_bitpos
- info
->bitpos
;
3617 bitsize
-= try_bitpos
- info
->bitpos
;
3618 if (BYTES_BIG_ENDIAN
&& prec
> bitsize
)
3622 pos_in_buffer
= info
->bitpos
- try_bitpos
;
3625 /* If this is a bool inversion, invert just the least significant
3626 prec bits rather than all bits of it. */
3627 if (BYTES_BIG_ENDIAN
)
3629 pos_in_buffer
+= bitsize
- prec
;
3630 if (pos_in_buffer
>= split_store
->size
)
3635 if (pos_in_buffer
+ bitsize
> split_store
->size
)
3636 bitsize
= split_store
->size
- pos_in_buffer
;
3637 unsigned char *p
= buf
+ (pos_in_buffer
/ BITS_PER_UNIT
);
3638 if (BYTES_BIG_ENDIAN
)
3639 clear_bit_region_be (p
, (BITS_PER_UNIT
- 1
3640 - (pos_in_buffer
% BITS_PER_UNIT
)), bitsize
);
3642 clear_bit_region (p
, pos_in_buffer
% BITS_PER_UNIT
, bitsize
);
3644 for (unsigned int i
= 0; i
< buf_size
; ++i
)
3646 mask
= native_interpret_expr (int_type
, buf
, buf_size
);
3647 return BIT_XOR_EXPR
;
/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
3661 split_store
*split_store
;
3663 unsigned HOST_WIDE_INT start_byte_pos
3664 = group
->bitregion_start
/ BITS_PER_UNIT
;
3666 unsigned int orig_num_stmts
= group
->stores
.length ();
3667 if (orig_num_stmts
< 2)
3670 auto_vec
<class split_store
*, 32> split_stores
;
3671 bool allow_unaligned_store
3672 = !STRICT_ALIGNMENT
&& param_store_merging_allow_unaligned
;
3673 bool allow_unaligned_load
= allow_unaligned_store
;
3674 bool bzero_first
= false;
3675 store_immediate_info
*store
;
3676 unsigned int num_clobber_stmts
= 0;
3677 if (group
->stores
[0]->rhs_code
== INTEGER_CST
)
3679 FOR_EACH_VEC_ELT (group
->stores
, i
, store
)
3680 if (gimple_clobber_p (store
->stmt
))
3681 num_clobber_stmts
++;
3682 else if (TREE_CODE (gimple_assign_rhs1 (store
->stmt
)) == CONSTRUCTOR
3683 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store
->stmt
)) == 0
3684 && group
->start
== store
->bitpos
3685 && group
->width
== store
->bitsize
3686 && (group
->start
% BITS_PER_UNIT
) == 0
3687 && (group
->width
% BITS_PER_UNIT
) == 0)
3694 FOR_EACH_VEC_ELT_FROM (group
->stores
, i
, store
, i
)
3695 if (gimple_clobber_p (store
->stmt
))
3696 num_clobber_stmts
++;
3697 if (num_clobber_stmts
== orig_num_stmts
)
3699 orig_num_stmts
-= num_clobber_stmts
;
3701 if (allow_unaligned_store
|| bzero_first
)
3703 /* If unaligned stores are allowed, see how many stores we'd emit
3704 for unaligned and how many stores we'd emit for aligned stores.
3705 Only use unaligned stores if it allows fewer stores than aligned.
3706 Similarly, if there is a whole region clear first, prefer expanding
3707 it together compared to expanding clear first followed by merged
3709 unsigned cnt
[4] = { ~0, ~0, ~0, ~0 };
3711 for (int pass
= 0; pass
< 4; ++pass
)
3713 if (!allow_unaligned_store
&& (pass
& 1) != 0)
3715 if (!bzero_first
&& (pass
& 2) != 0)
3717 cnt
[pass
] = split_group (group
, (pass
& 1) != 0,
3718 allow_unaligned_load
, (pass
& 2) != 0,
3720 if (cnt
[pass
] < cnt
[pass_min
])
3723 if ((pass_min
& 1) == 0)
3724 allow_unaligned_store
= false;
3725 if ((pass_min
& 2) == 0)
3726 bzero_first
= false;
3728 unsigned total_orig
, total_new
;
3729 split_group (group
, allow_unaligned_store
, allow_unaligned_load
, bzero_first
,
3730 &split_stores
, &total_orig
, &total_new
);
3732 /* Determine if there is a clobber covering the whole group at the start,
3733 followed by proposed split stores that cover the whole group. In that
3734 case, prefer the transformation even if
3735 split_stores.length () == orig_num_stmts. */
3736 bool clobber_first
= false;
3737 if (num_clobber_stmts
3738 && gimple_clobber_p (group
->stores
[0]->stmt
)
3739 && group
->start
== group
->stores
[0]->bitpos
3740 && group
->width
== group
->stores
[0]->bitsize
3741 && (group
->start
% BITS_PER_UNIT
) == 0
3742 && (group
->width
% BITS_PER_UNIT
) == 0)
3744 clobber_first
= true;
3745 unsigned HOST_WIDE_INT pos
= group
->start
/ BITS_PER_UNIT
;
3746 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3747 if (split_store
->bytepos
!= pos
)
3749 clobber_first
= false;
3753 pos
+= split_store
->size
/ BITS_PER_UNIT
;
3754 if (pos
!= (group
->start
+ group
->width
) / BITS_PER_UNIT
)
3755 clobber_first
= false;
3758 if (split_stores
.length () >= orig_num_stmts
+ clobber_first
)
3761 /* We didn't manage to reduce the number of statements. Bail out. */
3762 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3763 fprintf (dump_file
, "Exceeded original number of stmts (%u)."
3764 " Not profitable to emit new sequence.\n",
3766 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3770 if (total_orig
<= total_new
)
3772 /* If number of estimated new statements is above estimated original
3773 statements, bail out too. */
3774 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3775 fprintf (dump_file
, "Estimated number of original stmts (%u)"
3776 " not larger than estimated number of new"
3778 total_orig
, total_new
);
3779 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3783 if (group
->stores
[0]->rhs_code
== INTEGER_CST
)
3785 bool all_orig
= true;
3786 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3787 if (!split_store
->orig
)
3794 unsigned int cnt
= split_stores
.length ();
3795 store_immediate_info
*store
;
3796 FOR_EACH_VEC_ELT (group
->stores
, i
, store
)
3797 if (gimple_clobber_p (store
->stmt
))
3799 /* Punt if we wouldn't make any real changes, i.e. keep all
3800 orig stmts + all clobbers. */
3801 if (cnt
== group
->stores
.length ())
3803 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3804 fprintf (dump_file
, "Exceeded original number of stmts (%u)."
3805 " Not profitable to emit new sequence.\n",
3807 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3814 gimple_stmt_iterator last_gsi
= gsi_for_stmt (group
->last_stmt
);
3815 gimple_seq seq
= NULL
;
3816 tree last_vdef
, new_vuse
;
3817 last_vdef
= gimple_vdef (group
->last_stmt
);
3818 new_vuse
= gimple_vuse (group
->last_stmt
);
3819 tree bswap_res
= NULL_TREE
;
3821 /* Clobbers are not removed. */
3822 if (gimple_clobber_p (group
->last_stmt
))
3824 new_vuse
= make_ssa_name (gimple_vop (cfun
), group
->last_stmt
);
3825 gimple_set_vdef (group
->last_stmt
, new_vuse
);
3828 if (group
->stores
[0]->rhs_code
== LROTATE_EXPR
3829 || group
->stores
[0]->rhs_code
== NOP_EXPR
)
3831 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
3832 gimple
*ins_stmt
= group
->stores
[0]->ins_stmt
;
3833 struct symbolic_number
*n
= &group
->stores
[0]->n
;
3834 bool bswap
= group
->stores
[0]->rhs_code
== LROTATE_EXPR
;
3839 load_type
= bswap_type
= uint16_type_node
;
3842 load_type
= uint32_type_node
;
3845 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
3846 bswap_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
3850 load_type
= uint64_type_node
;
3853 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
3854 bswap_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
3861 /* If the loads have each vuse of the corresponding store,
3862 we've checked the aliasing already in try_coalesce_bswap and
3863 we want to sink the need load into seq. So need to use new_vuse
3867 if (n
->vuse
== NULL
)
3873 /* Update vuse in case it has changed by output_merged_stores. */
3874 n
->vuse
= gimple_vuse (ins_stmt
);
3876 bswap_res
= bswap_replace (gsi_start (seq
), ins_stmt
, fndecl
,
3877 bswap_type
, load_type
, n
, bswap
);
3878 gcc_assert (bswap_res
);
3881 gimple
*stmt
= NULL
;
3882 auto_vec
<gimple
*, 32> orig_stmts
;
3883 gimple_seq this_seq
;
3884 tree addr
= force_gimple_operand_1 (unshare_expr (base_addr
), &this_seq
,
3885 is_gimple_mem_ref_addr
, NULL_TREE
);
3886 gimple_seq_add_seq_without_update (&seq
, this_seq
);
3888 tree load_addr
[2] = { NULL_TREE
, NULL_TREE
};
3889 gimple_seq load_seq
[2] = { NULL
, NULL
};
3890 gimple_stmt_iterator load_gsi
[2] = { gsi_none (), gsi_none () };
3891 for (int j
= 0; j
< 2; ++j
)
3893 store_operand_info
&op
= group
->stores
[0]->ops
[j
];
3894 if (op
.base_addr
== NULL_TREE
)
3897 store_immediate_info
*infol
= group
->stores
.last ();
3898 if (gimple_vuse (op
.stmt
) == gimple_vuse (infol
->ops
[j
].stmt
))
3900 /* We can't pick the location randomly; while we've verified
3901 all the loads have the same vuse, they can be still in different
3902 basic blocks and we need to pick the one from the last bb:
3908 otherwise if we put the wider load at the q[0] load, we might
3909 segfault if q[1] is not mapped. */
3910 basic_block bb
= gimple_bb (op
.stmt
);
3911 gimple
*ostmt
= op
.stmt
;
3912 store_immediate_info
*info
;
3913 FOR_EACH_VEC_ELT (group
->stores
, i
, info
)
3915 gimple
*tstmt
= info
->ops
[j
].stmt
;
3916 basic_block tbb
= gimple_bb (tstmt
);
3917 if (dominated_by_p (CDI_DOMINATORS
, tbb
, bb
))
3923 load_gsi
[j
] = gsi_for_stmt (ostmt
);
3925 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
3926 &load_seq
[j
], is_gimple_mem_ref_addr
,
3929 else if (operand_equal_p (base_addr
, op
.base_addr
, 0))
3930 load_addr
[j
] = addr
;
3934 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
3935 &this_seq
, is_gimple_mem_ref_addr
,
3937 gimple_seq_add_seq_without_update (&seq
, this_seq
);
3941 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3943 unsigned HOST_WIDE_INT try_size
= split_store
->size
;
3944 unsigned HOST_WIDE_INT try_pos
= split_store
->bytepos
;
3945 unsigned HOST_WIDE_INT try_bitpos
= try_pos
* BITS_PER_UNIT
;
3946 unsigned HOST_WIDE_INT align
= split_store
->align
;
3949 if (split_store
->orig
)
3951 /* If there is just a single non-clobber constituent store
3952 which covers the whole area, just reuse the lhs and rhs. */
3953 gimple
*orig_stmt
= NULL
;
3954 store_immediate_info
*store
;
3956 FOR_EACH_VEC_ELT (split_store
->orig_stores
, j
, store
)
3957 if (!gimple_clobber_p (store
->stmt
))
3959 orig_stmt
= store
->stmt
;
3962 dest
= gimple_assign_lhs (orig_stmt
);
3963 src
= gimple_assign_rhs1 (orig_stmt
);
3964 loc
= gimple_location (orig_stmt
);
3968 store_immediate_info
*info
;
3969 unsigned short clique
, base
;
3971 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3972 orig_stmts
.safe_push (info
->stmt
);
3974 = get_alias_type_for_stmts (orig_stmts
, false, &clique
, &base
);
3975 loc
= get_location_for_stmts (orig_stmts
);
3976 orig_stmts
.truncate (0);
3978 tree int_type
= build_nonstandard_integer_type (try_size
, UNSIGNED
);
3979 int_type
= build_aligned_type (int_type
, align
);
3980 dest
= fold_build2 (MEM_REF
, int_type
, addr
,
3981 build_int_cst (offset_type
, try_pos
));
3982 if (TREE_CODE (dest
) == MEM_REF
)
3984 MR_DEPENDENCE_CLIQUE (dest
) = clique
;
3985 MR_DEPENDENCE_BASE (dest
) = base
;
3990 mask
= integer_zero_node
;
3992 mask
= native_interpret_expr (int_type
,
3993 group
->mask
+ try_pos
3999 j
< 1 + (split_store
->orig_stores
[0]->ops
[1].val
!= NULL_TREE
);
4002 store_operand_info
&op
= split_store
->orig_stores
[0]->ops
[j
];
4005 else if (op
.base_addr
)
4007 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
4008 orig_stmts
.safe_push (info
->ops
[j
].stmt
);
4010 offset_type
= get_alias_type_for_stmts (orig_stmts
, true,
4012 location_t load_loc
= get_location_for_stmts (orig_stmts
);
4013 orig_stmts
.truncate (0);
4015 unsigned HOST_WIDE_INT load_align
= group
->load_align
[j
];
4016 unsigned HOST_WIDE_INT align_bitpos
4017 = known_alignment (try_bitpos
4018 - split_store
->orig_stores
[0]->bitpos
4020 if (align_bitpos
& (load_align
- 1))
4021 load_align
= least_bit_hwi (align_bitpos
);
4024 = build_nonstandard_integer_type (try_size
, UNSIGNED
);
4026 = build_aligned_type (load_int_type
, load_align
);
4028 poly_uint64 load_pos
4029 = exact_div (try_bitpos
4030 - split_store
->orig_stores
[0]->bitpos
4033 ops
[j
] = fold_build2 (MEM_REF
, load_int_type
, load_addr
[j
],
4034 build_int_cst (offset_type
, load_pos
));
4035 if (TREE_CODE (ops
[j
]) == MEM_REF
)
4037 MR_DEPENDENCE_CLIQUE (ops
[j
]) = clique
;
4038 MR_DEPENDENCE_BASE (ops
[j
]) = base
;
4040 if (!integer_zerop (mask
))
4041 /* The load might load some bits (that will be masked off
4042 later on) uninitialized, avoid -W*uninitialized
4043 warnings in that case. */
4044 TREE_NO_WARNING (ops
[j
]) = 1;
4046 stmt
= gimple_build_assign (make_ssa_name (int_type
),
4048 gimple_set_location (stmt
, load_loc
);
4049 if (gsi_bb (load_gsi
[j
]))
4051 gimple_set_vuse (stmt
, gimple_vuse (op
.stmt
));
4052 gimple_seq_add_stmt_without_update (&load_seq
[j
], stmt
);
4056 gimple_set_vuse (stmt
, new_vuse
);
4057 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4059 ops
[j
] = gimple_assign_lhs (stmt
);
4061 enum tree_code inv_op
4062 = invert_op (split_store
, j
, int_type
, xor_mask
);
4063 if (inv_op
!= NOP_EXPR
)
4065 stmt
= gimple_build_assign (make_ssa_name (int_type
),
4066 inv_op
, ops
[j
], xor_mask
);
4067 gimple_set_location (stmt
, load_loc
);
4068 ops
[j
] = gimple_assign_lhs (stmt
);
4070 if (gsi_bb (load_gsi
[j
]))
4071 gimple_seq_add_stmt_without_update (&load_seq
[j
],
4074 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4078 ops
[j
] = native_interpret_expr (int_type
,
4079 group
->val
+ try_pos
          switch (split_store->orig_stores[0]->rhs_code)
              FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
                  tree rhs1 = gimple_assign_rhs1 (info->stmt);
                  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
              bit_loc = get_location_for_stmts (orig_stmts);
              orig_stmts.truncate (0);

                = gimple_build_assign (make_ssa_name (int_type),
                                       split_store->orig_stores[0]->rhs_code,
              gimple_set_location (stmt, bit_loc);
              /* If there is just one load and there is a separate
                 load_seq[0], emit the bitwise op right after it.  */
              if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
                gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
              /* Otherwise, if at least one load is in seq, we need to
                 emit the bitwise op right before the store.  If there
                 are two loads and they are emitted somewhere else, it would
                 be better to emit the bitwise op as early as possible;
                 we don't track where that would be possible right now
                 though.  */
                gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
          enum tree_code inv_op;
          inv_op = invert_op (split_store, 2, int_type, xor_mask);
          if (inv_op != NOP_EXPR)
              stmt = gimple_build_assign (make_ssa_name (int_type),
                                          inv_op, src, xor_mask);
              gimple_set_location (stmt, bit_loc);
              if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
                gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
                gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);

          if (!is_gimple_val (src))
              stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
              gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
          if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
              stmt = gimple_build_assign (make_ssa_name (int_type),
              gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
          inv_op = invert_op (split_store, 2, int_type, xor_mask);
          if (inv_op != NOP_EXPR)
              stmt = gimple_build_assign (make_ssa_name (int_type),
                                          inv_op, src, xor_mask);
              gimple_set_location (stmt, loc);
              gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
      /* If bit insertion is required, we use the source as an accumulator
         into which the successive bit-field values are manually inserted.
         FIXME: perhaps use BIT_INSERT_EXPR instead in some cases?  */
      if (group->bit_insertion)
        FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
          if (info->rhs_code == BIT_INSERT_EXPR
              && info->bitpos < try_bitpos + try_size
              && info->bitpos + info->bitsize > try_bitpos)
              /* Mask, truncate, convert to final type, shift and ior into
                 the accumulator.  Note that every step can be a no-op.  */
              const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
              const HOST_WIDE_INT end_gap
                = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
              tree tem = info->ops[0].val;
              if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
                  const unsigned HOST_WIDE_INT size
                    = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
                    = build_nonstandard_integer_type (size, UNSIGNED);
                  tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
              if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
                    = build_nonstandard_integer_type (info->bitsize,
                  tem = gimple_convert (&seq, loc, bitfield_type, tem);
              else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
                  const unsigned HOST_WIDE_INT imask
                    = (HOST_WIDE_INT_1U << info->bitsize) - 1;
                  tem = gimple_build (&seq, loc,
                                      BIT_AND_EXPR, TREE_TYPE (tem), tem,
                                      build_int_cst (TREE_TYPE (tem),
              const HOST_WIDE_INT shift
                = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
                  tem = gimple_build (&seq, loc,
                                      RSHIFT_EXPR, TREE_TYPE (tem), tem,
                                      build_int_cst (NULL_TREE, -shift));
              tem = gimple_convert (&seq, loc, int_type, tem);
                  tem = gimple_build (&seq, loc,
                                      LSHIFT_EXPR, int_type, tem,
                                      build_int_cst (NULL_TREE, shift));
              src = gimple_build (&seq, loc,
                                  BIT_IOR_EXPR, int_type, tem, src);
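              /* At this point each overlapping BIT_INSERT_EXPR value has been
                 masked to its width, shifted to its position within the output
                 word and IORed into SRC, which acts as the accumulator for the
                 whole TRY_SIZE-bit value being stored.  */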
      if (!integer_zerop (mask))
          tree tem = make_ssa_name (int_type);
          tree load_src = unshare_expr (dest);
          /* The load might load some or all bits uninitialized,
             avoid -W*uninitialized warnings in that case.
             As optimization, it would be nice if all the bits are
             provably uninitialized (no stores at all yet or previous
             store a CLOBBER) we'd optimize away the load and replace
             it by 0.  */
          TREE_NO_WARNING (load_src) = 1;
          stmt = gimple_build_assign (tem, load_src);
          gimple_set_location (stmt, loc);
          gimple_set_vuse (stmt, new_vuse);
          gimple_seq_add_stmt_without_update (&seq, stmt);

          /* FIXME: If there is a single chunk of zero bits in mask,
             perhaps use BIT_INSERT_EXPR instead?  */
          stmt = gimple_build_assign (make_ssa_name (int_type),
                                      BIT_AND_EXPR, tem, mask);
          gimple_set_location (stmt, loc);
          gimple_seq_add_stmt_without_update (&seq, stmt);
          tem = gimple_assign_lhs (stmt);

          if (TREE_CODE (src) == INTEGER_CST)
            src = wide_int_to_tree (int_type,
                                    wi::bit_and_not (wi::to_wide (src),
                                                     wi::to_wide (mask)));
              = wide_int_to_tree (int_type,
                                  wi::bit_not (wi::to_wide (mask)));
              stmt = gimple_build_assign (make_ssa_name (int_type),
                                          BIT_AND_EXPR, src, nmask);
              gimple_set_location (stmt, loc);
              gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
          stmt = gimple_build_assign (make_ssa_name (int_type),
                                      BIT_IOR_EXPR, tem, src);
          gimple_set_location (stmt, loc);
          gimple_seq_add_stmt_without_update (&seq, stmt);
          src = gimple_assign_lhs (stmt);
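      /* SRC now holds the merged value: the bits not covered by the original
         stores (those set in MASK) were taken from the value loaded from DEST,
         giving the read-modify-write behaviour needed for bit-field regions.  */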
      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);
      if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
        add_stmt_to_eh_lp (stmt, group->lp_nr);

      if (i < split_stores.length () - 1)
        new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
        new_vdef = last_vdef;

      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
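      /* Every emitted store gets a fresh VDEF except for the last one, which
         reuses LAST_VDEF so that the virtual SSA form downstream of the
         original store group stays valid; NEW_VUSE then chains the next
         emitted store onto this VDEF.  */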
  FOR_EACH_VEC_ELT (split_stores, i, split_store)

               "New sequence of %u stores to replace old one of %u stores\n",
               split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
        print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
  if (gimple_clobber_p (group->last_stmt))
    update_stmt (group->last_stmt);

  if (group->lp_nr > 0)
      /* We're going to insert a sequence of (potentially) throwing stores
         into an active EH region.  This means that we're going to create
         new basic blocks with EH edges pointing to the post landing pad
         and, therefore, to have to update its PHI nodes, if any.  For the
         virtual PHI node, we're going to use the VDEFs created above, but
         for the other nodes, we need to record the original reaching defs.  */
      eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
      basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
      basic_block last_bb = gimple_bb (group->last_stmt);
      edge last_edge = find_edge (last_bb, lp_bb);
      auto_vec<tree, 16> last_defs;

      for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
          gphi *phi = gpi.phi ();
          if (virtual_operand_p (gimple_phi_result (phi)))
            last_def = NULL_TREE;
            last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
          last_defs.safe_push (last_def);

      /* Do the insertion.  Then, if new basic blocks have been created in the
         process, rewind the chain of VDEFs created above to walk the new basic
         blocks and update the corresponding arguments of the PHI nodes.  */
      update_modified_stmts (seq);
      if (gimple_find_sub_bbs (seq, &last_gsi))
        while (last_vdef != gimple_vuse (group->last_stmt))
            gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
            if (stmt_could_throw_p (cfun, stmt))
                edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
                for (gpi = gsi_start_phis (lp_bb), i = 0;
                     gsi_next (&gpi), i++)
                    gphi *phi = gpi.phi ();
                    if (virtual_operand_p (gimple_phi_result (phi)))
                      new_def = last_vdef;
                      new_def = last_defs[i];
                    add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
            last_vdef = gimple_vuse (stmt);

  gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);

  for (int j = 0; j < 2; ++j)
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

imm_store_chain_info::output_merged_stores ()
  merged_store_group *merged_store;
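  /* The store_merging debug counter (see dbg_cnt.def, controlled with
     -fdbg-cnt) makes it possible to bisect which groups actually get
     rewritten when debugging this pass.  */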
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
      if (dbg_cnt (store_merging)
          && output_merged_store (merged_store))
          store_immediate_info *store;
          FOR_EACH_VEC_ELT (merged_store->stores, j, store)
              gimple *stmt = store->stmt;
              gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
              /* Don't remove clobbers, they are still useful even if
                 everything is overwritten afterwards.  */
              if (gimple_clobber_p (stmt))
              gsi_remove (&gsi, true);
                remove_stmt_from_eh_lp (stmt);
              if (stmt != merged_store->last_stmt)
                  unlink_stmt_vdef (stmt);
                  release_defs (stmt);

  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

imm_store_chain_info::terminate_and_process_chain ()
  /* Process store chain.  */
  if (m_store_info.length () > 1)
      ret = coalesce_immediate_stores ();
        ret = output_merged_stores ();

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  FOR_EACH_VEC_ELT (m_store_info, i, info)

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

lhs_valid_for_store_merging_p (tree lhs)
  switch (TREE_CODE (lhs))
    case ARRAY_RANGE_REF:

/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

rhs_valid_for_store_merging_p (tree rhs)
  unsigned HOST_WIDE_INT size;
  if (TREE_CODE (rhs) == CONSTRUCTOR
      && CONSTRUCTOR_NELTS (rhs) == 0
      && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))

  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
          && native_encode_expr (rhs, NULL, size) != 0);
/* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
   and return true on success or false on failure.  */
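/* For example, with 8-bit units a BYTE_OFF of 2 moves *PBITPOS (and, when a
   bit region is tracked, *PBITREGION_START and *PBITREGION_END) forward by
   2 << LOG2_BITS_PER_UNIT == 16 bits.  */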
adjust_bit_pos (poly_offset_int byte_off,
                poly_int64 *pbitpos,
                poly_uint64 *pbitregion_start,
                poly_uint64 *pbitregion_end)
  poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
  bit_off += *pbitpos;

  if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
      if (maybe_ne (*pbitregion_end, 0U))
          bit_off = byte_off << LOG2_BITS_PER_UNIT;
          bit_off += *pbitregion_start;
          if (bit_off.to_uhwi (pbitregion_start))
              bit_off = byte_off << LOG2_BITS_PER_UNIT;
              bit_off += *pbitregion_end;
              if (!bit_off.to_uhwi (pbitregion_end))
                *pbitregion_end = 0;

    *pbitregion_end = 0;
/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL, *PBITPOS should be still valid even for that
   case.  */
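/* For instance, if MEM is MEM[(int *)p + 4], the base is canonicalized from
   MEM_REF [p + 4] to MEM_REF [p] and the 4-byte offset is folded into
   *PBITPOS as 4 * BITS_PER_UNIT bits, so that stores based on P end up in
   the same chain (illustrative example).  */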
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
                             poly_uint64 *pbitpos,
                             poly_uint64 *pbitregion_start,
                             poly_uint64 *pbitregion_end)
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
                                        &unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
      if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
                           &bitregion_start, &bitregion_end))
      base_addr = TREE_OPERAND (base_addr, 0);
  /* get_inner_reference returns the base object, get at its
     address now.  */
      if (maybe_lt (bitpos, 0))
      base_addr = build_fold_addr_expr (base_addr);

      /* If the access is variable offset then a base decl has to be
         address-taken to be able to emit pointer-based stores to it.
         ??? We might be able to get away with re-using the original
         base up to the first variable part and then wrapping that inside
         a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))

      /* Similarly to above for the base, remove constant from the offset.  */
      if (TREE_CODE (offset) == PLUS_EXPR
          && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
          && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
                             &bitpos, &bitregion_start, &bitregion_end))
        offset = TREE_OPERAND (offset, 0);

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),

  if (known_eq (bitregion_end, 0U))
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);

  *pbitsize = bitsize;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */
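/* For example, for a store [p] = _2 where _2 = ~_1 and _1 = [q], the load of
   [q] is recorded in *OP with bit_not_p set, provided the load covers the
   same size and a compatible bit region as the store (illustrative).  */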
handled_load (gimple *stmt, store_operand_info *op,
              poly_uint64 bitsize, poly_uint64 bitpos,
              poly_uint64 bitregion_start, poly_uint64 bitregion_end)
  if (!is_gimple_assign (stmt))
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
          && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
                           bitregion_start, bitregion_end))
          /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
             been optimized earlier, but if allowed here, would confuse the
             multiple uses counting.  */
          op->bit_not_p = !op->bit_not_p;

  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
      tree mem = gimple_assign_rhs1 (stmt);
        = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
                                       &op->bitregion_start,
                                       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
          && known_eq (op->bitsize, bitsize)
          && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
          && known_ge (op->bitpos - op->bitregion_start,
                       bitpos - bitregion_start)
          && known_ge (op->bitregion_end - op->bitpos,
                       bitregion_end - bitpos))
          op->bit_not_p = false;
/* Return the index number of the landing pad for STMT, if any.  */

lp_nr_for_store (gimple *stmt)
  if (!cfun->can_throw_non_call_exceptions || !cfun->eh)

  if (!stmt_could_throw_p (cfun, stmt))

  return lookup_stmt_eh_lp (stmt);
/* Record the store STMT for store merging optimization if it can be
   optimized.  Return true if any changes were made.  */

pass_store_merging::process_store (gimple *stmt)
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos = 0;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
                                   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))

  bool invalid = (base_addr == NULL_TREE
                  || (maybe_gt (bitsize,
                                (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
                      && TREE_CODE (rhs) != INTEGER_CST
                      && (TREE_CODE (rhs) != CONSTRUCTOR
                          || CONSTRUCTOR_NELTS (rhs) != 0)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  else if (rhs_valid_for_store_merging_p (rhs))
    rhs_code = INTEGER_CST;
  else if (TREE_CODE (rhs) != SSA_NAME)

  gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
  if (!is_gimple_assign (def_stmt))
  else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
                         bitregion_start, bitregion_end))
  else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
      tree rhs1 = gimple_assign_rhs1 (def_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
          && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
          def_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_code == ERROR_MARK && !invalid)
    switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
        rhs1 = gimple_assign_rhs1 (def_stmt);
        rhs2 = gimple_assign_rhs2 (def_stmt);
        if (TREE_CODE (rhs1) != SSA_NAME)
        def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
        if (!is_gimple_assign (def_stmt1)
            || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
                              bitregion_start, bitregion_end))
        if (rhs_valid_for_store_merging_p (rhs2))
        else if (TREE_CODE (rhs2) != SSA_NAME)
        def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
        if (!is_gimple_assign (def_stmt2))
        else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
                                bitregion_start, bitregion_end))

  unsigned HOST_WIDE_INT const_bitsize;
  if (bitsize.is_constant (&const_bitsize)
      && (const_bitsize % BITS_PER_UNIT) == 0
      && const_bitsize <= 64
      && multiple_p (bitpos, BITS_PER_UNIT))
      ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
          for (unsigned HOST_WIDE_INT i = 0;
               i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
            if ((nn & MARKER_MASK) == 0
                || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
          rhs_code = LROTATE_EXPR;
          ops[0].base_addr = NULL_TREE;
          ops[1].base_addr = NULL_TREE;

      && bitsize.is_constant (&const_bitsize)
      && ((const_bitsize % BITS_PER_UNIT) != 0
          || !multiple_p (bitpos, BITS_PER_UNIT))
      && const_bitsize <= MAX_FIXED_MODE_SIZE)
      /* Bypass a conversion to the bit-field type.  */
          && is_gimple_assign (def_stmt)
          && CONVERT_EXPR_CODE_P (rhs_code))
          tree rhs1 = gimple_assign_rhs1 (def_stmt);
          if (TREE_CODE (rhs1) == SSA_NAME
              && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
      rhs_code = BIT_INSERT_EXPR;
      ops[0].base_addr = NULL_TREE;
      ops[1].base_addr = NULL_TREE;

  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    return terminate_all_aliasing_chains (NULL, stmt);

  memset (&n, 0, sizeof (n));

  class imm_store_chain_info **chain_info = NULL;
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
                                       const_bitregion_start,
                                       const_bitregion_end,
                                       stmt, ord, rhs_code, n, ins_stmt,
                                       bit_not_p, lp_nr_for_store (stmt),
      if (dump_file && (dump_flags & TDF_DETAILS))
          fprintf (dump_file, "Recording immediate store from stmt:\n");
          print_gimple_stmt (dump_file, stmt, 0);
      (*chain_info)->m_store_info.safe_push (info);
      ret |= terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
         process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
          == (unsigned int) param_max_stores_to_merge)
          if (dump_file && (dump_flags & TDF_DETAILS))
                     "Reached maximum number of statements to merge:\n");
          ret |= terminate_and_process_chain (*chain_info);

  /* Store aliases any existing chain?  */
  ret |= terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  class imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
                                   const_bitregion_start,
                                   const_bitregion_end,
                                   stmt, 0, rhs_code, n, ins_stmt,
                                   bit_not_p, lp_nr_for_store (stmt),
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
/* Return true if STMT is a store valid for store merging.  */

store_valid_for_store_merging_p (gimple *stmt)
  return gimple_assign_single_p (stmt)
         && gimple_vdef (stmt)
         && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
         && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));

enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };

/* Return the status of basic block BB wrt store merging.  */
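/* BB_EXTENDED_VALID is used when -fnon-call-exceptions is in effect: a block
   whose last statement is a mergeable store and which falls through to the
   next block may keep its chains open across the block boundary, so that
   stores split across blocks by EH edges can still be merged.  */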
static enum basic_block_status
get_status_for_store_merging (basic_block bb)
  unsigned int num_statements = 0;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))

      if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)

  if (num_statements == 0)

  if (cfun->can_throw_non_call_exceptions && cfun->eh
      && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
      && (e = find_fallthru_edge (bb->succs))
      && e->dest == bb->next_bb)
    return BB_EXTENDED_VALID;

  return num_statements >= 2 ? BB_VALID : BB_INVALID;
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the widened
   variants.  */

pass_store_merging::execute (function *fun)
  hash_set<gimple *> orig_stmts;
  bool changed = false, open_chains = false;

  /* If the function can throw and catch non-call exceptions, we'll be trying
     to merge stores across different basic blocks so we need to first unsplit
     the EH edges in order to streamline the CFG of the function.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh)
    unsplit_eh_edges ();

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
      const basic_block_status bb_status = get_status_for_store_merging (bb);
      gimple_stmt_iterator gsi;

      if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
          changed |= terminate_and_process_all_chains ();
          open_chains = false;

      if (bb_status == BB_INVALID)

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          gimple *stmt = gsi_stmt (gsi);

          if (is_gimple_debug (stmt))

          if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
              /* Terminate all chains.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Volatile access terminates "
              changed |= terminate_and_process_all_chains ();
              open_chains = false;

          if (store_valid_for_store_merging_p (stmt))
            changed |= process_store (stmt);
            changed |= terminate_all_aliasing_chains (NULL, stmt);

      if (bb_status == BB_EXTENDED_VALID)
      changed |= terminate_and_process_all_chains ();
      open_chains = false;

  changed |= terminate_and_process_all_chains ();

  /* If the function can throw and catch non-call exceptions and something
     changed during the pass, then the CFG has (very likely) changed too.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
      free_dominance_info (CDI_DOMINATORS);
      return TODO_cleanup_cfg;

/* Construct and return a store merging pass object.  */

make_pass_store_merging (gcc::context *ctxt)
  return new pass_store_merging (ctxt);
namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
  for (unsigned int i = 0; i < n; i++)
          fprintf (stderr, "Arrays do not match.  X:\n");
          dump_char_array (stderr, x, n);
          fprintf (stderr, "Y:\n");
          dump_char_array (stderr, y, n);
      ASSERT_EQ (x[i], y[i]);

/* Test shift_bytes_in_array_left and that it carries bits across between
   bytes correctly.  */

verify_shift_bytes_in_array_left (void)
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array_left (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));
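  /* I.e. with in[0] as the least significant byte the array holds 0x1fe0;
     shifting left by 2 bits yields 0x7f80, which is bytes { 0x80, 0x7f }.  */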
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));

/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

verify_shift_bytes_in_array_right (void)
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));
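  /* I.e. with in[0] as the most significant byte the array holds 0x1fe0;
     shifting right by 2 bits yields 0x07f8, which is bytes { 0x07, 0xf8 }.  */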
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));

/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

verify_clear_bit_region (void)
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
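  /* 3 * BITS_PER_UNIT - 2 == 22 bits are cleared starting at bit 1, so of the
     24 bits only the very first and the very last one remain set.  */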
  verify_array_eq (in, expected, sizeof in);

/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

verify_clear_bit_region_be (void)
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
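  /* In the big-endian variant bit numbering starts from the most significant
     bit of in[0], so clearing 3 * BITS_PER_UNIT - 2 bits from bit
     BITS_PER_UNIT - 2 again leaves only the outermost two of the 24 bits set.  */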
  verify_array_eq (in, expected, sizeof in);

/* Run all of the selftests within this file.  */

store_merging_c_tests (void)
  verify_shift_bytes_in_array_left ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();

} // namespace selftest
#endif /* CHECKING_P */