/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, or bitwise operations on
   those, to consecutive memory locations into fewer wider stores.
   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 // immediates concatenated according to
				 endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap, this can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap, this can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from expressions we can handle.  For bit-fields we also record
   the surrounding bit region, i.e. bits that could be stored in a
   read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be an immediate constant
   or a memory load, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in store_operand_info structs.

   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores the stores themselves
   don't need to be contiguous, just their surrounding bit regions have to
   be.  If the expression being stored is different between adjacent stores,
   such as one store storing a constant and the following one storing a value
   loaded from memory, or if the loaded memory objects are not adjacent, a
   new merged_store_group is created as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   this phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].

   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS rules.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little-endian we emit:
   [p]      (32-bit) := 0x56781234; // val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */
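
/* As a concrete illustration (hypothetical user code, not part of this
   file), a function such as

     struct S { unsigned char a, b, c, d; };

     void
     init (struct S *s)
     {
       s->a = 1;
       s->b = 2;
       s->c = 3;
       s->d = 4;
     }

   would typically be rewritten by this pass into the equivalent of a
   single 32-bit store of 0x04030201 on a little-endian target
   (0x01020304 on big-endian), provided the target's alignment rules
   allow it.  */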

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "params.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "stor-layout.h"
#include "timevar.h"
#include "tree-cfg.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address, as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number N_OPS of expressions bitwise ORed together, representing an
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
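
/* For example, for a 4-byte symbolic number both constants are first
   narrowed down to four markers (see find_bswap_or_nop_finalize below):
   CMPNOP becomes 0x04030201, the pattern of an identity load, and
   CMPXCHG becomes 0x01020304, the pattern of a full 32-bit byte swap.  */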

/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
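
/* For instance, on a 4-byte symbolic number holding the identity
   pattern 0x04030201, do_shift_rotate (LSHIFT_EXPR, n, 8) produces
   0x03020100: each marker moves up one byte position and the vacated
   low byte becomes 0 (a known-zero target byte), mirroring the effect
   of "x << 8" on the underlying value.  */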

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1
     and the byte size of rhs1.  The highest order byte is set to the byte
     size of rhs1 and the lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
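
/* E.g. for a 16-bit source, the symbolic number is initialized to
   0x0201: marker 1 in the least significant byte and marker 2 in the
   most significant one, with range 2 and n_ops 1.  */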

/* Check if STMT might be a byte swap or a nop from a memory source and
   return the answer.  If so, REF is that memory source and the base of the
   memory area accessed and the offset of the access from that base are
   recorded in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
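
/* As a rough illustration (a sketch of a simple case, not an exhaustive
   description): for REF == a[3], with "a" a char array, N ends up with
   BASE_ADDR == &a, OFFSET == NULL_TREE and BYTEPOS == 3, together with
   the alias pointer type and VUSE of the load.  */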

/* Compute the symbolic number N representing the result of a bitwise OR on
   the two symbolic numbers N1 and N2 whose source statements are
   respectively SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}
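
/* A sketch of a merge on a little-endian target: if N1 describes
   (uint32_t) a[0] (markers 0x01, bytepos 0) and N2 describes
   (uint32_t) a[1] << 8 (markers 0x0100, bytepos 1), the markers of the
   number starting at the higher address are re-weighted by the byte
   distance between the two loads (inc == 1), turning 0x0100 into
   0x0200, and the final OR yields 0x0201 with range 2, i.e. the
   pattern of an identity (nop) 16-bit load.  */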

/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand, or NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}
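
/* E.g. for a 2-byte range this leaves *cmpnop == 0x0201 and
   *cmpxchg == 0x0102: a symbolic number equal to the former describes a
   native-endian load, one equal to the latter a 16-bit byte swap.  */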

/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    return NULL;

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}
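
/* A typical pattern this function recognizes (hypothetical user code,
   assuming <stdint.h>):

     uint32_t
     swap32 (uint32_t x)
     {
       return ((x & 0x000000ff) << 24)
	      | ((x & 0x0000ff00) << 8)
	      | ((x & 0x00ff0000) >> 8)
	      | ((x & 0xff000000) >> 24);
     }

   Each masked shift yields a symbolic number with a single live marker;
   the ORs merge them into 0x01020304, which matches CMPXCHG (masked to
   four bytes) and so sets *BSWAP to true.  */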

const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or, if that is NULL, add the computation instead of
   replacing one) by an equivalent bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr
   (non-null if a load is needed) and BSWAP.  The type, VUSE and alias-set of
   the load to perform are also given in N while the builtin bswap invocation
   is given in FNDECL.  Finally, if a load is involved, INS_STMT refers to
   one of the load statements involved to construct the rhs in
   gsi_stmt (GSI) and N->range gives the size of the rhs expression for
   maintaining some statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE, which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the loads of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  g = gimple_build_assign (tgt, NOP_EXPR, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, 0);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are considered as such, as rotation of 2N bit values by N bits
     is generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, 0);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or a
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and
	     gsi must not point to it if that's the case.  Moving the
	     gsi_prev here makes sure that gsi points to the statement
	     previous to cur_stmt while still making sure that all statements
	     are considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

namespace {

/* Struct recording one operand for the store, which is either a constant,
   in which case VAL represents the constant and all the other fields are
   zero, or a memory load, in which case VAL represents the reference,
   BASE_ADDR is non-NULL and the other fields also reflect the memory
   load.  */

struct store_operand_info
{
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}

/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

struct store_immediate_info
{
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy or
     BIT_*_EXPR for logical bitwise operation.
     LROTATE_EXPR if it can only be bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			gimple *, unsigned int, enum tree_code,
			struct symbolic_number &, gimple *, bool,
			const store_operand_info &,
			const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
					    unsigned HOST_WIDE_INT bp,
					    unsigned HOST_WIDE_INT brs,
					    unsigned HOST_WIDE_INT bre,
					    gimple *st,
					    unsigned int ord,
					    enum tree_code rhscode,
					    struct symbolic_number &nr,
					    gimple *ins_stmtp,
					    bool bitnotp,
					    const store_operand_info &op0r,
					    const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
#if __cplusplus >= 201103L
    , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif
f663d9ad KT |
1399 | |
1400 | /* Struct representing a group of stores to contiguous memory locations. | |
1401 | These are produced by the second phase (coalescing) and consumed in the | |
1402 | third phase that outputs the widened stores. */ | |
1403 | ||
1404 | struct merged_store_group | |
1405 | { | |
1406 | unsigned HOST_WIDE_INT start; | |
1407 | unsigned HOST_WIDE_INT width; | |
a62b3dc5 JJ |
1408 | unsigned HOST_WIDE_INT bitregion_start; |
1409 | unsigned HOST_WIDE_INT bitregion_end; | |
1410 | /* The size of the allocated memory for val and mask. */ | |
f663d9ad | 1411 | unsigned HOST_WIDE_INT buf_size; |
a62b3dc5 | 1412 | unsigned HOST_WIDE_INT align_base; |
8a91d545 | 1413 | poly_uint64 load_align_base[2]; |
f663d9ad KT |
1414 | |
1415 | unsigned int align; | |
245f6de1 | 1416 | unsigned int load_align[2]; |
f663d9ad KT |
1417 | unsigned int first_order; |
1418 | unsigned int last_order; | |
1419 | ||
a62b3dc5 | 1420 | auto_vec<store_immediate_info *> stores; |
f663d9ad KT |
1421 | /* We record the first and last original statements in the sequence because |
1422 | we'll need their vuse/vdef and replacement position. It's easier to keep | |
1423 | track of them separately as 'stores' is reordered by apply_stores. */ | |
1424 | gimple *last_stmt; | |
1425 | gimple *first_stmt; | |
1426 | unsigned char *val; | |
a62b3dc5 | 1427 | unsigned char *mask; |
f663d9ad KT |
1428 | |
1429 | merged_store_group (store_immediate_info *); | |
1430 | ~merged_store_group (); | |
1431 | void merge_into (store_immediate_info *); | |
1432 | void merge_overlapping (store_immediate_info *); | |
1433 | bool apply_stores (); | |
a62b3dc5 JJ |
1434 | private: |
1435 | void do_merge (store_immediate_info *); | |
f663d9ad KT |
1436 | }; |
1437 | ||
1438 | /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */ | |
1439 | ||
1440 | static void | |
1441 | dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len) | |
1442 | { | |
1443 | if (!fd) | |
1444 | return; | |
1445 | ||
1446 | for (unsigned int i = 0; i < len; i++) | |
1447 | fprintf (fd, "%x ", ptr[i]); | |
1448 | fprintf (fd, "\n"); | |
1449 | } | |
1450 | ||
f663d9ad KT |
1451 | /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the |
1452 | bits between adjacent elements. AMNT should be within | |
1453 | [0, BITS_PER_UNIT). | |
1454 | Example, AMNT = 2: | |
1455 | 00011111|11100000 << 2 = 01111111|10000000 | |
1456 | PTR[1] | PTR[0] PTR[1] | PTR[0]. */ | |
1457 | ||
1458 | static void | |
1459 | shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt) | |
1460 | { | |
1461 | if (amnt == 0) | |
1462 | return; | |
1463 | ||
1464 | unsigned char carry_over = 0U; | |
46a61395 | 1465 | unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt); |
f663d9ad KT |
1466 | unsigned char clear_mask = (~0U) << amnt; |
1467 | ||
1468 | for (unsigned int i = 0; i < sz; i++) | |
1469 | { | |
1470 | unsigned prev_carry_over = carry_over; | |
46a61395 | 1471 | carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt); |
f663d9ad KT |
1472 | |
1473 | ptr[i] <<= amnt; | |
1474 | if (i != 0) | |
1475 | { | |
1476 | ptr[i] &= clear_mask; | |
1477 | ptr[i] |= prev_carry_over; | |
1478 | } | |
1479 | } | |
1480 | } | |
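
/* For example, on a host with 8-bit bytes, shifting the two-byte buffer
   { 0xe0, 0x1f } left by two bits carries the top bits of PTR[0] into
   PTR[1]:
     unsigned char buf[2] = { 0xe0, 0x1f };  00011111|11100000  PTR[1]|PTR[0]
     shift_bytes_in_array (buf, 2, 2);       01111111|10000000
   leaving buf equal to { 0x80, 0x7f }.  */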

/* Like shift_bytes_in_array but for big-endian.
   Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
   00011111|11100000 >> 2 = 00000111|11111000
   PTR[0]  | PTR[1]         PTR[0]  | PTR[1].  */

static void
shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
                            unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = ~(~0U << amnt);

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      carry_over = ptr[i] & carry_mask;

      carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
      ptr[i] >>= amnt;
      ptr[i] |= prev_carry_over;
    }
}
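
/* For example, shifting the big-endian two-byte buffer { 0x1f, 0xe0 }
   right by two bits carries the low bits of PTR[0] into the top of PTR[1]:
     unsigned char buf[2] = { 0x1f, 0xe0 };    00011111|11100000  PTR[0]|PTR[1]
     shift_bytes_in_array_right (buf, 2, 2);   00000111|11111000
   leaving buf equal to { 0x07, 0xf8 }.  */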

/* Clear out LEN bits starting from bit START in the byte array
   PTR.  This clears the bits to the *right* from START.
   START must be within [0, BITS_PER_UNIT) and counts starting from
   the least significant bit.  */

static void
clear_bit_region_be (unsigned char *ptr, unsigned int start,
                     unsigned int len)
{
  if (len == 0)
    return;
  /* Clear len bits to the right of start.  */
  else if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len));
      mask = mask << (start + 1U - len);
      ptr[0] &= ~mask;
    }
  else if (start != BITS_PER_UNIT - 1)
    {
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
                           len - (start % BITS_PER_UNIT) - 1);
    }
  else if (start == BITS_PER_UNIT - 1
           && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
        clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
                             len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
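
/* For example, clearing LEN = 3 bits at START = 5 within a single byte
   masks out bits 5, 4 and 3 (counting from the least significant bit):
     unsigned char b = 0xff;            11111111
     clear_bit_region_be (&b, 5, 3);    11000111
   leaving b equal to 0xc7.  */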

/* In the byte array PTR clear the bit region starting at bit
   START that is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */

static void
clear_bit_region (unsigned char *ptr, unsigned int start,
                  unsigned int len)
{
  /* Degenerate base case.  */
  if (len == 0)
    return;
  else if (start >= BITS_PER_UNIT)
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
  /* Second base case.  */
  else if ((start + len) <= BITS_PER_UNIT)
    {
      unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
      mask >>= BITS_PER_UNIT - (start + len);

      ptr[0] &= ~mask;

      return;
    }
  /* Clear most significant bits in a byte and proceed with the next byte.  */
  else if (start != 0)
    {
      clear_bit_region (ptr, start, BITS_PER_UNIT - start);
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
    }
  /* Whole bytes need to be cleared.  */
  else if (start == 0 && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      /* We could recurse on each byte but we clear whole bytes, so a simple
         memset will do.  */
      memset (ptr, '\0', nbytes);
      /* Clear the remaining sub-byte region if there is one.  */
      if (len % BITS_PER_UNIT != 0)
        clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
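
/* For example, clearing a 4-bit region at START = 6 straddles a byte
   boundary and recurses once per byte:
     unsigned char buf[2] = { 0xff, 0xff };
     clear_bit_region (buf, 6, 4);
   clears bits 6-7 of buf[0] and bits 0-1 of buf[1], leaving buf equal
   to { 0x3f, 0xfc }.  */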

/* Write BITLEN bits of EXPR to the byte array PTR at
   bit position BITPOS.  PTR should contain TOTAL_BYTES elements.
   Return true if the operation succeeded.  */

static bool
encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
                       unsigned int total_bytes)
{
  unsigned int first_byte = bitpos / BITS_PER_UNIT;
  tree tmp_int = expr;
  bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
                        || (bitpos % BITS_PER_UNIT)
                        || !int_mode_for_size (bitlen, 0).exists ());

  if (!sub_byte_op_p)
    return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0;

  /* LITTLE-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
       |--------|--------|--------| ptr + first_byte
                        ^              ^
                        xxx xxxxxxxx xxx< bp>
                        |______EXPR____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer by 'bp' (carrying the bits over as necessary).
     |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
                                          <------bitlen---->< bp>
     Then we clear the destination bits:
     |---00000|00000000|000-----| ptr + first_byte
     <-------bitlen--->< bp>

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.

     BIG-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
       ptr + first_byte |--------|--------|--------|
                                ^              ^
                        <bp >xxx xxxxxxxx xxx
                            |_____EXPR_____|

     First native_encode_expr EXPR into a temporary buffer and shift the
     bytes in the buffer to the right (carrying the bits over as necessary).
     We shift by as much as needed to align the most significant bit of EXPR
     with bitpos:
     |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
        <---bitlen---->          <bp ><-----bitlen----->
     Then we clear the destination bits:
       ptr + first_byte |-----000||00000000||00000---|
                        <bp ><-------bitlen----->

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
       ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
     The awkwardness comes from the fact that bitpos is counted from the
     most significant bit of a byte.  */

  /* We must be dealing with fixed-size data at this point, since the
     total size is also fixed.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
  /* Allocate an extra byte so that we have space to shift into.  */
  unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
  unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
  memset (tmpbuf, '\0', byte_size);
  /* The store detection code should only have allowed constants that are
     accepted by native_encode_expr.  */
  if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
    gcc_unreachable ();

  /* The native_encode_expr machinery uses TYPE_MODE to determine how many
     bytes to write.  This means it can write more than
     ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
     write 8 bytes for a bitlen of 40).  Skip the bytes that are not within
     bitlen and zero out the bits that are not relevant as well (that may
     contain a sign bit due to sign-extension).  */
  unsigned int padding
    = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    tmpbuf += padding;

  byte_size -= padding;

  if (bitlen % BITS_PER_UNIT != 0)
    {
      if (BYTES_BIG_ENDIAN)
        clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
                             BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
      else
        clear_bit_region (tmpbuf, bitlen,
                          byte_size * BITS_PER_UNIT - bitlen);
    }
  /* Left shifting relies on the last byte being clear if bitlen is
     a multiple of BITS_PER_UNIT, which might not be clear if
     there are padding bytes.  */
  else if (!BYTES_BIG_ENDIAN)
    tmpbuf[byte_size - 1] = '\0';

  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted.  */
  if (BYTES_BIG_ENDIAN)
    clear_bit_region_be (ptr + first_byte,
                         BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
  else
    clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);

  int shift_amnt;
  int bitlen_mod = bitlen % BITS_PER_UNIT;
  int bitpos_mod = bitpos % BITS_PER_UNIT;

  bool skip_byte = false;
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS and BITLEN are exactly aligned and no shifting
         is necessary.  */
      if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
          || (bitpos_mod == 0 && bitlen_mod == 0))
        shift_amnt = 0;
      /* |. . . . . . . .|
          <bp >   <blen >.
         We always shift right for BYTES_BIG_ENDIAN so shift the beginning
         of the value until it aligns with 'bp' in the next byte over.  */
      else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
        {
          shift_amnt = bitlen_mod + bitpos_mod;
          skip_byte = bitlen_mod != 0;
        }
      /* |. . . . . . . .|
          <----bp--->
             <---blen---->.
         Shift the value right within the same byte so it aligns with 'bp'.  */
      else
        shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
    }
  else
    shift_amnt = bitpos % BITS_PER_UNIT;

  /* Create the shifted version of EXPR.  */
  if (!BYTES_BIG_ENDIAN)
    {
      shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
      if (shift_amnt == 0)
        byte_size--;
    }
  else
    {
      gcc_assert (BYTES_BIG_ENDIAN);
      shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
      /* If shifting right forced us to move into the next byte skip the now
         empty byte.  */
      if (skip_byte)
        {
          tmpbuf++;
          byte_size--;
        }
    }

  /* Insert the bits from TMPBUF.  */
  for (unsigned int i = 0; i < byte_size; i++)
    ptr[first_byte + i] |= tmpbuf[i];

  return true;
}
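
/* For example, on a little-endian target, writing a 4-bit constant at
   BITPOS = 10 into a zeroed buffer operates on byte 1
   (first_byte = 10 / BITS_PER_UNIT) with an in-byte shift of 2
   (10 % BITS_PER_UNIT): the four value bits end up in bits 2-5 of ptr[1]
   and all other bits of the buffer are left untouched.  */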

/* Sorting function for store_immediate_info objects.
   Sorts them by bitposition, breaking ties by their order field.  */

static int
sort_by_bitpos (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->bitpos < (*tmp2)->bitpos)
    return -1;
  else if ((*tmp)->bitpos > (*tmp2)->bitpos)
    return 1;
  else
    /* If they are the same let's use the order which is guaranteed to
       be different.  */
    return (*tmp)->order - (*tmp2)->order;
}
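
/* For example, stores recorded as { bitpos = 8, order = 1 } and
   { bitpos = 0, order = 0 } sort with bitpos 0 first; two stores to the
   same bit position fall back to their order fields, which are unique
   within a chain, so the comparison is a total order.  */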

/* Sorting function for store_immediate_info objects.
   Sorts them by the order field.  */

static int
sort_by_order (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->order < (*tmp2)->order)
    return -1;
  else if ((*tmp)->order > (*tmp2)->order)
    return 1;

  gcc_unreachable ();
}

/* Initialize a merged_store_group object from a store_immediate_info
   object.  */

merged_store_group::merged_store_group (store_immediate_info *info)
{
  start = info->bitpos;
  width = info->bitsize;
  bitregion_start = info->bitregion_start;
  bitregion_end = info->bitregion_end;
  /* VAL has memory allocated for it in apply_stores once the group
     width has been finalized.  */
  val = NULL;
  mask = NULL;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
                          &align, &align_bitpos);
  align_base = start - align_bitpos;
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (op.base_addr == NULL_TREE)
        {
          load_align[i] = 0;
          load_align_base[i] = 0;
        }
      else
        {
          get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
          load_align_base[i] = op.bitpos - align_bitpos;
        }
    }
  stores.create (1);
  stores.safe_push (info);
  last_stmt = info->stmt;
  last_order = info->order;
  first_stmt = last_stmt;
  first_order = last_order;
  buf_size = 0;
}

merged_store_group::~merged_store_group ()
{
  if (val)
    XDELETEVEC (val);
}

/* Helper method for merge_into and merge_overlapping to do
   the common part.  */
void
merged_store_group::do_merge (store_immediate_info *info)
{
  bitregion_start = MIN (bitregion_start, info->bitregion_start);
  bitregion_end = MAX (bitregion_end, info->bitregion_end);

  unsigned int this_align;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
                          &this_align, &align_bitpos);
  if (this_align > align)
    {
      align = this_align;
      align_base = info->bitpos - align_bitpos;
    }
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (!op.base_addr)
        continue;

      get_object_alignment_1 (op.val, &this_align, &align_bitpos);
      if (this_align > load_align[i])
        {
          load_align[i] = this_align;
          load_align_base[i] = op.bitpos - align_bitpos;
        }
    }

  gimple *stmt = info->stmt;
  stores.safe_push (info);
  if (info->order > last_order)
    {
      last_order = info->order;
      last_stmt = stmt;
    }
  else if (info->order < first_order)
    {
      first_order = info->order;
      first_stmt = stmt;
    }
}

/* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores.  */

void
merged_store_group::merge_into (store_immediate_info *info)
{
  unsigned HOST_WIDE_INT wid = info->bitsize;
  /* Make sure we're inserting in the position we think we're inserting.  */
  gcc_assert (info->bitpos >= start + width
              && info->bitregion_start <= bitregion_end);

  width += wid;
  do_merge (info);
}

/* Merge a store described by INFO into this merged store.
   INFO overlaps in some way with the current store (i.e. it's not contiguous
   which is handled by merged_store_group::merge_into).  */

void
merged_store_group::merge_overlapping (store_immediate_info *info)
{
  /* If the store extends the size of the group, extend the width.  */
  if (info->bitpos + info->bitsize > start + width)
    width += info->bitpos + info->bitsize - (start + width);

  do_merge (info);
}

/* Go through all the recorded stores in this group in program order and
   apply their values to the VAL byte array to create the final merged
   value.  Return true if the operation succeeded.  */

bool
merged_store_group::apply_stores ()
{
  /* Make sure we have more than one store in the group, otherwise there is
     nothing to merge, and that the bit region starts and ends on byte
     boundaries.  */
  if (bitregion_start % BITS_PER_UNIT != 0
      || bitregion_end % BITS_PER_UNIT != 0
      || stores.length () == 1)
    return false;

  stores.qsort (sort_by_order);
  store_immediate_info *info;
  unsigned int i;
  /* Create a buffer of a size that is 2 times the number of bytes we're
     storing.  That way native_encode_expr can write power-of-2-sized
     chunks without overrunning.  */
  buf_size = 2 * ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
  val = XNEWVEC (unsigned char, 2 * buf_size);
  mask = val + buf_size;
  memset (val, 0, buf_size);
  memset (mask, ~0U, buf_size);

  FOR_EACH_VEC_ELT (stores, i, info)
    {
      unsigned int pos_in_buffer = info->bitpos - bitregion_start;
      tree cst = NULL_TREE;
      if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
        cst = info->ops[0].val;
      else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
        cst = info->ops[1].val;
      bool ret = true;
      if (cst)
        ret = encode_tree_to_bitpos (cst, val, info->bitsize,
                                     pos_in_buffer, buf_size);
      if (cst && dump_file && (dump_flags & TDF_DETAILS))
        {
          if (ret)
            {
              fprintf (dump_file, "After writing ");
              print_generic_expr (dump_file, cst, 0);
              fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
                       " at position %d the merged region contains:\n",
                       info->bitsize, pos_in_buffer);
              dump_char_array (dump_file, val, buf_size);
            }
          else
            fprintf (dump_file, "Failed to merge stores\n");
        }
      if (!ret)
        return false;
      unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
        clear_bit_region_be (m, (BITS_PER_UNIT - 1
                                 - (pos_in_buffer % BITS_PER_UNIT)),
                             info->bitsize);
      else
        clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
    }
  stores.qsort (sort_by_bitpos);
  return true;
}
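
/* For example, on a little-endian target a group holding the two constant
   byte stores p[0] = 0x12; p[1] = 0x34; over a byte-aligned two-byte bit
   region ends up with val = { 0x12, 0x34 } and mask = { 0x00, 0x00 };
   mask bytes stay 0xff only where no recorded store wrote anything.  */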

/* Structure describing the store chain.  */

struct imm_store_chain_info
{
  /* Doubly-linked list that imposes an order on chain processing.
     PNXP (prev's next pointer) points to the head of a list, or to
     the next field in the previous chain in the list.
     See pass_store_merging::m_stores_head for more rationale.  */
  imm_store_chain_info *next, **pnxp;
  tree base_addr;
  auto_vec<store_immediate_info *> m_store_info;
  auto_vec<merged_store_group *> m_merged_store_groups;

  imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
  : next (inspt), pnxp (&inspt), base_addr (b_a)
  {
    inspt = this;
    if (next)
      {
        gcc_checking_assert (pnxp == next->pnxp);
        next->pnxp = &next;
      }
  }
  ~imm_store_chain_info ()
  {
    *pnxp = next;
    if (next)
      {
        gcc_checking_assert (&next == next->pnxp);
        next->pnxp = pnxp;
      }
  }
  bool terminate_and_process_chain ();
  bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
  bool coalesce_immediate_stores ();
  bool output_merged_store (merged_store_group *);
  bool output_merged_stores ();
};

const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,        /* properties_required */
  0,               /* properties_provided */
  0,               /* properties_destroyed */
  0,               /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endianness, nor for insane hosts
     or target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
           && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN
           && CHAR_BIT == 8
           && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  void process_store (gimple *);
  bool terminate_and_process_all_chains ();
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_release_chain (imm_store_chain_info *);
}; // class pass_store_merging

/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_release_chain (m_stores_head);
  gcc_assert (m_stores.elements () == 0);
  gcc_assert (m_stores_head == NULL);

  return ret;
}

/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
                                                     **chain_info,
                                                   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
         chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
        continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
        {
          tree lhs = gimple_assign_lhs (info->stmt);
          if (ref_maybe_used_by_stmt_p (stmt, lhs)
              || stmt_may_clobber_ref_p (stmt, lhs)
              || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "stmt causes chain termination:\n");
                  print_gimple_stmt (dump_file, stmt, 0);
                }
              terminate_and_release_chain (cur);
              ret = true;
              break;
            }
        }
    }

  return ret;
}

/* Helper function.  Terminate the chain recorded in CHAIN_INFO, which
   stores to a single base object.  Return true if the merging and output
   was successful.  The m_stores entry is removed after the processing in
   any case.  */

bool
pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}

/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must be in the same basic block and
   have non-NULL vdef.  We want to be able to sink load of REF across
   stores between FIRST and LAST, up to right before LAST.  */

bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
        return true;
      if (gimple_store_p (stmt)
          && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
        return true;
      /* Avoid quadratic compile time by bounding the number of checks
         we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
        return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);
  return false;
}
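
/* The loop above walks the virtual use-def (vdef/vuse) chain backwards
   from LAST towards FIRST, visiting one statement per virtual definition,
   and answers true conservatively as soon as it sees a possible clobber
   of REF, a store REF is anti-dependent on, or once the
   MAX_STORE_ALIAS_CHECKS budget is exhausted.  */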

/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

bool
compatible_load_p (merged_store_group *merged_store,
                   store_immediate_info *info,
                   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
                   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
                           infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
          && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the stmts in the new range clobber any of the earlier
     processed loads.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
        if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
          return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
        if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
          return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}

/* Add all refs loaded to compute VAL to REFS vector.  */

void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}

/* Return true if m_store_info[FIRST] and at least one following store
   together form a group storing a TRY_SIZE-bit value which is byte-swapped
   from a memory load or some other value, or is that value unchanged.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
                                          unsigned int first,
                                          unsigned int try_size)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
  for (unsigned int i = first + 1; i < len; ++i)
    {
      if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
          || m_store_info[i]->ins_stmt == NULL)
        return false;
      width += m_store_info[i]->bitsize;
      if (width >= try_size)
        {
          last = i;
          break;
        }
    }
  if (width != try_size)
    return false;

  bool allow_unaligned
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  /* Punt if the combined store would not be aligned and we need alignment.  */
  if (!allow_unaligned)
    {
      unsigned int align = merged_store->align;
      unsigned HOST_WIDE_INT align_base = merged_store->align_base;
      for (unsigned int i = first + 1; i <= last; ++i)
        {
          unsigned int this_align;
          unsigned HOST_WIDE_INT align_bitpos = 0;
          get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
                                  &this_align, &align_bitpos);
          if (this_align > align)
            {
              align = this_align;
              align_base = m_store_info[i]->bitpos - align_bitpos;
            }
        }
      unsigned HOST_WIDE_INT align_bitpos
        = (m_store_info[first]->bitpos - align_base) & (align - 1);
      if (align_bitpos)
        align = least_bit_hwi (align_bitpos);
      if (align < try_size)
        return false;
    }

  tree type;
  switch (try_size)
    {
    case 16: type = uint16_type_node; break;
    case 32: type = uint32_type_node; break;
    case 64: type = uint64_type_node; break;
    default: gcc_unreachable ();
    }
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  int vuse_store = -1;
  unsigned int first_order = merged_store->first_order;
  unsigned int last_order = merged_store->last_order;
  gimple *first_stmt = merged_store->first_stmt;
  gimple *last_stmt = merged_store->last_stmt;
  store_immediate_info *infof = m_store_info[first];

  for (unsigned int i = first; i <= last; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      struct symbolic_number this_n = info->n;
      this_n.type = type;
      if (!this_n.base_addr)
        this_n.range = try_size / BITS_PER_UNIT;
      else
        /* Update vuse in case it has changed by output_merged_stores.  */
        this_n.vuse = gimple_vuse (info->ins_stmt);
      unsigned int bitpos = info->bitpos - infof->bitpos;
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
                            BYTES_BIG_ENDIAN
                            ? try_size - info->bitsize - bitpos
                            : bitpos))
        return false;
      if (this_n.base_addr && vuse_store)
        {
          unsigned int j;
          for (j = first; j <= last; ++j)
            if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
              break;
          if (j > last)
            {
              if (vuse_store == 1)
                return false;
              vuse_store = 0;
            }
        }
      if (i == first)
        {
          n = this_n;
          ins_stmt = info->ins_stmt;
        }
      else
        {
          if (n.base_addr)
            {
              if (n.vuse != this_n.vuse)
                {
                  if (vuse_store == 0)
                    return false;
                  vuse_store = 1;
                }
              if (info->order > last_order)
                {
                  last_order = info->order;
                  last_stmt = info->stmt;
                }
              else if (info->order < first_order)
                {
                  first_order = info->order;
                  first_stmt = info->stmt;
                }
            }

          ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
                                             &this_n, &n);
          if (ins_stmt == NULL)
            return false;
        }
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n.n != cmpnop && n.n != cmpxchg)
    return false;

  if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
    return false;

  /* Don't handle memory copy this way if normal non-bswap processing
     would handle it too.  */
  if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
    {
      unsigned int i;
      for (i = first; i <= last; ++i)
        if (m_store_info[i]->rhs_code != MEM_REF)
          break;
      if (i == last + 1)
        return false;
    }

  if (n.n == cmpxchg)
    switch (try_size)
      {
      case 16:
        /* Will emit LROTATE_EXPR.  */
        break;
      case 32:
        if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
            && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
          break;
        return false;
      case 64:
        if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
            && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
          break;
        return false;
      default:
        gcc_unreachable ();
      }

  if (!allow_unaligned && n.base_addr)
    {
      unsigned int align = get_object_alignment (n.src);
      if (align < try_size)
        return false;
    }

  /* If each load has the vuse of the corresponding store, need to verify
     the loads can be sunk right before the last store.  */
  if (vuse_store == 1)
    {
      auto_vec<tree, 64> refs;
      for (unsigned int i = first; i <= last; ++i)
        gather_bswap_load_refs (&refs,
                                gimple_assign_rhs1 (m_store_info[i]->stmt));

      unsigned int i;
      tree ref;
      FOR_EACH_VEC_ELT (refs, i, ref)
        if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
          return false;
      n.vuse = NULL_TREE;
    }

  infof->n = n;
  infof->ins_stmt = ins_stmt;
  for (unsigned int i = first; i <= last; ++i)
    {
      m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
      m_store_info[i]->ops[0].base_addr = NULL_TREE;
      m_store_info[i]->ops[1].base_addr = NULL_TREE;
      if (i != first)
        merged_store->merge_into (m_store_info[i]);
    }

  return true;
}
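
/* For example, on a little-endian target the group of byte stores
     p[0] = x >> 24; p[1] = x >> 16; p[2] = x >> 8; p[3] = x;
   produces a symbolic number matching CMPXCHG for TRY_SIZE = 32, so the
   whole group can later be emitted as a single 32-bit store of
   __builtin_bswap32 (x), provided the target has a bswap optab.  */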

/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of stores.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n",
             m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
                              " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n",
                   i, info->bitsize, info->bitpos);
          print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
          fprintf (dump_file, "\n------------\n");
        }

      if (i <= ignore)
        continue;

      /* First try to handle group of stores like:
         p[0] = data >> 24;
         p[1] = data >> 16;
         p[2] = data >> 8;
         p[3] = data;
         using the bswap framework.  */
      if (info->bitpos == merged_store->start + merged_store->width
          && merged_store->stores.length () == 1
          && merged_store->stores[0]->ins_stmt != NULL
          && info->ins_stmt != NULL)
        {
          unsigned int try_size;
          for (try_size = 64; try_size >= 16; try_size >>= 1)
            if (try_coalesce_bswap (merged_store, i - 1, try_size))
              break;

          if (try_size >= 16)
            {
              ignore = i + merged_store->stores.length () - 1;
              m_merged_store_groups.safe_push (merged_store);
              if (ignore < m_store_info.length ())
                merged_store = new merged_store_group (m_store_info[ignore]);
              else
                merged_store = NULL;
              continue;
            }
        }

      /* |---store 1---|
               |---store 2---|
         Overlapping stores.  */
      if (IN_RANGE (info->bitpos, merged_store->start,
                    merged_store->start + merged_store->width - 1))
        {
          /* Only allow overlapping stores of constants.  */
          if (info->rhs_code == INTEGER_CST
              && merged_store->stores[0]->rhs_code == INTEGER_CST)
            {
              merged_store->merge_overlapping (info);
              continue;
            }
        }
      /* |---store 1---||---store 2---|
         This store is consecutive to the previous one.
         Merge it into the current store group.  There can be gaps in between
         the stores, but there can't be gaps in between bitregions.  */
      else if (info->rhs_code != LROTATE_EXPR
               && info->bitregion_start <= merged_store->bitregion_end
               && info->rhs_code == merged_store->stores[0]->rhs_code)
        {
          store_immediate_info *infof = merged_store->stores[0];

          /* All the rhs_code ops that take 2 operands are commutative,
             swap the operands if it could make the operands compatible.  */
          if (infof->ops[0].base_addr
              && infof->ops[1].base_addr
              && info->ops[0].base_addr
              && info->ops[1].base_addr
              && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
                           info->bitpos - infof->bitpos)
              && operand_equal_p (info->ops[1].base_addr,
                                  infof->ops[0].base_addr, 0))
            {
              std::swap (info->ops[0], info->ops[1]);
              info->ops_swapped_p = true;
            }
          if ((infof->ops[0].base_addr
               ? compatible_load_p (merged_store, info, base_addr, 0)
               : !info->ops[0].base_addr)
              && (infof->ops[1].base_addr
                  ? compatible_load_p (merged_store, info, base_addr, 1)
                  : !info->ops[1].base_addr))
            {
              merged_store->merge_into (info);
              continue;
            }
        }

      /* |---store 1---| <gap> |---store 2---|.
         Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
         the bitpattern they write and discard it if that fails.
         This will also reject single-store groups.  */
      if (!merged_store->apply_stores ())
        delete merged_store;
      else
        m_merged_store_groups.safe_push (merged_store);

      merged_store = new merged_store_group (info);
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (!merged_store->apply_stores ())
        delete merged_store;
      else
        m_merged_store_groups.safe_push (merged_store);
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\n"
                        "Merged into %u stores\n",
             m_merged_store_groups.length ());

  return success;
}
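
/* For example, for the chain
     p[0] = 1; p[1] = 2; p[2] = 3; p[3] = 4;
   the loop above grows a single merged_store_group covering bits 0-31,
   apply_stores computes the combined little-endian value 0x04030201, and
   coalescing succeeds with one group replacing four original stores.  */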

/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
                          unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
                         : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
        {
          if (TREE_CODE (base) == MEM_REF)
            {
              *cliquep = MR_DEPENDENCE_CLIQUE (base);
              *basep = MR_DEPENDENCE_BASE (base);
            }
          ret = type = type1;
          continue;
        }
      if (!alias_ptr_types_compatible_p (type, type1))
        ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
          || *cliquep != MR_DEPENDENCE_CLIQUE (base)
          || *basep != MR_DEPENDENCE_BASE (base))
        {
          *cliquep = 0;
          *basep = 0;
        }
    }
  return ret;
}

/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}

/* Used to describe a store resulting from splitting a wide store into
   smaller regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
               unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
                          unsigned HOST_WIDE_INT sz,
                          unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}

/* Record all stores in GROUP that write to the region of size BITSIZE
   starting at BITPOS.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return
   INFO if there is exactly one original store in the range.  */

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
                         vec<store_immediate_info *> *stores,
                         unsigned int *first,
                         unsigned HOST_WIDE_INT bitpos,
                         unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
        {
          /* BITPOS passed to this function never decreases from within the
             same split_group call, so optimize and don't scan info records
             which are known to end before or at BITPOS next time.
             Only do it if all stores before this one also pass this.  */
          if (update_first)
            *first = i + 1;
          continue;
        }
      else
        update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
         the region for this group return early.  */
      if (stmt_start >= end)
        return ret;

      if (stores)
        {
          stores->safe_push (info);
          if (ret)
            {
              ret = NULL;
              second = true;
            }
        }
      else if (ret)
        return NULL;
      if (!second)
        ret = info;
    }
  return ret;
}

/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
        {
          if (!has_single_use (gimple_assign_rhs1 (stmt)))
            ret = 1; /* Fall through below to return
                        the BIT_NOT_EXPR stmt and then
                        BIT_{AND,IOR,XOR}_EXPR and anything it
                        uses.  */
          else
            /* stmt is after this the BIT_NOT_EXPR.  */
            stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
        }
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        {
          ret += 1 + info->ops[0].bit_not_p;
          if (info->ops[1].base_addr)
            ret += 1 + info->ops[1].bit_not_p;
          return ret + 1;
        }
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
        {
          gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt2)))
            ++ret;
        }
      if (info->ops[1].base_addr == NULL_TREE)
        {
          gcc_checking_assert (!info->ops_swapped_p);
          return ret;
        }
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
        ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
        {
          gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt2)))
            ++ret;
        }
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
        {
          stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt)))
            return 1;
        }
      return 0;
    default:
      gcc_unreachable ();
    }
}
2894 | ||
f663d9ad | 2895 | /* Split a merged store described by GROUP by populating the SPLIT_STORES |
a62b3dc5 JJ |
2896 | vector (if non-NULL) with split_store structs describing the byte offset |
2897 | (from the base), the bit size and alignment of each store as well as the | |
2898 | original statements involved in each such split group. | |
f663d9ad KT |
2899 | This is to separate the splitting strategy from the statement |
2900 | building/emission/linking done in output_merged_store. | |
a62b3dc5 | 2901 | Return number of new stores. |
245f6de1 JJ |
2902 | If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned. |
2903 | If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned. | |
a62b3dc5 JJ |
2904 | If SPLIT_STORES is NULL, it is just a dry run to count number of |
2905 | new stores. */ | |
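/* Editor's sketch of the splitting strategy (assuming MAX_STORE_BITSIZE
   >= 32 and a group of byte stores covering bytes [0, 6) of a destination
   aligned to 4 bytes, with ALLOW_UNALIGNED_STORE false):
     [p    ] := ...;   4-byte store
     [p + 4] := ...;   2-byte store
   i.e. the dry run would return 2, replacing up to six original stores
   with two new ones.  */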
f663d9ad | 2906 | |
a62b3dc5 | 2907 | static unsigned int |
245f6de1 JJ |
2908 | split_group (merged_store_group *group, bool allow_unaligned_store, |
2909 | bool allow_unaligned_load, | |
d7a9512e JJ |
2910 | vec<struct split_store *> *split_stores, |
2911 | unsigned *total_orig, | |
2912 | unsigned *total_new) | |
f663d9ad | 2913 | { |
a62b3dc5 JJ |
2914 | unsigned HOST_WIDE_INT pos = group->bitregion_start; |
2915 | unsigned HOST_WIDE_INT size = group->bitregion_end - pos; | |
f663d9ad | 2916 | unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT; |
a62b3dc5 JJ |
2917 | unsigned HOST_WIDE_INT group_align = group->align; |
2918 | unsigned HOST_WIDE_INT align_base = group->align_base; | |
245f6de1 | 2919 | unsigned HOST_WIDE_INT group_load_align = group_align; |
d7a9512e | 2920 | bool any_orig = false; |
f663d9ad | 2921 | |
f663d9ad KT |
2922 | gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0)); |
2923 | ||
4b84d9b8 JJ |
2924 | if (group->stores[0]->rhs_code == LROTATE_EXPR |
2925 | || group->stores[0]->rhs_code == NOP_EXPR) | |
2926 | { | |
2927 | /* For the bswap framework using sets of stores, all the checking | |
2928 | has been done earlier in try_coalesce_bswap and the result needs | |
2929 | to be emitted as a single store. */ | |
2930 | if (total_orig) | |
2931 | { | |
2932 | /* Avoid the old/new stmt count heuristics. It should | |
2933 | always be beneficial. */ | |
2934 | total_new[0] = 1; | |
2935 | total_orig[0] = 2; | |
2936 | } | |
2937 | ||
2938 | if (split_stores) | |
2939 | { | |
2940 | unsigned HOST_WIDE_INT align_bitpos | |
2941 | = (group->start - align_base) & (group_align - 1); | |
2942 | unsigned HOST_WIDE_INT align = group_align; | |
2943 | if (align_bitpos) | |
2944 | align = least_bit_hwi (align_bitpos); | |
2945 | bytepos = group->start / BITS_PER_UNIT; | |
2946 | struct split_store *store | |
2947 | = new split_store (bytepos, group->width, align); | |
2948 | unsigned int first = 0; | |
2949 | find_constituent_stores (group, &store->orig_stores, | |
2950 | &first, group->start, group->width); | |
2951 | split_stores->safe_push (store); | |
2952 | } | |
2953 | ||
2954 | return 1; | |
2955 | } | |
2956 | ||
a62b3dc5 | 2957 | unsigned int ret = 0, first = 0; |
f663d9ad | 2958 | unsigned HOST_WIDE_INT try_pos = bytepos; |
f663d9ad | 2959 | |
d7a9512e JJ |
2960 | if (total_orig) |
2961 | { | |
2962 | unsigned int i; | |
2963 | store_immediate_info *info = group->stores[0]; | |
2964 | ||
2965 | total_new[0] = 0; | |
2966 | total_orig[0] = 1; /* The orig store. */ | |
2967 | info = group->stores[0]; | |
2968 | if (info->ops[0].base_addr) | |
a6fbd154 | 2969 | total_orig[0]++; |
d7a9512e | 2970 | if (info->ops[1].base_addr) |
a6fbd154 | 2971 | total_orig[0]++; |
d7a9512e JJ |
2972 | switch (info->rhs_code) |
2973 | { | |
2974 | case BIT_AND_EXPR: | |
2975 | case BIT_IOR_EXPR: | |
2976 | case BIT_XOR_EXPR: | |
2977 | total_orig[0]++; /* The orig BIT_*_EXPR stmt. */ | |
2978 | break; | |
2979 | default: | |
2980 | break; | |
2981 | } | |
2982 | total_orig[0] *= group->stores.length (); | |
2983 | ||
2984 | FOR_EACH_VEC_ELT (group->stores, i, info) | |
a6fbd154 JJ |
2985 | { |
2986 | total_new[0] += count_multiple_uses (info); | |
2987 | total_orig[0] += (info->bit_not_p | |
2988 | + info->ops[0].bit_not_p | |
2989 | + info->ops[1].bit_not_p); | |
2990 | } | |
d7a9512e JJ |
2991 | } |
2992 | ||
245f6de1 JJ |
2993 | if (!allow_unaligned_load) |
2994 | for (int i = 0; i < 2; ++i) | |
2995 | if (group->load_align[i]) | |
2996 | group_load_align = MIN (group_load_align, group->load_align[i]); | |
2997 | ||
f663d9ad KT |
2998 | while (size > 0) |
2999 | { | |
245f6de1 | 3000 | if ((allow_unaligned_store || group_align <= BITS_PER_UNIT) |
a62b3dc5 JJ |
3001 | && group->mask[try_pos - bytepos] == (unsigned char) ~0U) |
3002 | { | |
3003 | /* Skip padding bytes. */ | |
3004 | ++try_pos; | |
3005 | size -= BITS_PER_UNIT; | |
3006 | continue; | |
3007 | } | |
3008 | ||
f663d9ad | 3009 | unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT; |
a62b3dc5 JJ |
3010 | unsigned int try_size = MAX_STORE_BITSIZE, nonmasked; |
3011 | unsigned HOST_WIDE_INT align_bitpos | |
3012 | = (try_bitpos - align_base) & (group_align - 1); | |
3013 | unsigned HOST_WIDE_INT align = group_align; | |
3014 | if (align_bitpos) | |
3015 | align = least_bit_hwi (align_bitpos); | |
245f6de1 | 3016 | if (!allow_unaligned_store) |
a62b3dc5 | 3017 | try_size = MIN (try_size, align); |
245f6de1 JJ |
3018 | if (!allow_unaligned_load) |
3019 | { | |
3020 | /* If we can't do, or don't want to do, unaligned loads | |
3021 | as well as unaligned stores, we need to take the loads | |
3022 | into account as well. */ | |
3023 | unsigned HOST_WIDE_INT load_align = group_load_align; | |
3024 | align_bitpos = (try_bitpos - align_base) & (load_align - 1); | |
3025 | if (align_bitpos) | |
3026 | load_align = least_bit_hwi (align_bitpos); | |
3027 | for (int i = 0; i < 2; ++i) | |
3028 | if (group->load_align[i]) | |
3029 | { | |
8a91d545 RS |
3030 | align_bitpos |
3031 | = known_alignment (try_bitpos | |
3032 | - group->stores[0]->bitpos | |
3033 | + group->stores[0]->ops[i].bitpos | |
3034 | - group->load_align_base[i]); | |
3035 | if (align_bitpos & (group_load_align - 1)) | |
245f6de1 JJ |
3036 | { |
3037 | unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos); | |
3038 | load_align = MIN (load_align, a); | |
3039 | } | |
3040 | } | |
3041 | try_size = MIN (try_size, load_align); | |
3042 | } | |
a62b3dc5 | 3043 | store_immediate_info *info |
245f6de1 | 3044 | = find_constituent_stores (group, NULL, &first, try_bitpos, try_size); |
a62b3dc5 JJ |
3045 | if (info) |
3046 | { | |
3047 | /* If there is just one original statement for the range, see if | |
3048 | we can just reuse the original store which could be even larger | |
3049 | than try_size. */ | |
3050 | unsigned HOST_WIDE_INT stmt_end | |
3051 | = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT); | |
245f6de1 JJ |
3052 | info = find_constituent_stores (group, NULL, &first, try_bitpos, |
3053 | stmt_end - try_bitpos); | |
a62b3dc5 JJ |
3054 | if (info && info->bitpos >= try_bitpos) |
3055 | { | |
3056 | try_size = stmt_end - try_bitpos; | |
3057 | goto found; | |
3058 | } | |
3059 | } | |
f663d9ad | 3060 | |
a62b3dc5 JJ |
3061 | /* Approximate store bitsize for the case when there are no padding |
3062 | bits. */ | |
3063 | while (try_size > size) | |
3064 | try_size /= 2; | |
3065 | /* Now look for whole padding bytes at the end of that bitsize. */ | |
3066 | for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked) | |
3067 | if (group->mask[try_pos - bytepos + nonmasked - 1] | |
3068 | != (unsigned char) ~0U) | |
3069 | break; | |
3070 | if (nonmasked == 0) | |
3071 | { | |
3072 | /* If entire try_size range is padding, skip it. */ | |
3073 | try_pos += try_size / BITS_PER_UNIT; | |
3074 | size -= try_size; | |
3075 | continue; | |
3076 | } | |
3077 | /* Otherwise try to decrease try_size if the second half, the last | |
3078 | 3 quarters etc. are padding. */ | |
3079 | nonmasked *= BITS_PER_UNIT; | |
3080 | while (nonmasked <= try_size / 2) | |
3081 | try_size /= 2; | |
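/* Editor's note: e.g. if try_size is 32 but only the lowest byte is
   non-padding, nonmasked is 8 after the multiplication above and
   try_size shrinks to 8.  */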
245f6de1 | 3082 | if (!allow_unaligned_store && group_align > BITS_PER_UNIT) |
a62b3dc5 JJ |
3083 | { |
3084 | /* Now look for whole padding bytes at the start of that bitsize. */ | |
3085 | unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked; | |
3086 | for (masked = 0; masked < try_bytesize; ++masked) | |
3087 | if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U) | |
3088 | break; | |
3089 | masked *= BITS_PER_UNIT; | |
3090 | gcc_assert (masked < try_size); | |
3091 | if (masked >= try_size / 2) | |
3092 | { | |
3093 | while (masked >= try_size / 2) | |
3094 | { | |
3095 | try_size /= 2; | |
3096 | try_pos += try_size / BITS_PER_UNIT; | |
3097 | size -= try_size; | |
3098 | masked -= try_size; | |
3099 | } | |
3100 | /* Need to recompute the alignment, so just retry at the new | |
3101 | position. */ | |
3102 | continue; | |
3103 | } | |
3104 | } | |
3105 | ||
3106 | found: | |
3107 | ++ret; | |
f663d9ad | 3108 | |
a62b3dc5 JJ |
3109 | if (split_stores) |
3110 | { | |
3111 | struct split_store *store | |
3112 | = new split_store (try_pos, try_size, align); | |
245f6de1 JJ |
3113 | info = find_constituent_stores (group, &store->orig_stores, |
3114 | &first, try_bitpos, try_size); | |
a62b3dc5 JJ |
3115 | if (info |
3116 | && info->bitpos >= try_bitpos | |
3117 | && info->bitpos + info->bitsize <= try_bitpos + try_size) | |
d7a9512e JJ |
3118 | { |
3119 | store->orig = true; | |
3120 | any_orig = true; | |
3121 | } | |
a62b3dc5 JJ |
3122 | split_stores->safe_push (store); |
3123 | } | |
3124 | ||
3125 | try_pos += try_size / BITS_PER_UNIT; | |
f663d9ad | 3126 | size -= try_size; |
f663d9ad | 3127 | } |
a62b3dc5 | 3128 | |
d7a9512e JJ |
3129 | if (total_orig) |
3130 | { | |
a6fbd154 JJ |
3131 | unsigned int i; |
3132 | struct split_store *store; | |
d7a9512e JJ |
3133 | /* If we are reusing some original stores and any of the |
3134 | original SSA_NAMEs had multiple uses, we need to subtract | |
3135 | those now before we add the new ones. */ | |
3136 | if (total_new[0] && any_orig) | |
3137 | { | |
d7a9512e JJ |
3138 | FOR_EACH_VEC_ELT (*split_stores, i, store) |
3139 | if (store->orig) | |
3140 | total_new[0] -= count_multiple_uses (store->orig_stores[0]); | |
3141 | } | |
3142 | total_new[0] += ret; /* The new store. */ | |
3143 | store_immediate_info *info = group->stores[0]; | |
3144 | if (info->ops[0].base_addr) | |
a6fbd154 | 3145 | total_new[0] += ret; |
d7a9512e | 3146 | if (info->ops[1].base_addr) |
a6fbd154 | 3147 | total_new[0] += ret; |
d7a9512e JJ |
3148 | switch (info->rhs_code) |
3149 | { | |
3150 | case BIT_AND_EXPR: | |
3151 | case BIT_IOR_EXPR: | |
3152 | case BIT_XOR_EXPR: | |
3153 | total_new[0] += ret; /* The new BIT_*_EXPR stmt. */ | |
3154 | break; | |
3155 | default: | |
3156 | break; | |
3157 | } | |
a6fbd154 JJ |
3158 | FOR_EACH_VEC_ELT (*split_stores, i, store) |
3159 | { | |
3160 | unsigned int j; | |
3161 | bool bit_not_p[3] = { false, false, false }; | |
3162 | /* If all orig_stores have a given bit_not_p set, then | |
3163 | we'd use a BIT_NOT_EXPR stmt and need to account for it. | |
3164 | If only some orig_stores have it set, then we'd use | |
3165 | a BIT_XOR_EXPR with a mask and need to account for | |
3166 | it. */ | |
3167 | FOR_EACH_VEC_ELT (store->orig_stores, j, info) | |
3168 | { | |
3169 | if (info->ops[0].bit_not_p) | |
3170 | bit_not_p[0] = true; | |
3171 | if (info->ops[1].bit_not_p) | |
3172 | bit_not_p[1] = true; | |
3173 | if (info->bit_not_p) | |
3174 | bit_not_p[2] = true; | |
3175 | } | |
3176 | total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2]; | |
3177 | } | |
3178 | ||
d7a9512e JJ |
3179 | } |
3180 | ||
a62b3dc5 | 3181 | return ret; |
f663d9ad KT |
3182 | } |
3183 | ||
a6fbd154 JJ |
3184 | /* Return the operation through which the operand IDX (if < 2) or |
3185 | result (IDX == 2) should be inverted. If NOP_EXPR, no inversion | |
3186 | is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR, | |
3187 | the bits should be xored with mask. */ | |
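/* Editor's illustration: with two single-byte orig_stores in a 16-bit
   split store where only one of them has the relevant bit_not_p set,
   this returns BIT_XOR_EXPR and MASK has just that store's bits set,
   so the XOR flips exactly those bits; if both stores had it set, it
   would return BIT_NOT_EXPR and no mask.  */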
3188 | ||
3189 | static enum tree_code | |
3190 | invert_op (split_store *split_store, int idx, tree int_type, tree &mask) | |
3191 | { | |
3192 | unsigned int i; | |
3193 | store_immediate_info *info; | |
3194 | unsigned int cnt = 0; | |
3195 | FOR_EACH_VEC_ELT (split_store->orig_stores, i, info) | |
3196 | { | |
3197 | bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p; | |
3198 | if (bit_not_p) | |
3199 | ++cnt; | |
3200 | } | |
3201 | mask = NULL_TREE; | |
3202 | if (cnt == 0) | |
3203 | return NOP_EXPR; | |
3204 | if (cnt == split_store->orig_stores.length ()) | |
3205 | return BIT_NOT_EXPR; | |
3206 | ||
3207 | unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT; | |
3208 | unsigned buf_size = split_store->size / BITS_PER_UNIT; | |
3209 | unsigned char *buf | |
3210 | = XALLOCAVEC (unsigned char, buf_size); | |
3211 | memset (buf, ~0U, buf_size); | |
3212 | FOR_EACH_VEC_ELT (split_store->orig_stores, i, info) | |
3213 | { | |
3214 | bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p; | |
3215 | if (!bit_not_p) | |
3216 | continue; | |
3217 | /* Clear regions with bit_not_p and invert afterwards, rather than | |
3218 | clear regions with !bit_not_p, so that gaps in between stores aren't | |
3219 | set in the mask. */ | |
3220 | unsigned HOST_WIDE_INT bitsize = info->bitsize; | |
3221 | unsigned int pos_in_buffer = 0; | |
3222 | if (info->bitpos < try_bitpos) | |
3223 | { | |
3224 | gcc_assert (info->bitpos + bitsize > try_bitpos); | |
3225 | bitsize -= (try_bitpos - info->bitpos); | |
3226 | } | |
3227 | else | |
3228 | pos_in_buffer = info->bitpos - try_bitpos; | |
3229 | if (pos_in_buffer + bitsize > split_store->size) | |
3230 | bitsize = split_store->size - pos_in_buffer; | |
3231 | unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT); | |
3232 | if (BYTES_BIG_ENDIAN) | |
3233 | clear_bit_region_be (p, (BITS_PER_UNIT - 1 | |
3234 | - (pos_in_buffer % BITS_PER_UNIT)), bitsize); | |
3235 | else | |
3236 | clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize); | |
3237 | } | |
3238 | for (unsigned int i = 0; i < buf_size; ++i) | |
3239 | buf[i] = ~buf[i]; | |
3240 | mask = native_interpret_expr (int_type, buf, buf_size); | |
3241 | return BIT_XOR_EXPR; | |
3242 | } | |
3243 | ||
f663d9ad KT |
3244 | /* Given a merged store group GROUP output the widened version of it. |
3245 | The store chain is against the base object BASE. | |
3246 | Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output | |
3247 | unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive. | |
3248 | Make sure that the number of statements output is less than the number of | |
3249 | original statements. If a better sequence is possible, emit it and | |
3250 | return true. */ | |
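/* Editor's example of the profitability checks below: merging four
   adjacent byte stores into one aligned 4-byte store yields
   split_stores.length () == 1 < orig_num_stmts == 4, so the new
   sequence is emitted; if splitting produced four stores again, the
   function would bail out and keep the original code.  */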
3251 | ||
3252 | bool | |
b5926e23 | 3253 | imm_store_chain_info::output_merged_store (merged_store_group *group) |
f663d9ad | 3254 | { |
a62b3dc5 JJ |
3255 | unsigned HOST_WIDE_INT start_byte_pos |
3256 | = group->bitregion_start / BITS_PER_UNIT; | |
f663d9ad KT |
3257 | |
3258 | unsigned int orig_num_stmts = group->stores.length (); | |
3259 | if (orig_num_stmts < 2) | |
3260 | return false; | |
3261 | ||
a62b3dc5 | 3262 | auto_vec<struct split_store *, 32> split_stores; |
f663d9ad | 3263 | split_stores.create (0); |
245f6de1 | 3264 | bool allow_unaligned_store |
a62b3dc5 | 3265 | = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED); |
245f6de1 JJ |
3266 | bool allow_unaligned_load = allow_unaligned_store; |
3267 | if (allow_unaligned_store) | |
a62b3dc5 JJ |
3268 | { |
3269 | /* If unaligned stores are allowed, see how many stores we'd emit | |
3270 | unaligned and how many we'd emit aligned. Only use unaligned | |
3271 | stores if that results in fewer stores than aligned. */ | |
245f6de1 | 3272 | unsigned aligned_cnt |
d7a9512e | 3273 | = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL); |
245f6de1 | 3274 | unsigned unaligned_cnt |
d7a9512e | 3275 | = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL); |
a62b3dc5 | 3276 | if (aligned_cnt <= unaligned_cnt) |
245f6de1 | 3277 | allow_unaligned_store = false; |
a62b3dc5 | 3278 | } |
d7a9512e | 3279 | unsigned total_orig, total_new; |
245f6de1 | 3280 | split_group (group, allow_unaligned_store, allow_unaligned_load, |
d7a9512e | 3281 | &split_stores, &total_orig, &total_new); |
a62b3dc5 JJ |
3282 | |
3283 | if (split_stores.length () >= orig_num_stmts) | |
3284 | { | |
3285 | /* We didn't manage to reduce the number of statements. Bail out. */ | |
3286 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
d7a9512e JJ |
3287 | fprintf (dump_file, "Exceeded original number of stmts (%u)." |
3288 | " Not profitable to emit new sequence.\n", | |
3289 | orig_num_stmts); | |
a62b3dc5 JJ |
3290 | return false; |
3291 | } | |
d7a9512e JJ |
3292 | if (total_orig <= total_new) |
3293 | { | |
3294 | /* If the estimated number of new statements is not smaller than the | |
3295 | estimated number of original statements, bail out too. */ | |
3296 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
3297 | fprintf (dump_file, "Estimated number of original stmts (%u)" | |
3298 | " not larger than estimated number of new" | |
3299 | " stmts (%u).\n", | |
3300 | total_orig, total_new); | |
4b84d9b8 | 3301 | return false; |
d7a9512e | 3302 | } |
f663d9ad KT |
3303 | |
3304 | gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt); | |
3305 | gimple_seq seq = NULL; | |
f663d9ad KT |
3306 | tree last_vdef, new_vuse; |
3307 | last_vdef = gimple_vdef (group->last_stmt); | |
3308 | new_vuse = gimple_vuse (group->last_stmt); | |
4b84d9b8 JJ |
3309 | tree bswap_res = NULL_TREE; |
3310 | ||
3311 | if (group->stores[0]->rhs_code == LROTATE_EXPR | |
3312 | || group->stores[0]->rhs_code == NOP_EXPR) | |
3313 | { | |
3314 | tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type; | |
3315 | gimple *ins_stmt = group->stores[0]->ins_stmt; | |
3316 | struct symbolic_number *n = &group->stores[0]->n; | |
3317 | bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR; | |
3318 | ||
3319 | switch (n->range) | |
3320 | { | |
3321 | case 16: | |
3322 | load_type = bswap_type = uint16_type_node; | |
3323 | break; | |
3324 | case 32: | |
3325 | load_type = uint32_type_node; | |
3326 | if (bswap) | |
3327 | { | |
3328 | fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32); | |
3329 | bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); | |
3330 | } | |
3331 | break; | |
3332 | case 64: | |
3333 | load_type = uint64_type_node; | |
3334 | if (bswap) | |
3335 | { | |
3336 | fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64); | |
3337 | bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); | |
3338 | } | |
3339 | break; | |
3340 | default: | |
3341 | gcc_unreachable (); | |
3342 | } | |
3343 | ||
3344 | /* If each load has the vuse of the corresponding store, | |
3345 | we've checked the aliasing already in try_coalesce_bswap and | |
3346 | we want to sink the needed load into seq. So we need to use | |
3347 | new_vuse on the load. */ | |
30fa8e9c | 3348 | if (n->base_addr) |
4b84d9b8 | 3349 | { |
30fa8e9c JJ |
3350 | if (n->vuse == NULL) |
3351 | { | |
3352 | n->vuse = new_vuse; | |
3353 | ins_stmt = NULL; | |
3354 | } | |
3355 | else | |
3356 | /* Update vuse in case it has been changed by output_merged_stores. */ | |
3357 | n->vuse = gimple_vuse (ins_stmt); | |
4b84d9b8 JJ |
3358 | } |
3359 | bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl, | |
3360 | bswap_type, load_type, n, bswap); | |
3361 | gcc_assert (bswap_res); | |
3362 | } | |
f663d9ad KT |
3363 | |
3364 | gimple *stmt = NULL; | |
f663d9ad KT |
3365 | split_store *split_store; |
3366 | unsigned int i; | |
245f6de1 | 3367 | auto_vec<gimple *, 32> orig_stmts; |
4b84d9b8 JJ |
3368 | gimple_seq this_seq; |
3369 | tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq, | |
aa55dc0c | 3370 | is_gimple_mem_ref_addr, NULL_TREE); |
4b84d9b8 | 3371 | gimple_seq_add_seq_without_update (&seq, this_seq); |
245f6de1 JJ |
3372 | |
3373 | tree load_addr[2] = { NULL_TREE, NULL_TREE }; | |
3374 | gimple_seq load_seq[2] = { NULL, NULL }; | |
3375 | gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () }; | |
3376 | for (int j = 0; j < 2; ++j) | |
3377 | { | |
3378 | store_operand_info &op = group->stores[0]->ops[j]; | |
3379 | if (op.base_addr == NULL_TREE) | |
3380 | continue; | |
3381 | ||
3382 | store_immediate_info *infol = group->stores.last (); | |
3383 | if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt)) | |
3384 | { | |
97031af7 JJ |
3385 | /* We can't pick the location randomly; while we've verified |
3386 | all the loads have the same vuse, they can still be in different | |
3387 | basic blocks and we need to pick the one from the last bb: | |
3388 | int x = q[0]; | |
3389 | if (x == N) return; | |
3390 | int y = q[1]; | |
3391 | p[0] = x; | |
3392 | p[1] = y; | |
3393 | otherwise if we put the wider load at the q[0] load, we might | |
3394 | segfault if q[1] is not mapped. */ | |
3395 | basic_block bb = gimple_bb (op.stmt); | |
3396 | gimple *ostmt = op.stmt; | |
3397 | store_immediate_info *info; | |
3398 | FOR_EACH_VEC_ELT (group->stores, i, info) | |
3399 | { | |
3400 | gimple *tstmt = info->ops[j].stmt; | |
3401 | basic_block tbb = gimple_bb (tstmt); | |
3402 | if (dominated_by_p (CDI_DOMINATORS, tbb, bb)) | |
3403 | { | |
3404 | ostmt = tstmt; | |
3405 | bb = tbb; | |
3406 | } | |
3407 | } | |
3408 | load_gsi[j] = gsi_for_stmt (ostmt); | |
245f6de1 JJ |
3409 | load_addr[j] |
3410 | = force_gimple_operand_1 (unshare_expr (op.base_addr), | |
3411 | &load_seq[j], is_gimple_mem_ref_addr, | |
3412 | NULL_TREE); | |
3413 | } | |
3414 | else if (operand_equal_p (base_addr, op.base_addr, 0)) | |
3415 | load_addr[j] = addr; | |
3416 | else | |
3e2927a1 | 3417 | { |
3e2927a1 JJ |
3418 | load_addr[j] |
3419 | = force_gimple_operand_1 (unshare_expr (op.base_addr), | |
3420 | &this_seq, is_gimple_mem_ref_addr, | |
3421 | NULL_TREE); | |
3422 | gimple_seq_add_seq_without_update (&seq, this_seq); | |
3423 | } | |
245f6de1 JJ |
3424 | } |
3425 | ||
f663d9ad KT |
3426 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
3427 | { | |
3428 | unsigned HOST_WIDE_INT try_size = split_store->size; | |
3429 | unsigned HOST_WIDE_INT try_pos = split_store->bytepos; | |
3430 | unsigned HOST_WIDE_INT align = split_store->align; | |
a62b3dc5 JJ |
3431 | tree dest, src; |
3432 | location_t loc; | |
3433 | if (split_store->orig) | |
3434 | { | |
3435 | /* If there is just a single constituent store which covers | |
3436 | the whole area, reuse its lhs and rhs. */ | |
245f6de1 JJ |
3437 | gimple *orig_stmt = split_store->orig_stores[0]->stmt; |
3438 | dest = gimple_assign_lhs (orig_stmt); | |
3439 | src = gimple_assign_rhs1 (orig_stmt); | |
3440 | loc = gimple_location (orig_stmt); | |
a62b3dc5 JJ |
3441 | } |
3442 | else | |
3443 | { | |
245f6de1 JJ |
3444 | store_immediate_info *info; |
3445 | unsigned short clique, base; | |
3446 | unsigned int k; | |
3447 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
3448 | orig_stmts.safe_push (info->stmt); | |
a62b3dc5 | 3449 | tree offset_type |
245f6de1 JJ |
3450 | = get_alias_type_for_stmts (orig_stmts, false, &clique, &base); |
3451 | loc = get_location_for_stmts (orig_stmts); | |
3452 | orig_stmts.truncate (0); | |
a62b3dc5 JJ |
3453 | |
3454 | tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED); | |
3455 | int_type = build_aligned_type (int_type, align); | |
3456 | dest = fold_build2 (MEM_REF, int_type, addr, | |
3457 | build_int_cst (offset_type, try_pos)); | |
245f6de1 JJ |
3458 | if (TREE_CODE (dest) == MEM_REF) |
3459 | { | |
3460 | MR_DEPENDENCE_CLIQUE (dest) = clique; | |
3461 | MR_DEPENDENCE_BASE (dest) = base; | |
3462 | } | |
3463 | ||
4b84d9b8 JJ |
3464 | tree mask = integer_zero_node; |
3465 | if (!bswap_res) | |
3466 | mask = native_interpret_expr (int_type, | |
3467 | group->mask + try_pos | |
3468 | - start_byte_pos, | |
3469 | group->buf_size); | |
245f6de1 JJ |
3470 | |
3471 | tree ops[2]; | |
3472 | for (int j = 0; | |
3473 | j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE); | |
3474 | ++j) | |
3475 | { | |
3476 | store_operand_info &op = split_store->orig_stores[0]->ops[j]; | |
4b84d9b8 JJ |
3477 | if (bswap_res) |
3478 | ops[j] = bswap_res; | |
3479 | else if (op.base_addr) | |
245f6de1 JJ |
3480 | { |
3481 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
3482 | orig_stmts.safe_push (info->ops[j].stmt); | |
3483 | ||
3484 | offset_type = get_alias_type_for_stmts (orig_stmts, true, | |
3485 | &clique, &base); | |
3486 | location_t load_loc = get_location_for_stmts (orig_stmts); | |
3487 | orig_stmts.truncate (0); | |
3488 | ||
3489 | unsigned HOST_WIDE_INT load_align = group->load_align[j]; | |
3490 | unsigned HOST_WIDE_INT align_bitpos | |
8a91d545 RS |
3491 | = known_alignment (try_pos * BITS_PER_UNIT |
3492 | - split_store->orig_stores[0]->bitpos | |
3493 | + op.bitpos); | |
3494 | if (align_bitpos & (load_align - 1)) | |
245f6de1 JJ |
3495 | load_align = least_bit_hwi (align_bitpos); |
3496 | ||
3497 | tree load_int_type | |
3498 | = build_nonstandard_integer_type (try_size, UNSIGNED); | |
3499 | load_int_type | |
3500 | = build_aligned_type (load_int_type, load_align); | |
3501 | ||
8a91d545 RS |
3502 | poly_uint64 load_pos |
3503 | = exact_div (try_pos * BITS_PER_UNIT | |
3504 | - split_store->orig_stores[0]->bitpos | |
3505 | + op.bitpos, | |
3506 | BITS_PER_UNIT); | |
245f6de1 JJ |
3507 | ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j], |
3508 | build_int_cst (offset_type, load_pos)); | |
3509 | if (TREE_CODE (ops[j]) == MEM_REF) | |
3510 | { | |
3511 | MR_DEPENDENCE_CLIQUE (ops[j]) = clique; | |
3512 | MR_DEPENDENCE_BASE (ops[j]) = base; | |
3513 | } | |
3514 | if (!integer_zerop (mask)) | |
3515 | /* The load might load some bits (that will be masked off | |
3516 | later on) uninitialized; avoid -W*uninitialized | |
3517 | warnings in that case. */ | |
3518 | TREE_NO_WARNING (ops[j]) = 1; | |
3519 | ||
3520 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
3521 | ops[j]); | |
3522 | gimple_set_location (stmt, load_loc); | |
3523 | if (gsi_bb (load_gsi[j])) | |
3524 | { | |
3525 | gimple_set_vuse (stmt, gimple_vuse (op.stmt)); | |
3526 | gimple_seq_add_stmt_without_update (&load_seq[j], stmt); | |
3527 | } | |
3528 | else | |
3529 | { | |
3530 | gimple_set_vuse (stmt, new_vuse); | |
3531 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3532 | } | |
3533 | ops[j] = gimple_assign_lhs (stmt); | |
a6fbd154 JJ |
3534 | tree xor_mask; |
3535 | enum tree_code inv_op | |
3536 | = invert_op (split_store, j, int_type, xor_mask); | |
3537 | if (inv_op != NOP_EXPR) | |
383ac8dc JJ |
3538 | { |
3539 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
a6fbd154 | 3540 | inv_op, ops[j], xor_mask); |
383ac8dc JJ |
3541 | gimple_set_location (stmt, load_loc); |
3542 | ops[j] = gimple_assign_lhs (stmt); | |
3543 | ||
3544 | if (gsi_bb (load_gsi[j])) | |
3545 | gimple_seq_add_stmt_without_update (&load_seq[j], | |
3546 | stmt); | |
3547 | else | |
3548 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3549 | } | |
245f6de1 JJ |
3550 | } |
3551 | else | |
3552 | ops[j] = native_interpret_expr (int_type, | |
3553 | group->val + try_pos | |
3554 | - start_byte_pos, | |
3555 | group->buf_size); | |
3556 | } | |
3557 | ||
3558 | switch (split_store->orig_stores[0]->rhs_code) | |
3559 | { | |
3560 | case BIT_AND_EXPR: | |
3561 | case BIT_IOR_EXPR: | |
3562 | case BIT_XOR_EXPR: | |
3563 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
3564 | { | |
3565 | tree rhs1 = gimple_assign_rhs1 (info->stmt); | |
3566 | orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1)); | |
3567 | } | |
3568 | location_t bit_loc; | |
3569 | bit_loc = get_location_for_stmts (orig_stmts); | |
3570 | orig_stmts.truncate (0); | |
3571 | ||
3572 | stmt | |
3573 | = gimple_build_assign (make_ssa_name (int_type), | |
3574 | split_store->orig_stores[0]->rhs_code, | |
3575 | ops[0], ops[1]); | |
3576 | gimple_set_location (stmt, bit_loc); | |
3577 | /* If there is just one load and there is a separate | |
3578 | load_seq[0], emit the bitwise op right after it. */ | |
3579 | if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0])) | |
3580 | gimple_seq_add_stmt_without_update (&load_seq[0], stmt); | |
3581 | /* Otherwise, if at least one load is in seq, we need to | |
3582 | emit the bitwise op right before the store. If there | |
3583 | are two loads and they are emitted somewhere else, it would | |
3584 | be better to emit the bitwise op as early as possible; | |
3585 | we don't track where that would be possible right now | |
3586 | though. */ | |
3587 | else | |
3588 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3589 | src = gimple_assign_lhs (stmt); | |
a6fbd154 JJ |
3590 | tree xor_mask; |
3591 | enum tree_code inv_op; | |
3592 | inv_op = invert_op (split_store, 2, int_type, xor_mask); | |
3593 | if (inv_op != NOP_EXPR) | |
d60edaba JJ |
3594 | { |
3595 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
a6fbd154 | 3596 | inv_op, src, xor_mask); |
d60edaba JJ |
3597 | gimple_set_location (stmt, bit_loc); |
3598 | if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0])) | |
3599 | gimple_seq_add_stmt_without_update (&load_seq[0], stmt); | |
3600 | else | |
3601 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3602 | src = gimple_assign_lhs (stmt); | |
3603 | } | |
245f6de1 | 3604 | break; |
4b84d9b8 JJ |
3605 | case LROTATE_EXPR: |
3606 | case NOP_EXPR: | |
3607 | src = ops[0]; | |
3608 | if (!is_gimple_val (src)) | |
3609 | { | |
3610 | stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)), | |
3611 | src); | |
3612 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3613 | src = gimple_assign_lhs (stmt); | |
3614 | } | |
3615 | if (!useless_type_conversion_p (int_type, TREE_TYPE (src))) | |
3616 | { | |
3617 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
3618 | NOP_EXPR, src); | |
3619 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3620 | src = gimple_assign_lhs (stmt); | |
3621 | } | |
3622 | break; | |
245f6de1 JJ |
3623 | default: |
3624 | src = ops[0]; | |
3625 | break; | |
3626 | } | |
3627 | ||
a62b3dc5 JJ |
3628 | if (!integer_zerop (mask)) |
3629 | { | |
3630 | tree tem = make_ssa_name (int_type); | |
3631 | tree load_src = unshare_expr (dest); | |
3632 | /* The load might load some or all bits uninitialized; | |
3633 | avoid -W*uninitialized warnings in that case. | |
3634 | As an optimization, if all the bits were provably | |
3635 | uninitialized (no stores at all yet, or the previous | |
3636 | store was a CLOBBER), we could optimize away the load | |
3637 | and replace it e.g. with 0. */ | |
3638 | TREE_NO_WARNING (load_src) = 1; | |
3639 | stmt = gimple_build_assign (tem, load_src); | |
3640 | gimple_set_location (stmt, loc); | |
3641 | gimple_set_vuse (stmt, new_vuse); | |
3642 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3643 | ||
3644 | /* FIXME: If there is a single chunk of zero bits in mask, | |
3645 | perhaps use BIT_INSERT_EXPR instead? */ | |
3646 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
3647 | BIT_AND_EXPR, tem, mask); | |
3648 | gimple_set_location (stmt, loc); | |
3649 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3650 | tem = gimple_assign_lhs (stmt); | |
3651 | ||
245f6de1 JJ |
3652 | if (TREE_CODE (src) == INTEGER_CST) |
3653 | src = wide_int_to_tree (int_type, | |
3654 | wi::bit_and_not (wi::to_wide (src), | |
3655 | wi::to_wide (mask))); | |
3656 | else | |
3657 | { | |
3658 | tree nmask | |
3659 | = wide_int_to_tree (int_type, | |
3660 | wi::bit_not (wi::to_wide (mask))); | |
3661 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
3662 | BIT_AND_EXPR, src, nmask); | |
3663 | gimple_set_location (stmt, loc); | |
3664 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3665 | src = gimple_assign_lhs (stmt); | |
3666 | } | |
a62b3dc5 JJ |
3667 | stmt = gimple_build_assign (make_ssa_name (int_type), |
3668 | BIT_IOR_EXPR, tem, src); | |
3669 | gimple_set_location (stmt, loc); | |
3670 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3671 | src = gimple_assign_lhs (stmt); | |
3672 | } | |
3673 | } | |
f663d9ad KT |
3674 | |
3675 | stmt = gimple_build_assign (dest, src); | |
3676 | gimple_set_location (stmt, loc); | |
3677 | gimple_set_vuse (stmt, new_vuse); | |
3678 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
3679 | ||
f663d9ad KT |
3680 | tree new_vdef; |
3681 | if (i < split_stores.length () - 1) | |
a62b3dc5 | 3682 | new_vdef = make_ssa_name (gimple_vop (cfun), stmt); |
f663d9ad KT |
3683 | else |
3684 | new_vdef = last_vdef; | |
3685 | ||
3686 | gimple_set_vdef (stmt, new_vdef); | |
3687 | SSA_NAME_DEF_STMT (new_vdef) = stmt; | |
3688 | new_vuse = new_vdef; | |
3689 | } | |
3690 | ||
3691 | FOR_EACH_VEC_ELT (split_stores, i, split_store) | |
3692 | delete split_store; | |
3693 | ||
f663d9ad KT |
3694 | gcc_assert (seq); |
3695 | if (dump_file) | |
3696 | { | |
3697 | fprintf (dump_file, | |
3698 | "New sequence of %u stmts to replace old one of %u stmts\n", | |
a62b3dc5 | 3699 | split_stores.length (), orig_num_stmts); |
f663d9ad KT |
3700 | if (dump_flags & TDF_DETAILS) |
3701 | print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS); | |
3702 | } | |
3703 | gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT); | |
245f6de1 JJ |
3704 | for (int j = 0; j < 2; ++j) |
3705 | if (load_seq[j]) | |
3706 | gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT); | |
f663d9ad KT |
3707 | |
3708 | return true; | |
3709 | } | |
3710 | ||
3711 | /* Process the merged_store_group objects created in the coalescing phase. | |
3712 | The stores are all against the base object BASE. | |
3713 | Try to output the widened stores and delete the original statements if | |
3714 | successful. Return true iff any changes were made. */ | |
3715 | ||
3716 | bool | |
b5926e23 | 3717 | imm_store_chain_info::output_merged_stores () |
f663d9ad KT |
3718 | { |
3719 | unsigned int i; | |
3720 | merged_store_group *merged_store; | |
3721 | bool ret = false; | |
3722 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store) | |
3723 | { | |
b5926e23 | 3724 | if (output_merged_store (merged_store)) |
f663d9ad KT |
3725 | { |
3726 | unsigned int j; | |
3727 | store_immediate_info *store; | |
3728 | FOR_EACH_VEC_ELT (merged_store->stores, j, store) | |
3729 | { | |
3730 | gimple *stmt = store->stmt; | |
3731 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
3732 | gsi_remove (&gsi, true); | |
3733 | if (stmt != merged_store->last_stmt) | |
3734 | { | |
3735 | unlink_stmt_vdef (stmt); | |
3736 | release_defs (stmt); | |
3737 | } | |
3738 | } | |
3739 | ret = true; | |
3740 | } | |
3741 | } | |
3742 | if (ret && dump_file) | |
3743 | fprintf (dump_file, "Merging successful!\n"); | |
3744 | ||
3745 | return ret; | |
3746 | } | |
3747 | ||
3748 | /* Coalesce the store_immediate_info objects recorded against the base object | |
3749 | BASE in the first phase and output them. | |
3750 | Delete the allocated structures. | |
3751 | Return true if any changes were made. */ | |
3752 | ||
3753 | bool | |
b5926e23 | 3754 | imm_store_chain_info::terminate_and_process_chain () |
f663d9ad KT |
3755 | { |
3756 | /* Process store chain. */ | |
3757 | bool ret = false; | |
3758 | if (m_store_info.length () > 1) | |
3759 | { | |
3760 | ret = coalesce_immediate_stores (); | |
3761 | if (ret) | |
b5926e23 | 3762 | ret = output_merged_stores (); |
f663d9ad KT |
3763 | } |
3764 | ||
3765 | /* Delete all the entries we allocated ourselves. */ | |
3766 | store_immediate_info *info; | |
3767 | unsigned int i; | |
3768 | FOR_EACH_VEC_ELT (m_store_info, i, info) | |
3769 | delete info; | |
3770 | ||
3771 | merged_store_group *merged_info; | |
3772 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info) | |
3773 | delete merged_info; | |
3774 | ||
3775 | return ret; | |
3776 | } | |
3777 | ||
3778 | /* Return true iff LHS is a destination potentially interesting for | |
3779 | store merging. In practice these are the codes that get_inner_reference | |
3780 | can process. */ | |
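/* E.g. (editor's note) destinations such as s.f (COMPONENT_REF),
   a[i] (ARRAY_REF) or *p (MEM_REF) are accepted, while a store to a
   bare SSA name or a plain DECL returns false.  */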
3781 | ||
3782 | static bool | |
3783 | lhs_valid_for_store_merging_p (tree lhs) | |
3784 | { | |
3785 | tree_code code = TREE_CODE (lhs); | |
3786 | ||
3787 | if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF | |
3788 | || code == COMPONENT_REF || code == BIT_FIELD_REF) | |
3789 | return true; | |
3790 | ||
3791 | return false; | |
3792 | } | |
3793 | ||
3794 | /* Return true if the tree RHS is a constant we want to consider | |
3795 | during store merging. In practice accept all codes that | |
3796 | native_encode_expr accepts. */ | |
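/* E.g. (editor's note) integer constants such as 42 encode fine,
   while constants that native_encode_expr cannot serialize into
   bytes make this return false.  */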
3797 | ||
3798 | static bool | |
3799 | rhs_valid_for_store_merging_p (tree rhs) | |
3800 | { | |
2f391428 JJ |
3801 | return native_encode_expr (rhs, NULL, |
3802 | GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0; | |
f663d9ad KT |
3803 | } |
3804 | ||
245f6de1 JJ |
3805 | /* If MEM is a memory reference usable for store merging (either as |
3806 | store destination or for loads), return the non-NULL base_addr | |
3807 | and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END. | |
3808 | Otherwise return NULL; *PBITPOS should still be valid even in that | |
3809 | case. */ | |
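/* Editor's illustration of the MEM_REF canonicalization done below:
   for MEM[p_1 + 4] with bit offset 8, the base becomes MEM[p_1] and
   *PBITPOS becomes 4 * BITS_PER_UNIT + 8, so stores at different
   byte offsets from the same pointer end up in a single chain.  */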
3810 | ||
3811 | static tree | |
8a91d545 RS |
3812 | mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize, |
3813 | poly_uint64 *pbitpos, | |
3814 | poly_uint64 *pbitregion_start, | |
3815 | poly_uint64 *pbitregion_end) | |
245f6de1 | 3816 | { |
8a91d545 RS |
3817 | poly_int64 bitsize, bitpos; |
3818 | poly_uint64 bitregion_start = 0, bitregion_end = 0; | |
245f6de1 JJ |
3819 | machine_mode mode; |
3820 | int unsignedp = 0, reversep = 0, volatilep = 0; | |
3821 | tree offset; | |
3822 | tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode, | |
3823 | &unsignedp, &reversep, &volatilep); | |
3824 | *pbitsize = bitsize; | |
8a91d545 | 3825 | if (known_eq (bitsize, 0)) |
245f6de1 JJ |
3826 | return NULL_TREE; |
3827 | ||
3828 | if (TREE_CODE (mem) == COMPONENT_REF | |
3829 | && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1))) | |
3830 | { | |
3831 | get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset); | |
8a91d545 RS |
3832 | if (maybe_ne (bitregion_end, 0U)) |
3833 | bitregion_end += 1; | |
245f6de1 JJ |
3834 | } |
3835 | ||
3836 | if (reversep) | |
3837 | return NULL_TREE; | |
3838 | ||
3839 | /* We do not want to rewrite TARGET_MEM_REFs. */ | |
3840 | if (TREE_CODE (base_addr) == TARGET_MEM_REF) | |
3841 | return NULL_TREE; | |
3842 | /* In some cases get_inner_reference may return a | |
3843 | MEM_REF [ptr + byteoffset]. For the purposes of this pass | |
3844 | canonicalize the base_addr to MEM_REF [ptr] and take | |
3845 | byteoffset into account in the bitpos. This occurs in | |
3846 | PR 23684 and this way we can catch more chains. */ | |
3847 | else if (TREE_CODE (base_addr) == MEM_REF) | |
3848 | { | |
8a91d545 RS |
3849 | poly_offset_int byte_off = mem_ref_offset (base_addr); |
3850 | poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT; | |
245f6de1 | 3851 | bit_off += bitpos; |
8a91d545 | 3852 | if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos)) |
245f6de1 | 3853 | { |
8a91d545 | 3854 | if (maybe_ne (bitregion_end, 0U)) |
245f6de1 JJ |
3855 | { |
3856 | bit_off = byte_off << LOG2_BITS_PER_UNIT; | |
3857 | bit_off += bitregion_start; | |
8a91d545 | 3858 | if (bit_off.to_uhwi (&bitregion_start)) |
245f6de1 | 3859 | { |
245f6de1 JJ |
3860 | bit_off = byte_off << LOG2_BITS_PER_UNIT; |
3861 | bit_off += bitregion_end; | |
8a91d545 | 3862 | if (!bit_off.to_uhwi (&bitregion_end)) |
245f6de1 JJ |
3863 | bitregion_end = 0; |
3864 | } | |
3865 | else | |
3866 | bitregion_end = 0; | |
3867 | } | |
3868 | } | |
3869 | else | |
3870 | return NULL_TREE; | |
3871 | base_addr = TREE_OPERAND (base_addr, 0); | |
3872 | } | |
3873 | /* get_inner_reference returns the base object, get at its | |
3874 | address now. */ | |
3875 | else | |
3876 | { | |
8a91d545 | 3877 | if (maybe_lt (bitpos, 0)) |
245f6de1 JJ |
3878 | return NULL_TREE; |
3879 | base_addr = build_fold_addr_expr (base_addr); | |
3880 | } | |
3881 | ||
8a91d545 | 3882 | if (known_eq (bitregion_end, 0U)) |
245f6de1 | 3883 | { |
8a91d545 RS |
3884 | bitregion_start = round_down_to_byte_boundary (bitpos); |
3885 | bitregion_end = round_up_to_byte_boundary (bitpos + bitsize); | |
245f6de1 JJ |
3886 | } |
3887 | ||
3888 | if (offset != NULL_TREE) | |
3889 | { | |
3890 | /* If the access is variable offset then a base decl has to be | |
3891 | address-taken to be able to emit pointer-based stores to it. | |
3892 | ??? We might be able to get away with re-using the original | |
3893 | base up to the first variable part and then wrapping that inside | |
3894 | a BIT_FIELD_REF. */ | |
3895 | tree base = get_base_address (base_addr); | |
3896 | if (! base | |
3897 | || (DECL_P (base) && ! TREE_ADDRESSABLE (base))) | |
3898 | return NULL_TREE; | |
3899 | ||
3900 | base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr), | |
3901 | base_addr, offset); | |
3902 | } | |
3903 | ||
3904 | *pbitsize = bitsize; | |
3905 | *pbitpos = bitpos; | |
3906 | *pbitregion_start = bitregion_start; | |
3907 | *pbitregion_end = bitregion_end; | |
3908 | return base_addr; | |
3909 | } | |
3910 | ||
3911 | /* Return true if STMT is a load that can be used for store merging. | |
3912 | In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and | |
3913 | BITREGION_END are properties of the corresponding store. */ | |
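/* Editor's illustration (hypothetical GIMPLE):
     _1 = MEM[(char *)q_2 + 1];
     MEM[(char *)p_3 + 1] = _1;
   The load qualifies when its bit size matches the store's and its
   position within its own bit region is compatible, so that a later
   widened load can mirror the widened store.  */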
3914 | ||
3915 | static bool | |
3916 | handled_load (gimple *stmt, store_operand_info *op, | |
8a91d545 RS |
3917 | poly_uint64 bitsize, poly_uint64 bitpos, |
3918 | poly_uint64 bitregion_start, poly_uint64 bitregion_end) | |
245f6de1 | 3919 | { |
383ac8dc | 3920 | if (!is_gimple_assign (stmt)) |
245f6de1 | 3921 | return false; |
383ac8dc JJ |
3922 | if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR) |
3923 | { | |
3924 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
3925 | if (TREE_CODE (rhs1) == SSA_NAME | |
383ac8dc JJ |
3926 | && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos, |
3927 | bitregion_start, bitregion_end)) | |
3928 | { | |
d60edaba JJ |
3929 | /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have | |
3930 | been optimized earlier, but if allowed here, would confuse the | |
3931 | multiple uses counting. */ | |
3932 | if (op->bit_not_p) | |
3933 | return false; | |
383ac8dc JJ |
3934 | op->bit_not_p = !op->bit_not_p; |
3935 | return true; | |
3936 | } | |
3937 | return false; | |
3938 | } | |
3939 | if (gimple_vuse (stmt) | |
3940 | && gimple_assign_load_p (stmt) | |
245f6de1 JJ |
3941 | && !stmt_can_throw_internal (stmt) |
3942 | && !gimple_has_volatile_ops (stmt)) | |
3943 | { | |
3944 | tree mem = gimple_assign_rhs1 (stmt); | |
3945 | op->base_addr | |
3946 | = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos, | |
3947 | &op->bitregion_start, | |
3948 | &op->bitregion_end); | |
3949 | if (op->base_addr != NULL_TREE | |
8a91d545 RS |
3950 | && known_eq (op->bitsize, bitsize) |
3951 | && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT) | |
3952 | && known_ge (op->bitpos - op->bitregion_start, | |
3953 | bitpos - bitregion_start) | |
3954 | && known_ge (op->bitregion_end - op->bitpos, | |
3955 | bitregion_end - bitpos)) | |
245f6de1 JJ |
3956 | { |
3957 | op->stmt = stmt; | |
3958 | op->val = mem; | |
383ac8dc | 3959 | op->bit_not_p = false; |
245f6de1 JJ |
3960 | return true; |
3961 | } | |
3962 | } | |
3963 | return false; | |
3964 | } | |
3965 | ||
3966 | /* Record the store STMT for store merging optimization if it can be | |
3967 | optimized. */ | |
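/* Editor's sketch of a recordable store sequence (C source level):
     p[0] = q[0] ^ 1;
     p[1] = q[1] ^ 2;
   Each store is recorded with rhs_code BIT_XOR_EXPR, ops[0]
   describing the load from q and ops[1].val the constant, ready for
   the coalescing phase.  */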
3968 | ||
3969 | void | |
3970 | pass_store_merging::process_store (gimple *stmt) | |
3971 | { | |
3972 | tree lhs = gimple_assign_lhs (stmt); | |
3973 | tree rhs = gimple_assign_rhs1 (stmt); | |
8a91d545 RS |
3974 | poly_uint64 bitsize, bitpos; |
3975 | poly_uint64 bitregion_start, bitregion_end; | |
245f6de1 JJ |
3976 | tree base_addr |
3977 | = mem_valid_for_store_merging (lhs, &bitsize, &bitpos, | |
3978 | &bitregion_start, &bitregion_end); | |
8a91d545 | 3979 | if (known_eq (bitsize, 0U)) |
245f6de1 JJ |
3980 | return; |
3981 | ||
3982 | bool invalid = (base_addr == NULL_TREE | |
8a91d545 RS |
3983 | || (maybe_gt (bitsize, |
3984 | (unsigned int) MAX_BITSIZE_MODE_ANY_INT) | |
3985 | && (TREE_CODE (rhs) != INTEGER_CST))); | |
245f6de1 | 3986 | enum tree_code rhs_code = ERROR_MARK; |
d60edaba | 3987 | bool bit_not_p = false; |
4b84d9b8 JJ |
3988 | struct symbolic_number n; |
3989 | gimple *ins_stmt = NULL; | |
245f6de1 JJ |
3990 | store_operand_info ops[2]; |
3991 | if (invalid) | |
3992 | ; | |
3993 | else if (rhs_valid_for_store_merging_p (rhs)) | |
3994 | { | |
3995 | rhs_code = INTEGER_CST; | |
3996 | ops[0].val = rhs; | |
3997 | } | |
d7a9512e | 3998 | else if (TREE_CODE (rhs) != SSA_NAME) |
245f6de1 JJ |
3999 | invalid = true; |
4000 | else | |
4001 | { | |
4002 | gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2; | |
4003 | if (!is_gimple_assign (def_stmt)) | |
4004 | invalid = true; | |
4005 | else if (handled_load (def_stmt, &ops[0], bitsize, bitpos, | |
4006 | bitregion_start, bitregion_end)) | |
4007 | rhs_code = MEM_REF; | |
d60edaba JJ |
4008 | else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR) |
4009 | { | |
4010 | tree rhs1 = gimple_assign_rhs1 (def_stmt); | |
4011 | if (TREE_CODE (rhs1) == SSA_NAME | |
4012 | && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1))) | |
4013 | { | |
4014 | bit_not_p = true; | |
4015 | def_stmt = SSA_NAME_DEF_STMT (rhs1); | |
4016 | } | |
4017 | } | |
4018 | if (rhs_code == ERROR_MARK && !invalid) | |
245f6de1 JJ |
4019 | switch ((rhs_code = gimple_assign_rhs_code (def_stmt))) |
4020 | { | |
4021 | case BIT_AND_EXPR: | |
4022 | case BIT_IOR_EXPR: | |
4023 | case BIT_XOR_EXPR: | |
4024 | tree rhs1, rhs2; | |
4025 | rhs1 = gimple_assign_rhs1 (def_stmt); | |
4026 | rhs2 = gimple_assign_rhs2 (def_stmt); | |
4027 | invalid = true; | |
d7a9512e | 4028 | if (TREE_CODE (rhs1) != SSA_NAME) |
245f6de1 JJ |
4029 | break; |
4030 | def_stmt1 = SSA_NAME_DEF_STMT (rhs1); | |
4031 | if (!is_gimple_assign (def_stmt1) | |
4032 | || !handled_load (def_stmt1, &ops[0], bitsize, bitpos, | |
4033 | bitregion_start, bitregion_end)) | |
4034 | break; | |
4035 | if (rhs_valid_for_store_merging_p (rhs2)) | |
4036 | ops[1].val = rhs2; | |
d7a9512e | 4037 | else if (TREE_CODE (rhs2) != SSA_NAME) |
245f6de1 JJ |
4038 | break; |
4039 | else | |
4040 | { | |
4041 | def_stmt2 = SSA_NAME_DEF_STMT (rhs2); | |
4042 | if (!is_gimple_assign (def_stmt2)) | |
4043 | break; | |
4044 | else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos, | |
4045 | bitregion_start, bitregion_end)) | |
4046 | break; | |
4047 | } | |
4048 | invalid = false; | |
4049 | break; | |
4050 | default: | |
4051 | invalid = true; | |
4052 | break; | |
4053 | } | |
8a91d545 RS |
4054 | unsigned HOST_WIDE_INT const_bitsize; |
4055 | if (bitsize.is_constant (&const_bitsize) | |
4056 | && multiple_p (const_bitsize, BITS_PER_UNIT) | |
4057 | && multiple_p (bitpos, BITS_PER_UNIT) | |
4058 | && const_bitsize <= 64 | |
4b84d9b8 JJ |
4059 | && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN) |
4060 | { | |
4061 | ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12); | |
4062 | if (ins_stmt) | |
4063 | { | |
4064 | uint64_t nn = n.n; | |
4065 | for (unsigned HOST_WIDE_INT i = 0; | |
8a91d545 RS |
4066 | i < const_bitsize; |
4067 | i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER) | |
4b84d9b8 JJ |
4068 | if ((nn & MARKER_MASK) == 0 |
4069 | || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN) | |
4070 | { | |
4071 | ins_stmt = NULL; | |
4072 | break; | |
4073 | } | |
4074 | if (ins_stmt) | |
4075 | { | |
4076 | if (invalid) | |
4077 | { | |
4078 | rhs_code = LROTATE_EXPR; | |
4079 | ops[0].base_addr = NULL_TREE; | |
4080 | ops[1].base_addr = NULL_TREE; | |
4081 | } | |
4082 | invalid = false; | |
4083 | } | |
4084 | } | |
4085 | } | |
245f6de1 JJ |
4086 | } |
4087 | ||
8a91d545 RS |
4088 | unsigned HOST_WIDE_INT const_bitsize, const_bitpos; |
4089 | unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end; | |
4090 | if (invalid | |
4091 | || !bitsize.is_constant (&const_bitsize) | |
4092 | || !bitpos.is_constant (&const_bitpos) | |
4093 | || !bitregion_start.is_constant (&const_bitregion_start) | |
4094 | || !bitregion_end.is_constant (&const_bitregion_end)) | |
245f6de1 | 4095 | { |
383ac8dc | 4096 | terminate_all_aliasing_chains (NULL, stmt); |
245f6de1 JJ |
4097 | return; |
4098 | } | |
4099 | ||
4b84d9b8 JJ |
4100 | if (!ins_stmt) |
4101 | memset (&n, 0, sizeof (n)); | |
4102 | ||
383ac8dc JJ |
4103 | struct imm_store_chain_info **chain_info = NULL; |
4104 | if (base_addr) | |
4105 | chain_info = m_stores.get (base_addr); | |
4106 | ||
245f6de1 JJ |
4107 | store_immediate_info *info; |
4108 | if (chain_info) | |
4109 | { | |
4110 | unsigned int ord = (*chain_info)->m_store_info.length (); | |
8a91d545 RS |
4111 | info = new store_immediate_info (const_bitsize, const_bitpos, |
4112 | const_bitregion_start, | |
4113 | const_bitregion_end, | |
4114 | stmt, ord, rhs_code, n, ins_stmt, | |
d60edaba | 4115 | bit_not_p, ops[0], ops[1]); |
245f6de1 JJ |
4116 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4117 | { | |
4118 | fprintf (dump_file, "Recording immediate store from stmt:\n"); | |
4119 | print_gimple_stmt (dump_file, stmt, 0); | |
4120 | } | |
4121 | (*chain_info)->m_store_info.safe_push (info); | |
383ac8dc | 4122 | terminate_all_aliasing_chains (chain_info, stmt); |
245f6de1 JJ |
4123 | /* If we reach the limit of stores to merge in a chain, terminate | |
4124 | and process the chain now. */ | |
4125 | if ((*chain_info)->m_store_info.length () | |
4126 | == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE)) | |
4127 | { | |
4128 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4129 | fprintf (dump_file, | |
4130 | "Reached maximum number of statements to merge:\n"); | |
4131 | terminate_and_release_chain (*chain_info); | |
4132 | } | |
4133 | return; | |
4134 | } | |
4135 | ||
4136 | /* Does the store alias any existing chain? */ | |
383ac8dc | 4137 | terminate_all_aliasing_chains (NULL, stmt); |
245f6de1 JJ |
4138 | /* Start a new chain. */ |
4139 | struct imm_store_chain_info *new_chain | |
4140 | = new imm_store_chain_info (m_stores_head, base_addr); | |
8a91d545 RS |
4141 | info = new store_immediate_info (const_bitsize, const_bitpos, |
4142 | const_bitregion_start, | |
4143 | const_bitregion_end, | |
4144 | stmt, 0, rhs_code, n, ins_stmt, | |
d60edaba | 4145 | bit_not_p, ops[0], ops[1]); |
245f6de1 JJ |
4146 | new_chain->m_store_info.safe_push (info); |
4147 | m_stores.put (base_addr, new_chain); | |
4148 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4149 | { | |
4150 | fprintf (dump_file, "Starting new chain with statement:\n"); | |
4151 | print_gimple_stmt (dump_file, stmt, 0); | |
4152 | fprintf (dump_file, "The base object is:\n"); | |
4153 | print_generic_expr (dump_file, base_addr); | |
4154 | fprintf (dump_file, "\n"); | |
4155 | } | |
4156 | } | |
4157 | ||
f663d9ad | 4158 | /* Entry point for the pass. Go over each basic block recording chains of |
245f6de1 JJ |
4159 | immediate stores. Upon encountering a terminating statement (as defined |
4160 | by stmt_terminates_chain_p) process the recorded stores and emit the widened | |
4161 | variants. */ | |
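/* Editor's illustration of chain termination (C source level):
     p[0] = 1; p[1] = 2;
     *(volatile char *) q = 0;
     p[2] = 3; p[3] = 4;
   The volatile access terminates all chains, so p[0]/p[1] are
   processed as one chain and p[2]/p[3] as another.  */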
f663d9ad KT |
4162 | |
4163 | unsigned int | |
4164 | pass_store_merging::execute (function *fun) | |
4165 | { | |
4166 | basic_block bb; | |
4167 | hash_set<gimple *> orig_stmts; | |
4168 | ||
4b84d9b8 JJ |
4169 | calculate_dominance_info (CDI_DOMINATORS); |
4170 | ||
f663d9ad KT |
4171 | FOR_EACH_BB_FN (bb, fun) |
4172 | { | |
4173 | gimple_stmt_iterator gsi; | |
4174 | unsigned HOST_WIDE_INT num_statements = 0; | |
4175 | /* Count the relevant statements to check quickly whether the | |
4176 | block contains at least two worth processing; skip the | |
4177 | block otherwise. */ | |
4178 | for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4179 | { | |
4180 | if (is_gimple_debug (gsi_stmt (gsi))) | |
4181 | continue; | |
4182 | ||
2f391428 | 4183 | if (++num_statements >= 2) |
f663d9ad KT |
4184 | break; |
4185 | } | |
4186 | ||
4187 | if (num_statements < 2) | |
4188 | continue; | |
4189 | ||
4190 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4191 | fprintf (dump_file, "Processing basic block <%d>:\n", bb->index); | |
4192 | ||
4193 | for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4194 | { | |
4195 | gimple *stmt = gsi_stmt (gsi); | |
4196 | ||
50b6d676 AO |
4197 | if (is_gimple_debug (stmt)) |
4198 | continue; | |
4199 | ||
f663d9ad KT |
4200 | if (gimple_has_volatile_ops (stmt)) |
4201 | { | |
4202 | /* Terminate all chains. */ | |
4203 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
4204 | fprintf (dump_file, "Volatile access terminates " | |
4205 | "all chains\n"); | |
4206 | terminate_and_process_all_chains (); | |
4207 | continue; | |
4208 | } | |
4209 | ||
f663d9ad KT |
4210 | if (gimple_assign_single_p (stmt) && gimple_vdef (stmt) |
4211 | && !stmt_can_throw_internal (stmt) | |
4212 | && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))) | |
245f6de1 JJ |
4213 | process_store (stmt); |
4214 | else | |
4215 | terminate_all_aliasing_chains (NULL, stmt); | |
f663d9ad KT |
4216 | } |
4217 | terminate_and_process_all_chains (); | |
4218 | } | |
4219 | return 0; | |
4220 | } | |
4221 | ||
4222 | } // anon namespace | |
4223 | ||
4224 | /* Construct and return a store merging pass object. */ | |
4225 | ||
4226 | gimple_opt_pass * | |
4227 | make_pass_store_merging (gcc::context *ctxt) | |
4228 | { | |
4229 | return new pass_store_merging (ctxt); | |
4230 | } | |
c22d8787 KT |
4231 | |
4232 | #if CHECKING_P | |
4233 | ||
4234 | namespace selftest { | |
4235 | ||
4236 | /* Selftests for store merging helpers. */ | |
4237 | ||
4238 | /* Assert that all elements of the byte arrays X and Y, both of length N | |
4239 | are equal. */ | |
4240 | ||
4241 | static void | |
4242 | verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n) | |
4243 | { | |
4244 | for (unsigned int i = 0; i < n; i++) | |
4245 | { | |
4246 | if (x[i] != y[i]) | |
4247 | { | |
4248 | fprintf (stderr, "Arrays do not match. X:\n"); | |
4249 | dump_char_array (stderr, x, n); | |
4250 | fprintf (stderr, "Y:\n"); | |
4251 | dump_char_array (stderr, y, n); | |
4252 | } | |
4253 | ASSERT_EQ (x[i], y[i]); | |
4254 | } | |
4255 | } | |
4256 | ||
4257 | /* Test shift_bytes_in_array: check that it carries bits across | |
4258 | byte boundaries correctly. */ | |
4259 | ||
4260 | static void | |
4261 | verify_shift_bytes_in_array (void) | |
4262 | { | |
4263 | /* byte 1 | byte 0 | |
4264 | 00011111 | 11100000. */ | |
4265 | unsigned char orig[2] = { 0xe0, 0x1f }; | |
4266 | unsigned char in[2]; | |
4267 | memcpy (in, orig, sizeof orig); | |
4268 | ||
4269 | unsigned char expected[2] = { 0x80, 0x7f }; | |
4270 | shift_bytes_in_array (in, sizeof (in), 2); | |
4271 | verify_array_eq (in, expected, sizeof (in)); | |
4272 | ||
4273 | memcpy (in, orig, sizeof orig); | |
4274 | memcpy (expected, orig, sizeof orig); | |
4275 | /* Check that shifting by zero doesn't change anything. */ | |
4276 | shift_bytes_in_array (in, sizeof (in), 0); | |
4277 | verify_array_eq (in, expected, sizeof (in)); | |
4278 | ||
4279 | } | |
4280 | ||
4281 | /* Test shift_bytes_in_array_right: check that it carries bits across | |
4282 | byte boundaries correctly. */ | |
4283 | ||
4284 | static void | |
4285 | verify_shift_bytes_in_array_right (void) | |
4286 | { | |
4287 | /* byte 0 | byte 1 | |
4288 | 00011111 | 11100000. */ | |
4289 | unsigned char orig[2] = { 0x1f, 0xe0}; | |
4290 | unsigned char in[2]; | |
4291 | memcpy (in, orig, sizeof orig); | |
4292 | unsigned char expected[2] = { 0x07, 0xf8}; | |
4293 | shift_bytes_in_array_right (in, sizeof (in), 2); | |
4294 | verify_array_eq (in, expected, sizeof (in)); | |
4295 | ||
4296 | memcpy (in, orig, sizeof orig); | |
4297 | memcpy (expected, orig, sizeof orig); | |
4298 | /* Check that shifting by zero doesn't change anything. */ | |
4299 | shift_bytes_in_array_right (in, sizeof (in), 0); | |
4300 | verify_array_eq (in, expected, sizeof (in)); | |
4301 | } | |
4302 | ||
4303 | /* Test clear_bit_region: check that it clears exactly the bits asked | |
4304 | for and nothing more. */ | |
4305 | ||
4306 | static void | |
4307 | verify_clear_bit_region (void) | |
4308 | { | |
4309 | /* Start with all bits set and test clearing various patterns in them. */ | |
4310 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; | |
4311 | unsigned char in[3]; | |
4312 | unsigned char expected[3]; | |
4313 | memcpy (in, orig, sizeof in); | |
4314 | ||
4315 | /* Check zeroing out all the bits. */ | |
4316 | clear_bit_region (in, 0, 3 * BITS_PER_UNIT); | |
4317 | expected[0] = expected[1] = expected[2] = 0; | |
4318 | verify_array_eq (in, expected, sizeof in); | |
4319 | ||
4320 | memcpy (in, orig, sizeof in); | |
4321 | /* Leave the first and last bits intact. */ | |
4322 | clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2); | |
4323 | expected[0] = 0x1; | |
4324 | expected[1] = 0; | |
4325 | expected[2] = 0x80; | |
4326 | verify_array_eq (in, expected, sizeof in); | |
4327 | } | |
4328 | ||
4329 | /* Test clear_bit_region_be: check that it clears exactly the bits | |
4330 | asked for and nothing more. */ | |
4331 | ||
4332 | static void | |
4333 | verify_clear_bit_region_be (void) | |
4334 | { | |
4335 | /* Start with all bits set and test clearing various patterns in them. */ | |
4336 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; | |
4337 | unsigned char in[3]; | |
4338 | unsigned char expected[3]; | |
4339 | memcpy (in, orig, sizeof in); | |
4340 | ||
4341 | /* Check zeroing out all the bits. */ | |
4342 | clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT); | |
4343 | expected[0] = expected[1] = expected[2] = 0; | |
4344 | verify_array_eq (in, expected, sizeof in); | |
4345 | ||
4346 | memcpy (in, orig, sizeof in); | |
4347 | /* Leave the first and last bits intact. */ | |
4348 | clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2); | |
4349 | expected[0] = 0x80; | |
4350 | expected[1] = 0; | |
4351 | expected[2] = 0x1; | |
4352 | verify_array_eq (in, expected, sizeof in); | |
4353 | } | |
4354 | ||
4355 | ||
4356 | /* Run all of the selftests within this file. */ | |
4357 | ||
4358 | void | |
4359 | store_merging_c_tests (void) | |
4360 | { | |
4361 | verify_shift_bytes_in_array (); | |
4362 | verify_shift_bytes_in_array_right (); | |
4363 | verify_clear_bit_region (); | |
4364 | verify_clear_bit_region_be (); | |
4365 | } | |
4366 | ||
4367 | } // namespace selftest | |
4368 | #endif /* CHECKING_P. */ |