/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap, this can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap, this can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in a store_operand_info struct.
   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and
   the following one storing a value loaded from memory, or if the loaded
   memory objects are not adjacent, a new merged_store_group is created as
   well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   This phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].
   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set each contiguous memory
   region to the sequence of bytes that corresponds to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example, it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.

   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bit positions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234.

   For big-endian we insert stores to higher bit positions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd.

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little-endian we emit:
   [p]      (32-bit) := 0x56781234; // val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */
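
/* An illustrative source-level example (hypothetical code, not from any
   particular testcase): given

     void f (unsigned char *p)
     {
       p[0] = 1; p[1] = 2; p[2] = 3; p[3] = 4;
     }

   phase 1 records four 8-bit constant stores based on p, phase 2 coalesces
   them into one merged_store_group covering [p : p + 3], and phase 3 emits
   a single 32-bit store of the endianness-dependent constant.  */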

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "params.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "tree-cfg.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "selftest.h"

/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64
namespace {

struct bswap_stat
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - number of expressions N_OPS bitwise ored together to represent
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */

struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};

#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)

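/* For instance, for a 4-byte symbolic number (illustrative): CMPNOP masked
   to 4 markers is 0x04030201, i.e. result byte i comes from source byte i
   (an identity load), while CMPXCHG shifted down to 4 markers is 0x01020304,
   i.e. the source bytes appear in reverse order (a byte swap).  */
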
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}

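/* As a worked example (illustrative): for a 4-byte unsigned symbolic number
   with n->n == 0x04030201, do_shift_rotate (RSHIFT_EXPR, n, 8) turns the
   8-bit shift into a one-marker shift and yields 0x00040302: the low byte
   of the result now comes from source byte 2 and the top byte is known to
   be zero.  */
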
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
				   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
	{
	  poly_offset_int boff = mem_ref_offset (base_addr);
	  boff <<= LOG2_BITS_PER_UNIT;
	  bit_offset += boff;
	}

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
	{
	  tree byte_offset = wide_int_to_tree
	    (sizetype, bits_to_bytes_round_down (bit_offset));
	  bit_offset = num_trailing_bits (bit_offset);
	  if (offset)
	    offset = size_binop (PLUS_EXPR, offset, byte_offset);
	  else
	    offset = byte_offset;
	}

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}

/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
			gimple *source_stmt2, struct symbolic_number *n2,
			struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
	  || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
	return NULL;

      if (!n1->offset != !n2->offset
	  || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
	return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
	return NULL;

      if (start1 < start2)
	{
	  n_start = n1;
	  start_sub = start2 - start1;
	}
      else
	{
	  n_start = n2;
	  start_sub = start1 - start2;
	}

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
	source_stmt = source_stmt1;
      else
	source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
	 compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
	{
	  end = end2;
	  end_sub = end2 - end1;
	}
      else
	{
	  end = end1;
	  end_sub = end1 - end2;
	}
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
	toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
	toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
	 a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
	return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
	 bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
	{
	  unsigned marker
	    = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
	  if (marker && marker != MARKER_BYTE_UNKNOWN)
	    toinc_n_ptr->n += inc;
	}
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
	return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}

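/* Illustrative example of the merge above, on a little-endian target and
   with load16 standing for a hypothetical 16-bit load: for

     ((uint32_t) load16 (&a[0])) | ((uint32_t) load16 (&a[2]) << 16)

   n1 has markers 0x0201 at bytepos 0 and n2 has markers 0x02010000 at
   bytepos 2.  The merge rebases n2's markers by the byte distance 2,
   giving 0x04030000, and ORs the two numbers into 0x04030201, i.e. a
   plain 32-bit load of a[0..3].  */
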
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand, and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
	  && bitsize % BITS_PER_UNIT == 0
	  && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
	{
	  /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
	  if (BYTES_BIG_ENDIAN)
	    bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

	  /* Shift.  */
	  if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
	    return NULL;

	  /* Mask.  */
	  uint64_t mask = 0;
	  uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
	  for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
	       i++, tmp <<= BITS_PER_UNIT)
	    mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
	  n->n &= mask;

	  /* Convert.  */
	  n->type = TREE_TYPE (rhs1);
	  if (!n->base_addr)
	    n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

	  return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
	}

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
	  && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
	  && code != LSHIFT_EXPR
	  && code != RSHIFT_EXPR
	  && code != LROTATE_EXPR
	  && code != RROTATE_EXPR
	  && !CONVERT_EXPR_CODE_P (code))
	return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
	 we have to initialize the symbolic number.  */
      if (!source_stmt1)
	{
	  if (gimple_assign_load_p (stmt)
	      || !init_symbolic_number (n, rhs1))
	    return NULL;
	  source_stmt1 = stmt;
	}

      switch (code)
	{
	case BIT_AND_EXPR:
	  {
	    int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    uint64_t val = int_cst_value (rhs2), mask = 0;
	    uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

	    /* Only constants masking full bytes are allowed.  */
	    for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
	      if ((val & tmp) != 0 && (val & tmp) != tmp)
		return NULL;
	      else if (val & tmp)
		mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

	    n->n &= mask;
	  }
	  break;
	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	case LROTATE_EXPR:
	case RROTATE_EXPR:
	  if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
	    return NULL;
	  break;
	CASE_CONVERT:
	  {
	    int i, type_size, old_type_size;
	    tree type;

	    type = gimple_expr_type (stmt);
	    type_size = TYPE_PRECISION (type);
	    if (type_size % BITS_PER_UNIT != 0)
	      return NULL;
	    type_size /= BITS_PER_UNIT;
	    if (type_size > 64 / BITS_PER_MARKER)
	      return NULL;

	    /* Sign extension: result is dependent on the value.  */
	    old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
	    if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
		&& HEAD_MARKER (n->n, old_type_size))
	      for (i = 0; i < type_size - old_type_size; i++)
		n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
			<< ((type_size - 1 - i) * BITS_PER_MARKER);

	    if (type_size < 64 / BITS_PER_MARKER)
	      {
		/* If STMT casts to a smaller type mask out the bits not
		   belonging to the target type.  */
		n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
	      }
	    n->type = type;
	    if (!n->base_addr)
	      n->range = type_size;
	  }
	  break;
	default:
	  return NULL;
	};
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
	return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
	return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
	{
	case BIT_IOR_EXPR:
	  source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

	  if (!source_stmt1)
	    return NULL;

	  source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

	  if (!source_stmt2)
	    return NULL;

	  if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
	    return NULL;

	  if (n1.vuse != n2.vuse)
	    return NULL;

	  source_stmt
	    = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

	  if (!source_stmt)
	    return NULL;

	  if (!verify_symbolic_number_p (n, stmt))
	    return NULL;

	  break;
	default:
	  return NULL;
	}
      return source_stmt;
    }
  return NULL;
}

/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
			    uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg &= mask;
	  *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
	}
      else
	{
	  mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
	  *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
	  *cmpnop &= mask;
	}
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}

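/* For example (illustrative): for a 2-byte symbolic number with all bytes
   used, this computes *cmpnop == 0x0201 (CMPNOP masked to two markers) and
   *cmpxchg == 0x0102 (CMPXCHG shifted right by six markers), and n->range
   becomes 16 bits.  */
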
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  Finally, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    return NULL;

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}

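/* As an illustrative example, find_bswap_or_nop recognizes hand-written
   byte swaps such as (hypothetical function name):

     uint32_t swap32 (uint32_t x)
     {
       return (x >> 24) | ((x >> 8) & 0xff00)
	      | ((x << 8) & 0xff0000) | (x << 24);
     }

   whose symbolic number ends up equal to CMPXCHG (0x01020304 for four
   bytes), so *BSWAP is set to true and the rhs can be replaced by a
   __builtin_bswap32 call.  */
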
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap

/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one
   of the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */

tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
	       tree bswap_type, tree load_type, struct symbolic_number *n,
	       bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
	gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
	{
	  basic_block ins_bb = gimple_bb (ins_stmt);
	  basic_block cur_bb = gimple_bb (cur_stmt);
	  if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
	    return NULL_TREE;

	  /* Move cur_stmt just before one of the load of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
	  if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
	    reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
	  gsi_move_before (&gsi, &gsi_ins);
	  gsi = gsi_for_stmt (cur_stmt);
	}
      else
	gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
	 of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
	addr_tmp = unshare_expr (addr_expr);
      else
	{
	  addr_tmp = unshare_expr (n->base_addr);
	  if (!is_gimple_mem_ref_addr (addr_tmp))
	    addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
						   is_gimple_mem_ref_addr,
						   NULL_TREE, true,
						   GSI_SAME_STMT);
	  load_offset = n->bytepos;
	  if (n->offset)
	    {
	      tree off
		= force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
					    true, NULL_TREE, true,
					    GSI_SAME_STMT);
	      gimple *stmt
		= gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
				       POINTER_PLUS_EXPR, addr_tmp, off);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	      addr_tmp = gimple_assign_lhs (stmt);
	    }
	}

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
	aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
			      load_offset_ptr);

      if (!bswap)
	{
	  if (n->range == 16)
	    nop_stats.found_16bit++;
	  else if (n->range == 32)
	    nop_stats.found_32bit++;
	  else
	    {
	      gcc_assert (n->range == 64);
	      nop_stats.found_64bit++;
	    }

	  /* Convert the result of load if necessary.  */
	  if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
	    {
	      val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
					    "load_dst");
	      load_stmt = gimple_build_assign (val_tmp, val_expr);
	      gimple_set_vuse (load_stmt, n->vuse);
	      gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
	      update_stmt (cur_stmt);
	    }
	  else if (cur_stmt)
	    {
	      gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      update_stmt (cur_stmt);
	    }
	  else
	    {
	      tgt = make_ssa_name (load_type);
	      cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
	      gimple_set_vuse (cur_stmt, n->vuse);
	      gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
	    }

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "%d bit load in target endianness found at: ",
		       (int) n->range);
	      print_gimple_stmt (dump_file, cur_stmt, 0);
	    }
	  return tgt;
	}
      else
	{
	  val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
	  load_stmt = gimple_build_assign (val_tmp, val_expr);
	  gimple_set_vuse (load_stmt, n->vuse);
	  gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
	}
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g = NULL;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
	{
	  if (!is_gimple_val (src))
	    return NULL_TREE;
	  g = gimple_build_assign (tgt, NOP_EXPR, src);
	}
      else if (cur_stmt)
	g = gimple_build_assign (tgt, src);
      else
	tgt = src;
      if (n->range == 16)
	nop_stats.found_16bit++;
      else if (n->range == 32)
	nop_stats.found_32bit++;
      else
	{
	  gcc_assert (n->range == 64);
	  nop_stats.found_64bit++;
	}
      if (dump_file)
	{
	  fprintf (dump_file,
		   "%d bit reshuffle in target endianness found at: ",
		   (int) n->range);
	  if (cur_stmt)
	    print_gimple_stmt (dump_file, cur_stmt, 0);
	  else
	    {
	      print_generic_expr (dump_file, tgt, TDF_NONE);
	      fprintf (dump_file, "\n");
	    }
	}
      if (cur_stmt)
	gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16-bit bswap is a rotate expression.  Only 16-bit
     values are considered, as rotation of 2N-bit values by N bits is
     generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that
     value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
	       (int) n->range);
      if (cur_stmt)
	print_gimple_stmt (dump_file, cur_stmt, 0);
      else
	{
	  print_generic_expr (dump_file, tgt, TDF_NONE);
	  fprintf (dump_file, "\n");
	}
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}

/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   simple load according to the target endianness.  */

unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	       && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	       && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
		   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
	 widest match.  As bswap pattern matching doesn't handle previously
	 inserted smaller bswap replacements as sub-patterns, the wider
	 variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
	  tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
	  enum tree_code code;
	  struct symbolic_number n;
	  bool bswap;

	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     there makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
	  gsi_prev (&gsi);

	  if (!is_gimple_assign (cur_stmt))
	    continue;

	  code = gimple_assign_rhs_code (cur_stmt);
	  switch (code)
	    {
	    case LROTATE_EXPR:
	    case RROTATE_EXPR:
	      if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
		  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
		     % BITS_PER_UNIT)
		continue;
	      /* Fall through.  */
	    case BIT_IOR_EXPR:
	      break;
	    default:
	      continue;
	    }

	  ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

	  if (!ins_stmt)
	    continue;

	  switch (n.range)
	    {
	    case 16:
	      /* Already in canonical form, nothing to do.  */
	      if (code == LROTATE_EXPR || code == RROTATE_EXPR)
		continue;
	      load_type = bswap_type = uint16_type_node;
	      break;
	    case 32:
	      load_type = uint32_type_node;
	      if (bswap32_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
		  bswap_type = bswap32_type;
		}
	      break;
	    case 64:
	      load_type = uint64_type_node;
	      if (bswap64_p)
		{
		  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
		  bswap_type = bswap64_type;
		}
	      break;
	    default:
	      continue;
	    }

	  if (bswap && !fndecl && n.range != 16)
	    continue;

	  if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
			     bswap_type, load_type, &n, bswap))
	    changed = true;
	}
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
			    nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
			    nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
			    nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
			    bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
			    bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
			    bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}

namespace {

/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */

struct store_operand_info
{
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}

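/* For instance (illustrative), for a store [p] := [q] ^ imm from the file
   comment above, one operand would describe the load of [q] (VAL is the
   MEM_REF and BASE_ADDR etc. are filled in) and the other the constant imm
   (VAL is the INTEGER_CST and all the other fields are zero).  */
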
/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

struct store_immediate_info
{
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy,
     BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
     for bit insertion.
     LROTATE_EXPR if it can only be bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
			gimple *, unsigned int, enum tree_code,
			struct symbolic_number &, gimple *, bool,
			const store_operand_info &,
			const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
					    unsigned HOST_WIDE_INT bp,
					    unsigned HOST_WIDE_INT brs,
					    unsigned HOST_WIDE_INT bre,
					    gimple *st,
					    unsigned int ord,
					    enum tree_code rhscode,
					    struct symbolic_number &nr,
					    gimple *ins_stmtp,
					    bool bitnotp,
					    const store_operand_info &op0r,
					    const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
#if __cplusplus >= 201103L
    , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif

/* Struct representing a group of stores to contiguous memory locations.
   These are produced by the second phase (coalescing) and consumed in the
   third phase that outputs the widened stores.  */

struct merged_store_group
{
  unsigned HOST_WIDE_INT start;
  unsigned HOST_WIDE_INT width;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  /* The size of the allocated memory for val and mask.  */
  unsigned HOST_WIDE_INT buf_size;
  unsigned HOST_WIDE_INT align_base;
  poly_uint64 load_align_base[2];

  unsigned int align;
  unsigned int load_align[2];
  unsigned int first_order;
  unsigned int last_order;
  bool bit_insertion;

  auto_vec<store_immediate_info *> stores;
  /* We record the first and last original statements in the sequence because
     we'll need their vuse/vdef and replacement position.  It's easier to keep
     track of them separately as 'stores' is reordered by apply_stores.  */
  gimple *last_stmt;
  gimple *first_stmt;
  unsigned char *val;
  unsigned char *mask;

  merged_store_group (store_immediate_info *);
  ~merged_store_group ();
  bool can_be_merged_into (store_immediate_info *);
  void merge_into (store_immediate_info *);
  void merge_overlapping (store_immediate_info *);
  bool apply_stores ();
private:
  void do_merge (store_immediate_info *);
};

1450 | /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */ | |
1451 | ||
1452 | static void | |
1453 | dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len) | |
1454 | { | |
1455 | if (!fd) | |
1456 | return; | |
1457 | ||
1458 | for (unsigned int i = 0; i < len; i++) | |
10f0d48d | 1459 | fprintf (fd, "%02x ", ptr[i]); |
3d3e04ac | 1460 | fprintf (fd, "\n"); |
1461 | } | |
1462 | ||
3d3e04ac | 1463 | /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the |
1464 | bits between adjacent elements. AMNT should be within | |
1465 | [0, BITS_PER_UNIT). | |
1466 | Example, AMNT = 2: | |
1467 | 00011111|11100000 << 2 = 01111111|10000000 | |
1468 | PTR[1] | PTR[0] PTR[1] | PTR[0]. */ | |
1469 | ||
1470 | static void | |
1471 | shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt) | |
1472 | { | |
1473 | if (amnt == 0) | |
1474 | return; | |
1475 | ||
1476 | unsigned char carry_over = 0U; | |
b1c71535 | 1477 | unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt); |
3d3e04ac | 1478 | unsigned char clear_mask = (~0U) << amnt; |
1479 | ||
1480 | for (unsigned int i = 0; i < sz; i++) | |
1481 | { | |
1482 | unsigned prev_carry_over = carry_over; | |
b1c71535 | 1483 | carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt); |
3d3e04ac | 1484 | |
1485 | ptr[i] <<= amnt; | |
1486 | if (i != 0) | |
1487 | { | |
1488 | ptr[i] &= clear_mask; | |
1489 | ptr[i] |= prev_carry_over; | |
1490 | } | |
1491 | } | |
1492 | } | |
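/* Editorial aside: a minimal standalone sketch of the loop above, checking
   it against the worked example in the comment.  It assumes 8-bit bytes
   (the literal 8 stands in for BITS_PER_UNIT, which the pass's gate below
   requires to be 8) and is meant to be built as its own program, not as
   part of this file.  */

#include <stdio.h>

int
main (void)
{
  /* buf[1]|buf[0] = 00011111|11100000, as in the example above.  */
  unsigned char buf[2] = { 0xe0, 0x1f };
  unsigned int amnt = 2;
  unsigned char carry_over = 0;
  unsigned char carry_mask = (unsigned char) (0xffu << (8 - amnt));
  unsigned char clear_mask = (unsigned char) (0xffu << amnt);

  for (unsigned int i = 0; i < 2; i++)
    {
      unsigned char prev_carry_over = carry_over;
      carry_over = (buf[i] & carry_mask) >> (8 - amnt);
      buf[i] <<= amnt;
      if (i != 0)
        {
          buf[i] &= clear_mask;
          buf[i] |= prev_carry_over;
        }
    }

  /* Prints "7f 80", i.e. 01111111|10000000.  */
  printf ("%02x %02x\n", buf[1], buf[0]);
  return 0;
}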
1493 | ||
1494 | /* Like shift_bytes_in_array but for big-endian. | |
1495 | Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the | |
1496 | bits between adjacent elements. AMNT should be within | |
1497 | [0, BITS_PER_UNIT). | |
1498 | Example, AMNT = 2: | |
1499 | 00011111|11100000 >> 2 = 00000111|11111000 | |
1500 | PTR[0] | PTR[1] PTR[0] | PTR[1]. */ | |
1501 | ||
1502 | static void | |
1503 | shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz, | |
1504 | unsigned int amnt) | |
1505 | { | |
1506 | if (amnt == 0) | |
1507 | return; | |
1508 | ||
1509 | unsigned char carry_over = 0U; | |
1510 | unsigned char carry_mask = ~(~0U << amnt); | |
1511 | ||
1512 | for (unsigned int i = 0; i < sz; i++) | |
1513 | { | |
1514 | unsigned prev_carry_over = carry_over; | |
b1c71535 | 1515 | carry_over = ptr[i] & carry_mask; |
3d3e04ac | 1516 | |
a425d9af | 1517 | carry_over <<= (unsigned char) BITS_PER_UNIT - amnt; |
1518 | ptr[i] >>= amnt; | |
1519 | ptr[i] |= prev_carry_over; | |
3d3e04ac | 1520 | } |
1521 | } | |
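/* Editorial aside: the companion sketch for the big-endian direction,
   again standalone and assuming 8-bit bytes.  Here the bits that fall
   off the low end of one byte are carried into the high end of the
   following byte.  */

#include <stdio.h>

int
main (void)
{
  /* buf[0]|buf[1] = 00011111|11100000, as in the example above.  */
  unsigned char buf[2] = { 0x1f, 0xe0 };
  unsigned int amnt = 2;
  unsigned char carry_over = 0;
  unsigned char carry_mask = (unsigned char) ~(0xffu << amnt);

  for (unsigned int i = 0; i < 2; i++)
    {
      unsigned char prev_carry_over = carry_over;
      carry_over = (unsigned char) ((buf[i] & carry_mask) << (8 - amnt));
      buf[i] >>= amnt;
      buf[i] |= prev_carry_over;
    }

  /* Prints "07 f8", i.e. 00000111|11111000.  */
  printf ("%02x %02x\n", buf[0], buf[1]);
  return 0;
}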
1522 | ||
1523 | /* Clear out LEN bits starting from bit START in the byte array | |
1524 | PTR. This clears the bits to the *right* from START. | |
1525 | START must be within [0, BITS_PER_UNIT) and counts starting from | |
1526 | the least significant bit. */ | |
1527 | ||
1528 | static void | |
1529 | clear_bit_region_be (unsigned char *ptr, unsigned int start, | |
1530 | unsigned int len) | |
1531 | { | |
1532 | if (len == 0) | |
1533 | return; | |
1534 | /* Clear len bits to the right of start. */ | |
1535 | else if (len <= start + 1) | |
1536 | { | |
1537 | unsigned char mask = (~(~0U << len)); | |
1538 | mask = mask << (start + 1U - len); | |
1539 | ptr[0] &= ~mask; | |
1540 | } | |
1541 | else if (start != BITS_PER_UNIT - 1) | |
1542 | { | |
1543 | clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1); | |
1544 | clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1, | |
1545 | len - (start % BITS_PER_UNIT) - 1); | |
1546 | } | |
1547 | else if (start == BITS_PER_UNIT - 1 | |
1548 | && len > BITS_PER_UNIT) | |
1549 | { | |
1550 | unsigned int nbytes = len / BITS_PER_UNIT; | |
902cb3b7 | 1551 | memset (ptr, 0, nbytes); |
3d3e04ac | 1552 | if (len % BITS_PER_UNIT != 0) |
1553 | clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1, | |
1554 | len % BITS_PER_UNIT); | |
1555 | } | |
1556 | else | |
1557 | gcc_unreachable (); | |
1558 | } | |
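/* Editorial aside: a compact standalone model of clear_bit_region_be.
   The hypothetical clear_be below collapses the whole-byte memset fast
   path into plain recursion but computes the same masks; START is the
   bit index (LSB = 0) of the most significant bit cleared.  Assumes
   8-bit bytes.  */

#include <stdio.h>

static void
clear_be (unsigned char *p, unsigned int start, unsigned int len)
{
  if (len == 0)
    return;
  if (len <= start + 1)
    {
      /* LEN one-bits, slid up so the top one lands on bit START.  */
      unsigned char mask = (unsigned char) ~(0xffu << len);
      p[0] &= (unsigned char) ~(mask << (start + 1 - len));
    }
  else
    {
      clear_be (p, start, start + 1);       /* Bits START..0 of this byte.  */
      clear_be (p + 1, 7, len - start - 1); /* Rest in following bytes.  */
    }
}

int
main (void)
{
  unsigned char buf[2] = { 0xff, 0xff };
  clear_be (buf, 7, 12);  /* Whole first byte plus the next 4 MSBs.  */
  printf ("%02x %02x\n", buf[0], buf[1]);  /* Prints "00 0f".  */
  return 0;
}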
1559 | ||
1560 | /* In the byte array PTR clear the bit region that starts at bit | |
1561 | START and is LEN bits wide. | |
1562 | For regions spanning multiple bytes do this recursively until we reach | |
1563 | zero LEN or a region contained within a single byte. */ | |
1564 | ||
1565 | static void | |
1566 | clear_bit_region (unsigned char *ptr, unsigned int start, | |
1567 | unsigned int len) | |
1568 | { | |
1569 | /* Degenerate base case. */ | |
1570 | if (len == 0) | |
1571 | return; | |
1572 | else if (start >= BITS_PER_UNIT) | |
1573 | clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len); | |
1574 | /* Second base case. */ | |
1575 | else if ((start + len) <= BITS_PER_UNIT) | |
1576 | { | |
b1c71535 | 1577 | unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len); |
3d3e04ac | 1578 | mask >>= BITS_PER_UNIT - (start + len); |
1579 | ||
1580 | ptr[0] &= ~mask; | |
1581 | ||
1582 | return; | |
1583 | } | |
1584 | /* Clear most significant bits in a byte and proceed with the next byte. */ | |
1585 | else if (start != 0) | |
1586 | { | |
1587 | clear_bit_region (ptr, start, BITS_PER_UNIT - start); | |
3d6071e9 | 1588 | clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start)); |
3d3e04ac | 1589 | } |
1590 | /* Whole bytes need to be cleared. */ | |
1591 | else if (start == 0 && len > BITS_PER_UNIT) | |
1592 | { | |
1593 | unsigned int nbytes = len / BITS_PER_UNIT; | |
7839cdcc | 1594 | /* We could recurse on each byte but we clear whole bytes, so a simple |
1595 | memset will do. */ | |
b1c71535 | 1596 | memset (ptr, '\0', nbytes); |
3d3e04ac | 1597 | /* Clear the remaining sub-byte region if there is one. */ |
1598 | if (len % BITS_PER_UNIT != 0) | |
1599 | clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT); | |
1600 | } | |
1601 | else | |
1602 | gcc_unreachable (); | |
1603 | } | |
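/* Editorial aside: a bit-at-a-time reference model of the little-endian
   clearing above; the function gets the same result with per-byte masks
   and a memset fast path.  Standalone, assuming 8-bit bytes.  */

#include <stdio.h>

int
main (void)
{
  unsigned char buf[3] = { 0xff, 0xff, 0xff };
  unsigned int start = 6, len = 9;

  /* Bit K lives in byte K / 8 at position K % 8 (LSB = 0).  */
  for (unsigned int b = start; b < start + len; b++)
    buf[b / 8] &= (unsigned char) ~(1u << (b % 8));

  /* Prints "3f 80 ff": bits 6-7 of byte 0 and 0-6 of byte 1 cleared.  */
  printf ("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
  return 0;
}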
1604 | ||
1605 | /* Write BITLEN bits of EXPR to the byte array PTR at | |
1606 | bit position BITPOS. PTR should contain TOTAL_BYTES elements. | |
1607 | Return true if the operation succeeded. */ | |
1608 | ||
1609 | static bool | |
1610 | encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos, | |
b1c71535 | 1611 | unsigned int total_bytes) |
3d3e04ac | 1612 | { |
1613 | unsigned int first_byte = bitpos / BITS_PER_UNIT; | |
1614 | tree tmp_int = expr; | |
a425d9af | 1615 | bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT) |
1616 | || (bitpos % BITS_PER_UNIT) | |
517be012 | 1617 | || !int_mode_for_size (bitlen, 0).exists ()); |
3d3e04ac | 1618 | |
1619 | if (!sub_byte_op_p) | |
63eabc9b | 1620 | return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0; |
3d3e04ac | 1621 | |
1622 | /* LITTLE-ENDIAN | |
1623 | We are writing a non byte-sized quantity or at a position that is not | |
1624 | at a byte boundary. | |
1625 | |--------|--------|--------| ptr + first_byte | |
1626 | ^ ^ | |
1627 | xxx xxxxxxxx xxx< bp> | |
1628 | |______EXPR____| | |
1629 | ||
b1c71535 | 1630 | First native_encode_expr EXPR into a temporary buffer and shift each |
3d3e04ac | 1631 | byte in the buffer by 'bp' (carrying the bits over as necessary). |
1632 | |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000| | |
1633 | <------bitlen---->< bp> | |
1634 | Then we clear the destination bits: | |
1635 | |---00000|00000000|000-----| ptr + first_byte | |
1636 | <-------bitlen--->< bp> | |
1637 | ||
1638 | Finally we OR the bytes of the shifted EXPR into the cleared region: | |
1639 | |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte. | |
1640 | ||
1641 | BIG-ENDIAN | |
1642 | We are writing a non byte-sized quantity or at a position that is not | |
1643 | at a byte boundary. | |
1644 | ptr + first_byte |--------|--------|--------| | |
1645 | ^ ^ | |
1646 | <bp >xxx xxxxxxxx xxx | |
1647 | |_____EXPR_____| | |
1648 | ||
b1c71535 | 1649 | First native_encode_expr EXPR into a temporary buffer and shift each |
3d3e04ac | 1650 | byte in the buffer to the right (carrying the bits over as necessary). | |
1651 | We shift by as much as needed to align the most significant bit of EXPR | |
1652 | with bitpos: | |
1653 | |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000| | |
1654 | <---bitlen----> <bp ><-----bitlen-----> | |
1655 | Then we clear the destination bits: | |
1656 | ptr + first_byte |-----000||00000000||00000---| | |
1657 | <bp ><-------bitlen-----> | |
1658 | ||
1659 | Finally we OR the bytes of the shifted EXPR into the cleared region: | |
1660 | ptr + first_byte |-----xxx||xxxxxxxx||xxxxx---|. | |
1661 | The awkwardness comes from the fact that bitpos is counted from the | |
1662 | most significant bit of a byte. */ | |
1663 | ||
d2401312 | 1664 | /* We must be dealing with fixed-size data at this point, since the |
1665 | total size is also fixed. */ | |
1666 | fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr))); | |
3d3e04ac | 1667 | /* Allocate an extra byte so that we have space to shift into. */ |
d2401312 | 1668 | unsigned int byte_size = GET_MODE_SIZE (mode) + 1; |
3d3e04ac | 1669 | unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size); |
b1c71535 | 1670 | memset (tmpbuf, '\0', byte_size); |
3d3e04ac | 1671 | /* The store detection code should only have allowed constants that are |
1672 | accepted by native_encode_expr. */ | |
63eabc9b | 1673 | if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0) |
3d3e04ac | 1674 | gcc_unreachable (); |
1675 | ||
1676 | /* The native_encode_expr machinery uses TYPE_MODE to determine how many | |
1677 | bytes to write. This means it can write more than | |
1678 | ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example | |
1679 | write 8 bytes for a bitlen of 40). Skip the bytes that are not within | |
1680 | bitlen and zero out the bits that are not relevant as well (that may | |
1681 | contain a sign bit due to sign-extension). */ | |
1682 | unsigned int padding | |
1683 | = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1; | |
a425d9af | 1684 | /* On big-endian the padding is at the 'front' so just skip the initial |
1685 | bytes. */ | |
1686 | if (BYTES_BIG_ENDIAN) | |
1687 | tmpbuf += padding; | |
1688 | ||
1689 | byte_size -= padding; | |
1690 | ||
1691 | if (bitlen % BITS_PER_UNIT != 0) | |
3d3e04ac | 1692 | { |
5e922e43 | 1693 | if (BYTES_BIG_ENDIAN) |
a425d9af | 1694 | clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1, |
1695 | BITS_PER_UNIT - (bitlen % BITS_PER_UNIT)); | |
1696 | else | |
1697 | clear_bit_region (tmpbuf, bitlen, | |
1698 | byte_size * BITS_PER_UNIT - bitlen); | |
3d3e04ac | 1699 | } |
a425d9af | 1700 | /* Left shifting relies on the last byte being clear if bitlen is |
1701 | a multiple of BITS_PER_UNIT, which might not be the case if | |
1702 | there are padding bytes. */ | |
1703 | else if (!BYTES_BIG_ENDIAN) | |
1704 | tmpbuf[byte_size - 1] = '\0'; | |
3d3e04ac | 1705 | |
1706 | /* Clear the bit region in PTR where the bits from TMPBUF will be | |
b1c71535 | 1707 | inserted into. */ |
3d3e04ac | 1708 | if (BYTES_BIG_ENDIAN) |
1709 | clear_bit_region_be (ptr + first_byte, | |
1710 | BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen); | |
1711 | else | |
1712 | clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen); | |
1713 | ||
1714 | int shift_amnt; | |
1715 | int bitlen_mod = bitlen % BITS_PER_UNIT; | |
1716 | int bitpos_mod = bitpos % BITS_PER_UNIT; | |
1717 | ||
1718 | bool skip_byte = false; | |
1719 | if (BYTES_BIG_ENDIAN) | |
1720 | { | |
1721 | /* BITPOS and BITLEN are exactly aligned and no shifting | |
1722 | is necessary. */ | |
1723 | if (bitpos_mod + bitlen_mod == BITS_PER_UNIT | |
1724 | || (bitpos_mod == 0 && bitlen_mod == 0)) | |
1725 | shift_amnt = 0; | |
1726 | /* |. . . . . . . .| | |
1727 | <bp > <blen >. | |
1728 | We always shift right for BYTES_BIG_ENDIAN so shift the beginning | |
1729 | of the value until it aligns with 'bp' in the next byte over. */ | |
1730 | else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT) | |
1731 | { | |
1732 | shift_amnt = bitlen_mod + bitpos_mod; | |
1733 | skip_byte = bitlen_mod != 0; | |
1734 | } | |
1735 | /* |. . . . . . . .| | |
1736 | <----bp---> | |
1737 | <---blen---->. | |
1738 | Shift the value right within the same byte so it aligns with 'bp'. */ | |
1739 | else | |
1740 | shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT; | |
1741 | } | |
1742 | else | |
1743 | shift_amnt = bitpos % BITS_PER_UNIT; | |
1744 | ||
1745 | /* Create the shifted version of EXPR. */ | |
1746 | if (!BYTES_BIG_ENDIAN) | |
b1c71535 | 1747 | { |
1748 | shift_bytes_in_array (tmpbuf, byte_size, shift_amnt); | |
1749 | if (shift_amnt == 0) | |
1750 | byte_size--; | |
1751 | } | |
3d3e04ac | 1752 | else |
1753 | { | |
1754 | gcc_assert (BYTES_BIG_ENDIAN); | |
1755 | shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt); | |
1756 | /* If shifting right forced us to move into the next byte skip the now | |
1757 | empty byte. */ | |
1758 | if (skip_byte) | |
1759 | { | |
1760 | tmpbuf++; | |
1761 | byte_size--; | |
1762 | } | |
1763 | } | |
1764 | ||
1765 | /* Insert the bits from TMPBUF. */ | |
1766 | for (unsigned int i = 0; i < byte_size; i++) | |
1767 | ptr[first_byte + i] |= tmpbuf[i]; | |
1768 | ||
1769 | return true; | |
1770 | } | |
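/* Editorial aside: a standalone walk-through of the little-endian path
   above for a 9-bit value inserted at bit position 5.  The hypothetical
   shift_left helper plays the role of shift_bytes_in_array, and the
   destination bits are cleared bit-by-bit for brevity where the pass
   uses clear_bit_region.  Assumes 8-bit bytes.  */

#include <stdio.h>

static void
shift_left (unsigned char *p, unsigned int sz, unsigned int amnt)
{
  unsigned char carry = 0;
  if (amnt == 0)
    return;
  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned char next = p[i] >> (8 - amnt);
      p[i] = (unsigned char) ((p[i] << amnt) | carry);
      carry = next;
    }
}

int
main (void)
{
  unsigned char dst[3] = { 0xaa, 0xaa, 0xaa };
  unsigned int bitlen = 9, bitpos = 5;
  unsigned int first_byte = bitpos / 8;

  /* Step 1: the value 0x1ff encoded as little-endian bytes, plus one
     spare byte to shift into (cf. the extra-byte allocation above).  */
  unsigned char tmp[3] = { 0xff, 0x01, 0x00 };

  /* Step 2: align the value with the destination bit position.  */
  shift_left (tmp, 3, bitpos % 8);

  /* Step 3: clear the destination bits.  */
  for (unsigned int b = bitpos; b < bitpos + bitlen; b++)
    dst[b / 8] &= (unsigned char) ~(1u << (b % 8));

  /* Step 4: OR the shifted bytes into place.  */
  for (unsigned int i = 0; i < 3; i++)
    dst[first_byte + i] |= tmp[i];

  /* Prints "ea bf aa".  */
  printf ("%02x %02x %02x\n", dst[0], dst[1], dst[2]);
  return 0;
}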
1771 | ||
1772 | /* Sorting function for store_immediate_info objects. | |
1773 | Sorts them by bitposition. */ | |
1774 | ||
1775 | static int | |
1776 | sort_by_bitpos (const void *x, const void *y) | |
1777 | { | |
1778 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; | |
1779 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; | |
1780 | ||
61d052e5 | 1781 | if ((*tmp)->bitpos < (*tmp2)->bitpos) |
3d3e04ac | 1782 | return -1; |
1783 | else if ((*tmp)->bitpos > (*tmp2)->bitpos) | |
1784 | return 1; | |
61d052e5 | 1785 | else |
ca4982c2 | 1786 | /* If the bit positions are the same, use the order, which is | |
1787 | guaranteed to be different. | |
1788 | return (*tmp)->order - (*tmp2)->order; | |
3d3e04ac | 1789 | } |
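/* Editorial aside: vec::qsort, like C qsort, is not guaranteed to be
   stable, so the fall-back on the order field above is what makes the
   sorted sequence deterministic.  A standalone illustration with a
   hypothetical stand-in struct holding just the two keys:  */

#include <stdio.h>
#include <stdlib.h>

struct rec { unsigned long bitpos; unsigned int order; };

static int
cmp_bitpos (const void *x, const void *y)
{
  const struct rec *a = (const struct rec *) x;
  const struct rec *b = (const struct rec *) y;
  if (a->bitpos != b->bitpos)
    return a->bitpos < b->bitpos ? -1 : 1;
  /* Orders are distinct by construction, so this is a total order.  */
  return (int) a->order - (int) b->order;
}

int
main (void)
{
  struct rec v[3] = { { 32, 2 }, { 0, 1 }, { 0, 0 } };
  qsort (v, 3, sizeof v[0], cmp_bitpos);
  for (int i = 0; i < 3; i++)
    printf ("bitpos %lu order %u\n", v[i].bitpos, v[i].order);
  /* Prints orders 0, 1, 2: equal bit positions tie-break by order.  */
  return 0;
}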
1790 | ||
1791 | /* Sorting function for store_immediate_info objects. | |
1792 | Sorts them by the order field. */ | |
1793 | ||
1794 | static int | |
1795 | sort_by_order (const void *x, const void *y) | |
1796 | { | |
1797 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; | |
1798 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; | |
1799 | ||
1800 | if ((*tmp)->order < (*tmp2)->order) | |
1801 | return -1; | |
1802 | else if ((*tmp)->order > (*tmp2)->order) | |
1803 | return 1; | |
1804 | ||
1805 | gcc_unreachable (); | |
1806 | } | |
1807 | ||
1808 | /* Initialize a merged_store_group object from a store_immediate_info | |
1809 | object. */ | |
1810 | ||
1811 | merged_store_group::merged_store_group (store_immediate_info *info) | |
1812 | { | |
1813 | start = info->bitpos; | |
1814 | width = info->bitsize; | |
902cb3b7 | 1815 | bitregion_start = info->bitregion_start; |
1816 | bitregion_end = info->bitregion_end; | |
3d3e04ac | 1817 | /* VAL has memory allocated for it in apply_stores once the group |
1818 | width has been finalized. */ | |
1819 | val = NULL; | |
902cb3b7 | 1820 | mask = NULL; |
10f0d48d | 1821 | bit_insertion = false; |
902cb3b7 | 1822 | unsigned HOST_WIDE_INT align_bitpos = 0; |
1823 | get_object_alignment_1 (gimple_assign_lhs (info->stmt), | |
1824 | &align, &align_bitpos); | |
1825 | align_base = start - align_bitpos; | |
9991d1d3 | 1826 | for (int i = 0; i < 2; ++i) |
1827 | { | |
1828 | store_operand_info &op = info->ops[i]; | |
1829 | if (op.base_addr == NULL_TREE) | |
1830 | { | |
1831 | load_align[i] = 0; | |
1832 | load_align_base[i] = 0; | |
1833 | } | |
1834 | else | |
1835 | { | |
1836 | get_object_alignment_1 (op.val, &load_align[i], &align_bitpos); | |
1837 | load_align_base[i] = op.bitpos - align_bitpos; | |
1838 | } | |
1839 | } | |
3d3e04ac | 1840 | stores.create (1); |
1841 | stores.safe_push (info); | |
1842 | last_stmt = info->stmt; | |
1843 | last_order = info->order; | |
1844 | first_stmt = last_stmt; | |
1845 | first_order = last_order; | |
1846 | buf_size = 0; | |
1847 | } | |
1848 | ||
1849 | merged_store_group::~merged_store_group () | |
1850 | { | |
1851 | if (val) | |
1852 | XDELETEVEC (val); | |
1853 | } | |
1854 | ||
f9ceb302 | 1855 | /* Return true if the store described by INFO can be merged into the group. */ |
1856 | ||
1857 | bool | |
1858 | merged_store_group::can_be_merged_into (store_immediate_info *info) | |
1859 | { | |
1860 | /* Do not merge bswap patterns. */ | |
1861 | if (info->rhs_code == LROTATE_EXPR) | |
1862 | return false; | |
1863 | ||
1864 | /* The canonical case. */ | |
1865 | if (info->rhs_code == stores[0]->rhs_code) | |
1866 | return true; | |
1867 | ||
1868 | /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */ | |
1869 | if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST) | |
1870 | return true; | |
1871 | ||
1872 | if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST) | |
1873 | return true; | |
1874 | ||
1875 | /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */ | |
1876 | if (info->rhs_code == MEM_REF | |
1877 | && (stores[0]->rhs_code == INTEGER_CST | |
1878 | || stores[0]->rhs_code == BIT_INSERT_EXPR) | |
1879 | && info->bitregion_start == stores[0]->bitregion_start | |
1880 | && info->bitregion_end == stores[0]->bitregion_end) | |
1881 | return true; | |
1882 | ||
1883 | if (stores[0]->rhs_code == MEM_REF | |
1884 | && (info->rhs_code == INTEGER_CST | |
1885 | || info->rhs_code == BIT_INSERT_EXPR) | |
1886 | && info->bitregion_start == stores[0]->bitregion_start | |
1887 | && info->bitregion_end == stores[0]->bitregion_end) | |
1888 | return true; | |
1889 | ||
1890 | return false; | |
1891 | } | |
1892 | ||
902cb3b7 | 1893 | /* Helper method for merge_into and merge_overlapping to do |
1894 | the common part. */ | |
f9ceb302 | 1895 | |
3d3e04ac | 1896 | void |
902cb3b7 | 1897 | merged_store_group::do_merge (store_immediate_info *info) |
3d3e04ac | 1898 | { |
902cb3b7 | 1899 | bitregion_start = MIN (bitregion_start, info->bitregion_start); |
1900 | bitregion_end = MAX (bitregion_end, info->bitregion_end); | |
1901 | ||
1902 | unsigned int this_align; | |
1903 | unsigned HOST_WIDE_INT align_bitpos = 0; | |
1904 | get_object_alignment_1 (gimple_assign_lhs (info->stmt), | |
1905 | &this_align, &align_bitpos); | |
1906 | if (this_align > align) | |
1907 | { | |
1908 | align = this_align; | |
1909 | align_base = info->bitpos - align_bitpos; | |
1910 | } | |
9991d1d3 | 1911 | for (int i = 0; i < 2; ++i) |
1912 | { | |
1913 | store_operand_info &op = info->ops[i]; | |
1914 | if (!op.base_addr) | |
1915 | continue; | |
1916 | ||
1917 | get_object_alignment_1 (op.val, &this_align, &align_bitpos); | |
1918 | if (this_align > load_align[i]) | |
1919 | { | |
1920 | load_align[i] = this_align; | |
1921 | load_align_base[i] = op.bitpos - align_bitpos; | |
1922 | } | |
1923 | } | |
3d3e04ac | 1924 | |
3d3e04ac | 1925 | gimple *stmt = info->stmt; |
1926 | stores.safe_push (info); | |
1927 | if (info->order > last_order) | |
1928 | { | |
1929 | last_order = info->order; | |
1930 | last_stmt = stmt; | |
1931 | } | |
1932 | else if (info->order < first_order) | |
1933 | { | |
1934 | first_order = info->order; | |
1935 | first_stmt = stmt; | |
1936 | } | |
1937 | } | |
1938 | ||
902cb3b7 | 1939 | /* Merge a store recorded by INFO into this merged store. |
1940 | The store must not overlap with the existing recorded | |
1941 | stores. */ | |
1942 | ||
1943 | void | |
1944 | merged_store_group::merge_into (store_immediate_info *info) | |
1945 | { | |
902cb3b7 | 1946 | /* Make sure we're inserting in the position we think we're inserting. */ |
1947 | gcc_assert (info->bitpos >= start + width | |
1948 | && info->bitregion_start <= bitregion_end); | |
1949 | ||
cf5e422b | 1950 | width = info->bitpos + info->bitsize - start; |
902cb3b7 | 1951 | do_merge (info); |
1952 | } | |
1953 | ||
3d3e04ac | 1954 | /* Merge a store described by INFO into this merged store. |
1955 | INFO overlaps in some way with the current store (i.e. it's not contiguous | |
1956 | with it; the contiguous case is handled by merged_store_group::merge_into). | |
1957 | ||
1958 | void | |
1959 | merged_store_group::merge_overlapping (store_immediate_info *info) | |
1960 | { | |
3d3e04ac | 1961 | /* If the store extends the size of the group, extend the width. */ |
902cb3b7 | 1962 | if (info->bitpos + info->bitsize > start + width) |
cf5e422b | 1963 | width = info->bitpos + info->bitsize - start; |
3d3e04ac | 1964 | |
902cb3b7 | 1965 | do_merge (info); |
3d3e04ac | 1966 | } |
1967 | ||
1968 | /* Go through all the recorded stores in this group in program order and | |
1969 | apply their values to the VAL byte array to create the final merged | |
1970 | value. Return true if the operation succeeded. */ | |
1971 | ||
1972 | bool | |
1973 | merged_store_group::apply_stores () | |
1974 | { | |
902cb3b7 | 1975 | /* Make sure the bit region is byte-aligned and that there is more than | |
1976 | one store in the group, otherwise there is nothing to merge. | |
1977 | if (bitregion_start % BITS_PER_UNIT != 0 | |
1978 | || bitregion_end % BITS_PER_UNIT != 0 | |
3d3e04ac | 1979 | || stores.length () == 1) |
1980 | return false; | |
1981 | ||
1982 | stores.qsort (sort_by_order); | |
902cb3b7 | 1983 | store_immediate_info *info; |
3d3e04ac | 1984 | unsigned int i; |
10f0d48d | 1985 | /* Create a power-of-2-sized buffer for native_encode_expr. */ |
1986 | buf_size = 1 << ceil_log2 ((bitregion_end - bitregion_start) / BITS_PER_UNIT); | |
902cb3b7 | 1987 | val = XNEWVEC (unsigned char, 2 * buf_size); |
1988 | mask = val + buf_size; | |
1989 | memset (val, 0, buf_size); | |
1990 | memset (mask, ~0U, buf_size); | |
3d3e04ac | 1991 | |
1992 | FOR_EACH_VEC_ELT (stores, i, info) | |
1993 | { | |
902cb3b7 | 1994 | unsigned int pos_in_buffer = info->bitpos - bitregion_start; |
10f0d48d | 1995 | tree cst; |
9991d1d3 | 1996 | if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE) |
1997 | cst = info->ops[0].val; | |
1998 | else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE) | |
1999 | cst = info->ops[1].val; | |
10f0d48d | 2000 | else |
2001 | cst = NULL_TREE; | |
9991d1d3 | 2002 | bool ret = true; |
2003 | if (cst) | |
10f0d48d | 2004 | { |
2005 | if (info->rhs_code == BIT_INSERT_EXPR) | |
2006 | bit_insertion = true; | |
2007 | else | |
2008 | ret = encode_tree_to_bitpos (cst, val, info->bitsize, | |
2009 | pos_in_buffer, buf_size); | |
2010 | } | |
2011 | unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT); | |
2012 | if (BYTES_BIG_ENDIAN) | |
2013 | clear_bit_region_be (m, (BITS_PER_UNIT - 1 | |
2014 | - (pos_in_buffer % BITS_PER_UNIT)), | |
2015 | info->bitsize); | |
2016 | else | |
2017 | clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize); | |
9991d1d3 | 2018 | if (cst && dump_file && (dump_flags & TDF_DETAILS)) |
3d3e04ac | 2019 | { |
2020 | if (ret) | |
2021 | { | |
10f0d48d | 2022 | fputs ("After writing ", dump_file); |
54e7de93 | 2023 | print_generic_expr (dump_file, cst, TDF_NONE); |
3d3e04ac | 2024 | fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC |
10f0d48d | 2025 | " at position %d\n", info->bitsize, pos_in_buffer); |
2026 | fputs (" the merged value contains ", dump_file); | |
3d3e04ac | 2027 | dump_char_array (dump_file, val, buf_size); |
10f0d48d | 2028 | fputs (" the merged mask contains ", dump_file); |
2029 | dump_char_array (dump_file, mask, buf_size); | |
2030 | if (bit_insertion) | |
2031 | fputs (" bit insertion is required\n", dump_file); | |
3d3e04ac | 2032 | } |
2033 | else | |
2034 | fprintf (dump_file, "Failed to merge stores\n"); | |
509ab8cd | 2035 | } |
3d3e04ac | 2036 | if (!ret) |
2037 | return false; | |
2038 | } | |
509ab8cd | 2039 | stores.qsort (sort_by_bitpos); |
3d3e04ac | 2040 | return true; |
2041 | } | |
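/* Editorial aside: a standalone picture of the VAL/MASK pair the loop
   above builds, for two hypothetical constant stores into a 16-bit
   region: the byte 0x12 at bit position 0 and a 4-bit field 0x5 at bit
   position 8.  Mask bits left set mark bits the widened store must
   preserve from memory (forcing a read-modify-write unless the whole
   region was written).  */

#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned char val[2], mask[2];
  memset (val, 0, 2);
  memset (mask, 0xff, 2);	/* ~0: nothing written yet.  */

  /* Store 1: constant 0x12, bitsize 8, bitpos 0.  */
  val[0] = 0x12;
  mask[0] &= (unsigned char) ~0xffu;

  /* Store 2: constant 0x5, bitsize 4, bitpos 8.  */
  val[1] |= 0x05;
  mask[1] &= (unsigned char) ~0x0fu;

  /* Prints "val 12 05 / mask 00 f0": the top nibble of the second byte
     still comes from memory.  */
  printf ("val  %02x %02x\nmask %02x %02x\n",
	  val[0], val[1], mask[0], mask[1]);
  return 0;
}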
2042 | ||
2043 | /* Structure describing the store chain. */ | |
2044 | ||
2045 | struct imm_store_chain_info | |
2046 | { | |
3a3ba7de | 2047 | /* Doubly-linked list that imposes an order on chain processing. |
2048 | PNXP (prev's next pointer) points to the head of a list, or to | |
2049 | the next field in the previous chain in the list. | |
2050 | See pass_store_merging::m_stores_head for more rationale. */ | |
2051 | imm_store_chain_info *next, **pnxp; | |
f85e7cb7 | 2052 | tree base_addr; |
902cb3b7 | 2053 | auto_vec<store_immediate_info *> m_store_info; |
3d3e04ac | 2054 | auto_vec<merged_store_group *> m_merged_store_groups; |
2055 | ||
3a3ba7de | 2056 | imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a) |
2057 | : next (inspt), pnxp (&inspt), base_addr (b_a) | |
2058 | { | |
2059 | inspt = this; | |
2060 | if (next) | |
2061 | { | |
2062 | gcc_checking_assert (pnxp == next->pnxp); | |
2063 | next->pnxp = &next; | |
2064 | } | |
2065 | } | |
2066 | ~imm_store_chain_info () | |
2067 | { | |
2068 | *pnxp = next; | |
2069 | if (next) | |
2070 | { | |
2071 | gcc_checking_assert (&next == next->pnxp); | |
2072 | next->pnxp = pnxp; | |
2073 | } | |
2074 | } | |
f85e7cb7 | 2075 | bool terminate_and_process_chain (); |
509ab8cd | 2076 | bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int); |
3d3e04ac | 2077 | bool coalesce_immediate_stores (); |
f85e7cb7 | 2078 | bool output_merged_store (merged_store_group *); |
2079 | bool output_merged_stores (); | |
3d3e04ac | 2080 | }; |
2081 | ||
2082 | const pass_data pass_data_tree_store_merging = { | |
2083 | GIMPLE_PASS, /* type */ | |
2084 | "store-merging", /* name */ | |
2085 | OPTGROUP_NONE, /* optinfo_flags */ | |
2086 | TV_GIMPLE_STORE_MERGING, /* tv_id */ | |
2087 | PROP_ssa, /* properties_required */ | |
2088 | 0, /* properties_provided */ | |
2089 | 0, /* properties_destroyed */ | |
2090 | 0, /* todo_flags_start */ | |
2091 | TODO_update_ssa, /* todo_flags_finish */ | |
2092 | }; | |
2093 | ||
2094 | class pass_store_merging : public gimple_opt_pass | |
2095 | { | |
2096 | public: | |
2097 | pass_store_merging (gcc::context *ctxt) | |
2d27e5c1 | 2098 | : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head () |
3d3e04ac | 2099 | { |
2100 | } | |
2101 | ||
10f0d48d | 2102 | /* Pass not supported for PDP-endian, nor for insane hosts or |
2103 | target character sizes where native_{encode,interpret}_expr | |
902cb3b7 | 2104 | doesn't work properly. */ |
3d3e04ac | 2105 | virtual bool |
2106 | gate (function *) | |
2107 | { | |
902cb3b7 | 2108 | return flag_store_merging |
10f0d48d | 2109 | && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN |
902cb3b7 | 2110 | && CHAR_BIT == 8 |
2111 | && BITS_PER_UNIT == 8; | |
3d3e04ac | 2112 | } |
2113 | ||
2114 | virtual unsigned int execute (function *); | |
2115 | ||
2116 | private: | |
2117 | hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores; | |
2118 | ||
3a3ba7de | 2119 | /* Form a doubly-linked stack of the elements of m_stores, so that |
2120 | we can iterate over them in a predictable way. Using this order | |
2121 | avoids extraneous differences in the compiler output just because | |
2122 | of tree pointer variations (e.g. different chains end up in | |
2123 | different positions of m_stores, so they are handled in different | |
2124 | orders, so they allocate or release SSA names in different | |
2125 | orders, and when they get reused, subsequent passes end up | |
2126 | getting different SSA names, which may ultimately change | |
2127 | decisions when going out of SSA). */ | |
2128 | imm_store_chain_info *m_stores_head; | |
2129 | ||
9991d1d3 | 2130 | void process_store (gimple *); |
3d3e04ac | 2131 | bool terminate_and_process_all_chains (); |
c35548ce | 2132 | bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *); |
f85e7cb7 | 2133 | bool terminate_and_release_chain (imm_store_chain_info *); |
3d3e04ac | 2134 | }; // class pass_store_merging |
2135 | ||
2136 | /* Terminate and process all recorded chains. Return true if any changes | |
2137 | were made. */ | |
2138 | ||
2139 | bool | |
2140 | pass_store_merging::terminate_and_process_all_chains () | |
2141 | { | |
3d3e04ac | 2142 | bool ret = false; |
3a3ba7de | 2143 | while (m_stores_head) |
2144 | ret |= terminate_and_release_chain (m_stores_head); | |
2145 | gcc_assert (m_stores.elements () == 0); | |
2146 | gcc_assert (m_stores_head == NULL); | |
3d3e04ac | 2147 | |
2148 | return ret; | |
2149 | } | |
2150 | ||
c35548ce | 2151 | /* Terminate all chains that are affected by the statement STMT. |
2152 | CHAIN_INFO, if non-NULL, is a chain that has already been checked and | |
2153 | should be skipped. | |
3d3e04ac | 2154 | |
2155 | bool | |
4de7f8df | 2156 | pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info |
f85e7cb7 | 2157 | **chain_info, |
3d3e04ac | 2158 | gimple *stmt) |
2159 | { | |
2160 | bool ret = false; | |
2161 | ||
2162 | /* If the statement doesn't touch memory it can't alias. */ | |
2163 | if (!gimple_vuse (stmt)) | |
2164 | return false; | |
2165 | ||
9fead2ab | 2166 | tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE; |
c35548ce | 2167 | for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next) |
3d3e04ac | 2168 | { |
c35548ce | 2169 | next = cur->next; |
2170 | ||
2171 | /* We already checked all the stores in chain_info and terminated the | |
2172 | chain if necessary. Skip it here. */ | |
2173 | if (chain_info && *chain_info == cur) | |
2174 | continue; | |
2175 | ||
9991d1d3 | 2176 | store_immediate_info *info; |
2177 | unsigned int i; | |
c35548ce | 2178 | FOR_EACH_VEC_ELT (cur->m_store_info, i, info) |
3d3e04ac | 2179 | { |
9fead2ab | 2180 | tree lhs = gimple_assign_lhs (info->stmt); |
2181 | if (ref_maybe_used_by_stmt_p (stmt, lhs) | |
2182 | || stmt_may_clobber_ref_p (stmt, lhs) | |
2183 | || (store_lhs && refs_output_dependent_p (store_lhs, lhs))) | |
3d3e04ac | 2184 | { |
9991d1d3 | 2185 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3d3e04ac | 2186 | { |
9991d1d3 | 2187 | fprintf (dump_file, "stmt causes chain termination:\n"); |
2188 | print_gimple_stmt (dump_file, stmt, 0); | |
3d3e04ac | 2189 | } |
c35548ce | 2190 | terminate_and_release_chain (cur); |
9991d1d3 | 2191 | ret = true; |
2192 | break; | |
3d3e04ac | 2193 | } |
2194 | } | |
2195 | } | |
2196 | ||
3d3e04ac | 2197 | return ret; |
2198 | } | |
2199 | ||
2200 | /* Helper function. Terminate the recorded chain CHAIN_INFO. Return true | |
2201 | if the merging and output were successful. The m_stores | |
2202 | entry is removed after the processing in any case. */ | |
2203 | ||
2204 | bool | |
f85e7cb7 | 2205 | pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info) |
3d3e04ac | 2206 | { |
f85e7cb7 | 2207 | bool ret = chain_info->terminate_and_process_chain (); |
2208 | m_stores.remove (chain_info->base_addr); | |
2209 | delete chain_info; | |
3d3e04ac | 2210 | return ret; |
2211 | } | |
2212 | ||
9991d1d3 | 2213 | /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive) |
2214 | may clobber REF. FIRST and LAST must be in the same basic block and | |
509ab8cd | 2215 | have non-NULL vdef. We want to be able to sink a load of REF across | |
2216 | stores between FIRST and LAST, up to right before LAST. */ | |
9991d1d3 | 2217 | |
2218 | bool | |
2219 | stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref) | |
2220 | { | |
2221 | ao_ref r; | |
2222 | ao_ref_init (&r, ref); | |
2223 | unsigned int count = 0; | |
2224 | tree vop = gimple_vdef (last); | |
2225 | gimple *stmt; | |
2226 | ||
2227 | gcc_checking_assert (gimple_bb (first) == gimple_bb (last)); | |
2228 | do | |
2229 | { | |
2230 | stmt = SSA_NAME_DEF_STMT (vop); | |
2231 | if (stmt_may_clobber_ref_p_1 (stmt, &r)) | |
2232 | return true; | |
509ab8cd | 2233 | if (gimple_store_p (stmt) |
2234 | && refs_anti_dependent_p (ref, gimple_get_lhs (stmt))) | |
2235 | return true; | |
9991d1d3 | 2236 | /* Avoid quadratic compile time by bounding the number of checks |
2237 | we perform. */ | |
2238 | if (++count > MAX_STORE_ALIAS_CHECKS) | |
2239 | return true; | |
2240 | vop = gimple_vuse (stmt); | |
2241 | } | |
2242 | while (stmt != first); | |
2243 | return false; | |
2244 | } | |
2245 | ||
2246 | /* Return true if INFO->ops[IDX] is mergeable with the | |
2247 | corresponding loads already in MERGED_STORE group. | |
2248 | BASE_ADDR is the base address of the whole store group. */ | |
2249 | ||
2250 | bool | |
2251 | compatible_load_p (merged_store_group *merged_store, | |
2252 | store_immediate_info *info, | |
2253 | tree base_addr, int idx) | |
2254 | { | |
2255 | store_immediate_info *infof = merged_store->stores[0]; | |
2256 | if (!info->ops[idx].base_addr | |
e61263f2 | 2257 | || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos, |
2258 | info->bitpos - infof->bitpos) | |
9991d1d3 | 2259 | || !operand_equal_p (info->ops[idx].base_addr, |
2260 | infof->ops[idx].base_addr, 0)) | |
2261 | return false; | |
2262 | ||
2263 | store_immediate_info *infol = merged_store->stores.last (); | |
2264 | tree load_vuse = gimple_vuse (info->ops[idx].stmt); | |
2265 | /* In this case all vuses should be the same, e.g. | |
2266 | _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4; | |
2267 | or | |
2268 | _1 = s.a; _2 = s.b; t.a = _1; t.b = _2; | |
2269 | and we can emit the coalesced load next to any of those loads. */ | |
2270 | if (gimple_vuse (infof->ops[idx].stmt) == load_vuse | |
2271 | && gimple_vuse (infol->ops[idx].stmt) == load_vuse) | |
2272 | return true; | |
2273 | ||
2274 | /* Otherwise, at least for now require that the load has the same | |
2275 | vuse as the store. See following examples. */ | |
2276 | if (gimple_vuse (info->stmt) != load_vuse) | |
2277 | return false; | |
2278 | ||
2279 | if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt) | |
2280 | || (infof != infol | |
2281 | && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt))) | |
2282 | return false; | |
2283 | ||
2284 | /* If the load is from the same location as the store, already | |
2285 | the construction of the immediate chain info guarantees no intervening | |
2286 | stores, so no further checks are needed. Example: | |
2287 | _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */ | |
e61263f2 | 2288 | if (known_eq (info->ops[idx].bitpos, info->bitpos) |
9991d1d3 | 2289 | && operand_equal_p (info->ops[idx].base_addr, base_addr, 0)) |
2290 | return true; | |
2291 | ||
2292 | /* Otherwise, we need to punt if any of the loads can be clobbered by any | |
2293 | of the stores in the group, or any other stores in between those. | |
2294 | Previous calls to compatible_load_p ensured that for all the | |
2295 | merged_store->stores IDX loads, no stmts starting with | |
2296 | merged_store->first_stmt and ending right before merged_store->last_stmt | |
2297 | clobbers those loads. */ | |
2298 | gimple *first = merged_store->first_stmt; | |
2299 | gimple *last = merged_store->last_stmt; | |
2300 | unsigned int i; | |
2301 | store_immediate_info *infoc; | |
2302 | /* The stores are sorted by increasing store bitpos, so if info->stmt store | |
2303 | comes before the so far first load, we'll be changing | |
2304 | merged_store->first_stmt. In that case we need to give up if | |
2305 | any of the earlier processed loads may be clobbered by the stmts in the new | |
2306 | range. */ | |
2307 | if (info->order < merged_store->first_order) | |
2308 | { | |
2309 | FOR_EACH_VEC_ELT (merged_store->stores, i, infoc) | |
2310 | if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val)) | |
2311 | return false; | |
2312 | first = info->stmt; | |
2313 | } | |
2314 | /* Similarly, we could change merged_store->last_stmt, so ensure | |
2315 | in that case no stmts in the new range clobber any of the earlier | |
2316 | processed loads. */ | |
2317 | else if (info->order > merged_store->last_order) | |
2318 | { | |
2319 | FOR_EACH_VEC_ELT (merged_store->stores, i, infoc) | |
2320 | if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val)) | |
2321 | return false; | |
2322 | last = info->stmt; | |
2323 | } | |
2324 | /* And finally, we'd be adding a new load to the set, ensure it isn't | |
2325 | clobbered in the new range. */ | |
2326 | if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val)) | |
2327 | return false; | |
2328 | ||
2329 | /* Otherwise, we are looking for: | |
2330 | _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4; | |
2331 | or | |
2332 | _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */ | |
2333 | return true; | |
2334 | } | |
2335 | ||
509ab8cd | 2336 | /* Add all refs loaded to compute VAL to REFS vector. */ |
2337 | ||
2338 | void | |
2339 | gather_bswap_load_refs (vec<tree> *refs, tree val) | |
2340 | { | |
2341 | if (TREE_CODE (val) != SSA_NAME) | |
2342 | return; | |
2343 | ||
2344 | gimple *stmt = SSA_NAME_DEF_STMT (val); | |
2345 | if (!is_gimple_assign (stmt)) | |
2346 | return; | |
2347 | ||
2348 | if (gimple_assign_load_p (stmt)) | |
2349 | { | |
2350 | refs->safe_push (gimple_assign_rhs1 (stmt)); | |
2351 | return; | |
2352 | } | |
2353 | ||
2354 | switch (gimple_assign_rhs_class (stmt)) | |
2355 | { | |
2356 | case GIMPLE_BINARY_RHS: | |
2357 | gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt)); | |
2358 | /* FALLTHRU */ | |
2359 | case GIMPLE_UNARY_RHS: | |
2360 | gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt)); | |
2361 | break; | |
2362 | default: | |
2363 | gcc_unreachable (); | |
2364 | } | |
2365 | } | |
2366 | ||
cf5e422b | 2367 | /* Check if there are any stores in M_STORE_INFO after index I |
2368 | (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap | |
2369 | a potential group ending at END and that have their order | |
2370 | smaller than LAST_ORDER. RHS_CODE is the kind of store in the | |
2371 | group. Return true if there are no such stores. | |
2372 | Consider: | |
2373 | MEM[(long long int *)p_28] = 0; | |
2374 | MEM[(long long int *)p_28 + 8B] = 0; | |
2375 | MEM[(long long int *)p_28 + 16B] = 0; | |
2376 | MEM[(long long int *)p_28 + 24B] = 0; | |
2377 | _129 = (int) _130; | |
2378 | MEM[(int *)p_28 + 8B] = _129; | |
2379 | MEM[(int *)p_28].a = -1; | |
2380 | We already have | |
2381 | MEM[(long long int *)p_28] = 0; | |
2382 | MEM[(int *)p_28].a = -1; | |
2383 | stmts in the current group and need to consider if it is safe to | |
2384 | add MEM[(long long int *)p_28 + 8B] = 0; store into the same group. | |
2385 | There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129; | |
2386 | store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0; | |
2387 | into the group and merging of those 3 stores is successful, merged | |
2388 | stmts will be emitted at the latest store from that group, i.e. | |
2389 | LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store. | |
2390 | The MEM[(int *)p_28 + 8B] = _129; store that originally follows | |
2391 | the MEM[(long long int *)p_28 + 8B] = 0; would now be before it, | |
2392 | so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0; | |
2393 | into the group. That way it will be its own store group and will | |
2394 | not be touched. If RHS_CODE is INTEGER_CST and there are overlapping | |
2395 | INTEGER_CST stores, those are mergeable using merge_overlapping, | |
2396 | so don't return false for those. */ | |
2397 | ||
2398 | static bool | |
2399 | check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i, | |
2400 | enum tree_code rhs_code, unsigned int last_order, | |
2401 | unsigned HOST_WIDE_INT end) | |
2402 | { | |
2403 | unsigned int len = m_store_info.length (); | |
2404 | for (++i; i < len; ++i) | |
2405 | { | |
2406 | store_immediate_info *info = m_store_info[i]; | |
2407 | if (info->bitpos >= end) | |
2408 | break; | |
2409 | if (info->order < last_order | |
2410 | && (rhs_code != INTEGER_CST || info->rhs_code != INTEGER_CST)) | |
2411 | return false; | |
2412 | } | |
2413 | return true; | |
2414 | } | |
2415 | ||
509ab8cd | 2416 | /* Return true if m_store_info[first] and at least one following store |
2417 | form a group which stores a try_size bitsize value that is byte-swapped | |
2418 | from a memory load or from some value, or is the identity of some value. | |
2419 | This uses the bswap pass APIs. */ | |
2420 | ||
2421 | bool | |
2422 | imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store, | |
2423 | unsigned int first, | |
2424 | unsigned int try_size) | |
2425 | { | |
2426 | unsigned int len = m_store_info.length (), last = first; | |
2427 | unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize; | |
2428 | if (width >= try_size) | |
2429 | return false; | |
2430 | for (unsigned int i = first + 1; i < len; ++i) | |
2431 | { | |
2432 | if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width | |
2433 | || m_store_info[i]->ins_stmt == NULL) | |
2434 | return false; | |
2435 | width += m_store_info[i]->bitsize; | |
2436 | if (width >= try_size) | |
2437 | { | |
2438 | last = i; | |
2439 | break; | |
2440 | } | |
2441 | } | |
2442 | if (width != try_size) | |
2443 | return false; | |
2444 | ||
2445 | bool allow_unaligned | |
2446 | = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED); | |
2447 | /* Punt if the combined store would not be aligned and we need alignment. */ | |
2448 | if (!allow_unaligned) | |
2449 | { | |
2450 | unsigned int align = merged_store->align; | |
2451 | unsigned HOST_WIDE_INT align_base = merged_store->align_base; | |
2452 | for (unsigned int i = first + 1; i <= last; ++i) | |
2453 | { | |
2454 | unsigned int this_align; | |
2455 | unsigned HOST_WIDE_INT align_bitpos = 0; | |
2456 | get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt), | |
2457 | &this_align, &align_bitpos); | |
2458 | if (this_align > align) | |
2459 | { | |
2460 | align = this_align; | |
2461 | align_base = m_store_info[i]->bitpos - align_bitpos; | |
2462 | } | |
2463 | } | |
2464 | unsigned HOST_WIDE_INT align_bitpos | |
2465 | = (m_store_info[first]->bitpos - align_base) & (align - 1); | |
2466 | if (align_bitpos) | |
2467 | align = least_bit_hwi (align_bitpos); | |
2468 | if (align < try_size) | |
2469 | return false; | |
2470 | } | |
2471 | ||
2472 | tree type; | |
2473 | switch (try_size) | |
2474 | { | |
2475 | case 16: type = uint16_type_node; break; | |
2476 | case 32: type = uint32_type_node; break; | |
2477 | case 64: type = uint64_type_node; break; | |
2478 | default: gcc_unreachable (); | |
2479 | } | |
2480 | struct symbolic_number n; | |
2481 | gimple *ins_stmt = NULL; | |
2482 | int vuse_store = -1; | |
2483 | unsigned int first_order = merged_store->first_order; | |
2484 | unsigned int last_order = merged_store->last_order; | |
2485 | gimple *first_stmt = merged_store->first_stmt; | |
2486 | gimple *last_stmt = merged_store->last_stmt; | |
cf5e422b | 2487 | unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width; |
509ab8cd | 2488 | store_immediate_info *infof = m_store_info[first]; |
2489 | ||
2490 | for (unsigned int i = first; i <= last; ++i) | |
2491 | { | |
2492 | store_immediate_info *info = m_store_info[i]; | |
2493 | struct symbolic_number this_n = info->n; | |
2494 | this_n.type = type; | |
2495 | if (!this_n.base_addr) | |
2496 | this_n.range = try_size / BITS_PER_UNIT; | |
58cff6a2 | 2497 | else |
2498 | /* Update vuse in case it has changed by output_merged_stores. */ | |
2499 | this_n.vuse = gimple_vuse (info->ins_stmt); | |
509ab8cd | 2500 | unsigned int bitpos = info->bitpos - infof->bitpos; |
2501 | if (!do_shift_rotate (LSHIFT_EXPR, &this_n, | |
2502 | BYTES_BIG_ENDIAN | |
2503 | ? try_size - info->bitsize - bitpos | |
2504 | : bitpos)) | |
2505 | return false; | |
1636105f | 2506 | if (this_n.base_addr && vuse_store) |
509ab8cd | 2507 | { |
2508 | unsigned int j; | |
2509 | for (j = first; j <= last; ++j) | |
2510 | if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt)) | |
2511 | break; | |
2512 | if (j > last) | |
2513 | { | |
2514 | if (vuse_store == 1) | |
2515 | return false; | |
2516 | vuse_store = 0; | |
2517 | } | |
2518 | } | |
2519 | if (i == first) | |
2520 | { | |
2521 | n = this_n; | |
2522 | ins_stmt = info->ins_stmt; | |
2523 | } | |
2524 | else | |
2525 | { | |
cf5e422b | 2526 | if (n.base_addr && n.vuse != this_n.vuse) |
509ab8cd | 2527 | { |
cf5e422b | 2528 | if (vuse_store == 0) |
2529 | return false; | |
2530 | vuse_store = 1; | |
509ab8cd | 2531 | } |
cf5e422b | 2532 | if (info->order > last_order) |
2533 | { | |
2534 | last_order = info->order; | |
2535 | last_stmt = info->stmt; | |
2536 | } | |
2537 | else if (info->order < first_order) | |
2538 | { | |
2539 | first_order = info->order; | |
2540 | first_stmt = info->stmt; | |
2541 | } | |
2542 | end = MAX (end, info->bitpos + info->bitsize); | |
509ab8cd | 2543 | |
2544 | ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt, | |
2545 | &this_n, &n); | |
2546 | if (ins_stmt == NULL) | |
2547 | return false; | |
2548 | } | |
2549 | } | |
2550 | ||
2551 | uint64_t cmpxchg, cmpnop; | |
2552 | find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop); | |
2553 | ||
2554 | /* A complete byte swap should make the symbolic number start with | |
2555 | the largest digit in the highest order byte. An unchanged symbolic | |
2556 | number indicates a read with the same endianness as the target architecture. | |
2557 | if (n.n != cmpnop && n.n != cmpxchg) | |
2558 | return false; | |
2559 | ||
2560 | if (n.base_addr == NULL_TREE && !is_gimple_val (n.src)) | |
2561 | return false; | |
2562 | ||
cf5e422b | 2563 | if (!check_no_overlap (m_store_info, last, LROTATE_EXPR, last_order, end)) |
2564 | return false; | |
2565 | ||
509ab8cd | 2566 | /* Don't handle memory copy this way if normal non-bswap processing |
2567 | would handle it too. */ | |
2568 | if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1) | |
2569 | { | |
2570 | unsigned int i; | |
2571 | for (i = first; i <= last; ++i) | |
2572 | if (m_store_info[i]->rhs_code != MEM_REF) | |
2573 | break; | |
2574 | if (i == last + 1) | |
2575 | return false; | |
2576 | } | |
2577 | ||
2578 | if (n.n == cmpxchg) | |
2579 | switch (try_size) | |
2580 | { | |
2581 | case 16: | |
2582 | /* Will emit LROTATE_EXPR. */ | |
2583 | break; | |
2584 | case 32: | |
2585 | if (builtin_decl_explicit_p (BUILT_IN_BSWAP32) | |
2586 | && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing) | |
2587 | break; | |
2588 | return false; | |
2589 | case 64: | |
2590 | if (builtin_decl_explicit_p (BUILT_IN_BSWAP64) | |
2591 | && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing) | |
2592 | break; | |
2593 | return false; | |
2594 | default: | |
2595 | gcc_unreachable (); | |
2596 | } | |
2597 | ||
2598 | if (!allow_unaligned && n.base_addr) | |
2599 | { | |
2600 | unsigned int align = get_object_alignment (n.src); | |
2601 | if (align < try_size) | |
2602 | return false; | |
2603 | } | |
2604 | ||
2605 | /* If each load has vuse of the corresponding store, need to verify | |
2606 | the loads can be sunk right before the last store. */ | |
2607 | if (vuse_store == 1) | |
2608 | { | |
2609 | auto_vec<tree, 64> refs; | |
2610 | for (unsigned int i = first; i <= last; ++i) | |
2611 | gather_bswap_load_refs (&refs, | |
2612 | gimple_assign_rhs1 (m_store_info[i]->stmt)); | |
2613 | ||
2614 | unsigned int i; | |
2615 | tree ref; | |
2616 | FOR_EACH_VEC_ELT (refs, i, ref) | |
2617 | if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref)) | |
2618 | return false; | |
2619 | n.vuse = NULL_TREE; | |
2620 | } | |
2621 | ||
2622 | infof->n = n; | |
2623 | infof->ins_stmt = ins_stmt; | |
2624 | for (unsigned int i = first; i <= last; ++i) | |
2625 | { | |
2626 | m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR; | |
2627 | m_store_info[i]->ops[0].base_addr = NULL_TREE; | |
2628 | m_store_info[i]->ops[1].base_addr = NULL_TREE; | |
2629 | if (i != first) | |
2630 | merged_store->merge_into (m_store_info[i]); | |
2631 | } | |
2632 | ||
2633 | return true; | |
2634 | } | |
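/* Editorial aside: the source-level shape this function recognizes.
   The first (hypothetical) function below is the canonical big-endian
   store of a 32-bit value; on a little-endian target the symbolic
   numbers of the four byte stores merge into a complete byte swap, and
   under the checks above (alignment, bswap optab) the group amounts to
   something like the second function.  On a big-endian target the same
   sequence is instead recognized as an identity (plain) store.  */

#include <stdint.h>

void
store_be32 (unsigned char *p, uint32_t val)
{
  p[0] = (unsigned char) (val >> 24);
  p[1] = (unsigned char) (val >> 16);
  p[2] = (unsigned char) (val >> 8);
  p[3] = (unsigned char) val;
}

void
store_be32_merged (unsigned char *p, uint32_t val)
{
  uint32_t v = __builtin_bswap32 (val);
  __builtin_memcpy (p, &v, sizeof v);
}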
2635 | ||
3d3e04ac | 2636 | /* Go through the candidate stores recorded in m_store_info and merge them |
2637 | into merged_store_group objects recorded into m_merged_store_groups | |
2638 | representing the widened stores. Return true if coalescing was successful | |
2639 | and the number of widened stores is fewer than the original number | |
2640 | of stores. */ | |
2641 | ||
2642 | bool | |
2643 | imm_store_chain_info::coalesce_immediate_stores () | |
2644 | { | |
2645 | /* Anything less can't be processed. */ | |
2646 | if (m_store_info.length () < 2) | |
2647 | return false; | |
2648 | ||
2649 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
10f0d48d | 2650 | fprintf (dump_file, "Attempting to coalesce %u stores in chain\n", |
3d3e04ac | 2651 | m_store_info.length ()); |
2652 | ||
2653 | store_immediate_info *info; | |
509ab8cd | 2654 | unsigned int i, ignore = 0; |
3d3e04ac | 2655 | |
2656 | /* Order the stores by the bitposition they write to. */ | |
2657 | m_store_info.qsort (sort_by_bitpos); | |
2658 | ||
2659 | info = m_store_info[0]; | |
2660 | merged_store_group *merged_store = new merged_store_group (info); | |
10f0d48d | 2661 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2662 | fputs ("New store group\n", dump_file); | |
3d3e04ac | 2663 | |
2664 | FOR_EACH_VEC_ELT (m_store_info, i, info) | |
2665 | { | |
509ab8cd | 2666 | if (i <= ignore) |
10f0d48d | 2667 | goto done; |
3d3e04ac | 2668 | |
509ab8cd | 2669 | /* First try to handle a group of stores like: | |
2670 | p[0] = data >> 24; | |
2671 | p[1] = data >> 16; | |
2672 | p[2] = data >> 8; | |
2673 | p[3] = data; | |
2674 | using the bswap framework. */ | |
2675 | if (info->bitpos == merged_store->start + merged_store->width | |
2676 | && merged_store->stores.length () == 1 | |
2677 | && merged_store->stores[0]->ins_stmt != NULL | |
2678 | && info->ins_stmt != NULL) | |
2679 | { | |
2680 | unsigned int try_size; | |
2681 | for (try_size = 64; try_size >= 16; try_size >>= 1) | |
2682 | if (try_coalesce_bswap (merged_store, i - 1, try_size)) | |
2683 | break; | |
2684 | ||
2685 | if (try_size >= 16) | |
2686 | { | |
2687 | ignore = i + merged_store->stores.length () - 1; | |
2688 | m_merged_store_groups.safe_push (merged_store); | |
2689 | if (ignore < m_store_info.length ()) | |
2690 | merged_store = new merged_store_group (m_store_info[ignore]); | |
2691 | else | |
2692 | merged_store = NULL; | |
10f0d48d | 2693 | goto done; |
509ab8cd | 2694 | } |
2695 | } | |
2696 | ||
3d3e04ac | 2697 | /* |---store 1---| |
2698 | |---store 2---| | |
509ab8cd | 2699 | Overlapping stores. */ |
2700 | if (IN_RANGE (info->bitpos, merged_store->start, | |
3d3e04ac | 2701 | merged_store->start + merged_store->width - 1)) |
2702 | { | |
9991d1d3 | 2703 | /* Only allow overlapping stores of constants. */ |
2d81e760 | 2704 | if (info->rhs_code == INTEGER_CST) |
9991d1d3 | 2705 | { |
2d81e760 | 2706 | bool only_constants = true; |
2707 | store_immediate_info *infoj; | |
2708 | unsigned int j; | |
2709 | FOR_EACH_VEC_ELT (merged_store->stores, j, infoj) | |
2710 | if (infoj->rhs_code != INTEGER_CST) | |
2711 | { | |
2712 | only_constants = false; | |
2713 | break; | |
2714 | } | |
a1cf1dfe | 2715 | unsigned int last_order |
2716 | = MAX (merged_store->last_order, info->order); | |
2717 | unsigned HOST_WIDE_INT end | |
2718 | = MAX (merged_store->start + merged_store->width, | |
2719 | info->bitpos + info->bitsize); | |
2d81e760 | 2720 | if (only_constants |
2721 | && check_no_overlap (m_store_info, i, INTEGER_CST, | |
2722 | last_order, end)) | |
a1cf1dfe | 2723 | { |
2724 | /* check_no_overlap call above made sure there are no | |
2725 | overlapping stores with non-INTEGER_CST rhs_code | |
2726 | in between the first and last of the stores we've | |
2727 | just merged. If there are any INTEGER_CST rhs_code | |
2728 | stores in between, we need to merge_overlapping them | |
2729 | even if in the sort_by_bitpos order there are other | |
2730 | overlapping stores in between. Keep those stores as is. | |
2731 | Example: | |
2732 | MEM[(int *)p_28] = 0; | |
2733 | MEM[(char *)p_28 + 3B] = 1; | |
2734 | MEM[(char *)p_28 + 1B] = 2; | |
2735 | MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B]; | |
2736 | We can't merge the zero store with the store of two and | |
2737 | not merge anything else, because the store of one is | |
2738 | in the original order in between those two, but in | |
2739 | sort_by_bitpos order it comes after the last store that | |
2740 | we can't merge with them. We can merge the first 3 stores | |
2741 | and keep the last store as is though. */ | |
2742 | unsigned int len = m_store_info.length (), k = i; | |
2743 | for (unsigned int j = i + 1; j < len; ++j) | |
2744 | { | |
2745 | store_immediate_info *info2 = m_store_info[j]; | |
2746 | if (info2->bitpos >= end) | |
2747 | break; | |
2748 | if (info2->order < last_order) | |
2749 | { | |
2750 | if (info2->rhs_code != INTEGER_CST) | |
2751 | { | |
2752 | /* Normally check_no_overlap makes sure this | |
2753 | doesn't happen, but if end grows below, then | |
2754 | we need to process more stores than | |
2755 | check_no_overlap verified. Example: | |
2756 | MEM[(int *)p_5] = 0; | |
2757 | MEM[(short *)p_5 + 3B] = 1; | |
2758 | MEM[(char *)p_5 + 4B] = _9; | |
2759 | MEM[(char *)p_5 + 2B] = 2; */ | |
2760 | k = 0; | |
2761 | break; | |
2762 | } | |
2763 | k = j; | |
2764 | end = MAX (end, info2->bitpos + info2->bitsize); | |
2765 | } | |
2766 | } | |
2767 | ||
2768 | if (k != 0) | |
2769 | { | |
2770 | merged_store->merge_overlapping (info); | |
2771 | ||
2772 | for (unsigned int j = i + 1; j <= k; j++) | |
2773 | { | |
2774 | store_immediate_info *info2 = m_store_info[j]; | |
2775 | gcc_assert (info2->bitpos < end); | |
2776 | if (info2->order < last_order) | |
2777 | { | |
2778 | gcc_assert (info2->rhs_code == INTEGER_CST); | |
2779 | merged_store->merge_overlapping (info2); | |
2780 | } | |
2781 | /* Other stores are kept and not merged in any | |
2782 | way. */ | |
2783 | } | |
2784 | ignore = k; | |
2785 | goto done; | |
2786 | } | |
2787 | } | |
9991d1d3 | 2788 | } |
3d3e04ac | 2789 | } |
9991d1d3 | 2790 | /* |---store 1---||---store 2---| |
2791 | This store is consecutive to the previous one. | |
2792 | Merge it into the current store group. There can be gaps in between | |
2793 | the stores, but there can't be gaps in between bitregions. */ | |
10f0d48d | 2794 | else if (info->bitregion_start <= merged_store->bitregion_end |
f9ceb302 | 2795 | && merged_store->can_be_merged_into (info)) |
3d3e04ac | 2796 | { |
9991d1d3 | 2797 | store_immediate_info *infof = merged_store->stores[0]; |
2798 | ||
2799 | /* All the rhs_code ops that take 2 operands are commutative, so | |
2800 | swap the operands if that could make them compatible. | |
	  if (infof->ops[0].base_addr
	      && infof->ops[1].base_addr
	      && info->ops[0].base_addr
	      && info->ops[1].base_addr
	      && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
			   info->bitpos - infof->bitpos)
	      && operand_equal_p (info->ops[1].base_addr,
				  infof->ops[0].base_addr, 0))
	    {
	      std::swap (info->ops[0], info->ops[1]);
	      info->ops_swapped_p = true;
	    }
	  if (check_no_overlap (m_store_info, i, info->rhs_code,
				MAX (merged_store->last_order, info->order),
				MAX (merged_store->start + merged_store->width,
				     info->bitpos + info->bitsize)))
	    {
	      /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores.  */
	      if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
		{
		  info->rhs_code = BIT_INSERT_EXPR;
		  info->ops[0].val = gimple_assign_rhs1 (info->stmt);
		  info->ops[0].base_addr = NULL_TREE;
		}
	      else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
		{
		  store_immediate_info *infoj;
		  unsigned int j;
		  FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
		    {
		      infoj->rhs_code = BIT_INSERT_EXPR;
		      infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
		      infoj->ops[0].base_addr = NULL_TREE;
		    }
		}
	      if ((infof->ops[0].base_addr
		   ? compatible_load_p (merged_store, info, base_addr, 0)
		   : !info->ops[0].base_addr)
		  && (infof->ops[1].base_addr
		      ? compatible_load_p (merged_store, info, base_addr, 1)
		      : !info->ops[1].base_addr))
		{
		  merged_store->merge_into (info);
		  goto done;
		}
	    }
	}

      /* |---store 1---| <gap> |---store 2---|.
	 Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
	 the bitpattern they write and discard the group if that fails.
	 This will also reject single-store groups.  */
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;

      merged_store = new merged_store_group (info);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fputs ("New store group\n", dump_file);

    done:
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
			      " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
		   i, info->bitsize, info->bitpos);
	  print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
	  fputc ('\n', dump_file);
	}
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());

  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
	     m_merged_store_groups.length ());

  return success;
}

/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */
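
/* For instance (illustrative only): if every statement accesses memory
   through the same "int *" alias pointer type, that type is returned and
   the wide access keeps the precise alias set; if one statement instead
   uses an incompatible "short *" type, the function falls back to
   ptr_type_node, i.e. alias set zero.  */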

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  ret = type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret;
}

/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}

/* Used to describe a store resulting from splitting a wide store
   into smaller regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}

/* Record all stores in GROUP that write to the region that starts at BITPOS
   and has size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return
   INFO if there is exactly one original store in the range.  */
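
/* For instance (illustrative only): with recorded stores at bit ranges
   [0, 16) and [16, 24), a query for the region [0, 32) pushes both infos
   into STORES but returns NULL because more than one original store
   overlaps the range, while a query for [16, 32) returns the info of the
   second store alone.  */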

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (stores)
	{
	  stores->safe_push (info);
	  if (ret)
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret)
	return NULL;
      if (!second)
	ret = info;
    }
  return ret;
}

/* Return how many of the SSA_NAMEs used to compute the value to store
   in the INFO store have multiple uses.  If any SSA_NAME has multiple
   uses, also count the statements needed to compute it.  */
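
/* For instance (illustrative only): given _1 = MEM[q]; _2 = _1 ^ 123;
   MEM[p] = _2; this returns 0 when _1 and _2 each have a single use,
   but returns a nonzero count when _1 has another use, since the load
   computing _1 must then be retained regardless of what the merged
   sequence emits.  */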

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
	{
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    /* Fall through below to return the BIT_NOT_EXPR stmt
	       and then the BIT_{AND,IOR,XOR}_EXPR and anything it
	       uses.  */
	    ret = 1;
	  else
	    /* After this, stmt is the BIT_NOT_EXPR.  */
	    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	}
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	{
	  ret += 1 + info->ops[0].bit_not_p;
	  if (info->ops[1].base_addr)
	    ret += 1 + info->ops[1].bit_not_p;
	  return ret + 1;
	}
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      if (info->ops[1].base_addr == NULL_TREE)
	{
	  gcc_checking_assert (!info->ops_swapped_p);
	  return ret;
	}
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
	ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
	{
	  stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    return 1;
	}
      return 0;
    case BIT_INSERT_EXPR:
      return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
    default:
      gcc_unreachable ();
    }
}

/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */
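
/* For instance (illustrative only, assuming MAX_STORE_BITSIZE is 64):
   an 8-byte constant group that starts 2 bytes past a 4-byte-aligned
   address splits into 2-, 4- and 2-byte stores when unaligned stores
   are disallowed, but into a single unaligned 8-byte store when they
   are allowed; output_merged_store runs the dry-run variant for both
   settings and picks whichever yields fewer stores.  */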
3d3e04ac | 3126 | |
902cb3b7 | 3127 | static unsigned int |
9991d1d3 | 3128 | split_group (merged_store_group *group, bool allow_unaligned_store, |
3129 | bool allow_unaligned_load, | |
871a91ec | 3130 | vec<struct split_store *> *split_stores, |
3131 | unsigned *total_orig, | |
3132 | unsigned *total_new) | |
3d3e04ac | 3133 | { |
902cb3b7 | 3134 | unsigned HOST_WIDE_INT pos = group->bitregion_start; |
3135 | unsigned HOST_WIDE_INT size = group->bitregion_end - pos; | |
3d3e04ac | 3136 | unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT; |
902cb3b7 | 3137 | unsigned HOST_WIDE_INT group_align = group->align; |
3138 | unsigned HOST_WIDE_INT align_base = group->align_base; | |
9991d1d3 | 3139 | unsigned HOST_WIDE_INT group_load_align = group_align; |
871a91ec | 3140 | bool any_orig = false; |
3d3e04ac | 3141 | |
3d3e04ac | 3142 | gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0)); |
3143 | ||
509ab8cd | 3144 | if (group->stores[0]->rhs_code == LROTATE_EXPR |
3145 | || group->stores[0]->rhs_code == NOP_EXPR) | |
3146 | { | |
3147 | /* For bswap framework using sets of stores, all the checking | |
3148 | has been done earlier in try_coalesce_bswap and needs to be | |
3149 | emitted as a single store. */ | |
3150 | if (total_orig) | |
3151 | { | |
3152 | /* Avoid the old/new stmt count heuristics. It should be | |
3153 | always beneficial. */ | |
3154 | total_new[0] = 1; | |
3155 | total_orig[0] = 2; | |
3156 | } | |

      if (split_stores)
	{
	  unsigned HOST_WIDE_INT align_bitpos
	    = (group->start - align_base) & (group_align - 1);
	  unsigned HOST_WIDE_INT align = group_align;
	  if (align_bitpos)
	    align = least_bit_hwi (align_bitpos);
	  bytepos = group->start / BITS_PER_UNIT;
	  struct split_store *store
	    = new split_store (bytepos, group->width, align);
	  unsigned int first = 0;
	  find_constituent_stores (group, &store->orig_stores,
				   &first, group->start, group->width);
	  split_stores->safe_push (store);
	}

      return 1;
    }

  unsigned int ret = 0, first = 0;
  unsigned HOST_WIDE_INT try_pos = bytepos;

  if (total_orig)
    {
      unsigned int i;
      store_immediate_info *info = group->stores[0];

      total_new[0] = 0;
      total_orig[0] = 1; /* The orig store.  */
      info = group->stores[0];
      if (info->ops[0].base_addr)
	total_orig[0]++;
      if (info->ops[1].base_addr)
	total_orig[0]++;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_orig[0]++; /* The orig BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      total_orig[0] *= group->stores.length ();

      FOR_EACH_VEC_ELT (group->stores, i, info)
	{
	  total_new[0] += count_multiple_uses (info);
	  total_orig[0] += (info->bit_not_p
			    + info->ops[0].bit_not_p
			    + info->ops[1].bit_not_p);
	}
    }

  if (!allow_unaligned_load)
    for (int i = 0; i < 2; ++i)
      if (group->load_align[i])
	group_load_align = MIN (group_load_align, group->load_align[i]);

  while (size > 0)
    {
      if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
	  && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
	{
	  /* Skip padding bytes.  */
	  ++try_pos;
	  size -= BITS_PER_UNIT;
	  continue;
	}

      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
      unsigned HOST_WIDE_INT align_bitpos
	= (try_bitpos - align_base) & (group_align - 1);
      unsigned HOST_WIDE_INT align = group_align;
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (!allow_unaligned_store)
	try_size = MIN (try_size, align);
      if (!allow_unaligned_load)
	{
	  /* If we can't do or don't want to do unaligned stores
	     as well as loads, we need to take the loads into account
	     as well.  */
	  unsigned HOST_WIDE_INT load_align = group_load_align;
	  align_bitpos = (try_bitpos - align_base) & (load_align - 1);
	  if (align_bitpos)
	    load_align = least_bit_hwi (align_bitpos);
	  for (int i = 0; i < 2; ++i)
	    if (group->load_align[i])
	      {
		align_bitpos
		  = known_alignment (try_bitpos
				     - group->stores[0]->bitpos
				     + group->stores[0]->ops[i].bitpos
				     - group->load_align_base[i]);
		if (align_bitpos & (group_load_align - 1))
		  {
		    unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
		    load_align = MIN (load_align, a);
		  }
	      }
	  try_size = MIN (try_size, load_align);
	}
      store_immediate_info *info
	= find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
      if (info)
	{
	  /* If there is just one original statement for the range, see if
	     we can just reuse the original store which could be even larger
	     than try_size.  */
	  unsigned HOST_WIDE_INT stmt_end
	    = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
	  info = find_constituent_stores (group, NULL, &first, try_bitpos,
					  stmt_end - try_bitpos);
	  if (info && info->bitpos >= try_bitpos)
	    {
	      try_size = stmt_end - try_bitpos;
	      goto found;
	    }
	}

      /* Approximate store bitsize for the case when there are no padding
	 bits.  */
      while (try_size > size)
	try_size /= 2;
      /* Now look for whole padding bytes at the end of that bitsize.  */
      for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
	if (group->mask[try_pos - bytepos + nonmasked - 1]
	    != (unsigned char) ~0U)
	  break;
      if (nonmasked == 0)
	{
	  /* If the entire try_size range is padding, skip it.  */
	  try_pos += try_size / BITS_PER_UNIT;
	  size -= try_size;
	  continue;
	}
      /* Otherwise try to decrease try_size if the second half, last 3
	 quarters etc. are padding.  */
      nonmasked *= BITS_PER_UNIT;
      while (nonmasked <= try_size / 2)
	try_size /= 2;
      if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
	{
	  /* Now look for whole padding bytes at the start of that bitsize.  */
	  unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
	  for (masked = 0; masked < try_bytesize; ++masked)
	    if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
	      break;
	  masked *= BITS_PER_UNIT;
	  gcc_assert (masked < try_size);
	  if (masked >= try_size / 2)
	    {
	      while (masked >= try_size / 2)
		{
		  try_size /= 2;
		  try_pos += try_size / BITS_PER_UNIT;
		  size -= try_size;
		  masked -= try_size;
		}
	      /* Need to recompute the alignment, so just retry at the new
		 position.  */
	      continue;
	    }
	}

    found:
      ++ret;

      if (split_stores)
	{
	  struct split_store *store
	    = new split_store (try_pos, try_size, align);
	  info = find_constituent_stores (group, &store->orig_stores,
					  &first, try_bitpos, try_size);
	  if (info
	      && info->bitpos >= try_bitpos
	      && info->bitpos + info->bitsize <= try_bitpos + try_size)
	    {
	      store->orig = true;
	      any_orig = true;
	    }
	  split_stores->safe_push (store);
	}

      try_pos += try_size / BITS_PER_UNIT;
      size -= try_size;
    }

  if (total_orig)
    {
      unsigned int i;
      struct split_store *store;
      /* If we are reusing some original stores and any of the
	 original SSA_NAMEs had multiple uses, we need to subtract
	 those now before we add the new ones.  */
      if (total_new[0] && any_orig)
	{
	  FOR_EACH_VEC_ELT (*split_stores, i, store)
	    if (store->orig)
	      total_new[0] -= count_multiple_uses (store->orig_stores[0]);
	}
      total_new[0] += ret; /* The new store.  */
      store_immediate_info *info = group->stores[0];
      if (info->ops[0].base_addr)
	total_new[0] += ret;
      if (info->ops[1].base_addr)
	total_new[0] += ret;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_new[0] += ret; /* The new BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      FOR_EACH_VEC_ELT (*split_stores, i, store)
	{
	  unsigned int j;
	  bool bit_not_p[3] = { false, false, false };
	  /* If all orig_stores have a given bit_not_p set, then
	     we'd use a BIT_NOT_EXPR stmt and need to account for it.
	     If only some orig_stores have it set, then we'd use
	     a BIT_XOR_EXPR with a mask and need to account for it.  */
	  FOR_EACH_VEC_ELT (store->orig_stores, j, info)
	    {
	      if (info->ops[0].bit_not_p)
		bit_not_p[0] = true;
	      if (info->ops[1].bit_not_p)
		bit_not_p[1] = true;
	      if (info->bit_not_p)
		bit_not_p[2] = true;
	    }
	  total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
	}
    }

  return ret;
}

/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */
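
/* For instance (illustrative only): if every constituent store feeds
   from a BIT_NOT_EXPR and none of them stores a truncated bool,
   BIT_NOT_EXPR is returned and the whole split store is inverted at
   once; if only the store covering, say, the low 16 bits of a 32-bit
   split store is inverted, BIT_XOR_EXPR is returned with MASK set
   (on a little-endian target) to 0xffff so that only those bits get
   flipped.  */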

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  bool any_paddings = false;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (bit_not_p)
	{
	  ++cnt;
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
	    any_paddings = true;
	}
    }
  mask = NULL_TREE;
  if (cnt == 0)
    return NOP_EXPR;
  if (cnt == split_store->orig_stores.length () && !any_paddings)
    return BIT_NOT_EXPR;

  unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
  unsigned buf_size = split_store->size / BITS_PER_UNIT;
  unsigned char *buf
    = XALLOCAVEC (unsigned char, buf_size);
  memset (buf, ~0U, buf_size);
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (!bit_not_p)
	continue;
      /* Clear regions with bit_not_p and invert afterwards, rather than
	 clear regions with !bit_not_p, so that gaps in between stores aren't
	 set in the mask.  */
      unsigned HOST_WIDE_INT bitsize = info->bitsize;
      unsigned HOST_WIDE_INT prec = bitsize;
      unsigned int pos_in_buffer = 0;
      if (any_paddings)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
	    prec = TYPE_PRECISION (TREE_TYPE (lhs));
	}
      if (info->bitpos < try_bitpos)
	{
	  gcc_assert (info->bitpos + bitsize > try_bitpos);
	  if (!BYTES_BIG_ENDIAN)
	    {
	      if (prec <= try_bitpos - info->bitpos)
		continue;
	      prec -= try_bitpos - info->bitpos;
	    }
	  bitsize -= try_bitpos - info->bitpos;
	  if (BYTES_BIG_ENDIAN && prec > bitsize)
	    prec = bitsize;
	}
      else
	pos_in_buffer = info->bitpos - try_bitpos;
      if (prec < bitsize)
	{
	  /* If this is a bool inversion, invert just the least significant
	     prec bits rather than all bits of it.  */
	  if (BYTES_BIG_ENDIAN)
	    {
	      pos_in_buffer += bitsize - prec;
	      if (pos_in_buffer >= split_store->size)
		continue;
	    }
	  bitsize = prec;
	}
      if (pos_in_buffer + bitsize > split_store->size)
	bitsize = split_store->size - pos_in_buffer;
      unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (p, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
      else
	clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
    }
  for (unsigned int i = 0; i < buf_size; ++i)
    buf[i] = ~buf[i];
  mask = native_interpret_expr (int_type, buf, buf_size);
  return BIT_XOR_EXPR;
}

/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
  split_store *split_store;
  unsigned int i;
  unsigned HOST_WIDE_INT start_byte_pos
    = group->bitregion_start / BITS_PER_UNIT;

  unsigned int orig_num_stmts = group->stores.length ();
  if (orig_num_stmts < 2)
    return false;

  auto_vec<struct split_store *, 32> split_stores;
  bool allow_unaligned_store
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  bool allow_unaligned_load = allow_unaligned_store;
  if (allow_unaligned_store)
    {
      /* If unaligned stores are allowed, see how many stores we'd emit
	 for unaligned and how many for aligned stores.  Only use
	 unaligned stores if they allow fewer stores than aligned.  */
      unsigned aligned_cnt
	= split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
      unsigned unaligned_cnt
	= split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
      if (aligned_cnt <= unaligned_cnt)
	allow_unaligned_store = false;
    }
  unsigned total_orig, total_new;
  split_group (group, allow_unaligned_store, allow_unaligned_load,
	       &split_stores, &total_orig, &total_new);

  if (split_stores.length () >= orig_num_stmts)
    {
      /* We didn't manage to reduce the number of statements.  Bail out.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Exceeded original number of stmts (%u)."
			    "  Not profitable to emit new sequence.\n",
		 orig_num_stmts);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }
  if (total_orig <= total_new)
    {
      /* If the estimated number of original statements is not larger than
	 the estimated number of new statements, bail out too.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Estimated number of original stmts (%u)"
			    " not larger than estimated number of new"
			    " stmts (%u).\n",
		 total_orig, total_new);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }

  gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
  gimple_seq seq = NULL;
  tree last_vdef, new_vuse;
  last_vdef = gimple_vdef (group->last_stmt);
  new_vuse = gimple_vuse (group->last_stmt);
  tree bswap_res = NULL_TREE;

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
      gimple *ins_stmt = group->stores[0]->ins_stmt;
      struct symbolic_number *n = &group->stores[0]->n;
      bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;

      switch (n->range)
	{
	case 16:
	  load_type = bswap_type = uint16_type_node;
	  break;
	case 32:
	  load_type = uint32_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	case 64:
	  load_type = uint64_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	default:
	  gcc_unreachable ();
	}

      /* If each load has the vuse of the corresponding store, we've
	 checked the aliasing already in try_coalesce_bswap and we want
	 to sink the load into seq.  So we need to use new_vuse on the
	 load.  */
      if (n->base_addr)
	{
	  if (n->vuse == NULL)
	    {
	      n->vuse = new_vuse;
	      ins_stmt = NULL;
	    }
	  else
	    /* Update vuse in case it has been changed by
	       output_merged_stores.  */
	    n->vuse = gimple_vuse (ins_stmt);
	}
      bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
				 bswap_type, load_type, n, bswap);
      gcc_assert (bswap_res);
    }

  gimple *stmt = NULL;
  auto_vec<gimple *, 32> orig_stmts;
  gimple_seq this_seq;
  tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
				      is_gimple_mem_ref_addr, NULL_TREE);
  gimple_seq_add_seq_without_update (&seq, this_seq);

  tree load_addr[2] = { NULL_TREE, NULL_TREE };
  gimple_seq load_seq[2] = { NULL, NULL };
  gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
  for (int j = 0; j < 2; ++j)
    {
      store_operand_info &op = group->stores[0]->ops[j];
      if (op.base_addr == NULL_TREE)
	continue;

      store_immediate_info *infol = group->stores.last ();
      if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
	{
	  /* We can't pick the location randomly; while we've verified
	     all the loads have the same vuse, they can still be in
	     different basic blocks and we need to pick the one from
	     the last bb:
	     int x = q[0];
	     if (x == N) return;
	     int y = q[1];
	     p[0] = x;
	     p[1] = y;
	     otherwise if we put the wider load at the q[0] load, we might
	     segfault if q[1] is not mapped.  */
	  basic_block bb = gimple_bb (op.stmt);
	  gimple *ostmt = op.stmt;
	  store_immediate_info *info;
	  FOR_EACH_VEC_ELT (group->stores, i, info)
	    {
	      gimple *tstmt = info->ops[j].stmt;
	      basic_block tbb = gimple_bb (tstmt);
	      if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
		{
		  ostmt = tstmt;
		  bb = tbb;
		}
	    }
	  load_gsi[j] = gsi_for_stmt (ostmt);
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &load_seq[j], is_gimple_mem_ref_addr,
				      NULL_TREE);
	}
      else if (operand_equal_p (base_addr, op.base_addr, 0))
	load_addr[j] = addr;
      else
	{
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &this_seq, is_gimple_mem_ref_addr,
				      NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, this_seq);
	}
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    {
      unsigned HOST_WIDE_INT try_size = split_store->size;
      unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT align = split_store->align;
      tree dest, src;
      location_t loc;
      if (split_store->orig)
	{
	  /* If there is just a single constituent store which covers
	     the whole area, just reuse the lhs and rhs.  */
	  gimple *orig_stmt = split_store->orig_stores[0]->stmt;
	  dest = gimple_assign_lhs (orig_stmt);
	  src = gimple_assign_rhs1 (orig_stmt);
	  loc = gimple_location (orig_stmt);
	}
      else
	{
	  store_immediate_info *info;
	  unsigned short clique, base;
	  unsigned int k;
	  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	    orig_stmts.safe_push (info->stmt);
	  tree offset_type
	    = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
	  loc = get_location_for_stmts (orig_stmts);
	  orig_stmts.truncate (0);

	  tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
	  int_type = build_aligned_type (int_type, align);
	  dest = fold_build2 (MEM_REF, int_type, addr,
			      build_int_cst (offset_type, try_pos));
	  if (TREE_CODE (dest) == MEM_REF)
	    {
	      MR_DEPENDENCE_CLIQUE (dest) = clique;
	      MR_DEPENDENCE_BASE (dest) = base;
	    }

	  tree mask;
	  if (bswap_res)
	    mask = integer_zero_node;
	  else
	    mask = native_interpret_expr (int_type,
					  group->mask + try_pos
					  - start_byte_pos,
					  group->buf_size);

	  tree ops[2];
	  for (int j = 0;
	       j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
	       ++j)
	    {
	      store_operand_info &op = split_store->orig_stores[0]->ops[j];
	      if (bswap_res)
		ops[j] = bswap_res;
	      else if (op.base_addr)
		{
		  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		    orig_stmts.safe_push (info->ops[j].stmt);

		  offset_type = get_alias_type_for_stmts (orig_stmts, true,
							  &clique, &base);
		  location_t load_loc = get_location_for_stmts (orig_stmts);
		  orig_stmts.truncate (0);

		  unsigned HOST_WIDE_INT load_align = group->load_align[j];
		  unsigned HOST_WIDE_INT align_bitpos
		    = known_alignment (try_bitpos
				       - split_store->orig_stores[0]->bitpos
				       + op.bitpos);
		  if (align_bitpos & (load_align - 1))
		    load_align = least_bit_hwi (align_bitpos);

		  tree load_int_type
		    = build_nonstandard_integer_type (try_size, UNSIGNED);
		  load_int_type
		    = build_aligned_type (load_int_type, load_align);

		  poly_uint64 load_pos
		    = exact_div (try_bitpos
				 - split_store->orig_stores[0]->bitpos
				 + op.bitpos,
				 BITS_PER_UNIT);
		  ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
					build_int_cst (offset_type, load_pos));
		  if (TREE_CODE (ops[j]) == MEM_REF)
		    {
		      MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
		      MR_DEPENDENCE_BASE (ops[j]) = base;
		    }
		  if (!integer_zerop (mask))
		    /* The load might load some bits (that will be masked off
		       later on) uninitialized, avoid -W*uninitialized
		       warnings in that case.  */
		    TREE_NO_WARNING (ops[j]) = 1;

		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      ops[j]);
		  gimple_set_location (stmt, load_loc);
		  if (gsi_bb (load_gsi[j]))
		    {
		      gimple_set_vuse (stmt, gimple_vuse (op.stmt));
		      gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
		    }
		  else
		    {
		      gimple_set_vuse (stmt, new_vuse);
		      gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		  ops[j] = gimple_assign_lhs (stmt);
		  tree xor_mask;
		  enum tree_code inv_op
		    = invert_op (split_store, j, int_type, xor_mask);
		  if (inv_op != NOP_EXPR)
		    {
		      stmt = gimple_build_assign (make_ssa_name (int_type),
						  inv_op, ops[j], xor_mask);
		      gimple_set_location (stmt, load_loc);
		      ops[j] = gimple_assign_lhs (stmt);

		      if (gsi_bb (load_gsi[j]))
			gimple_seq_add_stmt_without_update (&load_seq[j],
							    stmt);
		      else
			gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		}
	      else
		ops[j] = native_interpret_expr (int_type,
						group->val + try_pos
						- start_byte_pos,
						group->buf_size);
	    }

	  switch (split_store->orig_stores[0]->rhs_code)
	    {
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		{
		  tree rhs1 = gimple_assign_rhs1 (info->stmt);
		  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
		}
	      location_t bit_loc;
	      bit_loc = get_location_for_stmts (orig_stmts);
	      orig_stmts.truncate (0);

	      stmt
		= gimple_build_assign (make_ssa_name (int_type),
				       split_store->orig_stores[0]->rhs_code,
				       ops[0], ops[1]);
	      gimple_set_location (stmt, bit_loc);
	      /* If there is just one load and there is a separate
		 load_seq[0], emit the bitwise op right after it.  */
	      if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      /* Otherwise, if at least one load is in seq, we need to
		 emit the bitwise op right before the store.  If there
		 are two loads and they are emitted somewhere else, it
		 would be better to emit the bitwise op as early as
		 possible; we don't track where that would be possible
		 right now though.  */
	      else
		gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	      tree xor_mask;
	      enum tree_code inv_op;
	      inv_op = invert_op (split_store, 2, int_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, bit_loc);
		  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		    gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
		  else
		    gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    case LROTATE_EXPR:
	    case NOP_EXPR:
	      src = ops[0];
	      if (!is_gimple_val (src))
		{
		  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
					      src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      NOP_EXPR, src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      inv_op = invert_op (split_store, 2, int_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    default:
	      src = ops[0];
	      break;
	    }

	  /* If bit insertion is required, we use the source as an accumulator
	     into which the successive bit-field values are manually inserted.
	     FIXME: perhaps use BIT_INSERT_EXPR instead in some cases?  */
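	  /* For instance (illustrative only, little-endian): inserting a
	     3-bit value at bit offset 8 of a 32-bit split store masks the
	     value with 0x7, converts it to the 32-bit int_type, shifts it
	     left by 8 and iors it into SRC; for a value already sitting at
	     the start of the region the shift degenerates to a no-op.  */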
	  if (group->bit_insertion)
	    FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	      if (info->rhs_code == BIT_INSERT_EXPR
		  && info->bitpos < try_bitpos + try_size
		  && info->bitpos + info->bitsize > try_bitpos)
		{
		  /* Mask, truncate, convert to final type, shift and ior into
		     the accumulator.  Note that every step can be a no-op.  */
		  const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
		  const HOST_WIDE_INT end_gap
		    = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
		  tree tem = info->ops[0].val;
		  if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
		    {
		      tree bitfield_type
			= build_nonstandard_integer_type (info->bitsize,
							  UNSIGNED);
		      tem = gimple_convert (&seq, loc, bitfield_type, tem);
		    }
		  else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
		    {
		      const unsigned HOST_WIDE_INT imask
			= (HOST_WIDE_INT_1U << info->bitsize) - 1;
		      tem = gimple_build (&seq, loc,
					  BIT_AND_EXPR, TREE_TYPE (tem), tem,
					  build_int_cst (TREE_TYPE (tem),
							 imask));
		    }
		  const HOST_WIDE_INT shift
		    = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
		  if (shift < 0)
		    tem = gimple_build (&seq, loc,
					RSHIFT_EXPR, TREE_TYPE (tem), tem,
					build_int_cst (NULL_TREE, -shift));
		  tem = gimple_convert (&seq, loc, int_type, tem);
		  if (shift > 0)
		    tem = gimple_build (&seq, loc,
					LSHIFT_EXPR, int_type, tem,
					build_int_cst (NULL_TREE, shift));
		  src = gimple_build (&seq, loc,
				      BIT_IOR_EXPR, int_type, tem, src);
		}

	  if (!integer_zerop (mask))
	    {
	      tree tem = make_ssa_name (int_type);
	      tree load_src = unshare_expr (dest);
	      /* The load might load some or all bits uninitialized,
		 avoid -W*uninitialized warnings in that case.
		 As an optimization, if all the bits were provably
		 uninitialized (no stores at all yet, or the previous
		 store was a CLOBBER) we could optimize away the load
		 and replace it e.g. with 0.  */
	      TREE_NO_WARNING (load_src) = 1;
	      stmt = gimple_build_assign (tem, load_src);
	      gimple_set_location (stmt, loc);
	      gimple_set_vuse (stmt, new_vuse);
	      gimple_seq_add_stmt_without_update (&seq, stmt);

	      /* FIXME: If there is a single chunk of zero bits in mask,
		 perhaps use BIT_INSERT_EXPR instead?  */
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_AND_EXPR, tem, mask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      tem = gimple_assign_lhs (stmt);

	      if (TREE_CODE (src) == INTEGER_CST)
		src = wide_int_to_tree (int_type,
					wi::bit_and_not (wi::to_wide (src),
							 wi::to_wide (mask)));
	      else
		{
		  tree nmask
		    = wide_int_to_tree (int_type,
					wi::bit_not (wi::to_wide (mask)));
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      BIT_AND_EXPR, src, nmask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_IOR_EXPR, tem, src);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	}

      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);

      tree new_vdef;
      if (i < split_stores.length () - 1)
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
      else
	new_vdef = last_vdef;

      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    delete split_store;

  gcc_assert (seq);
  if (dump_file)
    {
      fprintf (dump_file,
	       "New sequence of %u stores to replace old one of %u stores\n",
	       split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
	print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
    }
  gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
  for (int j = 0; j < 2; ++j)
    if (load_seq[j])
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);

  return true;
}

/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_remove (&gsi, true);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}

/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}

/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  tree_code code = TREE_CODE (lhs);

  if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
      || code == COMPONENT_REF || code == BIT_FIELD_REF)
    return true;

  return false;
}

/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  unsigned HOST_WIDE_INT size;
  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
	  && native_encode_expr (rhs, NULL, size) != 0);
}

/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL; *PBITPOS should still be valid even in that
   case.  */
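
/* For instance (illustrative only): for s.f where f is a bit-field
   declared as "int f : 3;" starting at byte offset 4 of s, this would
   return &s with *PBITSIZE 3, *PBITPOS 32, and the bit region widened
   to the bytes that a read-modify-write of the bit-field may legally
   touch, as computed by get_bit_range.  */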

static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
			     poly_uint64 *pbitpos,
			     poly_uint64 *pbitregion_start,
			     poly_uint64 *pbitregion_end)
{
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
					&unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))
    return NULL_TREE;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))
	bitregion_end += 1;
    }

  if (reversep)
    return NULL_TREE;

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int byte_off = mem_ref_offset (base_addr);
      poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
      bit_off += bitpos;
      if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
	{
	  if (maybe_ne (bitregion_end, 0U))
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += bitregion_start;
	      if (bit_off.to_uhwi (&bitregion_start))
		{
		  bit_off = byte_off << LOG2_BITS_PER_UNIT;
		  bit_off += bitregion_end;
		  if (!bit_off.to_uhwi (&bitregion_end))
		    bitregion_end = 0;
		}
	      else
		bitregion_end = 0;
	    }
	}
      else
	return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returned the base object; get at its
     address now.  */
  else
    {
      if (maybe_lt (bitpos, 0))
	return NULL_TREE;
      base_addr = build_fold_addr_expr (base_addr);
    }

  if (known_eq (bitregion_end, 0U))
    {
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = bitpos;
      bitregion_end = round_up_to_byte_boundary (bitregion_end + bitsize);
    }

  if (offset != NULL_TREE)
    {
      /* If the access has a variable offset then a base decl has to be
	 address-taken to be able to emit pointer-based stores to it.
	 ??? We might be able to get away with re-using the original
	 base up to the first variable part and then wrapping that inside
	 a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (! base
	  || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
	return NULL_TREE;

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
			  base_addr, offset);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}

/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */
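
/* For instance (illustrative only): for the store MEM[p] = _2 with
   _2 = ~_1 and _1 = MEM[q], this records the load from q in *OP and
   flips op->bit_not_p, provided the load covers exactly BITSIZE bits
   at a byte-aligned distance from BITPOS and stays within the store's
   bit region.  */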

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      poly_uint64 bitsize, poly_uint64 bitpos,
	      poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
          if (op->bit_not_p)
            return false;
          op->bit_not_p = !op->bit_not_p;
          return true;
        }
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
        = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
                                       &op->bitregion_start,
                                       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
          && known_eq (op->bitsize, bitsize)
          && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
          && known_ge (op->bitpos - op->bitregion_start,
                       bitpos - bitregion_start)
          && known_ge (op->bitregion_end - op->bitpos,
                       bitregion_end - bitpos))
        {
          op->stmt = stmt;
          op->val = mem;
          op->bit_not_p = false;
          return true;
        }
    }
  return false;
}

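/* For example (an illustrative GIMPLE sequence, not actual dump output),
   given
     _1 = MEM[(char *)q_2 + 1B];
     _3 = ~_1;
   handled_load on the definition of _3 records the load in *OP and sets
   op->bit_not_p, while a double negation (~~) is rejected above because
   it should have been folded away earlier.  */
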
/* Record the store STMT for store merging optimization if it can be
   optimized.  */

void
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos;
  poly_uint64 bitregion_start, bitregion_end;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
                                   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))
    return;

  bool invalid = (base_addr == NULL_TREE
                  || (maybe_gt (bitsize,
                                (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
                      && (TREE_CODE (rhs) != INTEGER_CST)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
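  /* Classify the RHS.  Schematically (illustrative GIMPLE, not actual
     dump syntax), the forms recognized below are:
       [p] = imm;                            -> INTEGER_CST
       _1 = [q];  [p] = _1;                  -> MEM_REF
       _1 = [q];  _2 = _1 ^ imm;  [p] = _2;  -> BIT_XOR_EXPR (also &, |)
     plus byte-swap/rotate candidates (LROTATE_EXPR) and bit-field
     stores (BIT_INSERT_EXPR) further down.  */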
  if (invalid)
    ;
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) != SSA_NAME)
    invalid = true;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
        invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
                             bitregion_start, bitregion_end))
        rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
        {
          tree rhs1 = gimple_assign_rhs1 (def_stmt);
          if (TREE_CODE (rhs1) == SSA_NAME
              && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
            {
              bit_not_p = true;
              def_stmt = SSA_NAME_DEF_STMT (rhs1);
            }
        }

      if (rhs_code == ERROR_MARK && !invalid)
        switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
          {
          case BIT_AND_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            tree rhs1, rhs2;
            rhs1 = gimple_assign_rhs1 (def_stmt);
            rhs2 = gimple_assign_rhs2 (def_stmt);
            invalid = true;
            if (TREE_CODE (rhs1) != SSA_NAME)
              break;
            def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
            if (!is_gimple_assign (def_stmt1)
                || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
                                  bitregion_start, bitregion_end))
              break;
            if (rhs_valid_for_store_merging_p (rhs2))
              ops[1].val = rhs2;
            else if (TREE_CODE (rhs2) != SSA_NAME)
              break;
            else
              {
                def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
                if (!is_gimple_assign (def_stmt2))
                  break;
                else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
                                        bitregion_start, bitregion_end))
                  break;
              }
            invalid = false;
            break;
          default:
            invalid = true;
            break;
          }

      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
          && (const_bitsize % BITS_PER_UNIT) == 0
          && const_bitsize <= 64
          && multiple_p (bitpos, BITS_PER_UNIT))
        {
          ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
          if (ins_stmt)
            {
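              /* Every byte of the candidate value must come from a known
                 source byte: a zero or "unknown" marker in the symbolic
                 number means the bswap/rotate path has to be abandoned.  */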
              uint64_t nn = n.n;
              for (unsigned HOST_WIDE_INT i = 0;
                   i < const_bitsize;
                   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
                if ((nn & MARKER_MASK) == 0
                    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
                  {
                    ins_stmt = NULL;
                    break;
                  }
              if (ins_stmt)
                {
                  if (invalid)
                    {
                      rhs_code = LROTATE_EXPR;
                      ops[0].base_addr = NULL_TREE;
                      ops[1].base_addr = NULL_TREE;
                    }
                  invalid = false;
                }
            }
        }

      if (invalid
          && bitsize.is_constant (&const_bitsize)
          && ((const_bitsize % BITS_PER_UNIT) != 0
              || !multiple_p (bitpos, BITS_PER_UNIT))
          && const_bitsize <= 64)
        {
          /* Bypass a conversion to the bit-field type.  */
          if (!bit_not_p
              && is_gimple_assign (def_stmt)
              && CONVERT_EXPR_CODE_P (rhs_code))
            {
              tree rhs1 = gimple_assign_rhs1 (def_stmt);
              if (TREE_CODE (rhs1) == SSA_NAME
                  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
                rhs = rhs1;
            }
          rhs_code = BIT_INSERT_EXPR;
          bit_not_p = false;
          ops[0].val = rhs;
          ops[0].base_addr = NULL_TREE;
          ops[1].base_addr = NULL_TREE;
          invalid = false;
        }
    }

  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    {
      terminate_all_aliasing_chains (NULL, stmt);
      return;
    }

  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  struct imm_store_chain_info **chain_info = NULL;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
                                       const_bitregion_start,
                                       const_bitregion_end,
                                       stmt, ord, rhs_code, n, ins_stmt,
                                       bit_not_p, ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Recording immediate store from stmt:\n");
          print_gimple_stmt (dump_file, stmt, 0);
        }
      (*chain_info)->m_store_info.safe_push (info);
      terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain, terminate and
         process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
          == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Reached maximum number of statements to merge:\n");
          terminate_and_release_chain (*chain_info);
        }
      return;
    }

  /* Does this store alias any existing chain?  */
  terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  struct imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
                                   const_bitregion_start,
                                   const_bitregion_end,
                                   stmt, 0, rhs_code, n, ins_stmt,
                                   bit_not_p, ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
}

/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p), process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      unsigned HOST_WIDE_INT num_statements = 0;
      /* Skip basic blocks with fewer than two non-debug statements;
         there is nothing to merge in them.  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          if (is_gimple_debug (gsi_stmt (gsi)))
            continue;

          if (++num_statements >= 2)
            break;
        }

      if (num_statements < 2)
        continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          if (is_gimple_debug (stmt))
            continue;

          if (gimple_has_volatile_ops (stmt))
            {
              /* Terminate all chains.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Volatile access terminates "
                                    "all chains\n");
              terminate_and_process_all_chains ();
              continue;
            }

          if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
              && !stmt_can_throw_internal (cfun, stmt)
              && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
            process_store (stmt);
          else
            terminate_all_aliasing_chains (NULL, stmt);
        }
      terminate_and_process_all_chains ();
    }
  return 0;
}

} // anon namespace

/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N,
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
        {
          fprintf (stderr, "Arrays do not match.  X:\n");
          dump_char_array (stderr, x, n);
          fprintf (stderr, "Y:\n");
          dump_char_array (stderr, y, n);
        }
      ASSERT_EQ (x[i], y[i]);
    }
}

/* Test that shift_bytes_in_array carries bits across bytes correctly.  */

static void
verify_shift_bytes_in_array (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

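  /* A left shift by 2 carries the top two bits of in[0] into in[1]
     (little-endian byte order): 0x1fe0 << 2 == 0x7f80, i.e. bytes
     { 0x80, 0x7f }.  */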
  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}

/* Test that shift_bytes_in_array_right carries bits across bytes
   correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
  /* byte 1   | byte 0
     11100000 | 00011111.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
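  /* A right shift by 2 carries the low two bits of in[0] into in[1]
     (in[0] is the most significant byte here): 0x1fe0 >> 2 == 0x07f8,
     i.e. bytes { 0x07, 0xf8 }.  */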
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}

/* Test that clear_bit_region clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}

/* Test that clear_bit_region_be clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}

/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */