Commit | Line | Data |
---|---|---|
3d3e04ac | 1 | /* GIMPLE store merging pass. |
aad93da1 | 2 | Copyright (C) 2016-2017 Free Software Foundation, Inc. |
3d3e04ac | 3 | Contributed by ARM Ltd. |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it | |
8 | under the terms of the GNU General Public License as published by | |
9 | the Free Software Foundation; either version 3, or (at your option) | |
10 | any later version. | |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but | |
13 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with GCC; see the file COPYING3. If not see | |
19 | <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | /* The purpose of this pass is to combine multiple memory stores of | |
9991d1d3 | 22 | constant values, values loaded from memory or bitwise operations |
23 | on those to consecutive memory locations into fewer wider stores. | |
3d3e04ac | 24 | For example, if we have a sequence performing four byte stores to
25 | consecutive memory locations: | |
26 | [p ] := imm1; | |
27 | [p + 1B] := imm2; | |
28 | [p + 2B] := imm3; | |
29 | [p + 3B] := imm4; | |
30 | we can transform this into a single 4-byte store if the target supports it: | |
31 | [p] := imm1:imm2:imm3:imm4 //concatenated immediates according to endianness. | |
32 | ||
9991d1d3 | 33 | Or: |
34 | [p ] := [q ]; | |
35 | [p + 1B] := [q + 1B]; | |
36 | [p + 2B] := [q + 2B]; | |
37 | [p + 3B] := [q + 3B]; | |
38 | if there is no overlap, this can be transformed into a single 4-byte | |
39 | load followed by a single 4-byte store. | |
40 | ||
41 | Or: | |
42 | [p ] := [q ] ^ imm1; | |
43 | [p + 1B] := [q + 1B] ^ imm2; | |
44 | [p + 2B] := [q + 2B] ^ imm3; | |
45 | [p + 3B] := [q + 3B] ^ imm4; | |
46 | if there is no overlap, this can be transformed into a single 4-byte | |
47 | load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store. | |
48 | ||
3d3e04ac | 49 | The algorithm is applied to each basic block in three phases: |
50 | ||
9991d1d3 | 51 | 1) Scan through the basic block, recording assignments from
3d3e04ac | 52 | expressions we can handle to destinations that can be expressed as a store
9991d1d3 | 53 | to memory of a certain size at a certain bit offset. For bit-fields
54 | we also note the surrounding bit region, bits that could be stored in | |
55 | a read-modify-write operation when storing the bit-field. Record store | |
56 | chains to different bases in a hash_map (m_stores) and make sure to | |
57 | terminate such chains when appropriate (for example when the stored | |
58 | values get used subsequently). | |
3d3e04ac | 59 | These stores can be a result of structure element initializers, array stores |
60 | etc. A store_immediate_info object is recorded for every such store. | |
61 | Record as many such assignments to a single base as possible until a | |
62 | statement that interferes with the store sequence is encountered. | |
9991d1d3 | 63 | Each store has up to 2 operands, which can be an immediate constant |
64 | or a memory load, from which the value to be stored can be computed. | |
65 | At most one of the operands can be a constant. The operands are recorded | |
66 | in a store_operand_info struct. | |
3d3e04ac | 67 | |
68 | 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of | |
69 | store_immediate_info objects) and coalesce contiguous stores into | |
9991d1d3 | 70 | merged_store_group objects. For bit-field stores, we don't require
71 | the stores themselves to be contiguous, only their surrounding bit | |
72 | regions have to be contiguous. If the expression being stored differs | |
73 | between adjacent stores, such as one store storing a constant and the | |
74 | following one storing a value loaded from memory, or if the loaded memory | |
75 | objects are not adjacent, a new merged_store_group is created as well. | |
3d3e04ac | 76 | |
77 | For example, given the stores: | |
78 | [p ] := 0; | |
79 | [p + 1B] := 1; | |
80 | [p + 3B] := 0; | |
81 | [p + 4B] := 1; | |
82 | [p + 5B] := 0; | |
83 | [p + 6B] := 0; | |
84 | This phase would produce two merged_store_group objects, one recording the | |
85 | two bytes stored in the memory region [p : p + 1] and another | |
86 | recording the four bytes stored in the memory region [p + 3 : p + 6]. | |
87 | ||
88 | 3) The merged_store_group objects produced in phase 2) are processed | |
89 | to generate the sequence of wider stores that set the contiguous memory | |
90 | regions to the sequences of bytes that correspond to them. This may emit | |
91 | multiple stores per store group to handle contiguous stores that are not | |
92 | of a size that is a power of 2. For example, it can try to emit a 40-bit | |
93 | store as a 32-bit store followed by an 8-bit store. | |
94 | We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT or | |
dfdced85 | 95 | TARGET_SLOW_UNALIGNED_ACCESS rules. |
3d3e04ac | 96 | |
97 | Note on endianness and example: | |
98 | Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores: | |
99 | [p ] := 0x1234; | |
100 | [p + 2B] := 0x5678; | |
101 | [p + 4B] := 0xab; | |
102 | [p + 5B] := 0xcd; | |
103 | ||
104 | The memory layout for little-endian (LE) and big-endian (BE) must be: | |
105 | p |LE|BE| | |
106 | --------- | |
107 | 0 |34|12| | |
108 | 1 |12|34| | |
109 | 2 |78|56| | |
110 | 3 |56|78| | |
111 | 4 |ab|ab| | |
112 | 5 |cd|cd| | |
113 | ||
114 | To merge these into a single 48-bit merged value 'val' in phase 2) | |
115 | on little-endian we insert stores to higher (consecutive) bitpositions | |
116 | into the most significant bits of the merged value. | |
117 | The final merged value would be: 0xcdab56781234 | |
118 | ||
119 | For big-endian we insert stores to higher bitpositions into the least | |
120 | significant bits of the merged value. | |
121 | The final merged value would be: 0x12345678abcd | |
122 | ||
123 | Then, in phase 3), we want to emit this 48-bit value as a 32-bit store | |
124 | followed by a 16-bit store. Again, we must consider endianness when | |
125 | breaking down the 48-bit value 'val' computed above. | |
126 | For little endian we emit: | |
127 | [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff; | |
128 | [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32; | |
129 | ||
130 | Whereas for big-endian we emit: | |
131 | [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16; | |
132 | [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */ | |
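
/* As a concrete illustration of the transformation, assuming a
   little-endian target with support for unaligned 32-bit stores, a
   sequence like

     struct S { unsigned char a, b, c, d; };

     void
     foo (struct S *p)
     {
       p->a = 1;
       p->b = 2;
       p->c = 3;
       p->d = 4;
     }

   can be rewritten by this pass into a single 32-bit store of the
   constant 0x04030201 to *p.  */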
133 | ||
134 | #include "config.h" | |
135 | #include "system.h" | |
136 | #include "coretypes.h" | |
137 | #include "backend.h" | |
138 | #include "tree.h" | |
139 | #include "gimple.h" | |
140 | #include "builtins.h" | |
141 | #include "fold-const.h" | |
142 | #include "tree-pass.h" | |
143 | #include "ssa.h" | |
144 | #include "gimple-pretty-print.h" | |
145 | #include "alias.h" | |
147 | #include "params.h" | |
148 | #include "print-tree.h" | |
149 | #include "tree-hash-traits.h" | |
150 | #include "gimple-iterator.h" | |
151 | #include "gimplify.h" | |
152 | #include "stor-layout.h" | |
153 | #include "timevar.h" | |
154 | #include "tree-cfg.h" | |
155 | #include "tree-eh.h" | |
156 | #include "target.h" | |
427223f1 | 157 | #include "gimplify-me.h" |
902cb3b7 | 158 | #include "rtl.h" |
159 | #include "expr.h" /* For get_bit_range. */ | |
3d9a2fb3 | 160 | #include "selftest.h" |
3d3e04ac | 161 | |
162 | /* The maximum size (in bits) of the stores this pass should generate. */ | |
163 | #define MAX_STORE_BITSIZE (BITS_PER_WORD) | |
164 | #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT) | |
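
/* For instance, on a typical 64-bit target BITS_PER_WORD is 64, so
   MAX_STORE_BITSIZE is 64 and MAX_STORE_BYTES is 8: the widest store
   this pass will generate is one word.  */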
165 | ||
9991d1d3 | 166 | /* Limit to bound the number of aliasing checks for loads with the same |
167 | vuse as the corresponding store. */ | |
168 | #define MAX_STORE_ALIAS_CHECKS 64 | |
169 | ||
3d3e04ac | 170 | namespace { |
171 | ||
9991d1d3 | 172 | /* Struct recording one operand for the store, which is either a constant,
173 | in which case VAL represents the constant and all the other fields are | |
174 | zero, or a memory load, in which case VAL represents the reference, | |
175 | BASE_ADDR is non-NULL and the other fields also reflect the memory load. */ | |
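
/* As an illustration, for a store such as [p] := [q + 1B] ^ 0x12 the
   first operand would describe the memory load (VAL is the [q + 1B]
   reference, BASE_ADDR is q, BITPOS is BITS_PER_UNIT) and the second
   operand would describe the constant (VAL is the INTEGER_CST 0x12,
   BASE_ADDR is NULL and the remaining fields are zero).  */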
176 | ||
177 | struct store_operand_info | |
178 | { | |
179 | tree val; | |
180 | tree base_addr; | |
181 | unsigned HOST_WIDE_INT bitsize; | |
182 | unsigned HOST_WIDE_INT bitpos; | |
183 | unsigned HOST_WIDE_INT bitregion_start; | |
184 | unsigned HOST_WIDE_INT bitregion_end; | |
185 | gimple *stmt; | |
c35548ce | 186 | bool bit_not_p; |
9991d1d3 | 187 | store_operand_info (); |
188 | }; | |
189 | ||
190 | store_operand_info::store_operand_info () | |
191 | : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0), | |
c35548ce | 192 | bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false) |
9991d1d3 | 193 | { |
194 | } | |
195 | ||
3d3e04ac | 196 | /* Struct recording the information about a single store of an immediate |
197 | to memory. These are created in the first phase and coalesced into | |
198 | merged_store_group objects in the second phase. */ | |
199 | ||
200 | struct store_immediate_info | |
201 | { | |
202 | unsigned HOST_WIDE_INT bitsize; | |
203 | unsigned HOST_WIDE_INT bitpos; | |
902cb3b7 | 204 | unsigned HOST_WIDE_INT bitregion_start; |
205 | /* This is one past the last bit of the bit region. */ | |
206 | unsigned HOST_WIDE_INT bitregion_end; | |
3d3e04ac | 207 | gimple *stmt; |
208 | unsigned int order; | |
9991d1d3 | 209 | /* INTEGER_CST for constant stores, MEM_REF for a memory copy, or
210 | BIT_*_EXPR for a logical bitwise operation. */ | |
211 | enum tree_code rhs_code; | |
832a73b9 | 212 | bool bit_not_p; |
9991d1d3 | 213 | /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise |
214 | just the first one. */ | |
215 | store_operand_info ops[2]; | |
f85e7cb7 | 216 | store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
902cb3b7 | 217 | unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
832a73b9 | 218 | gimple *, unsigned int, enum tree_code, bool, |
9991d1d3 | 219 | const store_operand_info &, |
220 | const store_operand_info &); | |
3d3e04ac | 221 | }; |
222 | ||
223 | store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs, | |
f85e7cb7 | 224 | unsigned HOST_WIDE_INT bp, |
902cb3b7 | 225 | unsigned HOST_WIDE_INT brs, |
226 | unsigned HOST_WIDE_INT bre, | |
f85e7cb7 | 227 | gimple *st, |
9991d1d3 | 228 | unsigned int ord, |
229 | enum tree_code rhscode, | |
832a73b9 | 230 | bool bitnotp, |
9991d1d3 | 231 | const store_operand_info &op0r, |
232 | const store_operand_info &op1r) | |
902cb3b7 | 233 | : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre), |
832a73b9 | 234 | stmt (st), order (ord), rhs_code (rhscode), bit_not_p (bitnotp) |
9991d1d3 | 235 | #if __cplusplus >= 201103L |
236 | , ops { op0r, op1r } | |
237 | { | |
238 | } | |
239 | #else | |
3d3e04ac | 240 | { |
9991d1d3 | 241 | ops[0] = op0r; |
242 | ops[1] = op1r; | |
3d3e04ac | 243 | } |
9991d1d3 | 244 | #endif |
3d3e04ac | 245 | |
246 | /* Struct representing a group of stores to contiguous memory locations. | |
247 | These are produced by the second phase (coalescing) and consumed in the | |
248 | third phase that outputs the widened stores. */ | |
249 | ||
250 | struct merged_store_group | |
251 | { | |
252 | unsigned HOST_WIDE_INT start; | |
253 | unsigned HOST_WIDE_INT width; | |
902cb3b7 | 254 | unsigned HOST_WIDE_INT bitregion_start; |
255 | unsigned HOST_WIDE_INT bitregion_end; | |
256 | /* The size of the allocated memory for val and mask. */ | |
3d3e04ac | 257 | unsigned HOST_WIDE_INT buf_size; |
902cb3b7 | 258 | unsigned HOST_WIDE_INT align_base; |
9991d1d3 | 259 | unsigned HOST_WIDE_INT load_align_base[2]; |
3d3e04ac | 260 | |
261 | unsigned int align; | |
9991d1d3 | 262 | unsigned int load_align[2]; |
3d3e04ac | 263 | unsigned int first_order; |
264 | unsigned int last_order; | |
265 | ||
902cb3b7 | 266 | auto_vec<store_immediate_info *> stores; |
3d3e04ac | 267 | /* We record the first and last original statements in the sequence because |
268 | we'll need their vuse/vdef and replacement position. It's easier to keep | |
269 | track of them separately as 'stores' is reordered by apply_stores. */ | |
270 | gimple *last_stmt; | |
271 | gimple *first_stmt; | |
272 | unsigned char *val; | |
902cb3b7 | 273 | unsigned char *mask; |
3d3e04ac | 274 | |
275 | merged_store_group (store_immediate_info *); | |
276 | ~merged_store_group (); | |
277 | void merge_into (store_immediate_info *); | |
278 | void merge_overlapping (store_immediate_info *); | |
279 | bool apply_stores (); | |
902cb3b7 | 280 | private: |
281 | void do_merge (store_immediate_info *); | |
3d3e04ac | 282 | }; |
283 | ||
284 | /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */ | |
285 | ||
286 | static void | |
287 | dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len) | |
288 | { | |
289 | if (!fd) | |
290 | return; | |
291 | ||
292 | for (unsigned int i = 0; i < len; i++) | |
293 | fprintf (fd, "%x ", ptr[i]); | |
294 | fprintf (fd, "\n"); | |
295 | } | |
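
/* For example, for PTR = { 0x12, 0xef } and LEN == 2 the helper above
   prints "12 ef " followed by a newline.  */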
296 | ||
3d3e04ac | 297 | /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the |
298 | bits between adjacent elements. AMNT should be within | |
299 | [0, BITS_PER_UNIT). | |
300 | Example, AMNT = 2: | |
301 | 00011111|11100000 << 2 = 01111111|10000000 | |
302 | PTR[1] | PTR[0] PTR[1] | PTR[0]. */ | |
303 | ||
304 | static void | |
305 | shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt) | |
306 | { | |
307 | if (amnt == 0) | |
308 | return; | |
309 | ||
310 | unsigned char carry_over = 0U; | |
b1c71535 | 311 | unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt); |
3d3e04ac | 312 | unsigned char clear_mask = (~0U) << amnt; |
313 | ||
314 | for (unsigned int i = 0; i < sz; i++) | |
315 | { | |
316 | unsigned prev_carry_over = carry_over; | |
b1c71535 | 317 | carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt); |
3d3e04ac | 318 | |
319 | ptr[i] <<= amnt; | |
320 | if (i != 0) | |
321 | { | |
322 | ptr[i] &= clear_mask; | |
323 | ptr[i] |= prev_carry_over; | |
324 | } | |
325 | } | |
326 | } | |
327 | ||
328 | /* Like shift_bytes_in_array but for big-endian. | |
329 | Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the | |
330 | bits between adjacent elements. AMNT should be within | |
331 | [0, BITS_PER_UNIT). | |
332 | Example, AMNT = 2: | |
333 | 00011111|11100000 >> 2 = 00000111|11111000 | |
334 | PTR[0] | PTR[1] PTR[0] | PTR[1]. */ | |
335 | ||
336 | static void | |
337 | shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz, | |
338 | unsigned int amnt) | |
339 | { | |
340 | if (amnt == 0) | |
341 | return; | |
342 | ||
343 | unsigned char carry_over = 0U; | |
344 | unsigned char carry_mask = ~(~0U << amnt); | |
345 | ||
346 | for (unsigned int i = 0; i < sz; i++) | |
347 | { | |
348 | unsigned prev_carry_over = carry_over; | |
b1c71535 | 349 | carry_over = ptr[i] & carry_mask; |
3d3e04ac | 350 | |
a425d9af | 351 | carry_over <<= (unsigned char) BITS_PER_UNIT - amnt; |
352 | ptr[i] >>= amnt; | |
353 | ptr[i] |= prev_carry_over; | |
3d3e04ac | 354 | } |
355 | } | |
356 | ||
357 | /* Clear out LEN bits starting from bit START in the byte array | |
358 | PTR. This clears the bits to the *right* from START. | |
359 | START must be within [0, BITS_PER_UNIT) and counts starting from | |
360 | the least significant bit. */ | |
361 | ||
362 | static void | |
363 | clear_bit_region_be (unsigned char *ptr, unsigned int start, | |
364 | unsigned int len) | |
365 | { | |
366 | if (len == 0) | |
367 | return; | |
368 | /* Clear len bits to the right of start. */ | |
369 | else if (len <= start + 1) | |
370 | { | |
371 | unsigned char mask = (~(~0U << len)); | |
372 | mask = mask << (start + 1U - len); | |
373 | ptr[0] &= ~mask; | |
374 | } | |
375 | else if (start != BITS_PER_UNIT - 1) | |
376 | { | |
377 | clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1); | |
378 | clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1, | |
379 | len - (start % BITS_PER_UNIT) - 1); | |
380 | } | |
381 | else if (start == BITS_PER_UNIT - 1 | |
382 | && len > BITS_PER_UNIT) | |
383 | { | |
384 | unsigned int nbytes = len / BITS_PER_UNIT; | |
902cb3b7 | 385 | memset (ptr, 0, nbytes); |
3d3e04ac | 386 | if (len % BITS_PER_UNIT != 0) |
387 | clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1, | |
388 | len % BITS_PER_UNIT); | |
389 | } | |
390 | else | |
391 | gcc_unreachable (); | |
392 | } | |
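
/* A worked example of the above: clear_bit_region_be (ptr, 5, 3) takes
   the len <= start + 1 path with mask = 0b111 << (5 + 1 - 3), i.e.
   0b00111000, so bits 5, 4 and 3 of ptr[0] are cleared.  */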
393 | ||
394 | /* In the byte array PTR clear the bit region starting at bit | |
395 | START that is LEN bits wide. | |
396 | For regions spanning multiple bytes do this recursively until we reach | |
397 | zero LEN or a region contained within a single byte. */ | |
398 | ||
399 | static void | |
400 | clear_bit_region (unsigned char *ptr, unsigned int start, | |
401 | unsigned int len) | |
402 | { | |
403 | /* Degenerate base case. */ | |
404 | if (len == 0) | |
405 | return; | |
406 | else if (start >= BITS_PER_UNIT) | |
407 | clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len); | |
408 | /* Second base case. */ | |
409 | else if ((start + len) <= BITS_PER_UNIT) | |
410 | { | |
b1c71535 | 411 | unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len); |
3d3e04ac | 412 | mask >>= BITS_PER_UNIT - (start + len); |
413 | ||
414 | ptr[0] &= ~mask; | |
415 | ||
416 | return; | |
417 | } | |
418 | /* Clear most significant bits in a byte and proceed with the next byte. */ | |
419 | else if (start != 0) | |
420 | { | |
421 | clear_bit_region (ptr, start, BITS_PER_UNIT - start); | |
3d6071e9 | 422 | clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start)); |
3d3e04ac | 423 | } |
424 | /* Whole bytes need to be cleared. */ | |
425 | else if (start == 0 && len > BITS_PER_UNIT) | |
426 | { | |
427 | unsigned int nbytes = len / BITS_PER_UNIT; | |
7839cdcc | 428 | /* We could recurse on each byte but we clear whole bytes, so a simple |
429 | memset will do. */ | |
b1c71535 | 430 | memset (ptr, '\0', nbytes); |
3d3e04ac | 431 | /* Clear the remaining sub-byte region if there is one. */ |
432 | if (len % BITS_PER_UNIT != 0) | |
433 | clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT); | |
434 | } | |
435 | else | |
436 | gcc_unreachable (); | |
437 | } | |
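
/* A worked example of the above: clear_bit_region (ptr, 3, 7) spans a
   byte boundary, so it first clears the five most significant bits of
   ptr[0] (bits 3 to 7) and then recurses to clear the two least
   significant bits of ptr[1].  */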
438 | ||
439 | /* Write BITLEN bits of EXPR to the byte array PTR at | |
440 | bit position BITPOS. PTR should contain TOTAL_BYTES elements. | |
441 | Return true if the operation succeeded. */ | |
442 | ||
443 | static bool | |
444 | encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos, | |
b1c71535 | 445 | unsigned int total_bytes) |
3d3e04ac | 446 | { |
447 | unsigned int first_byte = bitpos / BITS_PER_UNIT; | |
448 | tree tmp_int = expr; | |
a425d9af | 449 | bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT) |
450 | || (bitpos % BITS_PER_UNIT) | |
517be012 | 451 | || !int_mode_for_size (bitlen, 0).exists ()); |
3d3e04ac | 452 | |
453 | if (!sub_byte_op_p) | |
63eabc9b | 454 | return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0; |
3d3e04ac | 455 | |
456 | /* LITTLE-ENDIAN | |
457 | We are writing a non-byte-sized quantity or at a position that is not | |
458 | at a byte boundary. | |
459 | |--------|--------|--------| ptr + first_byte | |
460 | ^ ^ | |
461 | xxx xxxxxxxx xxx< bp> | |
462 | |______EXPR____| | |
463 | ||
b1c71535 | 464 | First native_encode_expr EXPR into a temporary buffer and shift each |
3d3e04ac | 465 | byte in the buffer by 'bp' (carrying the bits over as necessary). |
466 | |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000| | |
467 | <------bitlen---->< bp> | |
468 | Then we clear the destination bits: | |
469 | |---00000|00000000|000-----| ptr + first_byte | |
470 | <-------bitlen--->< bp> | |
471 | ||
472 | Finally we OR the bytes of the shifted EXPR into the cleared region: | |
473 | |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte. | |
474 | ||
475 | BIG-ENDIAN | |
476 | We are writing a non-byte-sized quantity or at a position that is not | |
477 | at a byte boundary. | |
478 | ptr + first_byte |--------|--------|--------| | |
479 | ^ ^ | |
480 | <bp >xxx xxxxxxxx xxx | |
481 | |_____EXPR_____| | |
482 | ||
b1c71535 | 483 | First native_encode_expr EXPR into a temporary buffer and shift each |
3d3e04ac | 484 | byte in the buffer to the right (carrying the bits over as necessary).
485 | We shift by as much as needed to align the most significant bit of EXPR | |
486 | with bitpos: | |
487 | |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000| | |
488 | <---bitlen----> <bp ><-----bitlen-----> | |
489 | Then we clear the destination bits: | |
490 | ptr + first_byte |-----000||00000000||00000---| | |
491 | <bp ><-------bitlen-----> | |
492 | ||
493 | Finally we OR the bytes of the shifted EXPR into the cleared region: | |
494 | ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|. | |
495 | The awkwardness comes from the fact that bitpos is counted from the | |
496 | most significant bit of a byte. */ | |
497 | ||
d2401312 | 498 | /* We must be dealing with fixed-size data at this point, since the |
499 | total size is also fixed. */ | |
500 | fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr))); | |
3d3e04ac | 501 | /* Allocate an extra byte so that we have space to shift into. */ |
d2401312 | 502 | unsigned int byte_size = GET_MODE_SIZE (mode) + 1; |
3d3e04ac | 503 | unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size); |
b1c71535 | 504 | memset (tmpbuf, '\0', byte_size); |
3d3e04ac | 505 | /* The store detection code should only have allowed constants that are |
506 | accepted by native_encode_expr. */ | |
63eabc9b | 507 | if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0) |
3d3e04ac | 508 | gcc_unreachable (); |
509 | ||
510 | /* The native_encode_expr machinery uses TYPE_MODE to determine how many | |
511 | bytes to write. This means it can write more than | |
512 | ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example | |
513 | write 8 bytes for a bitlen of 40). Skip the bytes that are not within | |
514 | bitlen and zero out the bits that are not relevant as well (that may | |
515 | contain a sign bit due to sign-extension). */ | |
516 | unsigned int padding | |
517 | = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1; | |
a425d9af | 518 | /* On big-endian the padding is at the 'front' so just skip the initial |
519 | bytes. */ | |
520 | if (BYTES_BIG_ENDIAN) | |
521 | tmpbuf += padding; | |
522 | ||
523 | byte_size -= padding; | |
524 | ||
525 | if (bitlen % BITS_PER_UNIT != 0) | |
3d3e04ac | 526 | { |
5e922e43 | 527 | if (BYTES_BIG_ENDIAN) |
a425d9af | 528 | clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1, |
529 | BITS_PER_UNIT - (bitlen % BITS_PER_UNIT)); | |
530 | else | |
531 | clear_bit_region (tmpbuf, bitlen, | |
532 | byte_size * BITS_PER_UNIT - bitlen); | |
3d3e04ac | 533 | } |
a425d9af | 534 | /* Left shifting relies on the last byte being clear if bitlen is |
535 | a multiple of BITS_PER_UNIT, which might not be clear if | |
536 | there are padding bytes. */ | |
537 | else if (!BYTES_BIG_ENDIAN) | |
538 | tmpbuf[byte_size - 1] = '\0'; | |
3d3e04ac | 539 | |
540 | /* Clear the bit region in PTR where the bits from TMPBUF will be | |
b1c71535 | 541 | inserted. */
3d3e04ac | 542 | if (BYTES_BIG_ENDIAN) |
543 | clear_bit_region_be (ptr + first_byte, | |
544 | BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen); | |
545 | else | |
546 | clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen); | |
547 | ||
548 | int shift_amnt; | |
549 | int bitlen_mod = bitlen % BITS_PER_UNIT; | |
550 | int bitpos_mod = bitpos % BITS_PER_UNIT; | |
551 | ||
552 | bool skip_byte = false; | |
553 | if (BYTES_BIG_ENDIAN) | |
554 | { | |
555 | /* BITPOS and BITLEN are exactly aligned and no shifting | |
556 | is necessary. */ | |
557 | if (bitpos_mod + bitlen_mod == BITS_PER_UNIT | |
558 | || (bitpos_mod == 0 && bitlen_mod == 0)) | |
559 | shift_amnt = 0; | |
560 | /* |. . . . . . . .| | |
561 | <bp > <blen >. | |
562 | We always shift right for BYTES_BIG_ENDIAN so shift the beginning | |
563 | of the value until it aligns with 'bp' in the next byte over. */ | |
564 | else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT) | |
565 | { | |
566 | shift_amnt = bitlen_mod + bitpos_mod; | |
567 | skip_byte = bitlen_mod != 0; | |
568 | } | |
569 | /* |. . . . . . . .| | |
570 | <----bp---> | |
571 | <---blen---->. | |
572 | Shift the value right within the same byte so it aligns with 'bp'. */ | |
573 | else | |
574 | shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT; | |
575 | } | |
576 | else | |
577 | shift_amnt = bitpos % BITS_PER_UNIT; | |
578 | ||
579 | /* Create the shifted version of EXPR. */ | |
580 | if (!BYTES_BIG_ENDIAN) | |
b1c71535 | 581 | { |
582 | shift_bytes_in_array (tmpbuf, byte_size, shift_amnt); | |
583 | if (shift_amnt == 0) | |
584 | byte_size--; | |
585 | } | |
3d3e04ac | 586 | else |
587 | { | |
588 | gcc_assert (BYTES_BIG_ENDIAN); | |
589 | shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt); | |
590 | /* If shifting right forced us to move into the next byte, skip the now | |
591 | empty byte. */ | |
592 | if (skip_byte) | |
593 | { | |
594 | tmpbuf++; | |
595 | byte_size--; | |
596 | } | |
597 | } | |
598 | ||
599 | /* Insert the bits from TMPBUF. */ | |
600 | for (unsigned int i = 0; i < byte_size; i++) | |
601 | ptr[first_byte + i] |= tmpbuf[i]; | |
602 | ||
603 | return true; | |
604 | } | |
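
/* A small worked example of the little-endian path above: writing the
   6-bit value 0b101101 at BITPOS == 10 gives FIRST_BYTE == 1 and a
   shift amount of 2.  The encoded byte 0x2d is shifted left into 0xb4,
   bits 2 to 7 of ptr[1] are cleared and ptr[1] |= 0xb4 is applied,
   leaving the two least significant bits of ptr[1] untouched.  */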
605 | ||
606 | /* Sorting function for store_immediate_info objects. | |
607 | Sorts them by bitposition. */ | |
608 | ||
609 | static int | |
610 | sort_by_bitpos (const void *x, const void *y) | |
611 | { | |
612 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; | |
613 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; | |
614 | ||
61d052e5 | 615 | if ((*tmp)->bitpos < (*tmp2)->bitpos) |
3d3e04ac | 616 | return -1; |
617 | else if ((*tmp)->bitpos > (*tmp2)->bitpos) | |
618 | return 1; | |
61d052e5 | 619 | else |
ca4982c2 | 620 | /* If they are the same, use the order, which is guaranteed to
621 | be different. */ | |
622 | return (*tmp)->order - (*tmp2)->order; | |
3d3e04ac | 623 | } |
624 | ||
625 | /* Sorting function for store_immediate_info objects. | |
626 | Sorts them by the order field. */ | |
627 | ||
628 | static int | |
629 | sort_by_order (const void *x, const void *y) | |
630 | { | |
631 | store_immediate_info *const *tmp = (store_immediate_info * const *) x; | |
632 | store_immediate_info *const *tmp2 = (store_immediate_info * const *) y; | |
633 | ||
634 | if ((*tmp)->order < (*tmp2)->order) | |
635 | return -1; | |
636 | else if ((*tmp)->order > (*tmp2)->order) | |
637 | return 1; | |
638 | ||
639 | gcc_unreachable (); | |
640 | } | |
641 | ||
642 | /* Initialize a merged_store_group object from a store_immediate_info | |
643 | object. */ | |
644 | ||
645 | merged_store_group::merged_store_group (store_immediate_info *info) | |
646 | { | |
647 | start = info->bitpos; | |
648 | width = info->bitsize; | |
902cb3b7 | 649 | bitregion_start = info->bitregion_start; |
650 | bitregion_end = info->bitregion_end; | |
3d3e04ac | 651 | /* VAL has memory allocated for it in apply_stores once the group |
652 | width has been finalized. */ | |
653 | val = NULL; | |
902cb3b7 | 654 | mask = NULL; |
655 | unsigned HOST_WIDE_INT align_bitpos = 0; | |
656 | get_object_alignment_1 (gimple_assign_lhs (info->stmt), | |
657 | &align, &align_bitpos); | |
658 | align_base = start - align_bitpos; | |
9991d1d3 | 659 | for (int i = 0; i < 2; ++i) |
660 | { | |
661 | store_operand_info &op = info->ops[i]; | |
662 | if (op.base_addr == NULL_TREE) | |
663 | { | |
664 | load_align[i] = 0; | |
665 | load_align_base[i] = 0; | |
666 | } | |
667 | else | |
668 | { | |
669 | get_object_alignment_1 (op.val, &load_align[i], &align_bitpos); | |
670 | load_align_base[i] = op.bitpos - align_bitpos; | |
671 | } | |
672 | } | |
3d3e04ac | 673 | stores.create (1); |
674 | stores.safe_push (info); | |
675 | last_stmt = info->stmt; | |
676 | last_order = info->order; | |
677 | first_stmt = last_stmt; | |
678 | first_order = last_order; | |
679 | buf_size = 0; | |
680 | } | |
681 | ||
682 | merged_store_group::~merged_store_group () | |
683 | { | |
684 | if (val) | |
685 | XDELETEVEC (val); | |
686 | } | |
687 | ||
902cb3b7 | 688 | /* Helper method for merge_into and merge_overlapping to do |
689 | the common part. */ | |
3d3e04ac | 690 | void |
902cb3b7 | 691 | merged_store_group::do_merge (store_immediate_info *info) |
3d3e04ac | 692 | { |
902cb3b7 | 693 | bitregion_start = MIN (bitregion_start, info->bitregion_start); |
694 | bitregion_end = MAX (bitregion_end, info->bitregion_end); | |
695 | ||
696 | unsigned int this_align; | |
697 | unsigned HOST_WIDE_INT align_bitpos = 0; | |
698 | get_object_alignment_1 (gimple_assign_lhs (info->stmt), | |
699 | &this_align, &align_bitpos); | |
700 | if (this_align > align) | |
701 | { | |
702 | align = this_align; | |
703 | align_base = info->bitpos - align_bitpos; | |
704 | } | |
9991d1d3 | 705 | for (int i = 0; i < 2; ++i) |
706 | { | |
707 | store_operand_info &op = info->ops[i]; | |
708 | if (!op.base_addr) | |
709 | continue; | |
710 | ||
711 | get_object_alignment_1 (op.val, &this_align, &align_bitpos); | |
712 | if (this_align > load_align[i]) | |
713 | { | |
714 | load_align[i] = this_align; | |
715 | load_align_base[i] = op.bitpos - align_bitpos; | |
716 | } | |
717 | } | |
3d3e04ac | 718 | |
3d3e04ac | 719 | gimple *stmt = info->stmt; |
720 | stores.safe_push (info); | |
721 | if (info->order > last_order) | |
722 | { | |
723 | last_order = info->order; | |
724 | last_stmt = stmt; | |
725 | } | |
726 | else if (info->order < first_order) | |
727 | { | |
728 | first_order = info->order; | |
729 | first_stmt = stmt; | |
730 | } | |
731 | } | |
732 | ||
902cb3b7 | 733 | /* Merge a store recorded by INFO into this merged store. |
734 | The store must not overlap with the existing recorded | |
735 | stores. */ | |
736 | ||
737 | void | |
738 | merged_store_group::merge_into (store_immediate_info *info) | |
739 | { | |
740 | unsigned HOST_WIDE_INT wid = info->bitsize; | |
741 | /* Make sure we're inserting in the position we think we're inserting. */ | |
742 | gcc_assert (info->bitpos >= start + width | |
743 | && info->bitregion_start <= bitregion_end); | |
744 | ||
745 | width += wid; | |
746 | do_merge (info); | |
747 | } | |
748 | ||
3d3e04ac | 749 | /* Merge a store described by INFO into this merged store. |
750 | INFO overlaps in some way with the current store (i.e. it's not contiguous | |
751 | which is handled by merged_store_group::merge_into). */ | |
752 | ||
753 | void | |
754 | merged_store_group::merge_overlapping (store_immediate_info *info) | |
755 | { | |
3d3e04ac | 756 | /* If the store extends the size of the group, extend the width. */ |
902cb3b7 | 757 | if (info->bitpos + info->bitsize > start + width) |
3d3e04ac | 758 | width += info->bitpos + info->bitsize - (start + width); |
759 | ||
902cb3b7 | 760 | do_merge (info); |
3d3e04ac | 761 | } |
762 | ||
763 | /* Go through all the recorded stores in this group in program order and | |
764 | apply their values to the VAL byte array to create the final merged | |
765 | value. Return true if the operation succeeded. */ | |
766 | ||
767 | bool | |
768 | merged_store_group::apply_stores () | |
769 | { | |
902cb3b7 | 770 | /* Bail out if the bit region isn't byte-aligned or there is just one
771 | store in the group, in which case there is nothing to merge. */ | |
772 | if (bitregion_start % BITS_PER_UNIT != 0 | |
773 | || bitregion_end % BITS_PER_UNIT != 0 | |
3d3e04ac | 774 | || stores.length () == 1) |
775 | return false; | |
776 | ||
777 | stores.qsort (sort_by_order); | |
902cb3b7 | 778 | store_immediate_info *info; |
3d3e04ac | 779 | unsigned int i; |
780 | /* Create a buffer of a size that is 2 times the number of bytes we're | |
781 | storing. That way native_encode_expr can write power-of-2-sized | |
782 | chunks without overrunning. */ | |
902cb3b7 | 783 | buf_size = 2 * ((bitregion_end - bitregion_start) / BITS_PER_UNIT); |
784 | val = XNEWVEC (unsigned char, 2 * buf_size); | |
785 | mask = val + buf_size; | |
786 | memset (val, 0, buf_size); | |
787 | memset (mask, ~0U, buf_size); | |
3d3e04ac | 788 | |
789 | FOR_EACH_VEC_ELT (stores, i, info) | |
790 | { | |
902cb3b7 | 791 | unsigned int pos_in_buffer = info->bitpos - bitregion_start; |
9991d1d3 | 792 | tree cst = NULL_TREE; |
793 | if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE) | |
794 | cst = info->ops[0].val; | |
795 | else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE) | |
796 | cst = info->ops[1].val; | |
797 | bool ret = true; | |
798 | if (cst) | |
799 | ret = encode_tree_to_bitpos (cst, val, info->bitsize, | |
800 | pos_in_buffer, buf_size); | |
801 | if (cst && dump_file && (dump_flags & TDF_DETAILS)) | |
3d3e04ac | 802 | { |
803 | if (ret) | |
804 | { | |
805 | fprintf (dump_file, "After writing "); | |
9991d1d3 | 806 | print_generic_expr (dump_file, cst, 0); |
3d3e04ac | 807 | fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC |
808 | " at position %d the merged region contains:\n", | |
809 | info->bitsize, pos_in_buffer); | |
810 | dump_char_array (dump_file, val, buf_size); | |
811 | } | |
812 | else | |
813 | fprintf (dump_file, "Failed to merge stores\n"); | |
814 | } | |
815 | if (!ret) | |
816 | return false; | |
902cb3b7 | 817 | unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT); |
818 | if (BYTES_BIG_ENDIAN) | |
a5961a9d | 819 | clear_bit_region_be (m, (BITS_PER_UNIT - 1 |
820 | - (pos_in_buffer % BITS_PER_UNIT)), | |
821 | info->bitsize); | |
902cb3b7 | 822 | else |
823 | clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize); | |
3d3e04ac | 824 | } |
825 | return true; | |
826 | } | |
827 | ||
828 | /* Structure describing the store chain. */ | |
829 | ||
830 | struct imm_store_chain_info | |
831 | { | |
3a3ba7de | 832 | /* Doubly-linked list that imposes an order on chain processing. |
833 | PNXP (prev's next pointer) points to the head of a list, or to | |
834 | the next field in the previous chain in the list. | |
835 | See pass_store_merging::m_stores_head for more rationale. */ | |
836 | imm_store_chain_info *next, **pnxp; | |
f85e7cb7 | 837 | tree base_addr; |
902cb3b7 | 838 | auto_vec<store_immediate_info *> m_store_info; |
3d3e04ac | 839 | auto_vec<merged_store_group *> m_merged_store_groups; |
840 | ||
3a3ba7de | 841 | imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a) |
842 | : next (inspt), pnxp (&inspt), base_addr (b_a) | |
843 | { | |
844 | inspt = this; | |
845 | if (next) | |
846 | { | |
847 | gcc_checking_assert (pnxp == next->pnxp); | |
848 | next->pnxp = &next; | |
849 | } | |
850 | } | |
851 | ~imm_store_chain_info () | |
852 | { | |
853 | *pnxp = next; | |
854 | if (next) | |
855 | { | |
856 | gcc_checking_assert (&next == next->pnxp); | |
857 | next->pnxp = pnxp; | |
858 | } | |
859 | } | |
f85e7cb7 | 860 | bool terminate_and_process_chain (); |
3d3e04ac | 861 | bool coalesce_immediate_stores (); |
f85e7cb7 | 862 | bool output_merged_store (merged_store_group *); |
863 | bool output_merged_stores (); | |
3d3e04ac | 864 | }; |
865 | ||
866 | const pass_data pass_data_tree_store_merging = { | |
867 | GIMPLE_PASS, /* type */ | |
868 | "store-merging", /* name */ | |
869 | OPTGROUP_NONE, /* optinfo_flags */ | |
870 | TV_GIMPLE_STORE_MERGING, /* tv_id */ | |
871 | PROP_ssa, /* properties_required */ | |
872 | 0, /* properties_provided */ | |
873 | 0, /* properties_destroyed */ | |
874 | 0, /* todo_flags_start */ | |
875 | TODO_update_ssa, /* todo_flags_finish */ | |
876 | }; | |
877 | ||
878 | class pass_store_merging : public gimple_opt_pass | |
879 | { | |
880 | public: | |
881 | pass_store_merging (gcc::context *ctxt) | |
2d27e5c1 | 882 | : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head () |
3d3e04ac | 883 | { |
884 | } | |
885 | ||
902cb3b7 | 886 | /* Pass not supported for PDP-endianness, nor for insane hosts |
887 | or target character sizes where native_{encode,interpret}_expr | |
888 | doesn't work properly. */ | |
3d3e04ac | 889 | virtual bool |
890 | gate (function *) | |
891 | { | |
902cb3b7 | 892 | return flag_store_merging |
893 | && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN | |
894 | && CHAR_BIT == 8 | |
895 | && BITS_PER_UNIT == 8; | |
3d3e04ac | 896 | } |
897 | ||
898 | virtual unsigned int execute (function *); | |
899 | ||
900 | private: | |
901 | hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores; | |
902 | ||
3a3ba7de | 903 | /* Form a doubly-linked stack of the elements of m_stores, so that |
904 | we can iterate over them in a predictable way. Using this order | |
905 | avoids extraneous differences in the compiler output just because | |
906 | of tree pointer variations (e.g. different chains end up in | |
907 | different positions of m_stores, so they are handled in different | |
908 | orders, so they allocate or release SSA names in different | |
909 | orders, and when they get reused, subsequent passes end up | |
910 | getting different SSA names, which may ultimately change | |
911 | decisions when going out of SSA). */ | |
912 | imm_store_chain_info *m_stores_head; | |
913 | ||
9991d1d3 | 914 | void process_store (gimple *); |
3d3e04ac | 915 | bool terminate_and_process_all_chains (); |
c35548ce | 916 | bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *); |
f85e7cb7 | 917 | bool terminate_and_release_chain (imm_store_chain_info *); |
3d3e04ac | 918 | }; // class pass_store_merging |
919 | ||
920 | /* Terminate and process all recorded chains. Return true if any changes | |
921 | were made. */ | |
922 | ||
923 | bool | |
924 | pass_store_merging::terminate_and_process_all_chains () | |
925 | { | |
3d3e04ac | 926 | bool ret = false; |
3a3ba7de | 927 | while (m_stores_head) |
928 | ret |= terminate_and_release_chain (m_stores_head); | |
929 | gcc_assert (m_stores.elements () == 0); | |
930 | gcc_assert (m_stores_head == NULL); | |
3d3e04ac | 931 | |
932 | return ret; | |
933 | } | |
934 | ||
c35548ce | 935 | /* Terminate all chains that are affected by the statement STMT. |
936 | CHAIN_INFO, if non-NULL, is the chain we should ignore in | |
937 | the checks. */ | |
3d3e04ac | 938 | |
939 | bool | |
4de7f8df | 940 | pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info |
f85e7cb7 | 941 | **chain_info, |
3d3e04ac | 942 | gimple *stmt) |
943 | { | |
944 | bool ret = false; | |
945 | ||
946 | /* If the statement doesn't touch memory it can't alias. */ | |
947 | if (!gimple_vuse (stmt)) | |
948 | return false; | |
949 | ||
c35548ce | 950 | for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next) |
3d3e04ac | 951 | { |
c35548ce | 952 | next = cur->next; |
953 | ||
954 | /* We already checked all the stores in chain_info and terminated the | |
955 | chain if necessary. Skip it here. */ | |
956 | if (chain_info && *chain_info == cur) | |
957 | continue; | |
958 | ||
9991d1d3 | 959 | store_immediate_info *info; |
960 | unsigned int i; | |
c35548ce | 961 | FOR_EACH_VEC_ELT (cur->m_store_info, i, info) |
3d3e04ac | 962 | { |
9991d1d3 | 963 | if (ref_maybe_used_by_stmt_p (stmt, gimple_assign_lhs (info->stmt)) |
964 | || stmt_may_clobber_ref_p (stmt, gimple_assign_lhs (info->stmt))) | |
3d3e04ac | 965 | { |
9991d1d3 | 966 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3d3e04ac | 967 | { |
9991d1d3 | 968 | fprintf (dump_file, "stmt causes chain termination:\n"); |
969 | print_gimple_stmt (dump_file, stmt, 0); | |
3d3e04ac | 970 | } |
c35548ce | 971 | terminate_and_release_chain (cur); |
9991d1d3 | 972 | ret = true; |
973 | break; | |
3d3e04ac | 974 | } |
975 | } | |
976 | } | |
977 | ||
3d3e04ac | 978 | return ret; |
979 | } | |
980 | ||
981 | /* Helper function. Terminate the chain recorded in CHAIN_INFO. | |
982 | Return true if the merging and output was successful. The m_stores | |
983 | entry is removed after the processing in any case. */ | |
984 | ||
985 | bool | |
f85e7cb7 | 986 | pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info) |
3d3e04ac | 987 | { |
f85e7cb7 | 988 | bool ret = chain_info->terminate_and_process_chain (); |
989 | m_stores.remove (chain_info->base_addr); | |
990 | delete chain_info; | |
3d3e04ac | 991 | return ret; |
992 | } | |
993 | ||
9991d1d3 | 994 | /* Return true if stmts in between FIRST and LAST (both inclusive)
995 | may clobber REF. FIRST and LAST must be in the same basic block and | |
996 | have non-NULL vdef. */ | |
997 | ||
998 | bool | |
999 | stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref) | |
1000 | { | |
1001 | ao_ref r; | |
1002 | ao_ref_init (&r, ref); | |
1003 | unsigned int count = 0; | |
1004 | tree vop = gimple_vdef (last); | |
1005 | gimple *stmt; | |
1006 | ||
1007 | gcc_checking_assert (gimple_bb (first) == gimple_bb (last)); | |
1008 | do | |
1009 | { | |
1010 | stmt = SSA_NAME_DEF_STMT (vop); | |
1011 | if (stmt_may_clobber_ref_p_1 (stmt, &r)) | |
1012 | return true; | |
1013 | /* Avoid quadratic compile time by bounding the number of checks | |
1014 | we perform. */ | |
1015 | if (++count > MAX_STORE_ALIAS_CHECKS) | |
1016 | return true; | |
1017 | vop = gimple_vuse (stmt); | |
1018 | } | |
1019 | while (stmt != first); | |
1020 | return false; | |
1021 | } | |
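
/* For illustration, in a block containing
     s.a = 1; s.b = 2; t = u;
   walking the virtual use-def chain backwards from the vdef of "t = u"
   visits each store in turn, so asking whether these statements may
   clobber "s.b" returns true once "s.b = 2" is inspected.  */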
1022 | ||
1023 | /* Return true if INFO->ops[IDX] is mergeable with the | |
1024 | corresponding loads already in MERGED_STORE group. | |
1025 | BASE_ADDR is the base address of the whole store group. */ | |
1026 | ||
1027 | bool | |
1028 | compatible_load_p (merged_store_group *merged_store, | |
1029 | store_immediate_info *info, | |
1030 | tree base_addr, int idx) | |
1031 | { | |
1032 | store_immediate_info *infof = merged_store->stores[0]; | |
1033 | if (!info->ops[idx].base_addr | |
c35548ce | 1034 | || info->ops[idx].bit_not_p != infof->ops[idx].bit_not_p |
9991d1d3 | 1035 | || (info->ops[idx].bitpos - infof->ops[idx].bitpos |
1036 | != info->bitpos - infof->bitpos) | |
1037 | || !operand_equal_p (info->ops[idx].base_addr, | |
1038 | infof->ops[idx].base_addr, 0)) | |
1039 | return false; | |
1040 | ||
1041 | store_immediate_info *infol = merged_store->stores.last (); | |
1042 | tree load_vuse = gimple_vuse (info->ops[idx].stmt); | |
1043 | /* In this case all vuses should be the same, e.g. | |
1044 | _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4; | |
1045 | or | |
1046 | _1 = s.a; _2 = s.b; t.a = _1; t.b = _2; | |
1047 | and we can emit the coalesced load next to any of those loads. */ | |
1048 | if (gimple_vuse (infof->ops[idx].stmt) == load_vuse | |
1049 | && gimple_vuse (infol->ops[idx].stmt) == load_vuse) | |
1050 | return true; | |
1051 | ||
1052 | /* Otherwise, at least for now require that the load has the same | |
1053 | vuse as the store. See following examples. */ | |
1054 | if (gimple_vuse (info->stmt) != load_vuse) | |
1055 | return false; | |
1056 | ||
1057 | if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt) | |
1058 | || (infof != infol | |
1059 | && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt))) | |
1060 | return false; | |
1061 | ||
1062 | /* If the load is from the same location as the store, already | |
1063 | the construction of the immediate chain info guarantees no intervening | |
1064 | stores, so no further checks are needed. Example: | |
1065 | _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */ | |
1066 | if (info->ops[idx].bitpos == info->bitpos | |
1067 | && operand_equal_p (info->ops[idx].base_addr, base_addr, 0)) | |
1068 | return true; | |
1069 | ||
1070 | /* Otherwise, we need to punt if any of the loads can be clobbered by any | |
1071 | of the stores in the group, or any other stores in between those. | |
1072 | Previous calls to compatible_load_p ensured that for all the | |
1073 | merged_store->stores IDX loads, no stmts starting with | |
1074 | merged_store->first_stmt and ending right before merged_store->last_stmt | |
1075 | clobbers those loads. */ | |
1076 | gimple *first = merged_store->first_stmt; | |
1077 | gimple *last = merged_store->last_stmt; | |
1078 | unsigned int i; | |
1079 | store_immediate_info *infoc; | |
1080 | /* The stores are sorted by increasing store bitpos, so if the store in | |
1081 | info->stmt comes before the current first load, we'll be changing | |
1082 | merged_store->first_stmt. In that case we need to give up if | |
1083 | any of the earlier processed loads can be clobbered by the stmts in | |
1084 | the new range. */ | |
1085 | if (info->order < merged_store->first_order) | |
1086 | { | |
1087 | FOR_EACH_VEC_ELT (merged_store->stores, i, infoc) | |
1088 | if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val)) | |
1089 | return false; | |
1090 | first = info->stmt; | |
1091 | } | |
1092 | /* Similarly, we could change merged_store->last_stmt, so ensure | |
1093 | in that case no stmts in the new range clobber any of the earlier | |
1094 | processed loads. */ | |
1095 | else if (info->order > merged_store->last_order) | |
1096 | { | |
1097 | FOR_EACH_VEC_ELT (merged_store->stores, i, infoc) | |
1098 | if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val)) | |
1099 | return false; | |
1100 | last = info->stmt; | |
1101 | } | |
1102 | /* And finally, we'd be adding a new load to the set, ensure it isn't | |
1103 | clobbered in the new range. */ | |
1104 | if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val)) | |
1105 | return false; | |
1106 | ||
1107 | /* Otherwise, we are looking for: | |
1108 | _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4; | |
1109 | or | |
1110 | _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */ | |
1111 | return true; | |
1112 | } | |
1113 | ||
3d3e04ac | 1114 | /* Go through the candidate stores recorded in m_store_info and merge them |
1115 | into merged_store_group objects recorded into m_merged_store_groups | |
1116 | representing the widened stores. Return true if coalescing was successful | |
1117 | and the number of widened stores is fewer than the original number | |
1118 | of stores. */ | |
1119 | ||
1120 | bool | |
1121 | imm_store_chain_info::coalesce_immediate_stores () | |
1122 | { | |
1123 | /* Anything less can't be processed. */ | |
1124 | if (m_store_info.length () < 2) | |
1125 | return false; | |
1126 | ||
1127 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1128 | fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n", | |
1129 | m_store_info.length ()); | |
1130 | ||
1131 | store_immediate_info *info; | |
1132 | unsigned int i; | |
1133 | ||
1134 | /* Order the stores by the bitposition they write to. */ | |
1135 | m_store_info.qsort (sort_by_bitpos); | |
1136 | ||
1137 | info = m_store_info[0]; | |
1138 | merged_store_group *merged_store = new merged_store_group (info); | |
1139 | ||
1140 | FOR_EACH_VEC_ELT (m_store_info, i, info) | |
1141 | { | |
1142 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1143 | { | |
1144 | fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC | |
1145 | " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n", | |
1146 | i, info->bitsize, info->bitpos); | |
1ffa4346 | 1147 | print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt)); |
3d3e04ac | 1148 | fprintf (dump_file, "\n------------\n"); |
1149 | } | |
1150 | ||
1151 | if (i == 0) | |
1152 | continue; | |
1153 | ||
1154 | /* |---store 1---| | |
1155 | |---store 2---| | |
1156 | Overlapping stores. */ | |
1157 | unsigned HOST_WIDE_INT start = info->bitpos; | |
1158 | if (IN_RANGE (start, merged_store->start, | |
1159 | merged_store->start + merged_store->width - 1)) | |
1160 | { | |
9991d1d3 | 1161 | /* Only allow overlapping stores of constants. */ |
1162 | if (info->rhs_code == INTEGER_CST | |
1163 | && merged_store->stores[0]->rhs_code == INTEGER_CST) | |
1164 | { | |
1165 | merged_store->merge_overlapping (info); | |
1166 | continue; | |
1167 | } | |
3d3e04ac | 1168 | } |
9991d1d3 | 1169 | /* |---store 1---||---store 2---| |
1170 | This store is consecutive to the previous one. | |
1171 | Merge it into the current store group. There can be gaps in between | |
1172 | the stores, but there can't be gaps in between bitregions. */ | |
1173 | else if (info->bitregion_start <= merged_store->bitregion_end | |
832a73b9 | 1174 | && info->rhs_code == merged_store->stores[0]->rhs_code |
1175 | && info->bit_not_p == merged_store->stores[0]->bit_not_p) | |
3d3e04ac | 1176 | { |
9991d1d3 | 1177 | store_immediate_info *infof = merged_store->stores[0]; |
1178 | ||
1179 | /* All the rhs_code ops that take 2 operands are commutative; | |
1180 | swap the operands if it could make the operands compatible. */ | |
1181 | if (infof->ops[0].base_addr | |
1182 | && infof->ops[1].base_addr | |
1183 | && info->ops[0].base_addr | |
1184 | && info->ops[1].base_addr | |
1185 | && (info->ops[1].bitpos - infof->ops[0].bitpos | |
1186 | == info->bitpos - infof->bitpos) | |
1187 | && operand_equal_p (info->ops[1].base_addr, | |
1188 | infof->ops[0].base_addr, 0)) | |
1189 | std::swap (info->ops[0], info->ops[1]); | |
1190 | if ((!infof->ops[0].base_addr | |
1191 | || compatible_load_p (merged_store, info, base_addr, 0)) | |
1192 | && (!infof->ops[1].base_addr | |
1193 | || compatible_load_p (merged_store, info, base_addr, 1))) | |
1194 | { | |
1195 | merged_store->merge_into (info); | |
1196 | continue; | |
1197 | } | |
1198 | } | |
3d3e04ac | 1199 | |
9991d1d3 | 1200 | /* |---store 1---| <gap> |---store 2---|. |
1201 | Gap between stores or the rhs not compatible. Start a new group. */ | |
3d3e04ac | 1202 | |
9991d1d3 | 1203 | /* Try to apply all the stores recorded for the group to determine |
1204 | the bitpattern they write and discard it if that fails. | |
1205 | This will also reject single-store groups. */ | |
1206 | if (!merged_store->apply_stores ()) | |
1207 | delete merged_store; | |
1208 | else | |
1209 | m_merged_store_groups.safe_push (merged_store); | |
3d3e04ac | 1210 | |
9991d1d3 | 1211 | merged_store = new merged_store_group (info); |
3d3e04ac | 1212 | } |
1213 | ||
902cb3b7 | 1214 | /* Record or discard the last store group. */ |
1215 | if (!merged_store->apply_stores ()) | |
1216 | delete merged_store; | |
1217 | else | |
1218 | m_merged_store_groups.safe_push (merged_store); | |
3d3e04ac | 1219 | |
1220 | gcc_assert (m_merged_store_groups.length () <= m_store_info.length ()); | |
1221 | bool success | |
1222 | = !m_merged_store_groups.is_empty () | |
1223 | && m_merged_store_groups.length () < m_store_info.length (); | |
1224 | ||
1225 | if (success && dump_file) | |
1226 | fprintf (dump_file, "Coalescing successful!\n" | |
902cb3b7 | 1227 | "Merged into %u stores\n", |
1228 | m_merged_store_groups.length ()); | |
3d3e04ac | 1229 | |
1230 | return success; | |
1231 | } | |
1232 | ||
9991d1d3 | 1233 | /* Return the type to use for the merged stores or loads described by STMTS. |
1234 | This is needed to get the alias sets right. If IS_LOAD, look for rhs, | |
1235 | otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_* | |
1236 | of the MEM_REFs if any. */ | |
3d3e04ac | 1237 | |
1238 | static tree | |
9991d1d3 | 1239 | get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load, |
1240 | unsigned short *cliquep, unsigned short *basep) | |
3d3e04ac | 1241 | { |
1242 | gimple *stmt; | |
1243 | unsigned int i; | |
9991d1d3 | 1244 | tree type = NULL_TREE; |
1245 | tree ret = NULL_TREE; | |
1246 | *cliquep = 0; | |
1247 | *basep = 0; | |
3d3e04ac | 1248 | |
1249 | FOR_EACH_VEC_ELT (stmts, i, stmt) | |
1250 | { | |
9991d1d3 | 1251 | tree ref = is_load ? gimple_assign_rhs1 (stmt) |
1252 | : gimple_assign_lhs (stmt); | |
1253 | tree type1 = reference_alias_ptr_type (ref); | |
1254 | tree base = get_base_address (ref); | |
3d3e04ac | 1255 | |
9991d1d3 | 1256 | if (i == 0) |
1257 | { | |
1258 | if (TREE_CODE (base) == MEM_REF) | |
1259 | { | |
1260 | *cliquep = MR_DEPENDENCE_CLIQUE (base); | |
1261 | *basep = MR_DEPENDENCE_BASE (base); | |
1262 | } | |
1263 | ret = type = type1; | |
1264 | continue; | |
1265 | } | |
3d3e04ac | 1266 | if (!alias_ptr_types_compatible_p (type, type1)) |
9991d1d3 | 1267 | ret = ptr_type_node; |
1268 | if (TREE_CODE (base) != MEM_REF | |
1269 | || *cliquep != MR_DEPENDENCE_CLIQUE (base) | |
1270 | || *basep != MR_DEPENDENCE_BASE (base)) | |
1271 | { | |
1272 | *cliquep = 0; | |
1273 | *basep = 0; | |
1274 | } | |
3d3e04ac | 1275 | } |
9991d1d3 | 1276 | return ret; |
3d3e04ac | 1277 | } |
1278 | ||
1279 | /* Return the location_t information we can find among the statements | |
1280 | in STMTS. */ | |
1281 | ||
1282 | static location_t | |
9991d1d3 | 1283 | get_location_for_stmts (vec<gimple *> &stmts) |
3d3e04ac | 1284 | { |
1285 | gimple *stmt; | |
1286 | unsigned int i; | |
1287 | ||
1288 | FOR_EACH_VEC_ELT (stmts, i, stmt) | |
1289 | if (gimple_has_location (stmt)) | |
1290 | return gimple_location (stmt); | |
1291 | ||
1292 | return UNKNOWN_LOCATION; | |
1293 | } | |
1294 | ||
1295 | /* Used to describe a store resulting from splitting a wide store into | |
1296 | smaller regularly-sized stores in split_group. */ | |
1297 | ||
1298 | struct split_store | |
1299 | { | |
1300 | unsigned HOST_WIDE_INT bytepos; | |
1301 | unsigned HOST_WIDE_INT size; | |
1302 | unsigned HOST_WIDE_INT align; | |
9991d1d3 | 1303 | auto_vec<store_immediate_info *> orig_stores; |
902cb3b7 | 1304 | /* True if there is a single orig stmt covering the whole split store. */ |
1305 | bool orig; | |
3d3e04ac | 1306 | split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, |
1307 | unsigned HOST_WIDE_INT); | |
1308 | }; | |
1309 | ||
1310 | /* Simple constructor. */ | |
1311 | ||
1312 | split_store::split_store (unsigned HOST_WIDE_INT bp, | |
1313 | unsigned HOST_WIDE_INT sz, | |
1314 | unsigned HOST_WIDE_INT al) | |
902cb3b7 | 1315 | : bytepos (bp), size (sz), align (al), orig (false) |
3d3e04ac | 1316 | { |
9991d1d3 | 1317 | orig_stores.create (0); |
3d3e04ac | 1318 | } |
1319 | ||
9991d1d3 | 1320 | /* Record all stores in GROUP that write to the region of size BITSIZE
1321 | starting at BITPOS. Record infos for such statements in STORES if | |
1322 | non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO | |
1323 | if there is exactly one original store in the range. */ | |
3d3e04ac | 1324 | |
902cb3b7 | 1325 | static store_immediate_info * |
9991d1d3 | 1326 | find_constituent_stores (struct merged_store_group *group, |
1327 | vec<store_immediate_info *> *stores, | |
1328 | unsigned int *first, | |
1329 | unsigned HOST_WIDE_INT bitpos, | |
1330 | unsigned HOST_WIDE_INT bitsize) | |
3d3e04ac | 1331 | { |
902cb3b7 | 1332 | store_immediate_info *info, *ret = NULL; |
3d3e04ac | 1333 | unsigned int i; |
902cb3b7 | 1334 | bool second = false; |
1335 | bool update_first = true; | |
3d3e04ac | 1336 | unsigned HOST_WIDE_INT end = bitpos + bitsize; |
902cb3b7 | 1337 | for (i = *first; group->stores.iterate (i, &info); ++i) |
3d3e04ac | 1338 | { |
1339 | unsigned HOST_WIDE_INT stmt_start = info->bitpos; | |
1340 | unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize; | |
902cb3b7 | 1341 | if (stmt_end <= bitpos) |
1342 | { | |
1343 | /* BITPOS passed to this function never decreases from within the | |
1344 | same split_group call, so optimize and don't scan info records | |
1345 | which are known to end before or at BITPOS next time. | |
1346 | Only do it if all stores before this one also pass this. */ | |
1347 | if (update_first) | |
1348 | *first = i + 1; | |
1349 | continue; | |
1350 | } | |
1351 | else | |
1352 | update_first = false; | |
1353 | ||
3d3e04ac | 1354 | /* The stores in GROUP are ordered by bitposition, so if we're past
902cb3b7 | 1355 | the region for this group, return early. */
1356 | if (stmt_start >= end) | |
1357 | return ret; | |
1358 | ||
9991d1d3 | 1359 | if (stores) |
902cb3b7 | 1360 | { |
9991d1d3 | 1361 | stores->safe_push (info); |
902cb3b7 | 1362 | if (ret) |
1363 | { | |
1364 | ret = NULL; | |
1365 | second = true; | |
1366 | } | |
1367 | } | |
1368 | else if (ret) | |
1369 | return NULL; | |
1370 | if (!second) | |
1371 | ret = info; | |
3d3e04ac | 1372 | } |
902cb3b7 | 1373 | return ret; |
3d3e04ac | 1374 | } |
1375 | ||
871a91ec | 1376 | /* Return how many SSA_NAMEs used to compute the value to store in the INFO
1377 | store have multiple uses. If any SSA_NAME has multiple uses, also | |
1378 | count statements needed to compute it. */ | |
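/* Illustrative sketch (SSA names hypothetical): for

     _1 = *q;
     _2 = _1 ^ 12345;
     *p = _2;

   where _1 also has uses outside this chain, the load cannot go away with
   the store, so count_multiple_uses returns 1 for the statement that must
   be kept regardless of merging.  */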
1379 | ||
1380 | static unsigned | |
1381 | count_multiple_uses (store_immediate_info *info) | |
1382 | { | |
1383 | gimple *stmt = info->stmt; | |
1384 | unsigned ret = 0; | |
1385 | switch (info->rhs_code) | |
1386 | { | |
1387 | case INTEGER_CST: | |
1388 | return 0; | |
1389 | case BIT_AND_EXPR: | |
1390 | case BIT_IOR_EXPR: | |
1391 | case BIT_XOR_EXPR: | |
832a73b9 | 1392 | if (info->bit_not_p) |
1393 | { | |
1394 | if (!has_single_use (gimple_assign_rhs1 (stmt))) | |
1395 | ret = 1; /* Fall through below to return | |
1396 | the BIT_NOT_EXPR stmt and then | |
1397 | BIT_{AND,IOR,XOR}_EXPR and anything it | |
1398 | uses. */ | |
1399 | else | |
1400 | /* After this, stmt is the BIT_NOT_EXPR. */ | |
1401 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
1402 | } | |
871a91ec | 1403 | if (!has_single_use (gimple_assign_rhs1 (stmt))) |
1404 | { | |
1405 | ret += 1 + info->ops[0].bit_not_p; | |
1406 | if (info->ops[1].base_addr) | |
1407 | ret += 1 + info->ops[1].bit_not_p; | |
1408 | return ret + 1; | |
1409 | } | |
1410 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
1411 | /* stmt is now the BIT_*_EXPR. */ | |
1412 | if (!has_single_use (gimple_assign_rhs1 (stmt))) | |
1413 | ret += 1 + info->ops[0].bit_not_p; | |
1414 | else if (info->ops[0].bit_not_p) | |
1415 | { | |
1416 | gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
1417 | if (!has_single_use (gimple_assign_rhs1 (stmt2))) | |
1418 | ++ret; | |
1419 | } | |
1420 | if (info->ops[1].base_addr == NULL_TREE) | |
1421 | return ret; | |
1422 | if (!has_single_use (gimple_assign_rhs2 (stmt))) | |
1423 | ret += 1 + info->ops[1].bit_not_p; | |
1424 | else if (info->ops[1].bit_not_p) | |
1425 | { | |
1426 | gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt)); | |
1427 | if (!has_single_use (gimple_assign_rhs1 (stmt2))) | |
1428 | ++ret; | |
1429 | } | |
1430 | return ret; | |
1431 | case MEM_REF: | |
1432 | if (!has_single_use (gimple_assign_rhs1 (stmt))) | |
1433 | return 1 + info->ops[0].bit_not_p; | |
1434 | else if (info->ops[0].bit_not_p) | |
1435 | { | |
1436 | stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); | |
1437 | if (!has_single_use (gimple_assign_rhs1 (stmt))) | |
1438 | return 1; | |
1439 | } | |
1440 | return 0; | |
1441 | default: | |
1442 | gcc_unreachable (); | |
1443 | } | |
1444 | } | |
1445 | ||
3d3e04ac | 1446 | /* Split a merged store described by GROUP by populating the SPLIT_STORES |
902cb3b7 | 1447 | vector (if non-NULL) with split_store structs describing the byte offset |
1448 | (from the base), the bit size and alignment of each store as well as the | |
1449 | original statements involved in each such split group. | |
3d3e04ac | 1450 | This is to separate the splitting strategy from the statement |
1451 | building/emission/linking done in output_merged_store. | |
902cb3b7 | 1452 | Return number of new stores. |
9991d1d3 | 1453 | If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned. |
1454 | If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned. | |
902cb3b7 | 1455 | If SPLIT_STORES is NULL, it is just a dry run to count the number of | |
1456 | new stores. If TOTAL_ORIG and TOTAL_NEW are non-NULL, store there | |
1457 | estimates of the number of statements in the original and in the new | |
1458 | sequence, used for the profitability check in output_merged_store. */ | |
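/* A hypothetical walk-through (layout illustrative): for an 8-byte region
   where bytes 0-3 hold constant bits, byte 4 is padding (mask 0xff) and
   bytes 5-7 hold constant bits, the loop below would typically emit a
   4-byte store, skip the padding byte, then emit a 2-byte and a 1-byte
   store, alignment and target width permitting.  */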
3d3e04ac | 1457 | |
902cb3b7 | 1458 | static unsigned int |
9991d1d3 | 1459 | split_group (merged_store_group *group, bool allow_unaligned_store, |
1460 | bool allow_unaligned_load, | |
871a91ec | 1461 | vec<struct split_store *> *split_stores, |
1462 | unsigned *total_orig, | |
1463 | unsigned *total_new) | |
3d3e04ac | 1464 | { |
902cb3b7 | 1465 | unsigned HOST_WIDE_INT pos = group->bitregion_start; |
1466 | unsigned HOST_WIDE_INT size = group->bitregion_end - pos; | |
3d3e04ac | 1467 | unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT; |
902cb3b7 | 1468 | unsigned HOST_WIDE_INT group_align = group->align; |
1469 | unsigned HOST_WIDE_INT align_base = group->align_base; | |
9991d1d3 | 1470 | unsigned HOST_WIDE_INT group_load_align = group_align; |
871a91ec | 1471 | bool any_orig = false; |
3d3e04ac | 1472 | |
3d3e04ac | 1473 | gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0)); |
1474 | ||
902cb3b7 | 1475 | unsigned int ret = 0, first = 0; |
3d3e04ac | 1476 | unsigned HOST_WIDE_INT try_pos = bytepos; |
1477 | group->stores.qsort (sort_by_bitpos); | |
1478 | ||
871a91ec | 1479 | if (total_orig) |
1480 | { | |
1481 | unsigned int i; | |
1482 | store_immediate_info *info = group->stores[0]; | |
1483 | ||
1484 | total_new[0] = 0; | |
1485 | total_orig[0] = 1; /* The orig store. */ | |
1487 | if (info->ops[0].base_addr) | |
1488 | total_orig[0] += 1 + info->ops[0].bit_not_p; | |
1489 | if (info->ops[1].base_addr) | |
1490 | total_orig[0] += 1 + info->ops[1].bit_not_p; | |
1491 | switch (info->rhs_code) | |
1492 | { | |
1493 | case BIT_AND_EXPR: | |
1494 | case BIT_IOR_EXPR: | |
1495 | case BIT_XOR_EXPR: | |
832a73b9 | 1496 | if (info->bit_not_p) |
1497 | total_orig[0]++; /* The orig BIT_NOT_EXPR stmt. */ | |
871a91ec | 1498 | total_orig[0]++; /* The orig BIT_*_EXPR stmt. */ |
1499 | break; | |
1500 | default: | |
1501 | break; | |
1502 | } | |
1503 | total_orig[0] *= group->stores.length (); | |
1504 | ||
1505 | FOR_EACH_VEC_ELT (group->stores, i, info) | |
1506 | total_new[0] += count_multiple_uses (info); | |
1507 | } | |
1508 | ||
9991d1d3 | 1509 | if (!allow_unaligned_load) |
1510 | for (int i = 0; i < 2; ++i) | |
1511 | if (group->load_align[i]) | |
1512 | group_load_align = MIN (group_load_align, group->load_align[i]); | |
1513 | ||
3d3e04ac | 1514 | while (size > 0) |
1515 | { | |
9991d1d3 | 1516 | if ((allow_unaligned_store || group_align <= BITS_PER_UNIT) |
902cb3b7 | 1517 | && group->mask[try_pos - bytepos] == (unsigned char) ~0U) |
1518 | { | |
1519 | /* Skip padding bytes. */ | |
1520 | ++try_pos; | |
1521 | size -= BITS_PER_UNIT; | |
1522 | continue; | |
1523 | } | |
1524 | ||
3d3e04ac | 1525 | unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT; |
902cb3b7 | 1526 | unsigned int try_size = MAX_STORE_BITSIZE, nonmasked; |
1527 | unsigned HOST_WIDE_INT align_bitpos | |
1528 | = (try_bitpos - align_base) & (group_align - 1); | |
1529 | unsigned HOST_WIDE_INT align = group_align; | |
1530 | if (align_bitpos) | |
1531 | align = least_bit_hwi (align_bitpos); | |
9991d1d3 | 1532 | if (!allow_unaligned_store) |
902cb3b7 | 1533 | try_size = MIN (try_size, align); |
9991d1d3 | 1534 | if (!allow_unaligned_load) |
1535 | { | |
1536 | /* If we can't do or don't want to do unaligned stores | |
1537 | as well as loads, we need to take the loads into account | |
1538 | as well. */ | |
1539 | unsigned HOST_WIDE_INT load_align = group_load_align; | |
1540 | align_bitpos = (try_bitpos - align_base) & (load_align - 1); | |
1541 | if (align_bitpos) | |
1542 | load_align = least_bit_hwi (align_bitpos); | |
1543 | for (int i = 0; i < 2; ++i) | |
1544 | if (group->load_align[i]) | |
1545 | { | |
1546 | align_bitpos = try_bitpos - group->stores[0]->bitpos; | |
1547 | align_bitpos += group->stores[0]->ops[i].bitpos; | |
1548 | align_bitpos -= group->load_align_base[i]; | |
1549 | align_bitpos &= (group_load_align - 1); | |
1550 | if (align_bitpos) | |
1551 | { | |
1552 | unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos); | |
1553 | load_align = MIN (load_align, a); | |
1554 | } | |
1555 | } | |
1556 | try_size = MIN (try_size, load_align); | |
1557 | } | |
902cb3b7 | 1558 | store_immediate_info *info |
9991d1d3 | 1559 | = find_constituent_stores (group, NULL, &first, try_bitpos, try_size); |
902cb3b7 | 1560 | if (info) |
1561 | { | |
1562 | /* If there is just one original statement for the range, see if | |
1563 | we can just reuse the original store which could be even larger | |
1564 | than try_size. */ | |
1565 | unsigned HOST_WIDE_INT stmt_end | |
1566 | = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT); | |
9991d1d3 | 1567 | info = find_constituent_stores (group, NULL, &first, try_bitpos, |
1568 | stmt_end - try_bitpos); | |
902cb3b7 | 1569 | if (info && info->bitpos >= try_bitpos) |
1570 | { | |
1571 | try_size = stmt_end - try_bitpos; | |
1572 | goto found; | |
1573 | } | |
1574 | } | |
3d3e04ac | 1575 | |
902cb3b7 | 1576 | /* Approximate store bitsize for the case when there are no padding |
1577 | bits. */ | |
1578 | while (try_size > size) | |
1579 | try_size /= 2; | |
1580 | /* Now look for whole padding bytes at the end of that bitsize. */ | |
1581 | for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked) | |
1582 | if (group->mask[try_pos - bytepos + nonmasked - 1] | |
1583 | != (unsigned char) ~0U) | |
1584 | break; | |
1585 | if (nonmasked == 0) | |
1586 | { | |
1587 | /* If entire try_size range is padding, skip it. */ | |
1588 | try_pos += try_size / BITS_PER_UNIT; | |
1589 | size -= try_size; | |
1590 | continue; | |
1591 | } | |
1592 | /* Otherwise try to decrease try_size if the second half, the last 3 | |
1593 | quarters etc. are padding. */ | |
1594 | nonmasked *= BITS_PER_UNIT; | |
1595 | while (nonmasked <= try_size / 2) | |
1596 | try_size /= 2; | |
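      /* E.g. (illustrative): with try_size == 32 and only the low 8 bits
	 non-padding (nonmasked == 8 after the multiplication above), the
	 loop halves try_size down to 8 so the padding is not stored.  */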
9991d1d3 | 1597 | if (!allow_unaligned_store && group_align > BITS_PER_UNIT) |
902cb3b7 | 1598 | { |
1599 | /* Now look for whole padding bytes at the start of that bitsize. */ | |
1600 | unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked; | |
1601 | for (masked = 0; masked < try_bytesize; ++masked) | |
1602 | if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U) | |
1603 | break; | |
1604 | masked *= BITS_PER_UNIT; | |
1605 | gcc_assert (masked < try_size); | |
1606 | if (masked >= try_size / 2) | |
1607 | { | |
1608 | while (masked >= try_size / 2) | |
1609 | { | |
1610 | try_size /= 2; | |
1611 | try_pos += try_size / BITS_PER_UNIT; | |
1612 | size -= try_size; | |
1613 | masked -= try_size; | |
1614 | } | |
1615 | /* Need to recompute the alignment, so just retry at the new | |
1616 | position. */ | |
1617 | continue; | |
1618 | } | |
1619 | } | |
1620 | ||
1621 | found: | |
1622 | ++ret; | |
3d3e04ac | 1623 | |
902cb3b7 | 1624 | if (split_stores) |
1625 | { | |
1626 | struct split_store *store | |
1627 | = new split_store (try_pos, try_size, align); | |
9991d1d3 | 1628 | info = find_constituent_stores (group, &store->orig_stores, |
1629 | &first, try_bitpos, try_size); | |
902cb3b7 | 1630 | if (info |
1631 | && info->bitpos >= try_bitpos | |
1632 | && info->bitpos + info->bitsize <= try_bitpos + try_size) | |
871a91ec | 1633 | { |
1634 | store->orig = true; | |
1635 | any_orig = true; | |
1636 | } | |
902cb3b7 | 1637 | split_stores->safe_push (store); |
1638 | } | |
1639 | ||
1640 | try_pos += try_size / BITS_PER_UNIT; | |
3d3e04ac | 1641 | size -= try_size; |
3d3e04ac | 1642 | } |
902cb3b7 | 1643 | |
871a91ec | 1644 | if (total_orig) |
1645 | { | |
1646 | /* If we are reusing some original stores and any of the | |
1647 | original SSA_NAMEs had multiple uses, we need to subtract | |
1648 | those now before we add the new ones. */ | |
1649 | if (total_new[0] && any_orig) | |
1650 | { | |
1651 | unsigned int i; | |
1652 | struct split_store *store; | |
1653 | FOR_EACH_VEC_ELT (*split_stores, i, store) | |
1654 | if (store->orig) | |
1655 | total_new[0] -= count_multiple_uses (store->orig_stores[0]); | |
1656 | } | |
1657 | total_new[0] += ret; /* The new store. */ | |
1658 | store_immediate_info *info = group->stores[0]; | |
1659 | if (info->ops[0].base_addr) | |
1660 | total_new[0] += ret * (1 + info->ops[0].bit_not_p); | |
1661 | if (info->ops[1].base_addr) | |
1662 | total_new[0] += ret * (1 + info->ops[1].bit_not_p); | |
1663 | switch (info->rhs_code) | |
1664 | { | |
1665 | case BIT_AND_EXPR: | |
1666 | case BIT_IOR_EXPR: | |
1667 | case BIT_XOR_EXPR: | |
832a73b9 | 1668 | if (info->bit_not_p) |
1669 | total_new[0] += ret; /* The new BIT_NOT_EXPR stmt. */ | |
871a91ec | 1670 | total_new[0] += ret; /* The new BIT_*_EXPR stmt. */ |
1671 | break; | |
1672 | default: | |
1673 | break; | |
1674 | } | |
1675 | } | |
1676 | ||
902cb3b7 | 1677 | return ret; |
3d3e04ac | 1678 | } |
1679 | ||
1680 | /* Given a merged store group GROUP output the widened version of it. | |
1681 | The store chain is against the base object BASE. | |
1682 | Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output | |
1683 | unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive. | |
1684 | Make sure that the number of statements output is less than the number of | |
1685 | original statements. If a better sequence is possible emit it and | |
1686 | return true. */ | |
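/* Sketch of the code emitted for one split store in the common case
   (SSA names and constants hypothetical):

     _1 = MEM[(unsigned int *)q_4];          // operand load, if any
     _2 = _1 ^ 12345;                        // bitwise op, if any
     MEM[(unsigned int *)p_6] = _2;          // the widened store

   with additional mask/ior statements when the region has padding bits,
   as done further below.  */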
1687 | ||
1688 | bool | |
f85e7cb7 | 1689 | imm_store_chain_info::output_merged_store (merged_store_group *group) |
3d3e04ac | 1690 | { |
902cb3b7 | 1691 | unsigned HOST_WIDE_INT start_byte_pos |
1692 | = group->bitregion_start / BITS_PER_UNIT; | |
3d3e04ac | 1693 | |
1694 | unsigned int orig_num_stmts = group->stores.length (); | |
1695 | if (orig_num_stmts < 2) | |
1696 | return false; | |
1697 | ||
902cb3b7 | 1698 | auto_vec<struct split_store *, 32> split_stores; |
3d3e04ac | 1699 | split_stores.create (0); |
9991d1d3 | 1700 | bool allow_unaligned_store |
902cb3b7 | 1701 | = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED); |
9991d1d3 | 1702 | bool allow_unaligned_load = allow_unaligned_store; |
1703 | if (allow_unaligned_store) | |
902cb3b7 | 1704 | { |
1705 | /* If unaligned stores are allowed, count how many stores we'd emit | |
1706 | with unaligned stores allowed and how many with aligned stores only. | |
1707 | Only use unaligned stores if that results in fewer stores. */ | |
9991d1d3 | 1708 | unsigned aligned_cnt |
871a91ec | 1709 | = split_group (group, false, allow_unaligned_load, NULL, NULL, NULL); |
9991d1d3 | 1710 | unsigned unaligned_cnt |
871a91ec | 1711 | = split_group (group, true, allow_unaligned_load, NULL, NULL, NULL); |
902cb3b7 | 1712 | if (aligned_cnt <= unaligned_cnt) |
9991d1d3 | 1713 | allow_unaligned_store = false; |
902cb3b7 | 1714 | } |
871a91ec | 1715 | unsigned total_orig, total_new; |
9991d1d3 | 1716 | split_group (group, allow_unaligned_store, allow_unaligned_load, |
871a91ec | 1717 | &split_stores, &total_orig, &total_new); |
902cb3b7 | 1718 | |
1719 | if (split_stores.length () >= orig_num_stmts) | |
1720 | { | |
1721 | /* We didn't manage to reduce the number of statements. Bail out. */ | |
1722 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
871a91ec | 1723 | fprintf (dump_file, "Exceeded original number of stmts (%u)." |
1724 | " Not profitable to emit new sequence.\n", | |
1725 | orig_num_stmts); | |
902cb3b7 | 1726 | return false; |
1727 | } | |
871a91ec | 1728 | if (total_orig <= total_new) |
1729 | { | |
1730 | /* If number of estimated new statements is above estimated original | |
1731 | statements, bail out too. */ | |
1732 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
1733 | fprintf (dump_file, "Estimated number of original stmts (%u)" | |
1734 | " not larger than estimated number of new" | |
1735 | " stmts (%u).\n", | |
1736 | total_orig, total_new); | |
1737 | return false; | |
1738 | } | |
3d3e04ac | 1738 | |
1739 | gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt); | |
1740 | gimple_seq seq = NULL; | |
3d3e04ac | 1741 | tree last_vdef, new_vuse; |
1742 | last_vdef = gimple_vdef (group->last_stmt); | |
1743 | new_vuse = gimple_vuse (group->last_stmt); | |
1744 | ||
1745 | gimple *stmt = NULL; | |
3d3e04ac | 1746 | split_store *split_store; |
1747 | unsigned int i; | |
9991d1d3 | 1748 | auto_vec<gimple *, 32> orig_stmts; |
427223f1 | 1749 | tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &seq, |
1750 | is_gimple_mem_ref_addr, NULL_TREE); | |
9991d1d3 | 1751 | |
1752 | tree load_addr[2] = { NULL_TREE, NULL_TREE }; | |
1753 | gimple_seq load_seq[2] = { NULL, NULL }; | |
1754 | gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () }; | |
1755 | for (int j = 0; j < 2; ++j) | |
1756 | { | |
1757 | store_operand_info &op = group->stores[0]->ops[j]; | |
1758 | if (op.base_addr == NULL_TREE) | |
1759 | continue; | |
1760 | ||
1761 | store_immediate_info *infol = group->stores.last (); | |
1762 | if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt)) | |
1763 | { | |
1764 | load_gsi[j] = gsi_for_stmt (op.stmt); | |
1765 | load_addr[j] | |
1766 | = force_gimple_operand_1 (unshare_expr (op.base_addr), | |
1767 | &load_seq[j], is_gimple_mem_ref_addr, | |
1768 | NULL_TREE); | |
1769 | } | |
1770 | else if (operand_equal_p (base_addr, op.base_addr, 0)) | |
1771 | load_addr[j] = addr; | |
1772 | else | |
ad3e5b2f | 1773 | { |
1774 | gimple_seq this_seq; | |
1775 | load_addr[j] | |
1776 | = force_gimple_operand_1 (unshare_expr (op.base_addr), | |
1777 | &this_seq, is_gimple_mem_ref_addr, | |
1778 | NULL_TREE); | |
1779 | gimple_seq_add_seq_without_update (&seq, this_seq); | |
1780 | } | |
9991d1d3 | 1781 | } |
1782 | ||
3d3e04ac | 1783 | FOR_EACH_VEC_ELT (split_stores, i, split_store) |
1784 | { | |
1785 | unsigned HOST_WIDE_INT try_size = split_store->size; | |
1786 | unsigned HOST_WIDE_INT try_pos = split_store->bytepos; | |
1787 | unsigned HOST_WIDE_INT align = split_store->align; | |
902cb3b7 | 1788 | tree dest, src; |
1789 | location_t loc; | |
1790 | if (split_store->orig) | |
1791 | { | |
1792 | /* If there is just a single constituent store which covers | |
1793 | the whole area, just reuse the lhs and rhs. */ | |
9991d1d3 | 1794 | gimple *orig_stmt = split_store->orig_stores[0]->stmt; |
1795 | dest = gimple_assign_lhs (orig_stmt); | |
1796 | src = gimple_assign_rhs1 (orig_stmt); | |
1797 | loc = gimple_location (orig_stmt); | |
902cb3b7 | 1798 | } |
1799 | else | |
1800 | { | |
9991d1d3 | 1801 | store_immediate_info *info; |
1802 | unsigned short clique, base; | |
1803 | unsigned int k; | |
1804 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
1805 | orig_stmts.safe_push (info->stmt); | |
902cb3b7 | 1806 | tree offset_type |
9991d1d3 | 1807 | = get_alias_type_for_stmts (orig_stmts, false, &clique, &base); |
1808 | loc = get_location_for_stmts (orig_stmts); | |
1809 | orig_stmts.truncate (0); | |
902cb3b7 | 1810 | |
1811 | tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED); | |
1812 | int_type = build_aligned_type (int_type, align); | |
1813 | dest = fold_build2 (MEM_REF, int_type, addr, | |
1814 | build_int_cst (offset_type, try_pos)); | |
9991d1d3 | 1815 | if (TREE_CODE (dest) == MEM_REF) |
1816 | { | |
1817 | MR_DEPENDENCE_CLIQUE (dest) = clique; | |
1818 | MR_DEPENDENCE_BASE (dest) = base; | |
1819 | } | |
1820 | ||
902cb3b7 | 1821 | tree mask |
1822 | = native_interpret_expr (int_type, | |
1823 | group->mask + try_pos - start_byte_pos, | |
1824 | group->buf_size); | |
9991d1d3 | 1825 | |
1826 | tree ops[2]; | |
1827 | for (int j = 0; | |
1828 | j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE); | |
1829 | ++j) | |
1830 | { | |
1831 | store_operand_info &op = split_store->orig_stores[0]->ops[j]; | |
1832 | if (op.base_addr) | |
1833 | { | |
1834 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
1835 | orig_stmts.safe_push (info->ops[j].stmt); | |
1836 | ||
1837 | offset_type = get_alias_type_for_stmts (orig_stmts, true, | |
1838 | &clique, &base); | |
1839 | location_t load_loc = get_location_for_stmts (orig_stmts); | |
1840 | orig_stmts.truncate (0); | |
1841 | ||
1842 | unsigned HOST_WIDE_INT load_align = group->load_align[j]; | |
1843 | unsigned HOST_WIDE_INT align_bitpos | |
1844 | = (try_pos * BITS_PER_UNIT | |
1845 | - split_store->orig_stores[0]->bitpos | |
1846 | + op.bitpos) & (load_align - 1); | |
1847 | if (align_bitpos) | |
1848 | load_align = least_bit_hwi (align_bitpos); | |
1849 | ||
1850 | tree load_int_type | |
1851 | = build_nonstandard_integer_type (try_size, UNSIGNED); | |
1852 | load_int_type | |
1853 | = build_aligned_type (load_int_type, load_align); | |
1854 | ||
1855 | unsigned HOST_WIDE_INT load_pos | |
1856 | = (try_pos * BITS_PER_UNIT | |
1857 | - split_store->orig_stores[0]->bitpos | |
1858 | + op.bitpos) / BITS_PER_UNIT; | |
1859 | ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j], | |
1860 | build_int_cst (offset_type, load_pos)); | |
1861 | if (TREE_CODE (ops[j]) == MEM_REF) | |
1862 | { | |
1863 | MR_DEPENDENCE_CLIQUE (ops[j]) = clique; | |
1864 | MR_DEPENDENCE_BASE (ops[j]) = base; | |
1865 | } | |
1866 | if (!integer_zerop (mask)) | |
1867 | /* The load might load some bits (that will be masked off | |
1868 | later on) uninitialized, avoid -W*uninitialized | |
1869 | warnings in that case. */ | |
1870 | TREE_NO_WARNING (ops[j]) = 1; | |
1871 | ||
1872 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
1873 | ops[j]); | |
1874 | gimple_set_location (stmt, load_loc); | |
1875 | if (gsi_bb (load_gsi[j])) | |
1876 | { | |
1877 | gimple_set_vuse (stmt, gimple_vuse (op.stmt)); | |
1878 | gimple_seq_add_stmt_without_update (&load_seq[j], stmt); | |
1879 | } | |
1880 | else | |
1881 | { | |
1882 | gimple_set_vuse (stmt, new_vuse); | |
1883 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1884 | } | |
1885 | ops[j] = gimple_assign_lhs (stmt); | |
c35548ce | 1886 | if (op.bit_not_p) |
1887 | { | |
1888 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
1889 | BIT_NOT_EXPR, ops[j]); | |
1890 | gimple_set_location (stmt, load_loc); | |
1891 | ops[j] = gimple_assign_lhs (stmt); | |
1892 | ||
1893 | if (gsi_bb (load_gsi[j])) | |
1894 | gimple_seq_add_stmt_without_update (&load_seq[j], | |
1895 | stmt); | |
1896 | else | |
1897 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1898 | } | |
9991d1d3 | 1899 | } |
1900 | else | |
1901 | ops[j] = native_interpret_expr (int_type, | |
1902 | group->val + try_pos | |
1903 | - start_byte_pos, | |
1904 | group->buf_size); | |
1905 | } | |
1906 | ||
1907 | switch (split_store->orig_stores[0]->rhs_code) | |
1908 | { | |
1909 | case BIT_AND_EXPR: | |
1910 | case BIT_IOR_EXPR: | |
1911 | case BIT_XOR_EXPR: | |
1912 | FOR_EACH_VEC_ELT (split_store->orig_stores, k, info) | |
1913 | { | |
1914 | tree rhs1 = gimple_assign_rhs1 (info->stmt); | |
1915 | orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1)); | |
1916 | } | |
1917 | location_t bit_loc; | |
1918 | bit_loc = get_location_for_stmts (orig_stmts); | |
1919 | orig_stmts.truncate (0); | |
1920 | ||
1921 | stmt | |
1922 | = gimple_build_assign (make_ssa_name (int_type), | |
1923 | split_store->orig_stores[0]->rhs_code, | |
1924 | ops[0], ops[1]); | |
1925 | gimple_set_location (stmt, bit_loc); | |
1926 | /* If there is just one load and there is a separate | |
1927 | load_seq[0], emit the bitwise op right after it. */ | |
1928 | if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0])) | |
1929 | gimple_seq_add_stmt_without_update (&load_seq[0], stmt); | |
1930 | /* Otherwise, if at least one load is in seq, we need to | |
1931 | emit the bitwise op right before the store. If there | |
1932 | are two loads and they are emitted somewhere else, it would | |
1933 | be better to emit the bitwise op as early as possible; | |
1934 | we don't track where that would be possible right now | |
1935 | though. */ | |
1936 | else | |
1937 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1938 | src = gimple_assign_lhs (stmt); | |
832a73b9 | 1939 | if (split_store->orig_stores[0]->bit_not_p) |
1940 | { | |
1941 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
1942 | BIT_NOT_EXPR, src); | |
1943 | gimple_set_location (stmt, bit_loc); | |
1944 | if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0])) | |
1945 | gimple_seq_add_stmt_without_update (&load_seq[0], stmt); | |
1946 | else | |
1947 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1948 | src = gimple_assign_lhs (stmt); | |
1949 | } | |
9991d1d3 | 1950 | break; |
1951 | default: | |
1952 | src = ops[0]; | |
1953 | break; | |
1954 | } | |
1955 | ||
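	  /* Sketch of the read-modify-write emitted below when the region
	     has padding bits (SSA names hypothetical):

	       _1 = MEM[dest];        // load existing bytes
	       _2 = _1 & mask;        // keep bits the group does not write
	       _3 = src & ~mask;      // the newly stored bits
	       src = _2 | _3;         // combined value to store  */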
902cb3b7 | 1956 | if (!integer_zerop (mask)) |
1957 | { | |
1958 | tree tem = make_ssa_name (int_type); | |
1959 | tree load_src = unshare_expr (dest); | |
1960 | /* The load might load some or all bits uninitialized, | |
1961 | avoid -W*uninitialized warnings in that case. | |
1962 | As an optimization, if all the bits were provably | |
1963 | uninitialized (no stores at all yet, or the previous | |
1964 | store a CLOBBER), we could optimize away the load and | |
1965 | replace it e.g. with 0. */ | |
1966 | TREE_NO_WARNING (load_src) = 1; | |
1967 | stmt = gimple_build_assign (tem, load_src); | |
1968 | gimple_set_location (stmt, loc); | |
1969 | gimple_set_vuse (stmt, new_vuse); | |
1970 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1971 | ||
1972 | /* FIXME: If there is a single chunk of zero bits in mask, | |
1973 | perhaps use BIT_INSERT_EXPR instead? */ | |
1974 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
1975 | BIT_AND_EXPR, tem, mask); | |
1976 | gimple_set_location (stmt, loc); | |
1977 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1978 | tem = gimple_assign_lhs (stmt); | |
1979 | ||
9991d1d3 | 1980 | if (TREE_CODE (src) == INTEGER_CST) |
1981 | src = wide_int_to_tree (int_type, | |
1982 | wi::bit_and_not (wi::to_wide (src), | |
1983 | wi::to_wide (mask))); | |
1984 | else | |
1985 | { | |
1986 | tree nmask | |
1987 | = wide_int_to_tree (int_type, | |
1988 | wi::bit_not (wi::to_wide (mask))); | |
1989 | stmt = gimple_build_assign (make_ssa_name (int_type), | |
1990 | BIT_AND_EXPR, src, nmask); | |
1991 | gimple_set_location (stmt, loc); | |
1992 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1993 | src = gimple_assign_lhs (stmt); | |
1994 | } | |
902cb3b7 | 1995 | stmt = gimple_build_assign (make_ssa_name (int_type), |
1996 | BIT_IOR_EXPR, tem, src); | |
1997 | gimple_set_location (stmt, loc); | |
1998 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
1999 | src = gimple_assign_lhs (stmt); | |
2000 | } | |
2001 | } | |
3d3e04ac | 2002 | |
2003 | stmt = gimple_build_assign (dest, src); | |
2004 | gimple_set_location (stmt, loc); | |
2005 | gimple_set_vuse (stmt, new_vuse); | |
2006 | gimple_seq_add_stmt_without_update (&seq, stmt); | |
2007 | ||
3d3e04ac | 2008 | tree new_vdef; |
2009 | if (i < split_stores.length () - 1) | |
902cb3b7 | 2010 | new_vdef = make_ssa_name (gimple_vop (cfun), stmt); |
3d3e04ac | 2011 | else |
2012 | new_vdef = last_vdef; | |
2013 | ||
2014 | gimple_set_vdef (stmt, new_vdef); | |
2015 | SSA_NAME_DEF_STMT (new_vdef) = stmt; | |
2016 | new_vuse = new_vdef; | |
2017 | } | |
2018 | ||
2019 | FOR_EACH_VEC_ELT (split_stores, i, split_store) | |
2020 | delete split_store; | |
2021 | ||
3d3e04ac | 2022 | gcc_assert (seq); |
2023 | if (dump_file) | |
2024 | { | |
2025 | fprintf (dump_file, | |
2026 | "New sequence of %u stmts to replace old one of %u stmts\n", | |
902cb3b7 | 2027 | split_stores.length (), orig_num_stmts); |
3d3e04ac | 2028 | if (dump_flags & TDF_DETAILS) |
2029 | print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS); | |
2030 | } | |
2031 | gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT); | |
9991d1d3 | 2032 | for (int j = 0; j < 2; ++j) |
2033 | if (load_seq[j]) | |
2034 | gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT); | |
3d3e04ac | 2035 | |
2036 | return true; | |
2037 | } | |
2038 | ||
2039 | /* Process the merged_store_group objects created in the coalescing phase. | |
2040 | The stores are all against the base object BASE. | |
2041 | Try to output the widened stores and delete the original statements if | |
2042 | successful. Return true iff any changes were made. */ | |
2043 | ||
2044 | bool | |
f85e7cb7 | 2045 | imm_store_chain_info::output_merged_stores () |
3d3e04ac | 2046 | { |
2047 | unsigned int i; | |
2048 | merged_store_group *merged_store; | |
2049 | bool ret = false; | |
2050 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store) | |
2051 | { | |
f85e7cb7 | 2052 | if (output_merged_store (merged_store)) |
3d3e04ac | 2053 | { |
2054 | unsigned int j; | |
2055 | store_immediate_info *store; | |
2056 | FOR_EACH_VEC_ELT (merged_store->stores, j, store) | |
2057 | { | |
2058 | gimple *stmt = store->stmt; | |
2059 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); | |
2060 | gsi_remove (&gsi, true); | |
2061 | if (stmt != merged_store->last_stmt) | |
2062 | { | |
2063 | unlink_stmt_vdef (stmt); | |
2064 | release_defs (stmt); | |
2065 | } | |
2066 | } | |
2067 | ret = true; | |
2068 | } | |
2069 | } | |
2070 | if (ret && dump_file) | |
2071 | fprintf (dump_file, "Merging successful!\n"); | |
2072 | ||
2073 | return ret; | |
2074 | } | |
2075 | ||
2076 | /* Coalesce the store_immediate_info objects recorded against the base object | |
2077 | BASE in the first phase and output them. | |
2078 | Delete the allocated structures. | |
2079 | Return true if any changes were made. */ | |
2080 | ||
2081 | bool | |
f85e7cb7 | 2082 | imm_store_chain_info::terminate_and_process_chain () |
3d3e04ac | 2083 | { |
2084 | /* Process store chain. */ | |
2085 | bool ret = false; | |
2086 | if (m_store_info.length () > 1) | |
2087 | { | |
2088 | ret = coalesce_immediate_stores (); | |
2089 | if (ret) | |
f85e7cb7 | 2090 | ret = output_merged_stores (); |
3d3e04ac | 2091 | } |
2092 | ||
2093 | /* Delete all the entries we allocated ourselves. */ | |
2094 | store_immediate_info *info; | |
2095 | unsigned int i; | |
2096 | FOR_EACH_VEC_ELT (m_store_info, i, info) | |
2097 | delete info; | |
2098 | ||
2099 | merged_store_group *merged_info; | |
2100 | FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info) | |
2101 | delete merged_info; | |
2102 | ||
2103 | return ret; | |
2104 | } | |
2105 | ||
2106 | /* Return true iff LHS is a destination potentially interesting for | |
2107 | store merging. In practice these are the codes that get_inner_reference | |
2108 | can process. */ | |
2109 | ||
2110 | static bool | |
2111 | lhs_valid_for_store_merging_p (tree lhs) | |
2112 | { | |
2113 | tree_code code = TREE_CODE (lhs); | |
2114 | ||
2115 | if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF | |
2116 | || code == COMPONENT_REF || code == BIT_FIELD_REF) | |
2117 | return true; | |
2118 | ||
2119 | return false; | |
2120 | } | |
2121 | ||
2122 | /* Return true if the tree RHS is a constant we want to consider | |
2123 | during store merging. In practice accept all codes that | |
2124 | native_encode_expr accepts. */ | |
2125 | ||
2126 | static bool | |
2127 | rhs_valid_for_store_merging_p (tree rhs) | |
2128 | { | |
63eabc9b | 2129 | return native_encode_expr (rhs, NULL, |
2130 | GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0; | |
3d3e04ac | 2131 | } |
2132 | ||
9991d1d3 | 2133 | /* If MEM is a memory reference usable for store merging (either as |
2134 | store destination or for loads), return the non-NULL base_addr | |
2135 | and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END. | |
2136 | Otherwise return NULL_TREE; *PBITSIZE is still valid even in that | |
2137 | case. */ | |
2138 | ||
2139 | static tree | |
2140 | mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize, | |
2141 | unsigned HOST_WIDE_INT *pbitpos, | |
2142 | unsigned HOST_WIDE_INT *pbitregion_start, | |
2143 | unsigned HOST_WIDE_INT *pbitregion_end) | |
2144 | { | |
2145 | HOST_WIDE_INT bitsize; | |
2146 | HOST_WIDE_INT bitpos; | |
2147 | unsigned HOST_WIDE_INT bitregion_start = 0; | |
2148 | unsigned HOST_WIDE_INT bitregion_end = 0; | |
2149 | machine_mode mode; | |
2150 | int unsignedp = 0, reversep = 0, volatilep = 0; | |
2151 | tree offset; | |
2152 | tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode, | |
2153 | &unsignedp, &reversep, &volatilep); | |
2154 | *pbitsize = bitsize; | |
2155 | if (bitsize == 0) | |
2156 | return NULL_TREE; | |
2157 | ||
2158 | if (TREE_CODE (mem) == COMPONENT_REF | |
2159 | && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1))) | |
2160 | { | |
2161 | get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset); | |
2162 | if (bitregion_end) | |
2163 | ++bitregion_end; | |
2164 | } | |
2165 | ||
2166 | if (reversep) | |
2167 | return NULL_TREE; | |
2168 | ||
2169 | /* We do not want to rewrite TARGET_MEM_REFs. */ | |
2170 | if (TREE_CODE (base_addr) == TARGET_MEM_REF) | |
2171 | return NULL_TREE; | |
2172 | /* In some cases get_inner_reference may return a | |
2173 | MEM_REF [ptr + byteoffset]. For the purposes of this pass | |
2174 | canonicalize the base_addr to MEM_REF [ptr] and take | |
2175 | byteoffset into account in the bitpos. This occurs in | |
2176 | PR 23684 and this way we can catch more chains. */ | |
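      /* For instance (offsets illustrative): a store to MEM_REF [p_1 + 4]
	 at bit offset 8 is canonicalized to base p_1 with bitpos
	 4 * BITS_PER_UNIT + 8, so it can join chains of other stores
	 based on p_1.  */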
2177 | else if (TREE_CODE (base_addr) == MEM_REF) | |
2178 | { | |
2179 | offset_int bit_off, byte_off = mem_ref_offset (base_addr); | |
2180 | bit_off = byte_off << LOG2_BITS_PER_UNIT; | |
2181 | bit_off += bitpos; | |
2182 | if (!wi::neg_p (bit_off) && wi::fits_shwi_p (bit_off)) | |
2183 | { | |
2184 | bitpos = bit_off.to_shwi (); | |
2185 | if (bitregion_end) | |
2186 | { | |
2187 | bit_off = byte_off << LOG2_BITS_PER_UNIT; | |
2188 | bit_off += bitregion_start; | |
2189 | if (wi::fits_uhwi_p (bit_off)) | |
2190 | { | |
2191 | bitregion_start = bit_off.to_uhwi (); | |
2192 | bit_off = byte_off << LOG2_BITS_PER_UNIT; | |
2193 | bit_off += bitregion_end; | |
2194 | if (wi::fits_uhwi_p (bit_off)) | |
2195 | bitregion_end = bit_off.to_uhwi (); | |
2196 | else | |
2197 | bitregion_end = 0; | |
2198 | } | |
2199 | else | |
2200 | bitregion_end = 0; | |
2201 | } | |
2202 | } | |
2203 | else | |
2204 | return NULL_TREE; | |
2205 | base_addr = TREE_OPERAND (base_addr, 0); | |
2206 | } | |
2207 | /* get_inner_reference returns the base object, get at its | |
2208 | address now. */ | |
2209 | else | |
2210 | { | |
2211 | if (bitpos < 0) | |
2212 | return NULL_TREE; | |
2213 | base_addr = build_fold_addr_expr (base_addr); | |
2214 | } | |
2215 | ||
2216 | if (!bitregion_end) | |
2217 | { | |
2218 | bitregion_start = ROUND_DOWN (bitpos, BITS_PER_UNIT); | |
2219 | bitregion_end = ROUND_UP (bitpos + bitsize, BITS_PER_UNIT); | |
2220 | } | |
2221 | ||
2222 | if (offset != NULL_TREE) | |
2223 | { | |
2224 | /* If the access has a variable offset then a base decl has to be | |
2225 | address-taken to be able to emit pointer-based stores to it. | |
2226 | ??? We might be able to get away with re-using the original | |
2227 | base up to the first variable part and then wrapping that inside | |
2228 | a BIT_FIELD_REF. */ | |
2229 | tree base = get_base_address (base_addr); | |
2230 | if (! base | |
2231 | || (DECL_P (base) && ! TREE_ADDRESSABLE (base))) | |
2232 | return NULL_TREE; | |
2233 | ||
2234 | base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr), | |
2235 | base_addr, offset); | |
2236 | } | |
2237 | ||
2238 | *pbitsize = bitsize; | |
2239 | *pbitpos = bitpos; | |
2240 | *pbitregion_start = bitregion_start; | |
2241 | *pbitregion_end = bitregion_end; | |
2242 | return base_addr; | |
2243 | } | |
2244 | ||
2245 | /* Return true if STMT is a load that can be used for store merging. | |
2246 | In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and | |
2247 | BITREGION_END are properties of the corresponding store. */ | |
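/* Example of a pattern handled here (SSA names hypothetical):

     _1 = MEM[(char *)q_3 + 4];
     _2 = ~_1;

   handled_load records the load in *OP and flips op->bit_not_p for the
   intervening BIT_NOT_EXPR.  */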
2248 | ||
2249 | static bool | |
2250 | handled_load (gimple *stmt, store_operand_info *op, | |
2251 | unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos, | |
2252 | unsigned HOST_WIDE_INT bitregion_start, | |
2253 | unsigned HOST_WIDE_INT bitregion_end) | |
2254 | { | |
c35548ce | 2255 | if (!is_gimple_assign (stmt)) |
9991d1d3 | 2256 | return false; |
c35548ce | 2257 | if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR) |
2258 | { | |
2259 | tree rhs1 = gimple_assign_rhs1 (stmt); | |
2260 | if (TREE_CODE (rhs1) == SSA_NAME | |
c35548ce | 2261 | && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos, |
2262 | bitregion_start, bitregion_end)) | |
2263 | { | |
832a73b9 | 2264 | /* Don't allow _1 = load; _2 = ~1; _3 = ~_2; which should have |
2265 | been optimized earlier, but if allowed here, would confuse the | |
2266 | multiple uses counting. */ | |
2267 | if (op->bit_not_p) | |
2268 | return false; | |
c35548ce | 2269 | op->bit_not_p = !op->bit_not_p; |
2270 | return true; | |
2271 | } | |
2272 | return false; | |
2273 | } | |
2274 | if (gimple_vuse (stmt) | |
2275 | && gimple_assign_load_p (stmt) | |
9991d1d3 | 2276 | && !stmt_can_throw_internal (stmt) |
2277 | && !gimple_has_volatile_ops (stmt)) | |
2278 | { | |
2279 | tree mem = gimple_assign_rhs1 (stmt); | |
2280 | op->base_addr | |
2281 | = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos, | |
2282 | &op->bitregion_start, | |
2283 | &op->bitregion_end); | |
2284 | if (op->base_addr != NULL_TREE | |
2285 | && op->bitsize == bitsize | |
2286 | && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0 | |
2287 | && op->bitpos - op->bitregion_start >= bitpos - bitregion_start | |
2288 | && op->bitregion_end - op->bitpos >= bitregion_end - bitpos) | |
2289 | { | |
2290 | op->stmt = stmt; | |
2291 | op->val = mem; | |
c35548ce | 2292 | op->bit_not_p = false; |
9991d1d3 | 2293 | return true; |
2294 | } | |
2295 | } | |
2296 | return false; | |
2297 | } | |
2298 | ||
2299 | /* Record the store STMT for store merging optimization if it can be | |
2300 | optimized. */ | |
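/* Shapes of right-hand sides recognized below (patterns illustrative):

     *p = 0x12;                            // INTEGER_CST
     _1 = *q;  *p = _1;                    // MEM_REF (value copied from a load)
     _1 = *q;  _2 = _1 & 0xf0;  *p = _2;   // BIT_{AND,IOR,XOR}_EXPR

   optionally with a BIT_NOT_EXPR in between, recorded as bit_not_p.  */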
2301 | ||
2302 | void | |
2303 | pass_store_merging::process_store (gimple *stmt) | |
2304 | { | |
2305 | tree lhs = gimple_assign_lhs (stmt); | |
2306 | tree rhs = gimple_assign_rhs1 (stmt); | |
2307 | unsigned HOST_WIDE_INT bitsize, bitpos; | |
2308 | unsigned HOST_WIDE_INT bitregion_start; | |
2309 | unsigned HOST_WIDE_INT bitregion_end; | |
2310 | tree base_addr | |
2311 | = mem_valid_for_store_merging (lhs, &bitsize, &bitpos, | |
2312 | &bitregion_start, &bitregion_end); | |
2313 | if (bitsize == 0) | |
2314 | return; | |
2315 | ||
2316 | bool invalid = (base_addr == NULL_TREE | |
2317 | || ((bitsize > MAX_BITSIZE_MODE_ANY_INT) | |
2318 | && (TREE_CODE (rhs) != INTEGER_CST))); | |
2319 | enum tree_code rhs_code = ERROR_MARK; | |
832a73b9 | 2320 | bool bit_not_p = false; |
9991d1d3 | 2321 | store_operand_info ops[2]; |
2322 | if (invalid) | |
2323 | ; | |
2324 | else if (rhs_valid_for_store_merging_p (rhs)) | |
2325 | { | |
2326 | rhs_code = INTEGER_CST; | |
2327 | ops[0].val = rhs; | |
2328 | } | |
871a91ec | 2329 | else if (TREE_CODE (rhs) != SSA_NAME) |
9991d1d3 | 2330 | invalid = true; |
2331 | else | |
2332 | { | |
2333 | gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2; | |
2334 | if (!is_gimple_assign (def_stmt)) | |
2335 | invalid = true; | |
2336 | else if (handled_load (def_stmt, &ops[0], bitsize, bitpos, | |
2337 | bitregion_start, bitregion_end)) | |
2338 | rhs_code = MEM_REF; | |
832a73b9 | 2339 | else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR) |
2340 | { | |
2341 | tree rhs1 = gimple_assign_rhs1 (def_stmt); | |
2342 | if (TREE_CODE (rhs1) == SSA_NAME | |
2343 | && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1))) | |
2344 | { | |
2345 | bit_not_p = true; | |
2346 | def_stmt = SSA_NAME_DEF_STMT (rhs1); | |
2347 | } | |
2348 | } | |
2349 | if (rhs_code == ERROR_MARK && !invalid) | |
9991d1d3 | 2350 | switch ((rhs_code = gimple_assign_rhs_code (def_stmt))) |
2351 | { | |
2352 | case BIT_AND_EXPR: | |
2353 | case BIT_IOR_EXPR: | |
2354 | case BIT_XOR_EXPR: | |
2355 | tree rhs1, rhs2; | |
2356 | rhs1 = gimple_assign_rhs1 (def_stmt); | |
2357 | rhs2 = gimple_assign_rhs2 (def_stmt); | |
2358 | invalid = true; | |
871a91ec | 2359 | if (TREE_CODE (rhs1) != SSA_NAME) |
9991d1d3 | 2360 | break; |
2361 | def_stmt1 = SSA_NAME_DEF_STMT (rhs1); | |
2362 | if (!is_gimple_assign (def_stmt1) | |
2363 | || !handled_load (def_stmt1, &ops[0], bitsize, bitpos, | |
2364 | bitregion_start, bitregion_end)) | |
2365 | break; | |
2366 | if (rhs_valid_for_store_merging_p (rhs2)) | |
2367 | ops[1].val = rhs2; | |
871a91ec | 2368 | else if (TREE_CODE (rhs2) != SSA_NAME) |
9991d1d3 | 2369 | break; |
2370 | else | |
2371 | { | |
2372 | def_stmt2 = SSA_NAME_DEF_STMT (rhs2); | |
2373 | if (!is_gimple_assign (def_stmt2)) | |
2374 | break; | |
2375 | else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos, | |
2376 | bitregion_start, bitregion_end)) | |
2377 | break; | |
2378 | } | |
2379 | invalid = false; | |
2380 | break; | |
2381 | default: | |
2382 | invalid = true; | |
2383 | break; | |
2384 | } | |
2385 | } | |
2386 | ||
9991d1d3 | 2387 | if (invalid) |
2388 | { | |
c35548ce | 2389 | terminate_all_aliasing_chains (NULL, stmt); |
9991d1d3 | 2390 | return; |
2391 | } | |
2392 | ||
c35548ce | 2393 | struct imm_store_chain_info **chain_info = NULL; |
2394 | if (base_addr) | |
2395 | chain_info = m_stores.get (base_addr); | |
2396 | ||
9991d1d3 | 2397 | store_immediate_info *info; |
2398 | if (chain_info) | |
2399 | { | |
2400 | unsigned int ord = (*chain_info)->m_store_info.length (); | |
2401 | info = new store_immediate_info (bitsize, bitpos, bitregion_start, | |
2402 | bitregion_end, stmt, ord, rhs_code, | |
832a73b9 | 2403 | bit_not_p, ops[0], ops[1]); |
9991d1d3 | 2404 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2405 | { | |
2406 | fprintf (dump_file, "Recording immediate store from stmt:\n"); | |
2407 | print_gimple_stmt (dump_file, stmt, 0); | |
2408 | } | |
2409 | (*chain_info)->m_store_info.safe_push (info); | |
c35548ce | 2410 | terminate_all_aliasing_chains (chain_info, stmt); |
9991d1d3 | 2411 | /* If we reach the limit of stores to merge in a chain terminate and |
2412 | process the chain now. */ | |
2413 | if ((*chain_info)->m_store_info.length () | |
2414 | == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE)) | |
2415 | { | |
2416 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2417 | fprintf (dump_file, | |
2418 | "Reached maximum number of statements to merge:\n"); | |
2419 | terminate_and_release_chain (*chain_info); | |
2420 | } | |
2421 | return; | |
2422 | } | |
2423 | ||
2424 | /* Store aliases any existing chain? */ | |
c35548ce | 2425 | terminate_all_aliasing_chains (NULL, stmt); |
9991d1d3 | 2426 | /* Start a new chain. */ |
2427 | struct imm_store_chain_info *new_chain | |
2428 | = new imm_store_chain_info (m_stores_head, base_addr); | |
2429 | info = new store_immediate_info (bitsize, bitpos, bitregion_start, | |
2430 | bitregion_end, stmt, 0, rhs_code, | |
832a73b9 | 2431 | bit_not_p, ops[0], ops[1]); |
9991d1d3 | 2432 | new_chain->m_store_info.safe_push (info); |
2433 | m_stores.put (base_addr, new_chain); | |
2434 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2435 | { | |
2436 | fprintf (dump_file, "Starting new chain with statement:\n"); | |
2437 | print_gimple_stmt (dump_file, stmt, 0); | |
2438 | fprintf (dump_file, "The base object is:\n"); | |
2439 | print_generic_expr (dump_file, base_addr); | |
2440 | fprintf (dump_file, "\n"); | |
2441 | } | |
2442 | } | |
2443 | ||
3d3e04ac | 2444 | /* Entry point for the pass. Go over each basic block recording chains of |
9991d1d3 | 2445 | immediate stores. Upon encountering a terminating statement (as defined |
2446 | by stmt_terminates_chain_p) process the recorded stores and emit the widened | |
2447 | variants. */ | |
3d3e04ac | 2448 | |
2449 | unsigned int | |
2450 | pass_store_merging::execute (function *fun) | |
2451 | { | |
2452 | basic_block bb; | |
2453 | hash_set<gimple *> orig_stmts; | |
2454 | ||
2455 | FOR_EACH_BB_FN (bb, fun) | |
2456 | { | |
2457 | gimple_stmt_iterator gsi; | |
2458 | unsigned HOST_WIDE_INT num_statements = 0; | |
2459 | /* Record the original statements so that we can keep track of | |
2460 | statements emitted in this pass and not re-process new | |
2461 | statements. */ | |
2462 | for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
2463 | { | |
2464 | if (is_gimple_debug (gsi_stmt (gsi))) | |
2465 | continue; | |
2466 | ||
63eabc9b | 2467 | if (++num_statements >= 2) |
3d3e04ac | 2468 | break; |
2469 | } | |
2470 | ||
2471 | if (num_statements < 2) | |
2472 | continue; | |
2473 | ||
2474 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2475 | fprintf (dump_file, "Processing basic block <%d>:\n", bb->index); | |
2476 | ||
2477 | for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
2478 | { | |
2479 | gimple *stmt = gsi_stmt (gsi); | |
2480 | ||
3a3ba7de | 2481 | if (is_gimple_debug (stmt)) |
2482 | continue; | |
2483 | ||
3d3e04ac | 2484 | if (gimple_has_volatile_ops (stmt)) |
2485 | { | |
2486 | /* Terminate all chains. */ | |
2487 | if (dump_file && (dump_flags & TDF_DETAILS)) | |
2488 | fprintf (dump_file, "Volatile access terminates " | |
2489 | "all chains\n"); | |
2490 | terminate_and_process_all_chains (); | |
2491 | continue; | |
2492 | } | |
2493 | ||
3d3e04ac | 2494 | if (gimple_assign_single_p (stmt) && gimple_vdef (stmt) |
2495 | && !stmt_can_throw_internal (stmt) | |
2496 | && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))) | |
9991d1d3 | 2497 | process_store (stmt); |
2498 | else | |
2499 | terminate_all_aliasing_chains (NULL, stmt); | |
3d3e04ac | 2500 | } |
2501 | terminate_and_process_all_chains (); | |
2502 | } | |
2503 | return 0; | |
2504 | } | |
2505 | ||
2506 | } // anon namespace | |
2507 | ||
2508 | /* Construct and return a store merging pass object. */ | |
2509 | ||
2510 | gimple_opt_pass * | |
2511 | make_pass_store_merging (gcc::context *ctxt) | |
2512 | { | |
2513 | return new pass_store_merging (ctxt); | |
2514 | } | |
3d9a2fb3 | 2515 | |
2516 | #if CHECKING_P | |
2517 | ||
2518 | namespace selftest { | |
2519 | ||
2520 | /* Selftests for store merging helpers. */ | |
2521 | ||
2522 | /* Assert that all elements of the byte arrays X and Y, both of length N | |
2523 | are equal. */ | |
2524 | ||
2525 | static void | |
2526 | verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n) | |
2527 | { | |
2528 | for (unsigned int i = 0; i < n; i++) | |
2529 | { | |
2530 | if (x[i] != y[i]) | |
2531 | { | |
2532 | fprintf (stderr, "Arrays do not match. X:\n"); | |
2533 | dump_char_array (stderr, x, n); | |
2534 | fprintf (stderr, "Y:\n"); | |
2535 | dump_char_array (stderr, y, n); | |
2536 | } | |
2537 | ASSERT_EQ (x[i], y[i]); | |
2538 | } | |
2539 | } | |
2540 | ||
2541 | /* Test shift_bytes_in_array and that it carries bits across | |
2542 | bytes correctly. */ | |
2543 | ||
2544 | static void | |
2545 | verify_shift_bytes_in_array (void) | |
2546 | { | |
2547 | /* byte 1 | byte 0 | |
2548 | 00011111 | 11100000. */ | |
2549 | unsigned char orig[2] = { 0xe0, 0x1f }; | |
2550 | unsigned char in[2]; | |
2551 | memcpy (in, orig, sizeof orig); | |
2552 | ||
2553 | unsigned char expected[2] = { 0x80, 0x7f }; | |
2554 | shift_bytes_in_array (in, sizeof (in), 2); | |
2555 | verify_array_eq (in, expected, sizeof (in)); | |
2556 | ||
2557 | memcpy (in, orig, sizeof orig); | |
2558 | memcpy (expected, orig, sizeof orig); | |
2559 | /* Check that shifting by zero doesn't change anything. */ | |
2560 | shift_bytes_in_array (in, sizeof (in), 0); | |
2561 | verify_array_eq (in, expected, sizeof (in)); | |
2563 | } | |
2564 | ||
2565 | /* Test shift_bytes_in_array_right and that it carries bits across | |
2566 | bytes correctly. */ | |
2567 | ||
2568 | static void | |
2569 | verify_shift_bytes_in_array_right (void) | |
2570 | { | |
2571 | /* byte 0 | byte 1 | |
2572 | 00011111 | 11100000. */ | |
2573 | unsigned char orig[2] = { 0x1f, 0xe0}; | |
2574 | unsigned char in[2]; | |
2575 | memcpy (in, orig, sizeof orig); | |
2576 | unsigned char expected[2] = { 0x07, 0xf8}; | |
2577 | shift_bytes_in_array_right (in, sizeof (in), 2); | |
2578 | verify_array_eq (in, expected, sizeof (in)); | |
2579 | ||
2580 | memcpy (in, orig, sizeof orig); | |
2581 | memcpy (expected, orig, sizeof orig); | |
2582 | /* Check that shifting by zero doesn't change anything. */ | |
2583 | shift_bytes_in_array_right (in, sizeof (in), 0); | |
2584 | verify_array_eq (in, expected, sizeof (in)); | |
2585 | } | |
2586 | ||
2587 | /* Test clear_bit_region that it clears exactly the bits asked and | |
2588 | nothing more. */ | |
2589 | ||
2590 | static void | |
2591 | verify_clear_bit_region (void) | |
2592 | { | |
2593 | /* Start with all bits set and test clearing various patterns in them. */ | |
2594 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; | |
2595 | unsigned char in[3]; | |
2596 | unsigned char expected[3]; | |
2597 | memcpy (in, orig, sizeof in); | |
2598 | ||
2599 | /* Check zeroing out all the bits. */ | |
2600 | clear_bit_region (in, 0, 3 * BITS_PER_UNIT); | |
2601 | expected[0] = expected[1] = expected[2] = 0; | |
2602 | verify_array_eq (in, expected, sizeof in); | |
2603 | ||
2604 | memcpy (in, orig, sizeof in); | |
2605 | /* Leave the first and last bits intact. */ | |
2606 | clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2); | |
2607 | expected[0] = 0x1; | |
2608 | expected[1] = 0; | |
2609 | expected[2] = 0x80; | |
2610 | verify_array_eq (in, expected, sizeof in); | |
2611 | } | |
2612 | ||
2613 | /* Test clear_bit_region_be that it clears exactly the bits asked and | |
2614 | nothing more. */ | |
2615 | ||
2616 | static void | |
2617 | verify_clear_bit_region_be (void) | |
2618 | { | |
2619 | /* Start with all bits set and test clearing various patterns in them. */ | |
2620 | unsigned char orig[3] = { 0xff, 0xff, 0xff}; | |
2621 | unsigned char in[3]; | |
2622 | unsigned char expected[3]; | |
2623 | memcpy (in, orig, sizeof in); | |
2624 | ||
2625 | /* Check zeroing out all the bits. */ | |
2626 | clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT); | |
2627 | expected[0] = expected[1] = expected[2] = 0; | |
2628 | verify_array_eq (in, expected, sizeof in); | |
2629 | ||
2630 | memcpy (in, orig, sizeof in); | |
2631 | /* Leave the first and last bits intact. */ | |
2632 | clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2); | |
2633 | expected[0] = 0x80; | |
2634 | expected[1] = 0; | |
2635 | expected[2] = 0x1; | |
2636 | verify_array_eq (in, expected, sizeof in); | |
2637 | } | |
2638 | ||
2639 | ||
2640 | /* Run all of the selftests within this file. */ | |
2641 | ||
2642 | void | |
2643 | store_merging_c_tests (void) | |
2644 | { | |
2645 | verify_shift_bytes_in_array (); | |
2646 | verify_shift_bytes_in_array_right (); | |
2647 | verify_clear_bit_region (); | |
2648 | verify_clear_bit_region_be (); | |
2649 | } | |
2650 | ||
2651 | } // namespace selftest | |
2652 | #endif /* CHECKING_P. */ |