/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
78c60e3d | 25 | #include "dumpfile.h" |
ebfd146a | 26 | #include "tm.h" |
ebfd146a | 27 | #include "tree.h" |
d8a2d370 | 28 | #include "stor-layout.h" |
237e9c04 | 29 | #include "tm_p.h" |
ebfd146a | 30 | #include "target.h" |
60393bbc AM |
31 | #include "predict.h" |
32 | #include "vec.h" | |
33 | #include "hashtab.h" | |
34 | #include "hash-set.h" | |
35 | #include "machmode.h" | |
36 | #include "hard-reg-set.h" | |
37 | #include "input.h" | |
38 | #include "function.h" | |
39 | #include "dominance.h" | |
40 | #include "cfg.h" | |
ebfd146a | 41 | #include "basic-block.h" |
cf835838 | 42 | #include "gimple-pretty-print.h" |
2fb9a547 AM |
43 | #include "tree-ssa-alias.h" |
44 | #include "internal-fn.h" | |
45 | #include "tree-eh.h" | |
46 | #include "gimple-expr.h" | |
47 | #include "is-a.h" | |
18f429e2 | 48 | #include "gimple.h" |
45b0be94 | 49 | #include "gimplify.h" |
5be5c238 | 50 | #include "gimple-iterator.h" |
18f429e2 | 51 | #include "gimplify-me.h" |
442b4905 AM |
52 | #include "gimple-ssa.h" |
53 | #include "tree-phinodes.h" | |
54 | #include "ssa-iterators.h" | |
d8a2d370 | 55 | #include "stringpool.h" |
442b4905 | 56 | #include "tree-ssanames.h" |
e28030cf AM |
57 | #include "tree-ssa-loop-ivopts.h" |
58 | #include "tree-ssa-loop-manip.h" | |
442b4905 | 59 | #include "tree-ssa-loop.h" |
7ee2468b | 60 | #include "dumpfile.h" |
ebfd146a | 61 | #include "cfgloop.h" |
ebfd146a IR |
62 | #include "tree-chrec.h" |
63 | #include "tree-scalar-evolution.h" | |
64 | #include "tree-vectorizer.h" | |
718f9c0f | 65 | #include "diagnostic-core.h" |
c582198b AM |
66 | #include "hash-map.h" |
67 | #include "plugin-api.h" | |
68 | #include "ipa-ref.h" | |
0136f8f0 | 69 | #include "cgraph.h" |
2eb79bbb SB |
70 | /* Need to include rtl.h, expr.h, etc. for optabs. */ |
71 | #include "expr.h" | |
b0710fe1 | 72 | #include "insn-codes.h" |
2eb79bbb | 73 | #include "optabs.h" |
9b2b7279 | 74 | #include "builtins.h" |
6ad386b7 | 75 | #include "varasm.h" |
ebfd146a | 76 | |
/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>\n", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}

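/* For illustration only: with VECTYPE V4SI (a 128-bit vector mode) and
   COUNT == 3, the function above looks for a 384-bit integer mode to
   hold the whole array of three vectors; if no such mode exists, or the
   target provides no vec_load_lanes/vec_store_lanes pattern for that
   mode pair, lane-based accesses are reported as unusable.  */
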
/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different from the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we
   have to visit the smallest datatype in this function, because that
   determines the VF.  If the smallest datatype in the loop is present only
   as the rhs of a promotion operation, we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.,
   'int_x = (int) short_inv', which we'd expect to have been optimized away
   by invariant motion.  However, we cannot rely on invariant motion to
   always take invariants out of the loop, and so in the case of promotion
   we also have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}

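/* For illustration: for the widening statement 'int_x = (int) short_y'
   the lhs is 4 bytes and the rhs 2 bytes, so the function returns the
   short type with *LHS_SIZE_UNIT == 4 and *RHS_SIZE_UNIT == 2, and the
   vectorization factor is driven by the narrower short elements.  */
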
/* Insert DDR into LOOP_VINFO's list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
      dump_printf (MSG_NOTE, "\n");
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not supported when optimizing"
                         " for size.\n");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for outer-loops.\n");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for non-constant "
                         "step\n");
      return false;
    }

  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
  return true;
}

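/* For illustration only: a DDR recorded by the function above is later
   resolved by versioning the loop on a disjointness test of the two
   accessed ranges.  Conceptually, for unit-stride accesses to A and B
   with element size 4 over N iterations:

     if (a + 4*N <= b || b + 4*N <= a)
       { vectorized loop }
     else
       { scalar loop }  */
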
/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias can check the
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.  */

static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* In loop analysis all data references should be vectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    gcc_unreachable ();

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb
      || (DR_IS_READ (dra) && DR_IS_READ (drb)))
    return false;

  /* Even if we have an anti-dependence then, as the vectorized loop covers at
     least two scalar iterations, there is always also a true dependence.
     As the vectorizer does not re-order loads and stores we can ignore
     the anti-dependence if TBAA can disambiguate both DRs similar to the
     case with known negative distance anti-dependences (positive
     distance anti-dependences would violate TBAA constraints).  */
  if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
       || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
      && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
                                 get_alias_set (DR_REF (drb))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "can't determine dependence between ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  /* Known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "bad dist vector for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "bad dist vector for ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance = %d.\n", dist);

      if (dist == 0)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "dependence distance == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
              dump_printf (MSG_NOTE, "\n");
            }

          /* When we perform grouped accesses and perform implicit CSE
             by detecting equal accesses and doing disambiguation with
             runtime alias tests like for
                .. = a[i];
                .. = a[i+1];
                a[i] = ..;
                a[i+1] = ..;
                *p = ..;
                .. = a[i];
                .. = a[i+1];
             where we will end up loading { a[i], a[i+1] } once, make
             sure that inserting group loads before the first load and
             stores after the last store will do the right thing.
             Similar for groups like
                a[i] = ...;
                ... = a[i];
                a[i+1] = ...;
             where loads from the group interleave with the store.  */
          if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
              || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
            {
              gimple earlier_stmt;
              earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
              if (DR_IS_WRITE
                    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "READ_WRITE dependence in interleaving."
                                     "\n");
                  return true;
                }
            }

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "dependence distance negative.\n");
          /* Record a negative dependence distance to later limit the
             amount of stmt copying / unrolling we can perform.
             Only need to handle read-after-write dependence.  */
          if (DR_IS_READ (drb)
              && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
                  || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
            STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "adjusting maximal vectorization factor to %i\n",
                             *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "dependence distance >= VF.\n");
          continue;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized, possible dependence "
                           "between data-refs ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return true;
    }

  return false;
}

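/* Worked example (for illustration): in

     for (i = 0; i < N; i++)
       a[i + 3] = a[i] + 1;

   the write/read pair has dependence distance 3, so the function above
   clips *MAX_VF to 3; once abs (dist) >= *MAX_VF the distance cannot be
   violated at the chosen vectorization factor and the relation is
   ignored.  */
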
/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */

bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_dependences ===\n");

  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
  if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
                                &LOOP_VINFO_DDRS (loop_vinfo),
                                LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
    return false;

  FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}

/* Function vect_slp_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB that would prohibit basic-block
   vectorization.  */

static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  /* We need to check dependences of statements marked as unvectorizable
     as well, they still can prohibit vectorization.  */

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb)
    return false;

  /* Read-read is OK.  */
  if (DR_IS_READ (dra) && DR_IS_READ (drb))
    return false;

  /* If dra and drb are part of the same interleaving chain consider
     them independent.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
          == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
    }
  else if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "determined dependence between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
      dump_printf (MSG_NOTE, "\n");
    }

  /* We do not vectorize basic blocks with write-write dependencies.  */
  if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
    return true;

  /* If we have a read-write dependence, check that the load is before
     the store.  When we vectorize basic blocks, a vector load can only be
     before the corresponding scalar load, and a vector store can only be
     after its corresponding scalar store.  So the order of the accesses
     is preserved if the load is before the store.  */
  gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    {
      /* That only holds for load-store pairs taking part in vectorization.  */
      if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
          && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
        return false;
    }

  return true;
}

/* Function vect_slp_analyze_data_ref_dependences.

   Examine all the data references in the basic-block, and make sure there
   do not exist any data dependences between them.  */

bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
  struct data_dependence_relation *ddr;
  unsigned int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_data_ref_dependences ===\n");

  if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
                                &BB_VINFO_DDRS (bb_vinfo),
                                vNULL, true))
    return false;

  FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
    if (vect_slp_analyze_data_ref_dependence (ddr))
      return false;

  return true;
}

/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data
      reference cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */

static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to, alignment;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_compute_data_ref_alignment:\n");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  /* Strided loads perform only component accesses, misalignment information
     is irrelevant for them.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    return true;

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop is
     divisible by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "inner step divides the vector-size.\n");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "inner step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  /* Similarly, if we're doing basic-block vectorization, we can only use
     base and misalignment information relative to an innermost loop if the
     misalignment stays the same throughout the execution of the loop.
     As above, this is the case if the stride of the dataref is divisible
     by the vector size.  */
  if (!loop)
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "SLP: step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  base = build_fold_indirect_ref (base_addr);
  alignment = ssize_int (TYPE_ALIGN (vectype) / BITS_PER_UNIT);

  if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown alignment for access: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return true;
    }

  if ((DECL_P (base)
       && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
                                alignment) >= 0)
      || (TREE_CODE (base_addr) == SSA_NAME
          && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
                                                   TREE_TYPE (base_addr)))),
                                   alignment) >= 0)
      || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Do not change the alignment of global variables here if
         flag_section_anchors is enabled as we already generated
         RTL for other functions.  Most global variables should
         have been aligned during the IPA increase_alignment pass.  */
      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
          || (TREE_STATIC (base) && flag_section_anchors))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "can't force alignment of ref: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
              dump_printf (MSG_NOTE, "\n");
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
          dump_printf (MSG_NOTE, "\n");
        }

      ((dataref_aux *)dr->aux)->base_decl = base;
      ((dataref_aux *)dr->aux)->base_misaligned = true;
    }

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  /* Modulo alignment.  */
  misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);

  if (!tree_fits_uhwi_p (misalign))
    {
      /* Negative or overflowed misalignment value.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected misalign value\n");
      return false;
    }

  SET_DR_MISALIGNMENT (dr, tree_to_uhwi (misalign));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
      dump_printf (MSG_NOTE, "\n");
    }

  return true;
}

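/* For illustration: with a 16-byte vectype, a base address known to be
   aligned to at least 16 bytes and DR_INIT == 20, the FLOOR_MOD
   computation above yields DR_MISALIGNMENT == 20 mod 16 == 4, i.e. the
   access starts 4 bytes past a vector alignment boundary.  */
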
/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}

/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  vec<dr_p> same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size
                  == DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
  SET_DR_MISALIGNMENT (dr, -1);
}

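/* For illustration: peeling NPEEL == 2 iterations for a DR with element
   size 4, known misalignment 4 and a 16-byte vectype advances the first
   vector access by 8 bytes, so the code above computes the updated
   misalignment as (4 + 2*4) & 15 == 12.  */
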
/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */

bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (dump_enabled_p ())
            {
              if (DR_IS_READ (dr))
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned load.");
              else
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned "
                                 "store.");

              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dr));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Vectorizing an unaligned access.\n");
    }
  return true;
}

/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    return true;

  return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
          > get_object_alignment (exp));
}

/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          dump_printf (MSG_NOTE,
                       ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unknown misalignment, is_packed = %d\n", is_packed);
      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}

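/* For illustration: for an interleaved group with GROUP_SIZE == 2,
   4 elements per vector and a known misalignment of one element,
   (nelements - mis_in_elements) % GROUP_SIZE == (4 - 1) % 2 != 0, so no
   peel amount can align every member of the group and the function
   returns false.  */
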
/* Calculate the cost of the memory access represented by DR.  */

static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost,
                           stmt_vector_for_cost *body_cost_vec)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int ncopies = vf / nunits;

  if (DR_IS_READ (dr))
    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
                        NULL, body_cost_vec, false);
  else
    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_get_data_access_cost: inside_cost = %d, "
                     "outside_cost = %d.\n", *inside_cost, *outside_cost);
}

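/* For illustration: with a vectorization factor of 8 and 4 elements per
   vector, NCOPIES == 2 above, so the load or store is costed twice per
   vectorized loop iteration.  */
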
/* Insert DR into peeling hash table with NPEEL as key.  */

static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  _vect_peel_info **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  elem.npeel = npeel;
  slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot
        = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
      *new_slot = slot;
    }

  if (!supportable_dr_alignment
      && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    slot->count += VECT_MAX_COST;
}


/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.  */

int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
                                     _vect_peel_extended_info *max)
{
  vect_peel_info elem = *slot;

  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  return 1;
}

/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */

int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
                                   _vect_peel_extended_info *min)
{
  vect_peel_info elem = *slot;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;
  stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
  int single_iter_cost;

  prologue_cost_vec.create (2);
  body_cost_vec.create (2);
  epilogue_cost_vec.create (2);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
                                 &body_cost_vec);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
  outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel,
                                               &dummy, single_iter_cost,
                                               &prologue_cost_vec,
                                               &epilogue_cost_vec);

  /* Prologue and epilogue costs are added to the target model later.
     These costs depend only on the scalar iteration cost, the
     number of peeling iterations finally chosen, and the number of
     misaligned statements.  So discard the information found here.  */
  prologue_cost_vec.release ();
  epilogue_cost_vec.release ();

  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      min->body_cost_vec.release ();
      min->body_cost_vec = body_cost_vec;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }
  else
    body_cost_vec.release ();

  return 1;
}

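/* For illustration: if peeling by 1 iteration yields a loop body cost
   of 10 while peeling by 3 yields 8, the traversal above keeps
   npeel == 3; the outside (prologue/epilogue) cost is compared only to
   break ties between equal body costs.  */
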
/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */

static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel,
                                       stmt_vector_for_cost *body_cost_vec)
{
  struct _vect_peel_extended_info res;

  res.peel_info.dr = NULL;
  res.body_cost_vec = stmt_vector_for_cost ();

  if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      res.inside_cost = INT_MAX;
      res.outside_cost = INT_MAX;
      LOOP_VINFO_PEELING_HTAB (loop_vinfo)
          ->traverse <_vect_peel_extended_info *,
                      vect_peeling_hash_get_lowest_cost> (&res);
    }
  else
    {
      res.peel_info.count = 0;
      LOOP_VINFO_PEELING_HTAB (loop_vinfo)
          ->traverse <_vect_peel_extended_info *,
                      vect_peeling_hash_get_most_frequent> (&res);
    }

  *npeel = res.peel_info.npeel;
  *body_cost_vec = res.body_cost_vec;
  return res.peel_info.dr;
}

1233 | ||
1234 | ||
ebfd146a IR |
1235 | /* Function vect_enhance_data_refs_alignment |
1236 | ||
1237 | This pass will use loop versioning and loop peeling in order to enhance | |
1238 | the alignment of data references in the loop. | |
1239 | ||
1240 | FOR NOW: we assume that whatever versioning/peeling takes place, only the | |
ff802fa1 | 1241 | original loop is to be vectorized. Any other loops that are created by |
ebfd146a | 1242 | the transformations performed in this pass - are not supposed to be |
ff802fa1 | 1243 | vectorized. This restriction will be relaxed. |
ebfd146a IR |
1244 | |
1245 | This pass will require a cost model to guide it whether to apply peeling | |
ff802fa1 | 1246 | or versioning or a combination of the two. For example, the scheme that |
ebfd146a IR |
1247 | intel uses when given a loop with several memory accesses, is as follows: |
1248 | choose one memory access ('p') which alignment you want to force by doing | |
ff802fa1 | 1249 | peeling. Then, either (1) generate a loop in which 'p' is aligned and all |
ebfd146a IR |
1250 | other accesses are not necessarily aligned, or (2) use loop versioning to |
1251 | generate one loop in which all accesses are aligned, and another loop in | |
1252 | which only 'p' is necessarily aligned. | |
1253 | ||
1254 | ("Automatic Intra-Register Vectorization for the Intel Architecture", | |
1255 | Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International | |
1256 | Journal of Parallel Programming, Vol. 30, No. 2, April 2002.) | |
1257 | ||
ff802fa1 | 1258 | Devising a cost model is the most critical aspect of this work. It will |
ebfd146a | 1259 | guide us on which access to peel for, whether to use loop versioning, how |
ff802fa1 | 1260 | many versions to create, etc. The cost model will probably consist of |
ebfd146a IR |
1261 | generic considerations as well as target specific considerations (on |
1262 | powerpc for example, misaligned stores are more painful than misaligned | |
1263 | loads). | |
1264 | ||
1265 | Here are the general steps involved in alignment enhancements: | |
1266 | ||
1267 | -- original loop, before alignment analysis: | |
1268 | for (i=0; i<N; i++){ | |
1269 | x = q[i]; # DR_MISALIGNMENT(q) = unknown | |
1270 | p[i] = y; # DR_MISALIGNMENT(p) = unknown | |
1271 | } | |
1272 | ||
1273 | -- After vect_compute_data_refs_alignment: | |
1274 | for (i=0; i<N; i++){ | |
1275 | x = q[i]; # DR_MISALIGNMENT(q) = 3 | |
1276 | p[i] = y; # DR_MISALIGNMENT(p) = unknown | |
1277 | } | |
1278 | ||
1279 | -- Possibility 1: we do loop versioning: | |
1280 | if (p is aligned) { | |
1281 | for (i=0; i<N; i++){ # loop 1A | |
1282 | x = q[i]; # DR_MISALIGNMENT(q) = 3 | |
1283 | p[i] = y; # DR_MISALIGNMENT(p) = 0 | |
1284 | } | |
1285 | } | |
1286 | else { | |
1287 | for (i=0; i<N; i++){ # loop 1B | |
1288 | x = q[i]; # DR_MISALIGNMENT(q) = 3 | |
1289 | p[i] = y; # DR_MISALIGNMENT(p) = unaligned | |
1290 | } | |
1291 | } | |
1292 | ||
1293 | -- Possibility 2: we do loop peeling: | |
1294 | for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). | |
1295 | x = q[i]; | |
1296 | p[i] = y; | |
1297 | } | |
1298 | for (i = 3; i < N; i++){ # loop 2A | |
1299 | x = q[i]; # DR_MISALIGNMENT(q) = 0 | |
1300 | p[i] = y; # DR_MISALIGNMENT(p) = unknown | |
1301 | } | |
1302 | ||
1303 | -- Possibility 3: combination of loop peeling and versioning: | |
1304 | for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). | |
1305 | x = q[i]; | |
1306 | p[i] = y; | |
1307 | } | |
1308 | if (p is aligned) { | |
1309 | for (i = 3; i<N; i++){ # loop 3A | |
1310 | x = q[i]; # DR_MISALIGNMENT(q) = 0 | |
1311 | p[i] = y; # DR_MISALIGNMENT(p) = 0 | |
1312 | } | |
1313 | } | |
1314 | else { | |
1315 | for (i = 3; i<N; i++){ # loop 3B | |
1316 | x = q[i]; # DR_MISALIGNMENT(q) = 0 | |
1317 | p[i] = y; # DR_MISALIGNMENT(p) = unaligned | |
1318 | } | |
1319 | } | |
1320 | ||
ff802fa1 | 1321 | These loops are later passed to loop_transform to be vectorized. The |
ebfd146a IR |
1322 | vectorizer will use the alignment information to guide the transformation |
1323 | (whether to generate regular loads/stores, or with special handling for | |
1324 | misalignment). */ | |
1325 | ||
1326 | bool | |
1327 | vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) | |
1328 | { | |
9771b263 | 1329 | vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
ebfd146a IR |
1330 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); |
1331 | enum dr_alignment_support supportable_dr_alignment; | |
720f5239 | 1332 | struct data_reference *dr0 = NULL, *first_store = NULL; |
ebfd146a | 1333 | struct data_reference *dr; |
720f5239 | 1334 | unsigned int i, j; |
ebfd146a IR |
1335 | bool do_peeling = false; |
1336 | bool do_versioning = false; | |
1337 | bool stat; | |
1338 | gimple stmt; | |
1339 | stmt_vec_info stmt_info; | |
720f5239 IR |
1340 | unsigned int npeel = 0; |
1341 | bool all_misalignments_unknown = true; | |
1342 | unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
1343 | unsigned possible_npeel_number = 1; | |
1344 | tree vectype; | |
1345 | unsigned int nelements, mis, same_align_drs_max = 0; | |
c3284718 | 1346 | stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost (); |
ebfd146a | 1347 | |
73fbfcad | 1348 | if (dump_enabled_p ()) |
78c60e3d | 1349 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 1350 | "=== vect_enhance_data_refs_alignment ===\n"); |
ebfd146a IR |
1351 | |
1352 | /* While cost model enhancements are expected in the future, the high-level | |
1353 | view of the code at this time is as follows: | |
1354 | ||
673beced RE |
1355 | A) If there is a misaligned access then see if peeling to align |
1356 | this access can make all data references satisfy | |
8f439681 RE |
1357 | vect_supportable_dr_alignment. If so, update data structures |
1358 | as needed and return true. | |
ebfd146a IR |
1359 | |
1360 | B) If peeling wasn't possible and there is a data reference with an | |
1361 | unknown misalignment that does not satisfy vect_supportable_dr_alignment | |
1362 | then see if loop versioning checks can be used to make all data | |
1363 | references satisfy vect_supportable_dr_alignment. If so, update | |
1364 | data structures as needed and return true. | |
1365 | ||
1366 | C) If neither peeling nor versioning was successful, then return false if | |
1367 | any data reference does not satisfy vect_supportable_dr_alignment. | |
1368 | ||
1369 | D) Return true (all data references satisfy vect_supportable_dr_alignment). | |
1370 | ||
1371 | Note, Possibility 3 above (which is peeling and versioning together) is not | |
1372 | being done at this time. */ | |
1373 | ||
1374 | /* (1) Peeling to force alignment. */ | |
1375 | ||
1376 | /* (1.1) Decide whether to perform peeling, and how many iterations to peel: | |
1377 | Considerations: | |
1378 | + How many accesses will become aligned due to the peeling | |
1379 | - How many accesses will become unaligned due to the peeling, | |
1380 | and the cost of misaligned accesses. | |
b8698a0f | 1381 | - The cost of peeling (the extra runtime checks, the increase |
720f5239 | 1382 | in code size). */ |
ebfd146a | 1383 | |
9771b263 | 1384 | FOR_EACH_VEC_ELT (datarefs, i, dr) |
ebfd146a IR |
1385 | { |
1386 | stmt = DR_STMT (dr); | |
1387 | stmt_info = vinfo_for_stmt (stmt); | |
1388 | ||
38eec4c6 | 1389 | if (!STMT_VINFO_RELEVANT_P (stmt_info)) |
39becbac RG |
1390 | continue; |
1391 | ||
ebfd146a IR |
1392 | /* For interleaving, only the alignment of the first access |
1393 | matters. */ | |
0d0293ac | 1394 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
e14c1050 | 1395 | && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
ebfd146a IR |
1396 | continue; |
1397 | ||
39becbac RG |
1398 | /* For invariant accesses there is nothing to enhance. */ |
1399 | if (integer_zerop (DR_STEP (dr))) | |
1400 | continue; | |
1401 | ||
319e6439 RG |
1402 | /* Strided loads perform only component accesses; alignment is |
1403 | irrelevant for them. */ | |
1404 | if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) | |
1405 | continue; | |
1406 | ||
720f5239 IR |
1407 | supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); |
1408 | do_peeling = vector_alignment_reachable_p (dr); | |
1409 | if (do_peeling) | |
ebfd146a | 1410 | { |
720f5239 IR |
1411 | if (known_alignment_for_access_p (dr)) |
1412 | { | |
1413 | unsigned int npeel_tmp; | |
d8ba5b19 RG |
1414 | bool negative = tree_int_cst_compare (DR_STEP (dr), |
1415 | size_zero_node) < 0; | |
720f5239 IR |
1416 | |
1417 | /* Save info about DR in the hash table. */ | |
c203e8a7 TS |
1418 | if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo)) |
1419 | LOOP_VINFO_PEELING_HTAB (loop_vinfo) | |
1420 | = new hash_table<peel_info_hasher> (1); | |
720f5239 IR |
1421 | |
1422 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
1423 | nelements = TYPE_VECTOR_SUBPARTS (vectype); | |
1424 | mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE ( | |
1425 | TREE_TYPE (DR_REF (dr)))); | |
d8ba5b19 | 1426 | npeel_tmp = (negative |
8b8bba2d RG |
1427 | ? (mis - nelements) : (nelements - mis)) |
1428 | & (nelements - 1); | |
720f5239 IR |
1429 | |
1430 | /* For multiple types, it is possible that the bigger type access | |
ff802fa1 | 1431 | will have more than one peeling option. E.g., a loop with two |
720f5239 | 1432 | types: one of size (vector size / 4), and the other one of |
ff802fa1 | 1433 | size (vector size / 8). The vectorization factor will be 8. If both |
720f5239 | 1434 | accesses are misaligned by 3, the first one needs one scalar |
ff802fa1 | 1435 | iteration to be aligned, and the second one needs 5. But |
720f5239 IR |
1436 | the first one will also be aligned by peeling 5 scalar | |
1437 | iterations, and in that case both accesses will be aligned. | |
1438 | Hence, except for the immediate peeling amount, we also want | |
1439 | to try adding a full vector size, as long as we don't exceed | |
1440 | the vectorization factor. | |
1441 | We do this automatically for the cost model, since we calculate | |
1442 | the cost of every peeling option. */ | |
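	      /* An illustrative sketch, not part of the original comment:
		 assume V4SI vectors, so nelements = 4.  A positive-step
		 access misaligned by mis = 3 elements needs
		 npeel_tmp = (4 - 3) & (4 - 1) = 1 scalar iteration to
		 become aligned.  With vf = 8, the unlimited cost model
		 considers possible_npeel_number = 8 / 4 = 2 options, and
		 the loop below records npeel = 1 and npeel = 1 + 4 = 5
		 in the peeling hash table.  */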
8b5e1202 | 1443 | if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
720f5239 IR |
1444 | possible_npeel_number = vf /nelements; |
1445 | ||
1446 | /* Handle the aligned case. We may decide to align some other | |
1447 | access, making DR unaligned. */ | |
1448 | if (DR_MISALIGNMENT (dr) == 0) | |
1449 | { | |
1450 | npeel_tmp = 0; | |
8b5e1202 | 1451 | if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) |
720f5239 IR |
1452 | possible_npeel_number++; |
1453 | } | |
1454 | ||
1455 | for (j = 0; j < possible_npeel_number; j++) | |
1456 | { | |
1457 | gcc_assert (npeel_tmp <= vf); | |
1458 | vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp); | |
1459 | npeel_tmp += nelements; | |
1460 | } | |
1461 | ||
1462 | all_misalignments_unknown = false; | |
1463 | /* Data-ref that was chosen for the case that all the | |
1464 | misalignments are unknown is not relevant anymore, since we | |
1465 | have a data-ref with known alignment. */ | |
1466 | dr0 = NULL; | |
1467 | } | |
1468 | else | |
1469 | { | |
4ba5ea11 RB |
1470 | /* If we don't know any misalignment values, we prefer |
1471 | peeling for the data-ref that has the maximum number of data-refs | |
720f5239 IR |
1472 | with the same alignment, unless the target prefers to align | |
1473 | stores over loads. */ | |
1474 | if (all_misalignments_unknown) | |
1475 | { | |
4ba5ea11 RB |
1476 | unsigned same_align_drs |
1477 | = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length (); | |
1478 | if (!dr0 | |
1479 | || same_align_drs_max < same_align_drs) | |
720f5239 | 1480 | { |
4ba5ea11 | 1481 | same_align_drs_max = same_align_drs; |
720f5239 IR |
1482 | dr0 = dr; |
1483 | } | |
4ba5ea11 RB |
1484 | /* For data-refs with the same number of related |
1485 | accesses prefer the one where the misalign | |
1486 | computation will be invariant in the outermost loop. */ | |
1487 | else if (same_align_drs_max == same_align_drs) | |
1488 | { | |
1489 | struct loop *ivloop0, *ivloop; | |
1490 | ivloop0 = outermost_invariant_loop_for_expr | |
1491 | (loop, DR_BASE_ADDRESS (dr0)); | |
1492 | ivloop = outermost_invariant_loop_for_expr | |
1493 | (loop, DR_BASE_ADDRESS (dr)); | |
1494 | if ((ivloop && !ivloop0) | |
1495 | || (ivloop && ivloop0 | |
1496 | && flow_loop_nested_p (ivloop, ivloop0))) | |
1497 | dr0 = dr; | |
1498 | } | |
720f5239 | 1499 | |
b0af49c4 | 1500 | if (!first_store && DR_IS_WRITE (dr)) |
720f5239 IR |
1501 | first_store = dr; |
1502 | } | |
1503 | ||
1504 | /* If there are both known and unknown misaligned accesses in the | |
1505 | loop, we choose peeling amount according to the known | |
1506 | accesses. */ | |
720f5239 IR |
1507 | if (!supportable_dr_alignment) |
1508 | { | |
1509 | dr0 = dr; | |
b0af49c4 | 1510 | if (!first_store && DR_IS_WRITE (dr)) |
720f5239 IR |
1511 | first_store = dr; |
1512 | } | |
1513 | } | |
1514 | } | |
1515 | else | |
1516 | { | |
1517 | if (!aligned_access_p (dr)) | |
1518 | { | |
73fbfcad | 1519 | if (dump_enabled_p ()) |
e645e942 TJ |
1520 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
1521 | "vector alignment may not be reachable\n"); | |
720f5239 IR |
1522 | break; |
1523 | } | |
1524 | } | |
ebfd146a IR |
1525 | } |
1526 | ||
afb119be RB |
1527 | /* Check if we can possibly peel the loop. */ |
1528 | if (!vect_can_advance_ivs_p (loop_vinfo) | |
ebfd146a IR |
1529 | || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))) |
1530 | do_peeling = false; | |
1531 | ||
b1aef01e RB |
1532 | /* If we don't know how many times the peeling loop will run, | |
1533 | assume it will run VF-1 times and disable peeling if the remaining | |
1534 | iterations are fewer than the vectorization factor. */ | |
1535 | if (do_peeling | |
1536 | && all_misalignments_unknown | |
1537 | && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) | |
1538 | && (LOOP_VINFO_INT_NITERS (loop_vinfo) | |
1539 | < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1)) | |
1540 | do_peeling = false; | |
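  /* An illustrative sketch, not part of the original comment: with
     vf = 4 and unknown misalignment, peeling is assumed to consume up
     to vf - 1 = 3 iterations, so a known trip count below
     2 * 4 - 1 = 7 could leave fewer than 4 iterations for the vector
     loop; peeling is therefore disabled above.  */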
1541 | ||
1542 | if (do_peeling | |
1543 | && all_misalignments_unknown | |
720f5239 IR |
1544 | && vect_supportable_dr_alignment (dr0, false)) |
1545 | { | |
720f5239 IR |
1546 | /* Check whether the target prefers to align stores over loads, i.e., | |
1547 | whether misaligned stores are more expensive than misaligned loads | |
1548 | (taking drs with the same alignment into account). */ | |
1549 | if (first_store && DR_IS_READ (dr0)) | |
1550 | { | |
1551 | unsigned int load_inside_cost = 0, load_outside_cost = 0; | |
1552 | unsigned int store_inside_cost = 0, store_outside_cost = 0; | |
1553 | unsigned int load_inside_penalty = 0, load_outside_penalty = 0; | |
1554 | unsigned int store_inside_penalty = 0, store_outside_penalty = 0; | |
9771b263 DN |
1555 | stmt_vector_for_cost dummy; |
1556 | dummy.create (2); | |
92345349 BS |
1557 | |
1558 | vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost, | |
1559 | &dummy); | |
1560 | vect_get_data_access_cost (first_store, &store_inside_cost, | |
1561 | &store_outside_cost, &dummy); | |
720f5239 | 1562 | |
9771b263 | 1563 | dummy.release (); |
720f5239 IR |
1564 | |
1565 | /* Calculate the penalty for leaving FIRST_STORE unaligned (by | |
1566 | aligning the load DR0). */ | |
1567 | load_inside_penalty = store_inside_cost; | |
1568 | load_outside_penalty = store_outside_cost; | |
9771b263 DN |
1569 | for (i = 0; |
1570 | STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt ( | |
1571 | DR_STMT (first_store))).iterate (i, &dr); | |
720f5239 IR |
1572 | i++) |
1573 | if (DR_IS_READ (dr)) | |
1574 | { | |
1575 | load_inside_penalty += load_inside_cost; | |
1576 | load_outside_penalty += load_outside_cost; | |
1577 | } | |
1578 | else | |
1579 | { | |
1580 | load_inside_penalty += store_inside_cost; | |
1581 | load_outside_penalty += store_outside_cost; | |
1582 | } | |
1583 | ||
1584 | /* Calculate the penalty for leaving DR0 unaligned (by | |
1585 | aligning the FIRST_STORE). */ | |
1586 | store_inside_penalty = load_inside_cost; | |
1587 | store_outside_penalty = load_outside_cost; | |
9771b263 DN |
1588 | for (i = 0; |
1589 | STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt ( | |
1590 | DR_STMT (dr0))).iterate (i, &dr); | |
720f5239 IR |
1591 | i++) |
1592 | if (DR_IS_READ (dr)) | |
1593 | { | |
1594 | store_inside_penalty += load_inside_cost; | |
1595 | store_outside_penalty += load_outside_cost; | |
1596 | } | |
1597 | else | |
1598 | { | |
1599 | store_inside_penalty += store_inside_cost; | |
1600 | store_outside_penalty += store_outside_cost; | |
1601 | } | |
1602 | ||
1603 | if (load_inside_penalty > store_inside_penalty | |
1604 | || (load_inside_penalty == store_inside_penalty | |
1605 | && load_outside_penalty > store_outside_penalty)) | |
1606 | dr0 = first_store; | |
1607 | } | |
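      /* An illustrative sketch with hypothetical costs, not part of
	 the original comment: say a misaligned store costs 4 per
	 iteration, a misaligned load costs 2, and each of FIRST_STORE
	 and DR0 has one same-aligned load.  Aligning the load DR0 then
	 incurs load_inside_penalty = 4 + 2 = 6, while aligning
	 FIRST_STORE incurs store_inside_penalty = 2 + 2 = 4, so DR0 is
	 switched to FIRST_STORE above.  */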
1608 | ||
1609 | /* In case there are only loads with different unknown misalignments, use | |
1610 | peeling only if it may help to align other accesses in the loop. */ | |
9771b263 DN |
1611 | if (!first_store |
1612 | && !STMT_VINFO_SAME_ALIGN_REFS ( | |
1613 | vinfo_for_stmt (DR_STMT (dr0))).length () | |
720f5239 IR |
1614 | && vect_supportable_dr_alignment (dr0, false) |
1615 | != dr_unaligned_supported) | |
1616 | do_peeling = false; | |
1617 | } | |
1618 | ||
1619 | if (do_peeling && !dr0) | |
1620 | { | |
1621 | /* Peeling is possible, but there is no data access that is unsupported | |
1622 | when unaligned. So we try to choose the best possible peeling. */ | |
1623 | ||
1624 | /* We should get here only if there are drs with known misalignment. */ | |
1625 | gcc_assert (!all_misalignments_unknown); | |
1626 | ||
1627 | /* Choose the best peeling from the hash table. */ | |
c3e7ee41 | 1628 | dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel, |
92345349 | 1629 | &body_cost_vec); |
720f5239 IR |
1630 | if (!dr0 || !npeel) |
1631 | do_peeling = false; | |
b1aef01e RB |
1632 | |
1633 | /* If peeling by npeel will result in a remaining loop not iterating | |
1634 | enough to be vectorized then do not peel. */ | |
1635 | if (do_peeling | |
1636 | && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) | |
1637 | && (LOOP_VINFO_INT_NITERS (loop_vinfo) | |
1638 | < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel)) | |
1639 | do_peeling = false; | |
720f5239 IR |
1640 | } |
1641 | ||
ebfd146a IR |
1642 | if (do_peeling) |
1643 | { | |
720f5239 IR |
1644 | stmt = DR_STMT (dr0); |
1645 | stmt_info = vinfo_for_stmt (stmt); | |
1646 | vectype = STMT_VINFO_VECTYPE (stmt_info); | |
1647 | nelements = TYPE_VECTOR_SUBPARTS (vectype); | |
ebfd146a IR |
1648 | |
1649 | if (known_alignment_for_access_p (dr0)) | |
1650 | { | |
d8ba5b19 RG |
1651 | bool negative = tree_int_cst_compare (DR_STEP (dr0), |
1652 | size_zero_node) < 0; | |
720f5239 IR |
1653 | if (!npeel) |
1654 | { | |
1655 | /* Since it's known at compile time, compute the number of | |
1656 | iterations in the peeled loop (the peeling factor) for use in | |
1657 | updating DR_MISALIGNMENT values. The peeling factor is the | |
1658 | vectorization factor minus the misalignment as an element | |
1659 | count. */ | |
1660 | mis = DR_MISALIGNMENT (dr0); | |
1661 | mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0)))); | |
8b8bba2d RG |
1662 | npeel = ((negative ? mis - nelements : nelements - mis) |
1663 | & (nelements - 1)); | |
720f5239 | 1664 | } |
ebfd146a | 1665 | |
b8698a0f | 1666 | /* For interleaved data access every iteration accesses all the |
ebfd146a IR |
1667 | members of the group; therefore we divide the number of iterations |
1668 | by the group size. */ |
b8698a0f | 1669 | stmt_info = vinfo_for_stmt (DR_STMT (dr0)); |
0d0293ac | 1670 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) |
e14c1050 | 1671 | npeel /= GROUP_SIZE (stmt_info); |
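	  /* An illustrative sketch, not part of the original comment:
	     with eight-element vectors and a positive step, a
	     misalignment of mis = 4 elements gives
	     npeel = (8 - 4) & 7 = 4.  If DR0 belongs to an interleaved
	     group of size 2, each scalar iteration accesses both group
	     members, so only npeel = 4 / 2 = 2 peel iterations are
	     needed.  */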
ebfd146a | 1672 | |
73fbfcad | 1673 | if (dump_enabled_p ()) |
78c60e3d | 1674 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 1675 | "Try peeling by %d\n", npeel); |
ebfd146a IR |
1676 | } |
1677 | ||
1678 | /* Ensure that all data refs can be vectorized after the peel. */ | |
9771b263 | 1679 | FOR_EACH_VEC_ELT (datarefs, i, dr) |
ebfd146a IR |
1680 | { |
1681 | int save_misalignment; | |
1682 | ||
1683 | if (dr == dr0) | |
1684 | continue; | |
1685 | ||
1686 | stmt = DR_STMT (dr); | |
1687 | stmt_info = vinfo_for_stmt (stmt); | |
1688 | /* For interleaving, only the alignment of the first access | |
1689 | matters. */ | |
0d0293ac | 1690 | if (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
e14c1050 | 1691 | && GROUP_FIRST_ELEMENT (stmt_info) != stmt) |
ebfd146a IR |
1692 | continue; |
1693 | ||
319e6439 RG |
1694 | /* Strided loads perform only component accesses; alignment is | |
1695 | irrelevant for them. */ | |
1696 | if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) | |
1697 | continue; | |
1698 | ||
ebfd146a IR |
1699 | save_misalignment = DR_MISALIGNMENT (dr); |
1700 | vect_update_misalignment_for_peel (dr, dr0, npeel); | |
720f5239 | 1701 | supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
ebfd146a | 1702 | SET_DR_MISALIGNMENT (dr, save_misalignment); |
b8698a0f | 1703 | |
ebfd146a IR |
1704 | if (!supportable_dr_alignment) |
1705 | { | |
1706 | do_peeling = false; | |
1707 | break; | |
1708 | } | |
1709 | } | |
1710 | ||
720f5239 IR |
1711 | if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0) |
1712 | { | |
1713 | stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); | |
1714 | if (!stat) | |
1715 | do_peeling = false; | |
1716 | else | |
c7e62a26 | 1717 | { |
9771b263 | 1718 | body_cost_vec.release (); |
c7e62a26 RG |
1719 | return stat; |
1720 | } | |
720f5239 IR |
1721 | } |
1722 | ||
4f17aa0b XDL |
1723 | if (do_peeling) |
1724 | { | |
1725 | unsigned max_allowed_peel | |
1726 | = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT); | |
1727 | if (max_allowed_peel != (unsigned)-1) | |
1728 | { | |
1729 | unsigned max_peel = npeel; | |
1730 | if (max_peel == 0) | |
1731 | { | |
1732 | gimple dr_stmt = DR_STMT (dr0); | |
1733 | stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt); | |
1734 | tree vtype = STMT_VINFO_VECTYPE (vinfo); | |
1735 | max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1; | |
1736 | } | |
1737 | if (max_peel > max_allowed_peel) | |
1738 | { | |
1739 | do_peeling = false; | |
1740 | if (dump_enabled_p ()) | |
1741 | dump_printf_loc (MSG_NOTE, vect_location, | |
1742 | "Disable peeling, max peels reached: %d\n", max_peel); | |
1743 | } | |
1744 | } | |
1745 | } | |
1746 | ||
ebfd146a IR |
1747 | if (do_peeling) |
1748 | { | |
c3e7ee41 | 1749 | stmt_info_for_cost *si; |
92345349 | 1750 | void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); |
c3e7ee41 | 1751 | |
ebfd146a IR |
1752 | /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i. |
1753 | If the misalignment of DR_i is identical to that of dr0 then set | |
1754 | DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and | |
1755 | dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i) | |
1756 | by the peeling factor times the element size of DR_i (MOD the | |
1757 | vectorization factor times the size). Otherwise, the | |
1758 | misalignment of DR_i must be set to unknown. */ | |
9771b263 | 1759 | FOR_EACH_VEC_ELT (datarefs, i, dr) |
ebfd146a IR |
1760 | if (dr != dr0) |
1761 | vect_update_misalignment_for_peel (dr, dr0, npeel); | |
1762 | ||
1763 | LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0; | |
720f5239 | 1764 | if (npeel) |
15e693cc | 1765 | LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel; |
720f5239 | 1766 | else |
15e693cc RB |
1767 | LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) |
1768 | = DR_MISALIGNMENT (dr0); | |
ebfd146a | 1769 | SET_DR_MISALIGNMENT (dr0, 0); |
73fbfcad | 1770 | if (dump_enabled_p ()) |
78c60e3d SS |
1771 | { |
1772 | dump_printf_loc (MSG_NOTE, vect_location, | |
e645e942 | 1773 | "Alignment of access forced using peeling.\n"); |
78c60e3d | 1774 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 1775 | "Peeling for alignment will be applied.\n"); |
78c60e3d | 1776 | } |
c3e7ee41 BS |
1777 | /* We've delayed passing the inside-loop peeling costs to the |
1778 | target cost model until we were sure peeling would happen. | |
1779 | Do so now. */ | |
9771b263 | 1780 | if (body_cost_vec.exists ()) |
c3e7ee41 | 1781 | { |
9771b263 | 1782 | FOR_EACH_VEC_ELT (body_cost_vec, i, si) |
92345349 BS |
1783 | { |
1784 | struct _stmt_vec_info *stmt_info | |
1785 | = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; | |
1786 | (void) add_stmt_cost (data, si->count, si->kind, stmt_info, | |
1787 | si->misalign, vect_body); | |
1788 | } | |
9771b263 | 1789 | body_cost_vec.release (); |
c3e7ee41 BS |
1790 | } |
1791 | ||
a70d6342 | 1792 | stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
ebfd146a IR |
1793 | gcc_assert (stat); |
1794 | return stat; | |
1795 | } | |
1796 | } | |
1797 | ||
9771b263 | 1798 | body_cost_vec.release (); |
ebfd146a IR |
1799 | |
1800 | /* (2) Versioning to force alignment. */ | |
1801 | ||
1802 | /* Try versioning if: | |
d6d11272 XDL |
1803 | 1) optimize loop for speed |
1804 | 2) there is at least one unsupported misaligned data ref with an unknown | |
ebfd146a | 1805 | misalignment, and |
d6d11272 XDL |
1806 | 3) all misaligned data refs with a known misalignment are supported, and |
1807 | 4) the number of runtime alignment checks is within reason. */ | |
ebfd146a | 1808 | |
b8698a0f | 1809 | do_versioning = |
d6d11272 | 1810 | optimize_loop_nest_for_speed_p (loop) |
ebfd146a IR |
1811 | && (!loop->inner); /* FORNOW */ |
1812 | ||
1813 | if (do_versioning) | |
1814 | { | |
9771b263 | 1815 | FOR_EACH_VEC_ELT (datarefs, i, dr) |
ebfd146a IR |
1816 | { |
1817 | stmt = DR_STMT (dr); | |
1818 | stmt_info = vinfo_for_stmt (stmt); | |
1819 | ||
1820 | /* For interleaving, only the alignment of the first access | |
1821 | matters. */ | |
1822 | if (aligned_access_p (dr) | |
0d0293ac | 1823 | || (STMT_VINFO_GROUPED_ACCESS (stmt_info) |
e14c1050 | 1824 | && GROUP_FIRST_ELEMENT (stmt_info) != stmt)) |
ebfd146a IR |
1825 | continue; |
1826 | ||
319e6439 RG |
1827 | /* Strided loads perform only component accesses; alignment is | |
1828 | irrelevant for them. */ | |
1829 | if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)) | |
1830 | continue; | |
1831 | ||
720f5239 | 1832 | supportable_dr_alignment = vect_supportable_dr_alignment (dr, false); |
ebfd146a IR |
1833 | |
1834 | if (!supportable_dr_alignment) | |
1835 | { | |
1836 | gimple stmt; | |
1837 | int mask; | |
1838 | tree vectype; | |
1839 | ||
1840 | if (known_alignment_for_access_p (dr) | |
9771b263 | 1841 | || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length () |
ebfd146a IR |
1842 | >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS)) |
1843 | { | |
1844 | do_versioning = false; | |
1845 | break; | |
1846 | } | |
1847 | ||
1848 | stmt = DR_STMT (dr); | |
1849 | vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); | |
1850 | gcc_assert (vectype); | |
b8698a0f | 1851 | |
ebfd146a IR |
1852 | /* The rightmost bits of an aligned address must be zeros. |
1853 | Construct the mask needed for this test. For example, | |
1854 | GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the | |
1855 | mask must be 15 = 0xf. */ | |
1856 | mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1; | |
1857 | ||
1858 | /* FORNOW: use the same mask to test all potentially unaligned | |
1859 | references in the loop. The vectorizer currently supports | |
1860 | a single vector size, see the reference to | |
1861 | GET_MODE_NUNITS (TYPE_MODE (vectype)) where the | |
1862 | vectorization factor is computed. */ | |
1863 | gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo) | |
1864 | || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask); | |
1865 | LOOP_VINFO_PTR_MASK (loop_vinfo) = mask; | |
9771b263 DN |
1866 | LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push ( |
1867 | DR_STMT (dr)); | |
ebfd146a IR |
1868 | } |
1869 | } | |
b8698a0f | 1870 | |
ebfd146a | 1871 | /* Versioning requires at least one misaligned data reference. */ |
e9dbe7bb | 1872 | if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) |
ebfd146a IR |
1873 | do_versioning = false; |
1874 | else if (!do_versioning) | |
9771b263 | 1875 | LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0); |
ebfd146a IR |
1876 | } |
1877 | ||
1878 | if (do_versioning) | |
1879 | { | |
9771b263 | 1880 | vec<gimple> may_misalign_stmts |
ebfd146a IR |
1881 | = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo); |
1882 | gimple stmt; | |
1883 | ||
1884 | /* It can now be assumed that the data references in the statements | |
1885 | in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version | |
1886 | of the loop being vectorized. */ | |
9771b263 | 1887 | FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt) |
ebfd146a IR |
1888 | { |
1889 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1890 | dr = STMT_VINFO_DATA_REF (stmt_info); | |
1891 | SET_DR_MISALIGNMENT (dr, 0); | |
73fbfcad | 1892 | if (dump_enabled_p ()) |
e645e942 TJ |
1893 | dump_printf_loc (MSG_NOTE, vect_location, |
1894 | "Alignment of access forced using versioning.\n"); | |
ebfd146a IR |
1895 | } |
1896 | ||
73fbfcad | 1897 | if (dump_enabled_p ()) |
e645e942 TJ |
1898 | dump_printf_loc (MSG_NOTE, vect_location, |
1899 | "Versioning for alignment will be applied.\n"); | |
ebfd146a IR |
1900 | |
1901 | /* Peeling and versioning can't be done together at this time. */ | |
1902 | gcc_assert (! (do_peeling && do_versioning)); | |
1903 | ||
a70d6342 | 1904 | stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
ebfd146a IR |
1905 | gcc_assert (stat); |
1906 | return stat; | |
1907 | } | |
1908 | ||
1909 | /* This point is reached if neither peeling nor versioning is being done. */ | |
1910 | gcc_assert (! (do_peeling || do_versioning)); | |
1911 | ||
a70d6342 | 1912 | stat = vect_verify_datarefs_alignment (loop_vinfo, NULL); |
ebfd146a IR |
1913 | return stat; |
1914 | } | |
1915 | ||
1916 | ||
777e1f09 RG |
1917 | /* Function vect_find_same_alignment_drs. |
1918 | ||
1919 | Update group and alignment relations according to the chosen | |
1920 | vectorization factor. */ | |
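/* An illustrative sketch, not part of the original comment: with a
   vectorization factor of 4 and 4-byte ints, the accesses a[i] and
   a[i + 4] have dependence distance 4; since 4 % 4 == 0 and the type
   sizes match, the two references always share the same misalignment
   with respect to a 16-byte vector, and are recorded below as
   same-align refs.  */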
1921 | ||
1922 | static void | |
1923 | vect_find_same_alignment_drs (struct data_dependence_relation *ddr, | |
1924 | loop_vec_info loop_vinfo) | |
1925 | { | |
1926 | unsigned int i; | |
1927 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
1928 | int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
1929 | struct data_reference *dra = DDR_A (ddr); | |
1930 | struct data_reference *drb = DDR_B (ddr); | |
1931 | stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); | |
1932 | stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); | |
1933 | int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra)))); | |
1934 | int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb)))); | |
1935 | lambda_vector dist_v; | |
1936 | unsigned int loop_depth; | |
1937 | ||
1938 | if (DDR_ARE_DEPENDENT (ddr) == chrec_known) | |
1939 | return; | |
1940 | ||
720f5239 | 1941 | if (dra == drb) |
777e1f09 RG |
1942 | return; |
1943 | ||
1944 | if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know) | |
1945 | return; | |
1946 | ||
1947 | /* Loop-based vectorization and known data dependence. */ | |
1948 | if (DDR_NUM_DIST_VECTS (ddr) == 0) | |
1949 | return; | |
1950 | ||
46241ea9 RG |
1951 | /* Data-dependence analysis reports a distance vector of zero |
1952 | for data-references that overlap only in the first iteration | |
1953 | but have different sign step (see PR45764). | |
1954 | So as a sanity check require equal DR_STEP. */ | |
1955 | if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0)) | |
1956 | return; | |
1957 | ||
777e1f09 | 1958 | loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr)); |
9771b263 | 1959 | FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v) |
777e1f09 RG |
1960 | { |
1961 | int dist = dist_v[loop_depth]; | |
1962 | ||
73fbfcad | 1963 | if (dump_enabled_p ()) |
78c60e3d | 1964 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 1965 | "dependence distance = %d.\n", dist); |
777e1f09 RG |
1966 | |
1967 | /* Same loop iteration. */ | |
1968 | if (dist == 0 | |
1969 | || (dist % vectorization_factor == 0 && dra_size == drb_size)) | |
1970 | { | |
1971 | /* Two references with distance zero have the same alignment. */ | |
9771b263 DN |
1972 | STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb); |
1973 | STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra); | |
73fbfcad | 1974 | if (dump_enabled_p ()) |
777e1f09 | 1975 | { |
e645e942 TJ |
1976 | dump_printf_loc (MSG_NOTE, vect_location, |
1977 | "accesses have the same alignment.\n"); | |
78c60e3d | 1978 | dump_printf (MSG_NOTE, |
e645e942 | 1979 | "dependence distance modulo vf == 0 between "); |
78c60e3d SS |
1980 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); |
1981 | dump_printf (MSG_NOTE, " and "); | |
1982 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); | |
e645e942 | 1983 | dump_printf (MSG_NOTE, "\n"); |
777e1f09 RG |
1984 | } |
1985 | } | |
1986 | } | |
1987 | } | |
1988 | ||
1989 | ||
ebfd146a IR |
1990 | /* Function vect_analyze_data_refs_alignment |
1991 | ||
1992 | Analyze the alignment of the data-references in the loop. | |
1993 | Return FALSE if a data reference is found that cannot be vectorized. */ | |
1994 | ||
1995 | bool | |
b8698a0f | 1996 | vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo, |
a70d6342 | 1997 | bb_vec_info bb_vinfo) |
ebfd146a | 1998 | { |
73fbfcad | 1999 | if (dump_enabled_p ()) |
78c60e3d | 2000 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 2001 | "=== vect_analyze_data_refs_alignment ===\n"); |
ebfd146a | 2002 | |
777e1f09 RG |
2003 | /* Mark groups of data references with same alignment using |
2004 | data dependence information. */ | |
2005 | if (loop_vinfo) | |
2006 | { | |
9771b263 | 2007 | vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo); |
777e1f09 RG |
2008 | struct data_dependence_relation *ddr; |
2009 | unsigned int i; | |
2010 | ||
9771b263 | 2011 | FOR_EACH_VEC_ELT (ddrs, i, ddr) |
777e1f09 RG |
2012 | vect_find_same_alignment_drs (ddr, loop_vinfo); |
2013 | } | |
2014 | ||
a70d6342 | 2015 | if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo)) |
ebfd146a | 2016 | { |
73fbfcad | 2017 | if (dump_enabled_p ()) |
e645e942 TJ |
2018 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2019 | "not vectorized: can't calculate alignment " | |
2020 | "for data ref.\n"); | |
ebfd146a IR |
2021 | return false; |
2022 | } | |
2023 | ||
2024 | return true; | |
2025 | } | |
2026 | ||
2027 | ||
0d0293ac MM |
2028 | /* Analyze groups of accesses: check that DR belongs to a group of |
2029 | accesses of legal size, step, etc. Detect gaps, single element | |
2030 | interleaving, and other special cases. Set grouped access info. | |
ebfd146a IR |
2031 | Collect groups of strided stores for further use in SLP analysis. */ |
2032 | ||
2033 | static bool | |
2034 | vect_analyze_group_access (struct data_reference *dr) | |
2035 | { | |
2036 | tree step = DR_STEP (dr); | |
2037 | tree scalar_type = TREE_TYPE (DR_REF (dr)); | |
2038 | HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); | |
2039 | gimple stmt = DR_STMT (dr); | |
2040 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2041 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
a70d6342 | 2042 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
ebfd146a | 2043 | HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
0d0293ac | 2044 | HOST_WIDE_INT groupsize, last_accessed_element = 1; |
ebfd146a | 2045 | bool slp_impossible = false; |
deaf836c IR |
2046 | struct loop *loop = NULL; |
2047 | ||
2048 | if (loop_vinfo) | |
2049 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
ebfd146a | 2050 | |
0d0293ac MM |
2051 | /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the |
2052 | size of the interleaving group (including gaps). */ | |
08940f33 | 2053 | groupsize = absu_hwi (dr_step) / type_size; |
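  /* An illustrative sketch, not part of the original comment: 4-byte
     elements accessed with DR_STEP 16 give groupsize = 16 / 4 = 4,
     i.e. the group spans four element slots per iteration, gaps
     included.  */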
ebfd146a IR |
2054 | |
2055 | /* A non-consecutive access is possible only if it is part of interleaving. */ |
e14c1050 | 2056 | if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) |
ebfd146a IR |
2057 | { |
2058 | /* Check if this DR is a part of interleaving, and is a single | |
2059 | element of the group that is accessed in the loop. */ | |
b8698a0f | 2060 | |
ebfd146a IR |
2061 | /* Gaps are supported only for loads. STEP must be a multiple of the type |
2062 | size. The size of the group must be a power of 2. */ | |
2063 | if (DR_IS_READ (dr) | |
2064 | && (dr_step % type_size) == 0 | |
0d0293ac MM |
2065 | && groupsize > 0 |
2066 | && exact_log2 (groupsize) != -1) | |
ebfd146a | 2067 | { |
e14c1050 | 2068 | GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt; |
0d0293ac | 2069 | GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
73fbfcad | 2070 | if (dump_enabled_p ()) |
ebfd146a | 2071 | { |
e645e942 TJ |
2072 | dump_printf_loc (MSG_NOTE, vect_location, |
2073 | "Detected single element interleaving "); | |
78c60e3d SS |
2074 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr)); |
2075 | dump_printf (MSG_NOTE, " step "); | |
2076 | dump_generic_expr (MSG_NOTE, TDF_SLIM, step); | |
e645e942 | 2077 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a | 2078 | } |
48df3fa6 IR |
2079 | |
2080 | if (loop_vinfo) | |
2081 | { | |
73fbfcad | 2082 | if (dump_enabled_p ()) |
78c60e3d | 2083 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 TJ |
2084 | "Data access with gaps requires scalar " |
2085 | "epilogue loop\n"); | |
deaf836c IR |
2086 | if (loop->inner) |
2087 | { | |
73fbfcad | 2088 | if (dump_enabled_p ()) |
78c60e3d SS |
2089 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2090 | "Peeling for outer loop is not" | |
e645e942 | 2091 | " supported\n"); |
deaf836c IR |
2092 | return false; |
2093 | } | |
2094 | ||
2095 | LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; | |
48df3fa6 IR |
2096 | } |
2097 | ||
ebfd146a IR |
2098 | return true; |
2099 | } | |
4b5caab7 | 2100 | |
73fbfcad | 2101 | if (dump_enabled_p ()) |
4b5caab7 | 2102 | { |
78c60e3d | 2103 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 TJ |
2104 | "not consecutive access "); |
2105 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
2106 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
4b5caab7 IR |
2107 | } |
2108 | ||
2109 | if (bb_vinfo) | |
2110 | { | |
2111 | /* Mark the statement as unvectorizable. */ | |
2112 | STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; | |
2113 | return true; | |
2114 | } | |
78c60e3d | 2115 | |
ebfd146a IR |
2116 | return false; |
2117 | } | |
2118 | ||
e14c1050 | 2119 | if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt) |
ebfd146a IR |
2120 | { |
2121 | /* First stmt in the interleaving chain. Check the chain. */ | |
e14c1050 | 2122 | gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); |
ebfd146a | 2123 | struct data_reference *data_ref = dr; |
df398a37 | 2124 | unsigned int count = 1; |
ebfd146a IR |
2125 | tree prev_init = DR_INIT (data_ref); |
2126 | gimple prev = stmt; | |
08940f33 RB |
2127 | HOST_WIDE_INT diff, gaps = 0; |
2128 | unsigned HOST_WIDE_INT count_in_bytes; | |
ebfd146a IR |
2129 | |
2130 | while (next) | |
2131 | { | |
ff802fa1 IR |
2132 | /* Skip same data-refs. In case two or more stmts share a | |
2133 | data-ref (supported only for loads), we vectorize only the first | |
2134 | stmt, and the rest get their vectorized loads from the first | |
2135 | one. */ | |
ebfd146a IR |
2136 | if (!tree_int_cst_compare (DR_INIT (data_ref), |
2137 | DR_INIT (STMT_VINFO_DATA_REF ( | |
2138 | vinfo_for_stmt (next))))) | |
2139 | { | |
b0af49c4 | 2140 | if (DR_IS_WRITE (data_ref)) |
ebfd146a | 2141 | { |
73fbfcad | 2142 | if (dump_enabled_p ()) |
e645e942 TJ |
2143 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2144 | "Two store stmts share the same dr.\n"); | |
ebfd146a IR |
2145 | return false; |
2146 | } | |
2147 | ||
ebfd146a | 2148 | /* For load use the same data-ref load. */ |
e14c1050 | 2149 | GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev; |
ebfd146a IR |
2150 | |
2151 | prev = next; | |
e14c1050 | 2152 | next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
ebfd146a IR |
2153 | continue; |
2154 | } | |
48df3fa6 | 2155 | |
ebfd146a | 2156 | prev = next; |
08940f33 | 2157 | data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next)); |
ebfd146a | 2158 | |
08940f33 RB |
2159 | /* All group members have the same STEP by construction. */ |
2160 | gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0)); | |
ebfd146a | 2161 | |
ebfd146a IR |
2162 | /* Check that the distance between two accesses is equal to the type |
2163 | size. Otherwise, we have gaps. */ | |
2164 | diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) | |
2165 | - TREE_INT_CST_LOW (prev_init)) / type_size; | |
2166 | if (diff != 1) | |
2167 | { | |
2168 | /* FORNOW: SLP of accesses with gaps is not supported. */ | |
2169 | slp_impossible = true; | |
b0af49c4 | 2170 | if (DR_IS_WRITE (data_ref)) |
ebfd146a | 2171 | { |
73fbfcad | 2172 | if (dump_enabled_p ()) |
e645e942 TJ |
2173 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2174 | "interleaved store with gaps\n"); | |
ebfd146a IR |
2175 | return false; |
2176 | } | |
4da39468 IR |
2177 | |
2178 | gaps += diff - 1; | |
ebfd146a IR |
2179 | } |
2180 | ||
48df3fa6 IR |
2181 | last_accessed_element += diff; |
2182 | ||
ebfd146a | 2183 | /* Store the gap from the previous member of the group. If there is no |
e14c1050 IR |
2184 | gap in the access, GROUP_GAP is always 1. */ |
2185 | GROUP_GAP (vinfo_for_stmt (next)) = diff; | |
ebfd146a IR |
2186 | |
2187 | prev_init = DR_INIT (data_ref); | |
e14c1050 | 2188 | next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); |
ebfd146a IR |
2189 | /* Count the number of data-refs in the chain. */ |
2190 | count++; | |
2191 | } | |
2192 | ||
2193 | /* COUNT is the number of accesses found; we multiply it by the size of | |
2194 | the type to get COUNT_IN_BYTES. */ | |
2195 | count_in_bytes = type_size * count; | |
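      /* An illustrative sketch, not part of the original comment:
	 three 4-byte group members give count_in_bytes = 12; with one
	 gap element (gaps = 1) the group needs 12 + 1 * 4 = 16 bytes,
	 so a step of 16 passes the check below, while a step of 12
	 would mean the interleaving overruns the step and the group
	 is rejected.  */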
2196 | ||
b8698a0f | 2197 | /* Check that the size of the interleaving (including gaps) is not |
a70d6342 | 2198 | greater than STEP. */ |
08940f33 RB |
2199 | if (dr_step != 0 |
2200 | && absu_hwi (dr_step) < count_in_bytes + gaps * type_size) | |
ebfd146a | 2201 | { |
73fbfcad | 2202 | if (dump_enabled_p ()) |
ebfd146a | 2203 | { |
e645e942 | 2204 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78c60e3d | 2205 | "interleaving size is greater than step for "); |
e645e942 TJ |
2206 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, |
2207 | DR_REF (dr)); | |
2208 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); | |
ebfd146a IR |
2209 | } |
2210 | return false; | |
2211 | } | |
2212 | ||
2213 | /* Check that the size of the interleaving is equal to STEP for stores, | |
2214 | i.e., that there are no gaps. */ | |
08940f33 RB |
2215 | if (dr_step != 0 |
2216 | && absu_hwi (dr_step) != count_in_bytes) | |
ebfd146a IR |
2217 | { |
2218 | if (DR_IS_READ (dr)) | |
2219 | { | |
2220 | slp_impossible = true; | |
2221 | /* There is a gap after the last load in the group. This gap is the | |
0d0293ac MM |
2222 | difference between the groupsize and the number of elements. | |
2223 | When there is no gap, this difference should be 0. */ | |
2224 | GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count; | |
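	      /* An illustrative sketch, not part of the original
		 comment: groupsize = 4 with only count = 3 loads
		 present leaves GROUP_GAP = 1, i.e. one element at the
		 end of each group is never read.  */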
ebfd146a IR |
2225 | } |
2226 | else | |
2227 | { | |
73fbfcad | 2228 | if (dump_enabled_p ()) |
e645e942 TJ |
2229 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2230 | "interleaved store with gaps\n"); | |
ebfd146a IR |
2231 | return false; |
2232 | } | |
2233 | } | |
2234 | ||
2235 | /* Check that STEP is a multiple of type size. */ | |
08940f33 RB |
2236 | if (dr_step != 0 |
2237 | && (dr_step % type_size) != 0) | |
ebfd146a | 2238 | { |
73fbfcad | 2239 | if (dump_enabled_p ()) |
ebfd146a | 2240 | { |
78c60e3d SS |
2241 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2242 | "step is not a multiple of type size: step "); | |
2243 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step); | |
2244 | dump_printf (MSG_MISSED_OPTIMIZATION, " size "); | |
2245 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, | |
2246 | TYPE_SIZE_UNIT (scalar_type)); | |
e645e942 | 2247 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
ebfd146a IR |
2248 | } |
2249 | return false; | |
2250 | } | |
2251 | ||
0d0293ac MM |
2252 | if (groupsize == 0) |
2253 | groupsize = count; | |
b8698a0f | 2254 | |
0d0293ac | 2255 | GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; |
73fbfcad | 2256 | if (dump_enabled_p ()) |
e645e942 TJ |
2257 | dump_printf_loc (MSG_NOTE, vect_location, |
2258 | "Detected interleaving of size %d\n", (int)groupsize); | |
ebfd146a | 2259 | |
b8698a0f | 2260 | /* SLP: create an SLP data structure for every interleaving group of |
ebfd146a | 2261 | stores for further analysis in vect_analyze_slp. */ |
b0af49c4 | 2262 | if (DR_IS_WRITE (dr) && !slp_impossible) |
a70d6342 IR |
2263 | { |
2264 | if (loop_vinfo) | |
9771b263 | 2265 | LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt); |
a70d6342 | 2266 | if (bb_vinfo) |
9771b263 | 2267 | BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt); |
a70d6342 | 2268 | } |
48df3fa6 IR |
2269 | |
2270 | /* There is a gap in the end of the group. */ | |
0d0293ac | 2271 | if (groupsize - last_accessed_element > 0 && loop_vinfo) |
48df3fa6 | 2272 | { |
73fbfcad | 2273 | if (dump_enabled_p ()) |
78c60e3d | 2274 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 TJ |
2275 | "Data access with gaps requires scalar " |
2276 | "epilogue loop\n"); | |
deaf836c IR |
2277 | if (loop->inner) |
2278 | { | |
73fbfcad | 2279 | if (dump_enabled_p ()) |
e645e942 TJ |
2280 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2281 | "Peeling for outer loop is not supported\n"); | |
deaf836c IR |
2282 | return false; |
2283 | } | |
2284 | ||
2285 | LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true; | |
48df3fa6 | 2286 | } |
ebfd146a IR |
2287 | } |
2288 | ||
2289 | return true; | |
2290 | } | |
2291 | ||
2292 | ||
2293 | /* Analyze the access pattern of the data-reference DR. | |
2294 | In case of non-consecutive accesses call vect_analyze_group_access() to | |
0d0293ac | 2295 | analyze groups of accesses. */ |
ebfd146a IR |
2296 | |
2297 | static bool | |
2298 | vect_analyze_data_ref_access (struct data_reference *dr) | |
2299 | { | |
2300 | tree step = DR_STEP (dr); | |
2301 | tree scalar_type = TREE_TYPE (DR_REF (dr)); | |
2302 | gimple stmt = DR_STMT (dr); | |
2303 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2304 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
a70d6342 | 2305 | struct loop *loop = NULL; |
ebfd146a | 2306 | |
a70d6342 IR |
2307 | if (loop_vinfo) |
2308 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
b8698a0f | 2309 | |
a70d6342 | 2310 | if (loop_vinfo && !step) |
ebfd146a | 2311 | { |
73fbfcad | 2312 | if (dump_enabled_p ()) |
e645e942 TJ |
2313 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2314 | "bad data-ref access in loop\n"); | |
ebfd146a IR |
2315 | return false; |
2316 | } | |
2317 | ||
6e8dad05 | 2318 | /* Allow invariant loads in non-nested loops. */ | |
319e6439 | 2319 | if (loop_vinfo && integer_zerop (step)) |
39becbac RG |
2320 | { |
2321 | GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; | |
6e8dad05 RB |
2322 | if (nested_in_vect_loop_p (loop, stmt)) |
2323 | { | |
2324 | if (dump_enabled_p ()) | |
2325 | dump_printf_loc (MSG_NOTE, vect_location, | |
e645e942 | 2326 | "zero step in inner loop of nest\n"); |
6e8dad05 RB |
2327 | return false; |
2328 | } | |
39becbac RG |
2329 | return DR_IS_READ (dr); |
2330 | } | |
ebfd146a | 2331 | |
a70d6342 | 2332 | if (loop && nested_in_vect_loop_p (loop, stmt)) |
ebfd146a IR |
2333 | { |
2334 | /* Interleaved accesses are not yet supported within outer-loop | |
2335 | vectorization for references in the inner-loop. */ | |
e14c1050 | 2336 | GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; |
ebfd146a IR |
2337 | |
2338 | /* For the rest of the analysis we use the outer-loop step. */ | |
2339 | step = STMT_VINFO_DR_STEP (stmt_info); | |
319e6439 | 2340 | if (integer_zerop (step)) |
ebfd146a | 2341 | { |
73fbfcad | 2342 | if (dump_enabled_p ()) |
78c60e3d | 2343 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 2344 | "zero step in outer loop.\n"); |
ebfd146a | 2345 | if (DR_IS_READ (dr)) |
b8698a0f | 2346 | return true; |
ebfd146a IR |
2347 | else |
2348 | return false; | |
2349 | } | |
2350 | } | |
2351 | ||
2352 | /* Consecutive? */ | |
319e6439 | 2353 | if (TREE_CODE (step) == INTEGER_CST) |
ebfd146a | 2354 | { |
319e6439 RG |
2355 | HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); |
2356 | if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)) | |
2357 | || (dr_step < 0 | |
2358 | && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step))) | |
2359 | { | |
2360 | /* Mark that it is not interleaving. */ | |
2361 | GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; | |
2362 | return true; | |
2363 | } | |
ebfd146a IR |
2364 | } |
2365 | ||
a70d6342 | 2366 | if (loop && nested_in_vect_loop_p (loop, stmt)) |
ebfd146a | 2367 | { |
73fbfcad | 2368 | if (dump_enabled_p ()) |
78c60e3d | 2369 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 2370 | "grouped access in outer loop.\n"); |
ebfd146a IR |
2371 | return false; |
2372 | } | |
2373 | ||
319e6439 RG |
2374 | /* Assume this is a DR handled by the non-constant strided-load case. */ | |
2375 | if (TREE_CODE (step) != INTEGER_CST) | |
2376 | return STMT_VINFO_STRIDE_LOAD_P (stmt_info); | |
2377 | ||
ebfd146a IR |
2378 | /* Non-consecutive access - check whether it is part of an interleaving group. */ | |
2379 | return vect_analyze_group_access (dr); | |
2380 | } | |
2381 | ||
839c74bc CH |
2382 | |
2383 | ||
2384 | /* A helper function used in the comparator function to sort data | |
2385 | references. T1 and T2 are two data references to be compared. | |
2386 | The function returns -1, 0, or 1. */ | |
2387 | ||
2388 | static int | |
2389 | compare_tree (tree t1, tree t2) | |
2390 | { | |
2391 | int i, cmp; | |
2392 | enum tree_code code; | |
2393 | char tclass; | |
2394 | ||
2395 | if (t1 == t2) | |
2396 | return 0; | |
2397 | if (t1 == NULL) | |
2398 | return -1; | |
2399 | if (t2 == NULL) | |
2400 | return 1; | |
2401 | ||
2402 | ||
2403 | if (TREE_CODE (t1) != TREE_CODE (t2)) | |
2404 | return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1; | |
2405 | ||
2406 | code = TREE_CODE (t1); | |
2407 | switch (code) | |
2408 | { | |
2409 | /* For const values, we can just use hash values for comparisons. */ | |
2410 | case INTEGER_CST: | |
2411 | case REAL_CST: | |
2412 | case FIXED_CST: | |
2413 | case STRING_CST: | |
2414 | case COMPLEX_CST: | |
2415 | case VECTOR_CST: | |
2416 | { | |
2417 | hashval_t h1 = iterative_hash_expr (t1, 0); | |
2418 | hashval_t h2 = iterative_hash_expr (t2, 0); | |
2419 | if (h1 != h2) | |
2420 | return h1 < h2 ? -1 : 1; | |
2421 | break; | |
2422 | } | |
2423 | ||
2424 | case SSA_NAME: | |
2425 | cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2)); | |
2426 | if (cmp != 0) | |
2427 | return cmp; | |
2428 | ||
2429 | if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2)) | |
2430 | return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1; | |
2431 | break; | |
2432 | ||
2433 | default: | |
2434 | tclass = TREE_CODE_CLASS (code); | |
2435 | ||
2436 | /* For var-decl, we could compare their UIDs. */ | |
2437 | if (tclass == tcc_declaration) | |
2438 | { | |
2439 | if (DECL_UID (t1) != DECL_UID (t2)) | |
2440 | return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1; | |
2441 | break; | |
2442 | } | |
2443 | ||
2444 | /* For expressions with operands, compare their operands recursively. */ | |
2445 | for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i) | |
2446 | { | |
2447 | cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)); | |
2448 | if (cmp != 0) | |
2449 | return cmp; | |
2450 | } | |
2451 | } | |
2452 | ||
2453 | return 0; | |
2454 | } | |
2455 | ||
2456 | ||
5abe1e05 RB |
2457 | /* Compare two data-references DRA and DRB to sort them into chunks | |
2458 | suitable for grouping. */ | |
2459 | ||
2460 | static int | |
2461 | dr_group_sort_cmp (const void *dra_, const void *drb_) | |
2462 | { | |
2463 | data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_); | |
2464 | data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_); | |
5abe1e05 RB |
2465 | int cmp; |
2466 | ||
2467 | /* Stabilize sort. */ | |
2468 | if (dra == drb) | |
2469 | return 0; | |
2470 | ||
2471 | /* Ordering of DRs according to base. */ | |
2472 | if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)) | |
2473 | { | |
839c74bc CH |
2474 | cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb)); |
2475 | if (cmp != 0) | |
2476 | return cmp; | |
5abe1e05 RB |
2477 | } |
2478 | ||
2479 | /* And according to DR_OFFSET. */ | |
2480 | if (!dr_equal_offsets_p (dra, drb)) | |
2481 | { | |
839c74bc CH |
2482 | cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)); |
2483 | if (cmp != 0) | |
2484 | return cmp; | |
5abe1e05 RB |
2485 | } |
2486 | ||
2487 | /* Put reads before writes. */ | |
2488 | if (DR_IS_READ (dra) != DR_IS_READ (drb)) | |
2489 | return DR_IS_READ (dra) ? -1 : 1; | |
2490 | ||
2491 | /* Then sort after access size. */ | |
2492 | if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))), | |
2493 | TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0)) | |
2494 | { | |
839c74bc CH |
2495 | cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))), |
2496 | TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); | |
2497 | if (cmp != 0) | |
2498 | return cmp; | |
5abe1e05 RB |
2499 | } |
2500 | ||
2501 | /* And after step. */ | |
2502 | if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0)) | |
2503 | { | |
839c74bc CH |
2504 | cmp = compare_tree (DR_STEP (dra), DR_STEP (drb)); |
2505 | if (cmp != 0) | |
2506 | return cmp; | |
5abe1e05 RB |
2507 | } |
2508 | ||
2509 | /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */ | |
2510 | cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)); | |
2511 | if (cmp == 0) | |
2512 | return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1; | |
2513 | return cmp; | |
2514 | } | |
ebfd146a IR |
2515 | |
2516 | /* Function vect_analyze_data_ref_accesses. | |
2517 | ||
2518 | Analyze the access pattern of all the data references in the loop. | |
2519 | ||
2520 | FORNOW: the only access pattern that is considered vectorizable is a | |
2521 | simple step 1 (consecutive) access. | |
2522 | ||
2523 | FORNOW: handle only arrays and pointer accesses. */ | |
2524 | ||
2525 | bool | |
a70d6342 | 2526 | vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) |
ebfd146a IR |
2527 | { |
2528 | unsigned int i; | |
9771b263 | 2529 | vec<data_reference_p> datarefs; |
ebfd146a IR |
2530 | struct data_reference *dr; |
2531 | ||
73fbfcad | 2532 | if (dump_enabled_p ()) |
78c60e3d | 2533 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 2534 | "=== vect_analyze_data_ref_accesses ===\n"); |
ebfd146a | 2535 | |
a70d6342 IR |
2536 | if (loop_vinfo) |
2537 | datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); | |
2538 | else | |
2539 | datarefs = BB_VINFO_DATAREFS (bb_vinfo); | |
2540 | ||
5abe1e05 RB |
2541 | if (datarefs.is_empty ()) |
2542 | return true; | |
2543 | ||
2544 | /* Sort the array of datarefs to make building the interleaving chains | |
3d54b29d JJ |
2545 | linear. Don't modify the original vector's order; it is needed for | |
2546 | determining what dependencies are reversed. */ | |
2547 | vec<data_reference_p> datarefs_copy = datarefs.copy (); | |
75509ba2 | 2548 | datarefs_copy.qsort (dr_group_sort_cmp); |
5abe1e05 RB |
2549 | |
2550 | /* Build the interleaving chains. */ | |
3d54b29d | 2551 | for (i = 0; i < datarefs_copy.length () - 1;) |
5abe1e05 | 2552 | { |
3d54b29d | 2553 | data_reference_p dra = datarefs_copy[i]; |
5abe1e05 RB |
2554 | stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra)); |
2555 | stmt_vec_info lastinfo = NULL; | |
3d54b29d | 2556 | for (i = i + 1; i < datarefs_copy.length (); ++i) |
5abe1e05 | 2557 | { |
3d54b29d | 2558 | data_reference_p drb = datarefs_copy[i]; |
5abe1e05 RB |
2559 | stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb)); |
2560 | ||
2561 | /* ??? Imperfect sorting (non-compatible types, non-modulo | |
2562 | accesses, same accesses) can lead to a group being artificially | |
2563 | split here, as we don't just skip over those. If it really | |
2564 | matters, we can push those to a worklist and re-iterate | |
2565 | over them. Then we can just skip ahead to the next DR here. */ | |
2566 | ||
2567 | /* Check that the data-refs have the same first location (except init) | |
61331c48 JJ |
2568 | and that they are both either stores or loads (not a load and a store, | |
2569 | and not masked loads or stores). */ | |
5abe1e05 RB |
2570 | if (DR_IS_READ (dra) != DR_IS_READ (drb) |
2571 | || !operand_equal_p (DR_BASE_ADDRESS (dra), | |
2572 | DR_BASE_ADDRESS (drb), 0) | |
61331c48 JJ |
2573 | || !dr_equal_offsets_p (dra, drb) |
2574 | || !gimple_assign_single_p (DR_STMT (dra)) | |
2575 | || !gimple_assign_single_p (DR_STMT (drb))) | |
5abe1e05 RB |
2576 | break; |
2577 | ||
2578 | /* Check that the data-refs have the same constant size and step. */ | |
2579 | tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))); | |
2580 | tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))); | |
cc269bb6 RS |
2581 | if (!tree_fits_uhwi_p (sza) |
2582 | || !tree_fits_uhwi_p (szb) | |
5abe1e05 | 2583 | || !tree_int_cst_equal (sza, szb) |
9541ffee RS |
2584 | || !tree_fits_shwi_p (DR_STEP (dra)) |
2585 | || !tree_fits_shwi_p (DR_STEP (drb)) | |
5abe1e05 RB |
2586 | || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb))) |
2587 | break; | |
2588 | ||
2589 | /* Do not place the same access in the interleaving chain twice. */ | |
2590 | if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0) | |
2591 | break; | |
2592 | ||
2593 | /* Check the types are compatible. | |
2594 | ??? We don't distinguish this during sorting. */ | |
2595 | if (!types_compatible_p (TREE_TYPE (DR_REF (dra)), | |
2596 | TREE_TYPE (DR_REF (drb)))) | |
2597 | break; | |
2598 | ||
2599 | /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */ | |
2600 | HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra)); | |
2601 | HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb)); | |
2602 | gcc_assert (init_a < init_b); | |
2603 | ||
2604 | /* If init_b == init_a + the size of the type * k, we have an | |
2605 | interleaving, and DRA is accessed before DRB. */ | |
eb1ce453 | 2606 | HOST_WIDE_INT type_size_a = tree_to_uhwi (sza); |
5abe1e05 RB |
2607 | if ((init_b - init_a) % type_size_a != 0) |
2608 | break; | |
2609 | ||
2610 | /* Require the step (if not zero) to be greater than the difference | |
2611 | between the data-refs' inits; this splits groups into suitable sizes. */ | |
eb1ce453 | 2612 | HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra)); |
5abe1e05 RB |
2613 | if (step != 0 && step <= (init_b - init_a)) |
2614 | break; | |
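	  /* An illustrative sketch, not part of the original comment:
	     4-byte accesses with inits 0 and 16 and step 16 satisfy
	     16 <= 16 - 0, so DRB starts a new group (it is the location
	     DRA touches in the next iteration) and the chain is split
	     here rather than extended.  */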
2615 | ||
2616 | if (dump_enabled_p ()) | |
2617 | { | |
2618 | dump_printf_loc (MSG_NOTE, vect_location, | |
2619 | "Detected interleaving "); | |
2620 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra)); | |
2621 | dump_printf (MSG_NOTE, " and "); | |
2622 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb)); | |
e645e942 | 2623 | dump_printf (MSG_NOTE, "\n"); |
5abe1e05 RB |
2624 | } |
2625 | ||
2626 | /* Link the found element into the group list. */ | |
2627 | if (!GROUP_FIRST_ELEMENT (stmtinfo_a)) | |
2628 | { | |
2629 | GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra); | |
2630 | lastinfo = stmtinfo_a; | |
2631 | } | |
2632 | GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra); | |
2633 | GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb); | |
2634 | lastinfo = stmtinfo_b; | |
2635 | } | |
2636 | } | |
2637 | ||
3d54b29d | 2638 | FOR_EACH_VEC_ELT (datarefs_copy, i, dr) |
4b5caab7 IR |
2639 | if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) |
2640 | && !vect_analyze_data_ref_access (dr)) | |
ebfd146a | 2641 | { |
73fbfcad | 2642 | if (dump_enabled_p ()) |
e645e942 TJ |
2643 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2644 | "not vectorized: complicated access pattern.\n"); | |
4b5caab7 IR |
2645 | |
2646 | if (bb_vinfo) | |
2647 | { | |
2648 | /* Mark the statement as not vectorizable. */ | |
2649 | STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; | |
2650 | continue; | |
2651 | } | |
2652 | else | |
3d54b29d JJ |
2653 | { |
2654 | datarefs_copy.release (); | |
2655 | return false; | |
2656 | } | |
ebfd146a IR |
2657 | } |
2658 | ||
3d54b29d | 2659 | datarefs_copy.release (); |
ebfd146a IR |
2660 | return true; |
2661 | } | |
2662 | ||
a05a89fa | 2663 | |
93bdc3ed | 2664 | /* Operator == between two dr_with_seg_len objects. |
a05a89fa CH |
2665 | |
2666 | This equality operator is used to make sure two data refs | |
2667 | are the same, so that we will consider combining the | |
2668 | aliasing checks of those two pairs of data-dependent data | |
2669 | refs. */ | |
2670 | ||
2671 | static bool | |
93bdc3ed CH |
2672 | operator == (const dr_with_seg_len& d1, |
2673 | const dr_with_seg_len& d2) | |
a05a89fa | 2674 | { |
93bdc3ed CH |
2675 | return operand_equal_p (DR_BASE_ADDRESS (d1.dr), |
2676 | DR_BASE_ADDRESS (d2.dr), 0) | |
2677 | && compare_tree (d1.offset, d2.offset) == 0 | |
2678 | && compare_tree (d1.seg_len, d2.seg_len) == 0; | |
a05a89fa CH |
2679 | } |
2680 | ||
93bdc3ed | 2681 | /* Function comp_dr_with_seg_len_pair. |
a05a89fa | 2682 | |
93bdc3ed | 2683 | Comparison function for sorting objects of dr_with_seg_len_pair_t |
a05a89fa CH |
2684 | so that we can combine aliasing checks in one scan. */ |
2685 | ||
2686 | static int | |
93bdc3ed | 2687 | comp_dr_with_seg_len_pair (const void *p1_, const void *p2_) |
a05a89fa | 2688 | { |
93bdc3ed CH |
2689 | const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_; |
2690 | const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_; | |
2691 | ||
2692 | const dr_with_seg_len &p11 = p1->first, | |
2693 | &p12 = p1->second, | |
2694 | &p21 = p2->first, | |
2695 | &p22 = p2->second; | |
2696 | ||
2697 | /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks |
2698 | if a and c have the same base address and step, and b and d have the same |
2699 | address and step. Therefore, if either a&c or b&d don't have the same address |
2700 | and step, we don't care about the order of those two pairs after sorting. */ |
2701 | int comp_res; | |
2702 | ||
2703 | if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr), | |
2704 | DR_BASE_ADDRESS (p21.dr))) != 0) | |
a05a89fa | 2705 | return comp_res; |
93bdc3ed CH |
2706 | if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr), |
2707 | DR_BASE_ADDRESS (p22.dr))) != 0) | |
2708 | return comp_res; | |
2709 | if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0) | |
2710 | return comp_res; | |
2711 | if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0) | |
2712 | return comp_res; | |
2713 | if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0) | |
2714 | return comp_res; | |
2715 | if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0) | |
a05a89fa | 2716 | return comp_res; |
a05a89fa CH |
2717 | |
2718 | return 0; | |
2719 | } | |
2720 | ||
a05a89fa CH |
2721 | /* Function vect_vfa_segment_size. |
2722 | ||
2723 | Create an expression that computes the size of the segment |
2724 | that will be accessed for a data reference. The function takes into |
2725 | account that realignment loads may access one more vector. | |
2726 | ||
2727 | Input: | |
2728 | DR: The data reference. | |
2729 | LENGTH_FACTOR: segment length to consider. | |
2730 | ||
2731 | Return an expression whose value is the size of segment which will be | |
2732 | accessed by DR. */ | |
2733 | ||
2734 | static tree | |
2735 | vect_vfa_segment_size (struct data_reference *dr, tree length_factor) | |
2736 | { | |
2737 | tree segment_length; | |
2738 | ||
2739 | if (integer_zerop (DR_STEP (dr))) | |
2740 | segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); | |
2741 | else | |
2742 | segment_length = size_binop (MULT_EXPR, | |
93bdc3ed CH |
2743 | fold_convert (sizetype, DR_STEP (dr)), |
2744 | fold_convert (sizetype, length_factor)); | |
a05a89fa CH |
2745 | |
2746 | if (vect_supportable_dr_alignment (dr, false) | |
93bdc3ed | 2747 | == dr_explicit_realign_optimized) |
a05a89fa CH |
2748 | { |
2749 | tree vector_size = TYPE_SIZE_UNIT | |
2750 | (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)))); | |
2751 | ||
2752 | segment_length = size_binop (PLUS_EXPR, segment_length, vector_size); | |
2753 | } | |
2754 | return segment_length; | |
2755 | } | |
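/* A worked example for vect_vfa_segment_size (added illustration; the
   numbers are assumed): for an int data-ref with DR_STEP == 4 and
   LENGTH_FACTOR == 8 (e.g. the vectorization factor), the segment is
   4 * 8 == 32 bytes; if the access uses dr_explicit_realign_optimized
   with a 16-byte vector type, one more vector may be read, giving
   32 + 16 == 48 bytes.  */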
2756 | ||
ebfd146a IR |
2757 | /* Function vect_prune_runtime_alias_test_list. |
2758 | ||
2759 | Prune a list of ddrs to be tested at run-time by versioning for alias. | |
a05a89fa | 2760 | Merge several alias checks into one if possible. |
ebfd146a IR |
2761 | Return FALSE if the resulting list of ddrs is longer than allowed by |
2762 | PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */ | |
2763 | ||
2764 | bool | |
2765 | vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo) | |
2766 | { | |
a05a89fa | 2767 | vec<ddr_p> may_alias_ddrs = |
ebfd146a | 2768 | LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo); |
93bdc3ed | 2769 | vec<dr_with_seg_len_pair_t>& comp_alias_ddrs = |
a05a89fa CH |
2770 | LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo); |
2771 | int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); | |
2772 | tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo); | |
2773 | ||
2774 | ddr_p ddr; | |
2775 | unsigned int i; | |
2776 | tree length_factor; | |
ebfd146a | 2777 | |
73fbfcad | 2778 | if (dump_enabled_p ()) |
78c60e3d | 2779 | dump_printf_loc (MSG_NOTE, vect_location, |
e645e942 | 2780 | "=== vect_prune_runtime_alias_test_list ===\n"); |
ebfd146a | 2781 | |
a05a89fa CH |
2782 | if (may_alias_ddrs.is_empty ()) |
2783 | return true; | |
2784 | ||
2785 | /* Basically, for each pair of dependent data refs store_ptr_0 | |
2786 | and load_ptr_0, we create an expression: | |
2787 | ||
2788 | ((store_ptr_0 + store_segment_length_0) <= load_ptr_0) | |
2789 | || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0) |
2790 | ||
2791 | for aliasing checks. However, in some cases we can decrease | |
2792 | the number of checks by combining two checks into one. For | |
2793 | example, suppose we have another pair of data refs store_ptr_0 | |
2794 | and load_ptr_1, and if the following condition is satisfied: | |
2795 | ||
2796 | load_ptr_0 < load_ptr_1 && | |
2797 | load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0 | |
2798 | ||
2799 | (this condition means that, in each iteration of the vectorized loop, |
2800 | the accessed memory of store_ptr_0 cannot be between the memory | |
2801 | of load_ptr_0 and load_ptr_1.) | |
2802 | ||
2803 | we can then use only the following expression to finish the |
2804 | aliasing checks between store_ptr_0 & load_ptr_0 and |
2805 | store_ptr_0 & load_ptr_1: | |
2806 | ||
2807 | ((store_ptr_0 + store_segment_length_0) <= load_ptr_0) | |
2808 | || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0) |
2809 | ||
2810 | Note that we only consider merging when load_ptr_0 and load_ptr_1 have the |
2811 | same base address. */ |
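/* A numeric sketch of the merge (added; the values are hypothetical):
   with load_ptr_1 == load_ptr_0 + 16, load_segment_length_0 == 16 and
   store_segment_length_0 == 64, the condition above holds
   (16 - 16 == 0 < 64), so the two checks against store_ptr_0 collapse
   into the single range test shown above.  */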
2812 | ||
2813 | comp_alias_ddrs.create (may_alias_ddrs.length ()); | |
2814 | ||
2815 | /* First, we collect all data ref pairs for aliasing checks. */ | |
2816 | FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr) | |
ebfd146a | 2817 | { |
a05a89fa CH |
2818 | struct data_reference *dr_a, *dr_b; |
2819 | gimple dr_group_first_a, dr_group_first_b; | |
2820 | tree segment_length_a, segment_length_b; | |
2821 | gimple stmt_a, stmt_b; | |
2822 | ||
2823 | dr_a = DDR_A (ddr); | |
2824 | stmt_a = DR_STMT (DDR_A (ddr)); | |
2825 | dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a)); | |
2826 | if (dr_group_first_a) | |
2827 | { | |
2828 | stmt_a = dr_group_first_a; | |
2829 | dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a)); | |
2830 | } | |
ebfd146a | 2831 | |
a05a89fa CH |
2832 | dr_b = DDR_B (ddr); |
2833 | stmt_b = DR_STMT (DDR_B (ddr)); | |
2834 | dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b)); | |
2835 | if (dr_group_first_b) | |
2836 | { | |
2837 | stmt_b = dr_group_first_b; | |
2838 | dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b)); | |
2839 | } | |
ebfd146a | 2840 | |
a05a89fa CH |
2841 | if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)) |
2842 | length_factor = scalar_loop_iters; | |
2843 | else | |
2844 | length_factor = size_int (vect_factor); | |
2845 | segment_length_a = vect_vfa_segment_size (dr_a, length_factor); | |
2846 | segment_length_b = vect_vfa_segment_size (dr_b, length_factor); | |
2847 | ||
93bdc3ed CH |
2848 | dr_with_seg_len_pair_t dr_with_seg_len_pair |
2849 | (dr_with_seg_len (dr_a, segment_length_a), | |
2850 | dr_with_seg_len (dr_b, segment_length_b)); | |
2851 | ||
2852 | if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0) | |
9310366b | 2853 | std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second); |
a05a89fa CH |
2854 | |
2855 | comp_alias_ddrs.safe_push (dr_with_seg_len_pair); | |
2856 | } | |
2857 | ||
2858 | /* Second, we sort the collected data ref pairs so that we can scan | |
2859 | them once to combine all possible aliasing checks. */ | |
93bdc3ed | 2860 | comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair); |
ebfd146a | 2861 | |
a05a89fa CH |
2862 | /* Third, we scan the sorted dr pairs and check if we can combine |
2863 | alias checks of two neighbouring dr pairs. */ | |
2864 | for (size_t i = 1; i < comp_alias_ddrs.length (); ++i) | |
2865 | { | |
2866 | /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */ | |
93bdc3ed CH |
2867 | dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first, |
2868 | *dr_b1 = &comp_alias_ddrs[i-1].second, | |
2869 | *dr_a2 = &comp_alias_ddrs[i].first, | |
2870 | *dr_b2 = &comp_alias_ddrs[i].second; | |
a05a89fa CH |
2871 | |
2872 | /* Remove duplicate data ref pairs. */ | |
2873 | if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2) | |
2874 | { | |
2875 | if (dump_enabled_p ()) | |
ebfd146a | 2876 | { |
a05a89fa CH |
2877 | dump_printf_loc (MSG_NOTE, vect_location, |
2878 | "found equal ranges "); | |
2879 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2880 | DR_REF (dr_a1->dr)); | |
2881 | dump_printf (MSG_NOTE, ", "); | |
2882 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2883 | DR_REF (dr_b1->dr)); | |
2884 | dump_printf (MSG_NOTE, " and "); | |
2885 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2886 | DR_REF (dr_a2->dr)); | |
2887 | dump_printf (MSG_NOTE, ", "); | |
2888 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2889 | DR_REF (dr_b2->dr)); | |
2890 | dump_printf (MSG_NOTE, "\n"); | |
ebfd146a | 2891 | } |
a05a89fa CH |
2892 | |
2893 | comp_alias_ddrs.ordered_remove (i--); | |
2894 | continue; | |
ebfd146a | 2895 | } |
b8698a0f | 2896 | |
a05a89fa CH |
2897 | if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2) |
2898 | { | |
2899 | /* We consider the case that DR_B1 and DR_B2 are the same memrefs, |
2900 | and DR_A1 and DR_A2 are two consecutive memrefs. */ | |
2901 | if (*dr_a1 == *dr_a2) | |
2902 | { | |
9310366b UB |
2903 | std::swap (dr_a1, dr_b1); |
2904 | std::swap (dr_a2, dr_b2); | |
a05a89fa CH |
2905 | } |
2906 | ||
93bdc3ed CH |
2907 | if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr), |
2908 | DR_BASE_ADDRESS (dr_a2->dr), | |
2909 | 0) | |
9541ffee RS |
2910 | || !tree_fits_shwi_p (dr_a1->offset) |
2911 | || !tree_fits_shwi_p (dr_a2->offset)) | |
a05a89fa CH |
2912 | continue; |
2913 | ||
eb1ce453 KZ |
2914 | HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset) |
2915 | - tree_to_shwi (dr_a1->offset)); | |
a05a89fa CH |
2916 | |
2917 | ||
2918 | /* Now we check if the following condition is satisfied: | |
2919 | ||
2920 | DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B | |
2921 | ||
2922 | where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However, | |
2923 | SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we | |
2924 | have to make a best estimate. We can get the minimum value |
2925 | of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B; |
2926 | then either of the following two conditions guarantees the |
2927 | one above: | |
2928 | ||
2929 | 1: DIFF <= MIN_SEG_LEN_B | |
2930 | 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B | |
2931 | ||
2932 | */ | |
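/* Example with assumed values (added illustration): DIFF == 16 with
   MIN_SEG_LEN_B == 16 satisfies condition 1 (16 <= 16); DIFF == 48 with
   SEGMENT_LENGTH_A == 64 and MIN_SEG_LEN_B == 16 satisfies condition 2
   (48 - 64 == -16 < 16); in either case the two ranges may be merged.  */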
2933 | ||
807e902e KZ |
2934 | HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len) |
2935 | ? tree_to_shwi (dr_b1->seg_len) | |
2936 | : vect_factor); | |
a05a89fa CH |
2937 | |
2938 | if (diff <= min_seg_len_b | |
807e902e KZ |
2939 | || (tree_fits_shwi_p (dr_a1->seg_len) |
2940 | && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b)) | |
a05a89fa | 2941 | { |
d55d9ed0 RB |
2942 | if (dump_enabled_p ()) |
2943 | { | |
2944 | dump_printf_loc (MSG_NOTE, vect_location, | |
2945 | "merging ranges for "); | |
2946 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2947 | DR_REF (dr_a1->dr)); | |
2948 | dump_printf (MSG_NOTE, ", "); | |
2949 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2950 | DR_REF (dr_b1->dr)); | |
2951 | dump_printf (MSG_NOTE, " and "); | |
2952 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2953 | DR_REF (dr_a2->dr)); | |
2954 | dump_printf (MSG_NOTE, ", "); | |
2955 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
2956 | DR_REF (dr_b2->dr)); | |
2957 | dump_printf (MSG_NOTE, "\n"); | |
2958 | } | |
2959 | ||
a05a89fa CH |
2960 | dr_a1->seg_len = size_binop (PLUS_EXPR, |
2961 | dr_a2->seg_len, size_int (diff)); | |
2962 | comp_alias_ddrs.ordered_remove (i--); | |
2963 | } | |
2964 | } | |
ebfd146a IR |
2965 | } |
2966 | ||
d55d9ed0 RB |
2967 | dump_printf_loc (MSG_NOTE, vect_location, |
2968 | "improved number of alias checks from %d to %d\n", | |
2969 | may_alias_ddrs.length (), comp_alias_ddrs.length ()); | |
a05a89fa CH |
2970 | if ((int) comp_alias_ddrs.length () > |
2971 | PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS)) | |
d55d9ed0 | 2972 | return false; |
ebfd146a IR |
2973 | |
2974 | return true; | |
2975 | } | |
2976 | ||
aec7ae7d JJ |
2977 | /* Check whether a non-affine read in STMT is suitable for a gather load |
2978 | and if so, return a builtin decl for that operation. */ | |
2979 | ||
2980 | tree | |
2981 | vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, | |
2982 | tree *offp, int *scalep) | |
2983 | { | |
2984 | HOST_WIDE_INT scale = 1, pbitpos, pbitsize; | |
2985 | struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); | |
2986 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
2987 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
2988 | tree offtype = NULL_TREE; | |
2989 | tree decl, base, off; | |
ef4bddc2 | 2990 | machine_mode pmode; |
aec7ae7d JJ |
2991 | int punsignedp, pvolatilep; |
2992 | ||
5ce9450f JJ |
2993 | base = DR_REF (dr); |
2994 | /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF, | |
2995 | see if we can use the def stmt of the address. */ | |
2996 | if (is_gimple_call (stmt) | |
2997 | && gimple_call_internal_p (stmt) | |
2998 | && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD | |
2999 | || gimple_call_internal_fn (stmt) == IFN_MASK_STORE) | |
3000 | && TREE_CODE (base) == MEM_REF | |
3001 | && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME | |
3002 | && integer_zerop (TREE_OPERAND (base, 1)) | |
3003 | && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0))) | |
3004 | { | |
3005 | gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0)); | |
3006 | if (is_gimple_assign (def_stmt) | |
3007 | && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR) | |
3008 | base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0); | |
3009 | } | |
3010 | ||
aec7ae7d JJ |
3011 | /* The gather builtins need an address of the form |
3012 | loop_invariant + vector * {1, 2, 4, 8} | |
3013 | or | |
3014 | loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }. | |
3015 | Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture | |
3016 | of loop invariants/SSA_NAMEs defined in the loop, with casts, | |
3017 | multiplications and additions in it. To get a vector, we need | |
3018 | a single SSA_NAME that will be defined in the loop and will | |
3019 | contain everything that is not loop invariant and that can be | |
3020 | vectorized. The following code attempts to find such a preexisting |
3021 | SSA_NAME OFF and put the loop invariants into a tree BASE | |
3022 | that can be gimplified before the loop. */ | |
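/* A hypothetical case (added illustration): for a read a[b[i]] with
   int a[] and b[], the decomposition below would end up with BASE == &a
   (loop invariant), OFF == the SSA_NAME holding b[i] (possibly
   sign-extended) and SCALE == 4, matching the
   loop_invariant + sign_extend (vector) * 4 form above.  */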
5ce9450f | 3023 | base = get_inner_reference (base, &pbitsize, &pbitpos, &off, |
b3ecff82 | 3024 | &pmode, &punsignedp, &pvolatilep, false); |
aec7ae7d JJ |
3025 | gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0); |
3026 | ||
3027 | if (TREE_CODE (base) == MEM_REF) | |
3028 | { | |
3029 | if (!integer_zerop (TREE_OPERAND (base, 1))) | |
3030 | { | |
3031 | if (off == NULL_TREE) | |
3032 | { | |
807e902e KZ |
3033 | offset_int moff = mem_ref_offset (base); |
3034 | off = wide_int_to_tree (sizetype, moff); | |
aec7ae7d JJ |
3035 | } |
3036 | else | |
3037 | off = size_binop (PLUS_EXPR, off, | |
3038 | fold_convert (sizetype, TREE_OPERAND (base, 1))); | |
3039 | } | |
3040 | base = TREE_OPERAND (base, 0); | |
3041 | } | |
3042 | else | |
3043 | base = build_fold_addr_expr (base); | |
3044 | ||
3045 | if (off == NULL_TREE) | |
3046 | off = size_zero_node; | |
3047 | ||
3048 | /* If base is not loop invariant, then either off is 0, in which case we |
3049 | start with just the constant offset in the loop invariant BASE and |
3050 | continue with base as OFF, or we give up. |
3051 | We could handle that case by gimplifying the addition of base + off |
3052 | into some SSA_NAME and using that as off, but for now punt. */ |
3053 | if (!expr_invariant_in_loop_p (loop, base)) | |
3054 | { | |
3055 | if (!integer_zerop (off)) | |
3056 | return NULL_TREE; | |
3057 | off = base; | |
3058 | base = size_int (pbitpos / BITS_PER_UNIT); | |
3059 | } | |
3060 | /* Otherwise put base + constant offset into the loop invariant BASE | |
3061 | and continue with OFF. */ | |
3062 | else | |
3063 | { | |
3064 | base = fold_convert (sizetype, base); | |
3065 | base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT)); | |
3066 | } | |
3067 | ||
3068 | /* OFF at this point may be either a SSA_NAME or some tree expression | |
3069 | from get_inner_reference. Try to peel off loop invariants from it | |
3070 | into BASE as long as possible. */ | |
3071 | STRIP_NOPS (off); | |
3072 | while (offtype == NULL_TREE) | |
3073 | { | |
3074 | enum tree_code code; | |
3075 | tree op0, op1, add = NULL_TREE; | |
3076 | ||
3077 | if (TREE_CODE (off) == SSA_NAME) | |
3078 | { | |
3079 | gimple def_stmt = SSA_NAME_DEF_STMT (off); | |
3080 | ||
3081 | if (expr_invariant_in_loop_p (loop, off)) | |
3082 | return NULL_TREE; | |
3083 | ||
3084 | if (gimple_code (def_stmt) != GIMPLE_ASSIGN) | |
3085 | break; | |
3086 | ||
3087 | op0 = gimple_assign_rhs1 (def_stmt); | |
3088 | code = gimple_assign_rhs_code (def_stmt); | |
3089 | op1 = gimple_assign_rhs2 (def_stmt); | |
3090 | } | |
3091 | else | |
3092 | { | |
3093 | if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS) | |
3094 | return NULL_TREE; | |
3095 | code = TREE_CODE (off); | |
3096 | extract_ops_from_tree (off, &code, &op0, &op1); | |
3097 | } | |
3098 | switch (code) | |
3099 | { | |
3100 | case POINTER_PLUS_EXPR: | |
3101 | case PLUS_EXPR: | |
3102 | if (expr_invariant_in_loop_p (loop, op0)) | |
3103 | { | |
3104 | add = op0; | |
3105 | off = op1; | |
3106 | do_add: | |
3107 | add = fold_convert (sizetype, add); | |
3108 | if (scale != 1) | |
3109 | add = size_binop (MULT_EXPR, add, size_int (scale)); | |
3110 | base = size_binop (PLUS_EXPR, base, add); | |
3111 | continue; | |
3112 | } | |
3113 | if (expr_invariant_in_loop_p (loop, op1)) | |
3114 | { | |
3115 | add = op1; | |
3116 | off = op0; | |
3117 | goto do_add; | |
3118 | } | |
3119 | break; | |
3120 | case MINUS_EXPR: | |
3121 | if (expr_invariant_in_loop_p (loop, op1)) | |
3122 | { | |
3123 | add = fold_convert (sizetype, op1); | |
3124 | add = size_binop (MINUS_EXPR, size_zero_node, add); | |
3125 | off = op0; | |
3126 | goto do_add; | |
3127 | } | |
3128 | break; | |
3129 | case MULT_EXPR: | |
9541ffee | 3130 | if (scale == 1 && tree_fits_shwi_p (op1)) |
aec7ae7d | 3131 | { |
9439e9a1 | 3132 | scale = tree_to_shwi (op1); |
aec7ae7d JJ |
3133 | off = op0; |
3134 | continue; | |
3135 | } | |
3136 | break; | |
3137 | case SSA_NAME: | |
3138 | off = op0; | |
3139 | continue; | |
3140 | CASE_CONVERT: | |
3141 | if (!POINTER_TYPE_P (TREE_TYPE (op0)) | |
3142 | && !INTEGRAL_TYPE_P (TREE_TYPE (op0))) | |
3143 | break; | |
3144 | if (TYPE_PRECISION (TREE_TYPE (op0)) | |
3145 | == TYPE_PRECISION (TREE_TYPE (off))) | |
3146 | { | |
3147 | off = op0; | |
3148 | continue; | |
3149 | } | |
3150 | if (TYPE_PRECISION (TREE_TYPE (op0)) | |
3151 | < TYPE_PRECISION (TREE_TYPE (off))) | |
3152 | { | |
3153 | off = op0; | |
3154 | offtype = TREE_TYPE (off); | |
3155 | STRIP_NOPS (off); | |
3156 | continue; | |
3157 | } | |
3158 | break; | |
3159 | default: | |
3160 | break; | |
3161 | } | |
3162 | break; | |
3163 | } | |
3164 | ||
3165 | /* If at the end OFF still isn't a SSA_NAME or isn't | |
3166 | defined in the loop, punt. */ | |
3167 | if (TREE_CODE (off) != SSA_NAME | |
3168 | || expr_invariant_in_loop_p (loop, off)) | |
3169 | return NULL_TREE; | |
3170 | ||
3171 | if (offtype == NULL_TREE) | |
3172 | offtype = TREE_TYPE (off); | |
3173 | ||
3174 | decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info), | |
3175 | offtype, scale); | |
3176 | if (decl == NULL_TREE) | |
3177 | return NULL_TREE; | |
3178 | ||
3179 | if (basep) | |
3180 | *basep = base; | |
3181 | if (offp) | |
3182 | *offp = off; | |
3183 | if (scalep) | |
3184 | *scalep = scale; | |
3185 | return decl; | |
3186 | } | |
3187 | ||
ebfd146a IR |
3188 | /* Function vect_analyze_data_refs. |
3189 | ||
a70d6342 | 3190 | Find all the data references in the loop or basic block. |
ebfd146a IR |
3191 | |
3192 | The general structure of the analysis of data refs in the vectorizer is as | |
3193 | follows: | |
b8698a0f | 3194 | 1- vect_analyze_data_refs(loop/bb): call |
a70d6342 IR |
3195 | compute_data_dependences_for_loop/bb to find and analyze all data-refs |
3196 | in the loop/bb and their dependences. | |
ebfd146a IR |
3197 | 2- vect_analyze_dependences(): apply dependence testing using ddrs. |
3198 | 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok. | |
3199 | 4- vect_analyze_drs_access(): check that ref_stmt.step is ok. | |
3200 | ||
3201 | */ | |
3202 | ||
3203 | bool | |
777e1f09 RG |
3204 | vect_analyze_data_refs (loop_vec_info loop_vinfo, |
3205 | bb_vec_info bb_vinfo, | |
1428105c | 3206 | int *min_vf, unsigned *n_stmts) |
ebfd146a | 3207 | { |
a70d6342 IR |
3208 | struct loop *loop = NULL; |
3209 | basic_block bb = NULL; | |
ebfd146a | 3210 | unsigned int i; |
9771b263 | 3211 | vec<data_reference_p> datarefs; |
ebfd146a IR |
3212 | struct data_reference *dr; |
3213 | tree scalar_type; | |
3214 | ||
73fbfcad | 3215 | if (dump_enabled_p ()) |
78c60e3d SS |
3216 | dump_printf_loc (MSG_NOTE, vect_location, |
3217 | "=== vect_analyze_data_refs ===\n"); | |
b8698a0f | 3218 | |
a70d6342 IR |
3219 | if (loop_vinfo) |
3220 | { | |
0136f8f0 AH |
3221 | basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); |
3222 | ||
a70d6342 | 3223 | loop = LOOP_VINFO_LOOP (loop_vinfo); |
0136f8f0 AH |
3224 | datarefs = LOOP_VINFO_DATAREFS (loop_vinfo); |
3225 | if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))) | |
22a8be9e | 3226 | { |
73fbfcad | 3227 | if (dump_enabled_p ()) |
e645e942 TJ |
3228 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3229 | "not vectorized: loop contains function calls" | |
3230 | " or data references that cannot be analyzed\n"); | |
22a8be9e SP |
3231 | return false; |
3232 | } | |
3233 | ||
0136f8f0 AH |
3234 | for (i = 0; i < loop->num_nodes; i++) |
3235 | { | |
3236 | gimple_stmt_iterator gsi; | |
3237 | ||
3238 | for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3239 | { | |
3240 | gimple stmt = gsi_stmt (gsi); | |
1428105c RB |
3241 | if (is_gimple_debug (stmt)) |
3242 | continue; | |
3243 | ++*n_stmts; | |
0136f8f0 AH |
3244 | if (!find_data_references_in_stmt (loop, stmt, &datarefs)) |
3245 | { | |
3246 | if (is_gimple_call (stmt) && loop->safelen) | |
3247 | { | |
3248 | tree fndecl = gimple_call_fndecl (stmt), op; | |
3249 | if (fndecl != NULL_TREE) | |
3250 | { | |
d52f5295 | 3251 | struct cgraph_node *node = cgraph_node::get (fndecl); |
0136f8f0 AH |
3252 | if (node != NULL && node->simd_clones != NULL) |
3253 | { | |
3254 | unsigned int j, n = gimple_call_num_args (stmt); | |
3255 | for (j = 0; j < n; j++) | |
3256 | { | |
3257 | op = gimple_call_arg (stmt, j); | |
3258 | if (DECL_P (op) | |
3259 | || (REFERENCE_CLASS_P (op) | |
3260 | && get_base_address (op))) | |
3261 | break; | |
3262 | } | |
3263 | op = gimple_call_lhs (stmt); | |
3264 | /* Ignore #pragma omp declare simd functions | |
3265 | if they don't have data references in the | |
3266 | call stmt itself. */ | |
3267 | if (j == n | |
3268 | && !(op | |
3269 | && (DECL_P (op) | |
3270 | || (REFERENCE_CLASS_P (op) | |
3271 | && get_base_address (op))))) | |
3272 | continue; | |
3273 | } | |
3274 | } | |
3275 | } | |
3276 | LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs; | |
3277 | if (dump_enabled_p ()) | |
3278 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
3279 | "not vectorized: loop contains function " | |
3280 | "calls or data references that cannot " | |
3281 | "be analyzed\n"); | |
3282 | return false; | |
3283 | } | |
3284 | } | |
3285 | } | |
3286 | ||
3287 | LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs; | |
a70d6342 IR |
3288 | } |
3289 | else | |
3290 | { | |
1aedeafe RG |
3291 | gimple_stmt_iterator gsi; |
3292 | ||
a70d6342 | 3293 | bb = BB_VINFO_BB (bb_vinfo); |
1aedeafe RG |
3294 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
3295 | { | |
3296 | gimple stmt = gsi_stmt (gsi); | |
1428105c RB |
3297 | if (is_gimple_debug (stmt)) |
3298 | continue; | |
3299 | ++*n_stmts; | |
1aedeafe RG |
3300 | if (!find_data_references_in_stmt (NULL, stmt, |
3301 | &BB_VINFO_DATAREFS (bb_vinfo))) | |
3302 | { | |
3303 | /* Mark the rest of the basic-block as unvectorizable. */ | |
3304 | for (; !gsi_end_p (gsi); gsi_next (&gsi)) | |
d4d5e146 RG |
3305 | { |
3306 | stmt = gsi_stmt (gsi); | |
3307 | STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false; | |
3308 | } | |
1aedeafe RG |
3309 | break; |
3310 | } | |
3311 | } | |
22a8be9e | 3312 | |
a70d6342 IR |
3313 | datarefs = BB_VINFO_DATAREFS (bb_vinfo); |
3314 | } | |
ebfd146a | 3315 | |
ff802fa1 IR |
3316 | /* Go through the data-refs, check that the analysis succeeded. Update |
3317 | pointer from stmt_vec_info struct to DR and vectype. */ | |
ebfd146a | 3318 | |
9771b263 | 3319 | FOR_EACH_VEC_ELT (datarefs, i, dr) |
ebfd146a IR |
3320 | { |
3321 | gimple stmt; | |
3322 | stmt_vec_info stmt_info; | |
b8698a0f | 3323 | tree base, offset, init; |
aec7ae7d | 3324 | bool gather = false; |
74bf76ed | 3325 | bool simd_lane_access = false; |
777e1f09 | 3326 | int vf; |
b8698a0f | 3327 | |
fbd7e877 | 3328 | again: |
ebfd146a IR |
3329 | if (!dr || !DR_REF (dr)) |
3330 | { | |
73fbfcad | 3331 | if (dump_enabled_p ()) |
78c60e3d | 3332 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3333 | "not vectorized: unhandled data-ref\n"); |
ebfd146a IR |
3334 | return false; |
3335 | } | |
3336 | ||
3337 | stmt = DR_STMT (dr); | |
3338 | stmt_info = vinfo_for_stmt (stmt); | |
3339 | ||
fbd7e877 RB |
3340 | /* Discard clobbers from the dataref vector. We will remove |
3341 | clobber stmts during vectorization. */ | |
3342 | if (gimple_clobber_p (stmt)) | |
3343 | { | |
d3ef8c53 | 3344 | free_data_ref (dr); |
fbd7e877 RB |
3345 | if (i == datarefs.length () - 1) |
3346 | { | |
3347 | datarefs.pop (); | |
3348 | break; | |
3349 | } | |
41475e96 JJ |
3350 | datarefs.ordered_remove (i); |
3351 | dr = datarefs[i]; | |
fbd7e877 RB |
3352 | goto again; |
3353 | } | |
3354 | ||
ebfd146a IR |
3355 | /* Check that analysis of the data-ref succeeded. */ |
3356 | if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr) | |
aec7ae7d | 3357 | || !DR_STEP (dr)) |
ebfd146a | 3358 | { |
74bf76ed JJ |
3359 | bool maybe_gather |
3360 | = DR_IS_READ (dr) | |
aec7ae7d | 3361 | && !TREE_THIS_VOLATILE (DR_REF (dr)) |
74bf76ed JJ |
3362 | && targetm.vectorize.builtin_gather != NULL; |
3363 | bool maybe_simd_lane_access | |
3364 | = loop_vinfo && loop->simduid; | |
3365 | ||
3366 | /* If target supports vector gather loads, or if this might be | |
3367 | a SIMD lane access, see if they can be used. */ |
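/* Added note (a rough sketch, not from the original source): a SIMD
   lane access is a reference such as array[_lane * 4], created for a
   privatized variable in a #pragma omp simd loop, where _lane is
   defined by an IFN_GOMP_SIMD_LANE call; its DR_OFFSET then has the
   MULT_EXPR shape matched below, with the step equal to the element
   size.  */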
3368 | if (loop_vinfo | |
3369 | && (maybe_gather || maybe_simd_lane_access) | |
aec7ae7d JJ |
3370 | && !nested_in_vect_loop_p (loop, stmt)) |
3371 | { | |
3372 | struct data_reference *newdr | |
3373 | = create_data_ref (NULL, loop_containing_stmt (stmt), | |
3374 | DR_REF (dr), stmt, true); | |
3375 | gcc_assert (newdr != NULL && DR_REF (newdr)); | |
3376 | if (DR_BASE_ADDRESS (newdr) | |
3377 | && DR_OFFSET (newdr) | |
3378 | && DR_INIT (newdr) | |
3379 | && DR_STEP (newdr) | |
3380 | && integer_zerop (DR_STEP (newdr))) | |
3381 | { | |
74bf76ed JJ |
3382 | if (maybe_simd_lane_access) |
3383 | { | |
3384 | tree off = DR_OFFSET (newdr); | |
3385 | STRIP_NOPS (off); | |
3386 | if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST | |
3387 | && TREE_CODE (off) == MULT_EXPR | |
cc269bb6 | 3388 | && tree_fits_uhwi_p (TREE_OPERAND (off, 1))) |
74bf76ed JJ |
3389 | { |
3390 | tree step = TREE_OPERAND (off, 1); | |
3391 | off = TREE_OPERAND (off, 0); | |
3392 | STRIP_NOPS (off); | |
3393 | if (CONVERT_EXPR_P (off) | |
3394 | && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, | |
3395 | 0))) | |
3396 | < TYPE_PRECISION (TREE_TYPE (off))) | |
3397 | off = TREE_OPERAND (off, 0); | |
3398 | if (TREE_CODE (off) == SSA_NAME) | |
3399 | { | |
3400 | gimple def = SSA_NAME_DEF_STMT (off); | |
3401 | tree reft = TREE_TYPE (DR_REF (newdr)); | |
cd4447e2 JJ |
3402 | if (is_gimple_call (def) |
3403 | && gimple_call_internal_p (def) | |
3404 | && (gimple_call_internal_fn (def) | |
3405 | == IFN_GOMP_SIMD_LANE)) | |
74bf76ed JJ |
3406 | { |
3407 | tree arg = gimple_call_arg (def, 0); | |
3408 | gcc_assert (TREE_CODE (arg) == SSA_NAME); | |
3409 | arg = SSA_NAME_VAR (arg); | |
3410 | if (arg == loop->simduid | |
3411 | /* For now. */ | |
3412 | && tree_int_cst_equal | |
3413 | (TYPE_SIZE_UNIT (reft), | |
3414 | step)) | |
3415 | { | |
3416 | DR_OFFSET (newdr) = ssize_int (0); | |
3417 | DR_STEP (newdr) = step; | |
995a1b4a JJ |
3418 | DR_ALIGNED_TO (newdr) |
3419 | = size_int (BIGGEST_ALIGNMENT); | |
74bf76ed JJ |
3420 | dr = newdr; |
3421 | simd_lane_access = true; | |
3422 | } | |
3423 | } | |
3424 | } | |
3425 | } | |
3426 | } | |
3427 | if (!simd_lane_access && maybe_gather) | |
3428 | { | |
3429 | dr = newdr; | |
3430 | gather = true; | |
3431 | } | |
aec7ae7d | 3432 | } |
74bf76ed | 3433 | if (!gather && !simd_lane_access) |
aec7ae7d JJ |
3434 | free_data_ref (newdr); |
3435 | } | |
4b5caab7 | 3436 | |
74bf76ed | 3437 | if (!gather && !simd_lane_access) |
aec7ae7d | 3438 | { |
73fbfcad | 3439 | if (dump_enabled_p ()) |
aec7ae7d | 3440 | { |
e645e942 | 3441 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78c60e3d SS |
3442 | "not vectorized: data ref analysis " |
3443 | "failed "); | |
3444 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3445 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
aec7ae7d | 3446 | } |
ba65ae42 | 3447 | |
aec7ae7d | 3448 | if (bb_vinfo) |
fcac74a1 | 3449 | break; |
aec7ae7d JJ |
3450 | |
3451 | return false; | |
3452 | } | |
ebfd146a IR |
3453 | } |
3454 | ||
3455 | if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST) | |
3456 | { | |
73fbfcad | 3457 | if (dump_enabled_p ()) |
78c60e3d SS |
3458 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3459 | "not vectorized: base addr of dr is a " | |
e645e942 | 3460 | "constant\n"); |
ba65ae42 IR |
3461 | |
3462 | if (bb_vinfo) | |
fcac74a1 | 3463 | break; |
ba65ae42 | 3464 | |
74bf76ed | 3465 | if (gather || simd_lane_access) |
aec7ae7d JJ |
3466 | free_data_ref (dr); |
3467 | return false; | |
ebfd146a IR |
3468 | } |
3469 | ||
8f7de592 IR |
3470 | if (TREE_THIS_VOLATILE (DR_REF (dr))) |
3471 | { | |
73fbfcad | 3472 | if (dump_enabled_p ()) |
8f7de592 | 3473 | { |
78c60e3d SS |
3474 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3475 | "not vectorized: volatile type "); | |
3476 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3477 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
8f7de592 | 3478 | } |
ba65ae42 IR |
3479 | |
3480 | if (bb_vinfo) | |
fcac74a1 | 3481 | break; |
ba65ae42 | 3482 | |
8f7de592 IR |
3483 | return false; |
3484 | } | |
3485 | ||
822ba6d7 | 3486 | if (stmt_can_throw_internal (stmt)) |
5a2c1986 | 3487 | { |
73fbfcad | 3488 | if (dump_enabled_p ()) |
5a2c1986 | 3489 | { |
78c60e3d SS |
3490 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3491 | "not vectorized: statement can throw an " | |
3492 | "exception "); | |
3493 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3494 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
5a2c1986 | 3495 | } |
ba65ae42 IR |
3496 | |
3497 | if (bb_vinfo) | |
fcac74a1 | 3498 | break; |
ba65ae42 | 3499 | |
74bf76ed | 3500 | if (gather || simd_lane_access) |
aec7ae7d | 3501 | free_data_ref (dr); |
5a2c1986 IR |
3502 | return false; |
3503 | } | |
3504 | ||
508ef0c6 RG |
3505 | if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF |
3506 | && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1))) | |
3507 | { | |
73fbfcad | 3508 | if (dump_enabled_p ()) |
508ef0c6 | 3509 | { |
78c60e3d SS |
3510 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3511 | "not vectorized: statement is bitfield " | |
3512 | "access "); | |
3513 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3514 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
508ef0c6 RG |
3515 | } |
3516 | ||
3517 | if (bb_vinfo) | |
fcac74a1 | 3518 | break; |
508ef0c6 | 3519 | |
74bf76ed | 3520 | if (gather || simd_lane_access) |
508ef0c6 RG |
3521 | free_data_ref (dr); |
3522 | return false; | |
3523 | } | |
3524 | ||
3525 | base = unshare_expr (DR_BASE_ADDRESS (dr)); | |
3526 | offset = unshare_expr (DR_OFFSET (dr)); | |
3527 | init = unshare_expr (DR_INIT (dr)); | |
3528 | ||
5ce9450f JJ |
3529 | if (is_gimple_call (stmt) |
3530 | && (!gimple_call_internal_p (stmt) | |
3531 | || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD | |
3532 | && gimple_call_internal_fn (stmt) != IFN_MASK_STORE))) | |
9c239085 | 3533 | { |
73fbfcad | 3534 | if (dump_enabled_p ()) |
9c239085 | 3535 | { |
78c60e3d | 3536 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e645e942 | 3537 | "not vectorized: dr in a call "); |
78c60e3d | 3538 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); |
e645e942 | 3539 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
9c239085 JJ |
3540 | } |
3541 | ||
3542 | if (bb_vinfo) | |
fcac74a1 | 3543 | break; |
9c239085 | 3544 | |
74bf76ed | 3545 | if (gather || simd_lane_access) |
9c239085 JJ |
3546 | free_data_ref (dr); |
3547 | return false; | |
3548 | } | |
3549 | ||
ebfd146a | 3550 | /* Update DR field in stmt_vec_info struct. */ |
ebfd146a IR |
3551 | |
3552 | /* If the dataref is in an inner-loop of the loop that is considered |
3553 | for vectorization, we also want to analyze the access relative to |
b8698a0f | 3554 | the outer-loop (DR contains information only relative to the |
ebfd146a IR |
3555 | inner-most enclosing loop). We do that by building a reference to the |
3556 | first location accessed by the inner-loop, and analyzing it relative to |
b8698a0f L |
3557 | the outer-loop. */ |
3558 | if (loop && nested_in_vect_loop_p (loop, stmt)) | |
ebfd146a IR |
3559 | { |
3560 | tree outer_step, outer_base, outer_init; | |
3561 | HOST_WIDE_INT pbitsize, pbitpos; | |
3562 | tree poffset; | |
ef4bddc2 | 3563 | machine_mode pmode; |
ebfd146a IR |
3564 | int punsignedp, pvolatilep; |
3565 | affine_iv base_iv, offset_iv; | |
3566 | tree dinit; | |
3567 | ||
b8698a0f | 3568 | /* Build a reference to the first location accessed by the |
ff802fa1 | 3569 | inner-loop: *(BASE+INIT). (The first location is actually |
ebfd146a IR |
3570 | BASE+INIT+OFFSET, but we add OFFSET separately later). */ |
3571 | tree inner_base = build_fold_indirect_ref | |
5d49b6a7 | 3572 | (fold_build_pointer_plus (base, init)); |
ebfd146a | 3573 | |
73fbfcad | 3574 | if (dump_enabled_p ()) |
ebfd146a | 3575 | { |
78c60e3d SS |
3576 | dump_printf_loc (MSG_NOTE, vect_location, |
3577 | "analyze in outer-loop: "); | |
3578 | dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base); | |
e645e942 | 3579 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a IR |
3580 | } |
3581 | ||
b8698a0f | 3582 | outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos, |
b3ecff82 | 3583 | &poffset, &pmode, &punsignedp, &pvolatilep, false); |
ebfd146a IR |
3584 | gcc_assert (outer_base != NULL_TREE); |
3585 | ||
3586 | if (pbitpos % BITS_PER_UNIT != 0) | |
3587 | { | |
73fbfcad | 3588 | if (dump_enabled_p ()) |
78c60e3d SS |
3589 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3590 | "failed: bit offset alignment.\n"); | |
ebfd146a IR |
3591 | return false; |
3592 | } | |
3593 | ||
3594 | outer_base = build_fold_addr_expr (outer_base); | |
b8698a0f | 3595 | if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base, |
ebfd146a IR |
3596 | &base_iv, false)) |
3597 | { | |
73fbfcad | 3598 | if (dump_enabled_p ()) |
e645e942 | 3599 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78c60e3d | 3600 | "failed: evolution of base is not affine.\n"); |
ebfd146a IR |
3601 | return false; |
3602 | } | |
3603 | ||
3604 | if (offset) | |
3605 | { | |
3606 | if (poffset) | |
b8698a0f | 3607 | poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, |
ebfd146a IR |
3608 | poffset); |
3609 | else | |
3610 | poffset = offset; | |
3611 | } | |
3612 | ||
3613 | if (!poffset) | |
3614 | { | |
3615 | offset_iv.base = ssize_int (0); | |
3616 | offset_iv.step = ssize_int (0); | |
3617 | } | |
b8698a0f | 3618 | else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset, |
ebfd146a IR |
3619 | &offset_iv, false)) |
3620 | { | |
73fbfcad | 3621 | if (dump_enabled_p ()) |
e645e942 | 3622 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78c60e3d | 3623 | "evolution of offset is not affine.\n"); |
ebfd146a IR |
3624 | return false; |
3625 | } | |
3626 | ||
3627 | outer_init = ssize_int (pbitpos / BITS_PER_UNIT); | |
3628 | split_constant_offset (base_iv.base, &base_iv.base, &dinit); | |
3629 | outer_init = size_binop (PLUS_EXPR, outer_init, dinit); | |
3630 | split_constant_offset (offset_iv.base, &offset_iv.base, &dinit); | |
3631 | outer_init = size_binop (PLUS_EXPR, outer_init, dinit); | |
3632 | ||
3633 | outer_step = size_binop (PLUS_EXPR, | |
3634 | fold_convert (ssizetype, base_iv.step), | |
3635 | fold_convert (ssizetype, offset_iv.step)); | |
3636 | ||
3637 | STMT_VINFO_DR_STEP (stmt_info) = outer_step; | |
3638 | /* FIXME: Use canonicalize_base_object_address (base_iv.base); */ | |
b8698a0f | 3639 | STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base; |
ebfd146a | 3640 | STMT_VINFO_DR_INIT (stmt_info) = outer_init; |
b8698a0f | 3641 | STMT_VINFO_DR_OFFSET (stmt_info) = |
ebfd146a | 3642 | fold_convert (ssizetype, offset_iv.base); |
b8698a0f | 3643 | STMT_VINFO_DR_ALIGNED_TO (stmt_info) = |
ebfd146a IR |
3644 | size_int (highest_pow2_factor (offset_iv.base)); |
3645 | ||
73fbfcad | 3646 | if (dump_enabled_p ()) |
ebfd146a | 3647 | { |
78c60e3d SS |
3648 | dump_printf_loc (MSG_NOTE, vect_location, |
3649 | "\touter base_address: "); | |
3650 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3651 | STMT_VINFO_DR_BASE_ADDRESS (stmt_info)); | |
3652 | dump_printf (MSG_NOTE, "\n\touter offset from base address: "); | |
3653 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3654 | STMT_VINFO_DR_OFFSET (stmt_info)); | |
3655 | dump_printf (MSG_NOTE, | |
3656 | "\n\touter constant offset from base address: "); | |
3657 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3658 | STMT_VINFO_DR_INIT (stmt_info)); | |
3659 | dump_printf (MSG_NOTE, "\n\touter step: "); | |
3660 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3661 | STMT_VINFO_DR_STEP (stmt_info)); | |
3662 | dump_printf (MSG_NOTE, "\n\touter aligned to: "); | |
3663 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3664 | STMT_VINFO_DR_ALIGNED_TO (stmt_info)); | |
e645e942 | 3665 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a IR |
3666 | } |
3667 | } | |
3668 | ||
3669 | if (STMT_VINFO_DATA_REF (stmt_info)) | |
3670 | { | |
73fbfcad | 3671 | if (dump_enabled_p ()) |
ebfd146a | 3672 | { |
78c60e3d SS |
3673 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3674 | "not vectorized: more than one data ref " | |
3675 | "in stmt: "); | |
3676 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3677 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
ebfd146a | 3678 | } |
ba65ae42 IR |
3679 | |
3680 | if (bb_vinfo) | |
fcac74a1 | 3681 | break; |
ba65ae42 | 3682 | |
74bf76ed | 3683 | if (gather || simd_lane_access) |
aec7ae7d | 3684 | free_data_ref (dr); |
ebfd146a IR |
3685 | return false; |
3686 | } | |
8644a673 | 3687 | |
ebfd146a | 3688 | STMT_VINFO_DATA_REF (stmt_info) = dr; |
74bf76ed JJ |
3689 | if (simd_lane_access) |
3690 | { | |
3691 | STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true; | |
d3ef8c53 | 3692 | free_data_ref (datarefs[i]); |
74bf76ed JJ |
3693 | datarefs[i] = dr; |
3694 | } | |
b8698a0f | 3695 | |
ebfd146a IR |
3696 | /* Set vectype for STMT. */ |
3697 | scalar_type = TREE_TYPE (DR_REF (dr)); | |
d3ef8c53 JJ |
3698 | STMT_VINFO_VECTYPE (stmt_info) |
3699 | = get_vectype_for_scalar_type (scalar_type); | |
b8698a0f | 3700 | if (!STMT_VINFO_VECTYPE (stmt_info)) |
ebfd146a | 3701 | { |
73fbfcad | 3702 | if (dump_enabled_p ()) |
ebfd146a | 3703 | { |
e645e942 | 3704 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
78c60e3d SS |
3705 | "not vectorized: no vectype for stmt: "); |
3706 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
3707 | dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: "); | |
3708 | dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS, | |
3709 | scalar_type); | |
e645e942 | 3710 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
ebfd146a | 3711 | } |
4b5caab7 IR |
3712 | |
3713 | if (bb_vinfo) | |
fcac74a1 | 3714 | break; |
aec7ae7d | 3715 | |
74bf76ed | 3716 | if (gather || simd_lane_access) |
aec7ae7d JJ |
3717 | { |
3718 | STMT_VINFO_DATA_REF (stmt_info) = NULL; | |
d3ef8c53 JJ |
3719 | if (gather) |
3720 | free_data_ref (dr); | |
aec7ae7d JJ |
3721 | } |
3722 | return false; | |
ebfd146a | 3723 | } |
451dabda RB |
3724 | else |
3725 | { | |
3726 | if (dump_enabled_p ()) | |
3727 | { | |
3728 | dump_printf_loc (MSG_NOTE, vect_location, | |
3729 | "got vectype for stmt: "); | |
3730 | dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); | |
3731 | dump_generic_expr (MSG_NOTE, TDF_SLIM, | |
3732 | STMT_VINFO_VECTYPE (stmt_info)); | |
e645e942 | 3733 | dump_printf (MSG_NOTE, "\n"); |
451dabda RB |
3734 | } |
3735 | } | |
777e1f09 RG |
3736 | |
3737 | /* Adjust the minimal vectorization factor according to the | |
3738 | vector type. */ | |
3739 | vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); | |
3740 | if (vf > *min_vf) | |
3741 | *min_vf = vf; | |
aec7ae7d JJ |
3742 | |
3743 | if (gather) | |
3744 | { | |
aec7ae7d | 3745 | tree off; |
aec7ae7d | 3746 | |
7d75abc8 MM |
3747 | gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL); |
3748 | if (gather | |
3749 | && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE) | |
3750 | gather = false; | |
319e6439 | 3751 | if (!gather) |
aec7ae7d | 3752 | { |
6f723d33 JJ |
3753 | STMT_VINFO_DATA_REF (stmt_info) = NULL; |
3754 | free_data_ref (dr); | |
73fbfcad | 3755 | if (dump_enabled_p ()) |
aec7ae7d | 3756 | { |
78c60e3d SS |
3757 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3758 | "not vectorized: not suitable for gather " | |
3759 | "load "); | |
3760 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3761 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
aec7ae7d JJ |
3762 | } |
3763 | return false; | |
3764 | } | |
3765 | ||
9771b263 | 3766 | datarefs[i] = dr; |
319e6439 RG |
3767 | STMT_VINFO_GATHER_P (stmt_info) = true; |
3768 | } | |
3769 | else if (loop_vinfo | |
3770 | && TREE_CODE (DR_STEP (dr)) != INTEGER_CST) | |
3771 | { | |
51a905b2 RB |
3772 | if (nested_in_vect_loop_p (loop, stmt) |
3773 | || !DR_IS_READ (dr)) | |
319e6439 | 3774 | { |
73fbfcad | 3775 | if (dump_enabled_p ()) |
319e6439 | 3776 | { |
78c60e3d SS |
3777 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
3778 | "not vectorized: not suitable for strided " | |
3779 | "load "); | |
3780 | dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); | |
e645e942 | 3781 | dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); |
319e6439 RG |
3782 | } |
3783 | return false; | |
3784 | } | |
3785 | STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true; | |
aec7ae7d | 3786 | } |
ebfd146a | 3787 | } |
b8698a0f | 3788 | |
fcac74a1 RB |
3789 | /* If we stopped analysis at the first dataref we could not analyze |
3790 | when trying to vectorize a basic-block, mark the rest of the datarefs |
3791 | as not vectorizable and truncate the vector of datarefs. That | |
3792 | avoids spending useless time in analyzing their dependence. */ | |
3793 | if (i != datarefs.length ()) | |
3794 | { | |
3795 | gcc_assert (bb_vinfo != NULL); | |
3796 | for (unsigned j = i; j < datarefs.length (); ++j) | |
3797 | { | |
3798 | data_reference_p dr = datarefs[j]; | |
3799 | STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false; | |
3800 | free_data_ref (dr); | |
3801 | } | |
3802 | datarefs.truncate (i); | |
3803 | } | |
3804 | ||
ebfd146a IR |
3805 | return true; |
3806 | } | |
3807 | ||
3808 | ||
3809 | /* Function vect_get_new_vect_var. | |
3810 | ||
ff802fa1 | 3811 | Returns a name for a new variable. The current naming scheme uses the |
b8698a0f L |
3812 | prefix "vect", "stmp" or "vectp" (depending on the value of VAR_KIND) |
3813 | for vectorizer generated variables, appending "_" and NAME to it if |
ebfd146a IR |
3814 | NAME is provided. */ |
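/* For instance (added illustration): vect_get_new_vect_var (type,
   vect_pointer_var, "x") produces a variable named "vectp_x", while a
   NULL NAME produces just "vectp".  */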
3815 | ||
3816 | tree | |
3817 | vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name) | |
3818 | { | |
3819 | const char *prefix; | |
3820 | tree new_vect_var; | |
3821 | ||
3822 | switch (var_kind) | |
3823 | { | |
3824 | case vect_simple_var: | |
451dabda | 3825 | prefix = "vect"; |
ebfd146a IR |
3826 | break; |
3827 | case vect_scalar_var: | |
451dabda | 3828 | prefix = "stmp"; |
ebfd146a IR |
3829 | break; |
3830 | case vect_pointer_var: | |
451dabda | 3831 | prefix = "vectp"; |
ebfd146a IR |
3832 | break; |
3833 | default: | |
3834 | gcc_unreachable (); | |
3835 | } | |
3836 | ||
3837 | if (name) | |
3838 | { | |
451dabda | 3839 | char* tmp = concat (prefix, "_", name, NULL); |
65876d24 | 3840 | new_vect_var = create_tmp_reg (type, tmp); |
ebfd146a IR |
3841 | free (tmp); |
3842 | } | |
3843 | else | |
65876d24 | 3844 | new_vect_var = create_tmp_reg (type, prefix); |
ebfd146a IR |
3845 | |
3846 | return new_vect_var; | |
3847 | } | |
3848 | ||
3849 | ||
3850 | /* Function vect_create_addr_base_for_vector_ref. | |
3851 | ||
3852 | Create an expression that computes the address of the first memory location | |
3853 | that will be accessed for a data reference. | |
3854 | ||
3855 | Input: | |
3856 | STMT: The statement containing the data reference. | |
3857 | NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list. | |
3858 | OFFSET: Optional. If supplied, it is added to the initial address. |
3859 | LOOP: Specify the loop-nest relative to which the address should be computed. |
3860 | For example, when the dataref is in an inner-loop nested in an | |
3861 | outer-loop that is now being vectorized, LOOP can be either the | |
ff802fa1 | 3862 | outer-loop, or the inner-loop. The first memory location accessed |
ebfd146a IR |
3863 | by the following dataref ('in' points to short): |
3864 | ||
3865 | for (i=0; i<N; i++) | |
3866 | for (j=0; j<M; j++) | |
3867 | s += in[i+j] | |
3868 | ||
3869 | is as follows: | |
3870 | if LOOP=i_loop: &in (relative to i_loop) | |
3871 | if LOOP=j_loop: &in+i*2B (relative to j_loop) | |
356bbc4c JJ |
3872 | BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the |
3873 | initial address. Unlike OFFSET, which is number of elements to | |
3874 | be added, BYTE_OFFSET is measured in bytes. | |
ebfd146a IR |
3875 | |
3876 | Output: | |
b8698a0f | 3877 | 1. Return an SSA_NAME whose value is the address of the memory location of |
ebfd146a IR |
3878 | the first vector of the data reference. |
3879 | 2. If new_stmt_list is not NULL_TREE after return then the caller must insert | |
3880 | these statement(s) which define the returned SSA_NAME. | |
3881 | ||
3882 | FORNOW: We are only handling array accesses with step 1. */ | |
3883 | ||
3884 | tree | |
3885 | vect_create_addr_base_for_vector_ref (gimple stmt, | |
3886 | gimple_seq *new_stmt_list, | |
3887 | tree offset, | |
356bbc4c JJ |
3888 | struct loop *loop, |
3889 | tree byte_offset) | |
ebfd146a IR |
3890 | { |
3891 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
3892 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
4bdd44c4 | 3893 | tree data_ref_base; |
595c2679 | 3894 | const char *base_name; |
4bdd44c4 | 3895 | tree addr_base; |
ebfd146a IR |
3896 | tree dest; |
3897 | gimple_seq seq = NULL; | |
4bdd44c4 RB |
3898 | tree base_offset; |
3899 | tree init; | |
8644a673 | 3900 | tree vect_ptr_type; |
ebfd146a | 3901 | tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr))); |
a70d6342 | 3902 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
ebfd146a | 3903 | |
a70d6342 | 3904 | if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father) |
ebfd146a | 3905 | { |
a70d6342 | 3906 | struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo); |
ebfd146a | 3907 | |
a70d6342 | 3908 | gcc_assert (nested_in_vect_loop_p (outer_loop, stmt)); |
ebfd146a IR |
3909 | |
3910 | data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info)); | |
3911 | base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info)); | |
3912 | init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info)); | |
3913 | } | |
4bdd44c4 RB |
3914 | else |
3915 | { | |
3916 | data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr)); | |
3917 | base_offset = unshare_expr (DR_OFFSET (dr)); | |
3918 | init = unshare_expr (DR_INIT (dr)); | |
3919 | } | |
ebfd146a | 3920 | |
a70d6342 | 3921 | if (loop_vinfo) |
595c2679 | 3922 | base_name = get_name (data_ref_base); |
a70d6342 IR |
3923 | else |
3924 | { | |
3925 | base_offset = ssize_int (0); | |
3926 | init = ssize_int (0); | |
595c2679 | 3927 | base_name = get_name (DR_REF (dr)); |
b8698a0f | 3928 | } |
a70d6342 | 3929 | |
ebfd146a IR |
3930 | /* Create base_offset */ |
3931 | base_offset = size_binop (PLUS_EXPR, | |
3932 | fold_convert (sizetype, base_offset), | |
3933 | fold_convert (sizetype, init)); | |
ebfd146a IR |
3934 | |
3935 | if (offset) | |
3936 | { | |
ebfd146a IR |
3937 | offset = fold_build2 (MULT_EXPR, sizetype, |
3938 | fold_convert (sizetype, offset), step); | |
3939 | base_offset = fold_build2 (PLUS_EXPR, sizetype, | |
3940 | base_offset, offset); | |
ebfd146a | 3941 | } |
356bbc4c JJ |
3942 | if (byte_offset) |
3943 | { | |
3944 | byte_offset = fold_convert (sizetype, byte_offset); | |
3945 | base_offset = fold_build2 (PLUS_EXPR, sizetype, | |
3946 | base_offset, byte_offset); | |
3947 | } | |
ebfd146a IR |
3948 | |
3949 | /* base + base_offset */ | |
a70d6342 | 3950 | if (loop_vinfo) |
5d49b6a7 | 3951 | addr_base = fold_build_pointer_plus (data_ref_base, base_offset); |
a70d6342 IR |
3952 | else |
3953 | { | |
70f34814 RG |
3954 | addr_base = build1 (ADDR_EXPR, |
3955 | build_pointer_type (TREE_TYPE (DR_REF (dr))), | |
3956 | unshare_expr (DR_REF (dr))); | |
a70d6342 | 3957 | } |
b8698a0f | 3958 | |
ebfd146a | 3959 | vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info)); |
4bdd44c4 RB |
3960 | addr_base = fold_convert (vect_ptr_type, addr_base); |
3961 | dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name); | |
3962 | addr_base = force_gimple_operand (addr_base, &seq, false, dest); | |
ebfd146a IR |
3963 | gimple_seq_add_seq (new_stmt_list, seq); |
3964 | ||
17fc049f | 3965 | if (DR_PTR_INFO (dr) |
4bdd44c4 | 3966 | && TREE_CODE (addr_base) == SSA_NAME) |
128aaeed | 3967 | { |
4bdd44c4 | 3968 | duplicate_ssa_name_ptr_info (addr_base, DR_PTR_INFO (dr)); |
417dfefb GW |
3969 | unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info)); |
3970 | int misalign = DR_MISALIGNMENT (dr); | |
3971 | if (offset || byte_offset || (misalign == -1)) | |
4bdd44c4 | 3972 | mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base)); |
417dfefb GW |
3973 | else |
3974 | set_ptr_info_alignment (SSA_NAME_PTR_INFO (addr_base), align, misalign); | |
128aaeed | 3975 | } |
17fc049f | 3976 | |
73fbfcad | 3977 | if (dump_enabled_p ()) |
ebfd146a | 3978 | { |
78c60e3d | 3979 | dump_printf_loc (MSG_NOTE, vect_location, "created "); |
4bdd44c4 | 3980 | dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base); |
e645e942 | 3981 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a | 3982 | } |
8644a673 | 3983 | |
4bdd44c4 | 3984 | return addr_base; |
ebfd146a IR |
3985 | } |
3986 | ||
3987 | ||
3988 | /* Function vect_create_data_ref_ptr. | |
3989 | ||
920e8172 RS |
3990 | Create a new pointer-to-AGGR_TYPE variable (ap) that points to the first |
3991 | location accessed in the loop by STMT, along with the def-use update | |
3992 | chain to appropriately advance the pointer through the loop iterations. | |
3993 | Also set aliasing information for the pointer. This pointer is used by | |
3994 | the callers to this function to create a memory reference expression for | |
3995 | vector load/store access. | |
ebfd146a IR |
3996 | |
3997 | Input: | |
3998 | 1. STMT: a stmt that references memory. Expected to be of the form | |
3999 | GIMPLE_ASSIGN <name, data-ref> or | |
4000 | GIMPLE_ASSIGN <data-ref, name>. | |
920e8172 RS |
4001 | 2. AGGR_TYPE: the type of the reference, which should be either a vector |
4002 | or an array. | |
4003 | 3. AT_LOOP: the loop where the vector memref is to be created. | |
4004 | 4. OFFSET (optional): an offset to be added to the initial address accessed | |
ebfd146a | 4005 | by the data-ref in STMT. |
920e8172 RS |
4006 | 5. BSI: location where the new stmts are to be placed if there is no loop |
4007 | 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain | |
ebfd146a | 4008 | pointing to the initial address. |
356bbc4c JJ |
4009 | 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added |
4010 | to the initial address accessed by the data-ref in STMT. This is | |
4011 | similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET | |
4012 | in bytes. | |
ebfd146a IR |
4013 | |
4014 | Output: | |
4015 | 1. Declare a new ptr to vector_type, and have it point to the base of the | |
4016 | data reference (initial addressed accessed by the data reference). | |
4017 | For example, for vector of type V8HI, the following code is generated: | |
4018 | ||
920e8172 RS |
4019 | v8hi *ap; |
4020 | ap = (v8hi *)initial_address; | |
ebfd146a IR |
4021 | |
4022 | if OFFSET is not supplied: | |
4023 | initial_address = &a[init]; | |
4024 | if OFFSET is supplied: | |
4025 | initial_address = &a[init + OFFSET]; | |
356bbc4c JJ |
4026 | if BYTE_OFFSET is supplied: |
4027 | initial_address = &a[init] + BYTE_OFFSET; | |
ebfd146a IR |
4028 | |
4029 | Return the initial_address in INITIAL_ADDRESS. | |
4030 | ||
4031 | 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also | |
b8698a0f | 4032 | update the pointer in each iteration of the loop. |
ebfd146a IR |
4033 | |
4034 | Return the increment stmt that updates the pointer in PTR_INCR. | |
4035 | ||
b8698a0f | 4036 | 3. Set INV_P to true if the access pattern of the data reference in the |
ff802fa1 | 4037 | vectorized loop is invariant. Set it to false otherwise. |
ebfd146a IR |
4038 | |
4039 | 4. Return the pointer. */ | |
4040 | ||
4041 | tree | |
920e8172 RS |
4042 | vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop, |
4043 | tree offset, tree *initial_address, | |
4044 | gimple_stmt_iterator *gsi, gimple *ptr_incr, | |
356bbc4c | 4045 | bool only_init, bool *inv_p, tree byte_offset) |
ebfd146a | 4046 | { |
595c2679 | 4047 | const char *base_name; |
ebfd146a IR |
4048 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); |
4049 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
a70d6342 IR |
4050 | struct loop *loop = NULL; |
4051 | bool nested_in_vect_loop = false; | |
4052 | struct loop *containing_loop = NULL; | |
920e8172 RS |
4053 | tree aggr_ptr_type; |
4054 | tree aggr_ptr; | |
ebfd146a IR |
4055 | tree new_temp; |
4056 | gimple vec_stmt; | |
4057 | gimple_seq new_stmt_list = NULL; | |
a70d6342 | 4058 | edge pe = NULL; |
ebfd146a | 4059 | basic_block new_bb; |
920e8172 | 4060 | tree aggr_ptr_init; |
ebfd146a | 4061 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
920e8172 | 4062 | tree aptr; |
ebfd146a IR |
4063 | gimple_stmt_iterator incr_gsi; |
4064 | bool insert_after; | |
4065 | tree indx_before_incr, indx_after_incr; | |
4066 | gimple incr; | |
4067 | tree step; | |
a70d6342 | 4068 | bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); |
b8698a0f | 4069 | |
920e8172 RS |
4070 | gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE |
4071 | || TREE_CODE (aggr_type) == VECTOR_TYPE); | |
4072 | ||
a70d6342 IR |
4073 | if (loop_vinfo) |
4074 | { | |
4075 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
4076 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
4077 | containing_loop = (gimple_bb (stmt))->loop_father; | |
4078 | pe = loop_preheader_edge (loop); | |
4079 | } | |
4080 | else | |
4081 | { | |
4082 | gcc_assert (bb_vinfo); | |
4083 | only_init = true; | |
4084 | *ptr_incr = NULL; | |
4085 | } | |
b8698a0f | 4086 | |
ebfd146a IR |
4087 | /* Check the step (evolution) of the load in LOOP, and record |
4088 | whether it's invariant. */ | |
4089 | if (nested_in_vect_loop) | |
4090 | step = STMT_VINFO_DR_STEP (stmt_info); | |
4091 | else | |
4092 | step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info)); | |
b8698a0f | 4093 | |
08940f33 | 4094 | if (integer_zerop (step)) |
ebfd146a IR |
4095 | *inv_p = true; |
4096 | else | |
4097 | *inv_p = false; | |
4098 | ||
4099 | /* Create an expression for the first address accessed by this load | |
b8698a0f | 4100 | in LOOP. */ |
595c2679 | 4101 | base_name = get_name (DR_BASE_ADDRESS (dr)); |
ebfd146a | 4102 | |
73fbfcad | 4103 | if (dump_enabled_p ()) |
ebfd146a | 4104 | { |
595c2679 | 4105 | tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr)); |
78c60e3d SS |
4106 | dump_printf_loc (MSG_NOTE, vect_location, |
4107 | "create %s-pointer variable to type: ", | |
5806f481 | 4108 | get_tree_code_name (TREE_CODE (aggr_type))); |
78c60e3d | 4109 | dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type); |
595c2679 | 4110 | if (TREE_CODE (dr_base_type) == ARRAY_TYPE) |
78c60e3d | 4111 | dump_printf (MSG_NOTE, " vectorizing an array ref: "); |
38000232 MG |
4112 | else if (TREE_CODE (dr_base_type) == VECTOR_TYPE) |
4113 | dump_printf (MSG_NOTE, " vectorizing a vector ref: "); | |
595c2679 | 4114 | else if (TREE_CODE (dr_base_type) == RECORD_TYPE) |
78c60e3d | 4115 | dump_printf (MSG_NOTE, " vectorizing a record based array ref: "); |
595c2679 | 4116 | else |
78c60e3d | 4117 | dump_printf (MSG_NOTE, " vectorizing a pointer ref: "); |
595c2679 | 4118 | dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr)); |
e645e942 | 4119 | dump_printf (MSG_NOTE, "\n"); |
ebfd146a IR |
4120 | } |
4121 | ||
4bdd44c4 RB |
4122 | /* (1) Create the new aggregate-pointer variable. |
4123 | Vector and array types inherit the alias set of their component | |
920e8172 RS |
4124 | type by default, so we need to use a ref-all pointer if the data
4125 | reference does not conflict with the created aggregate data
4126 | reference, because the latter is not addressable. */
4bdd44c4 RB |
4127 | bool need_ref_all = false; |
4128 | if (!alias_sets_conflict_p (get_alias_set (aggr_type), | |
3f49ba3f | 4129 | get_alias_set (DR_REF (dr)))) |
4bdd44c4 | 4130 | need_ref_all = true; |
3f49ba3f | 4131 | /* Likewise for any of the data references in the stmt group. */ |
e14c1050 | 4132 | else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1) |
ebfd146a | 4133 | { |
e14c1050 | 4134 | gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info); |
5006671f RG |
4135 | do |
4136 | { | |
4bdd44c4 RB |
4137 | stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt); |
4138 | struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo); | |
4139 | if (!alias_sets_conflict_p (get_alias_set (aggr_type), | |
4140 | get_alias_set (DR_REF (sdr)))) | |
5006671f | 4141 | { |
4bdd44c4 | 4142 | need_ref_all = true; |
5006671f RG |
4143 | break; |
4144 | } | |
4bdd44c4 | 4145 | orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo); |
5006671f RG |
4146 | } |
4147 | while (orig_stmt); | |
ebfd146a | 4148 | } |
4bdd44c4 RB |
4149 | aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode, |
4150 | need_ref_all); | |
4151 | aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name); | |
4152 | ||
ebfd146a | 4153 | |
ff802fa1 IR |
4154 | /* Note: If the dataref is in an inner-loop nested in LOOP, and we are |
4155 | vectorizing LOOP (i.e., outer-loop vectorization), we need to create two | |
4156 | def-use update cycles for the pointer: one relative to the outer-loop | |
4157 | (LOOP), which is what steps (3) and (4) below do. The other is relative | |
4158 | to the inner-loop (which is the inner-most loop containing the dataref), | |
4159 | and this is done by step (5) below.
ebfd146a | 4160 | |
ff802fa1 IR |
4161 | When vectorizing inner-most loops, the vectorized loop (LOOP) is also the |
4162 | inner-most loop, and so steps (3),(4) work the same, and step (5) is | |
4163 | redundant. Steps (3),(4) create the following: | |
ebfd146a IR |
4164 | |
4165 | vp0 = &base_addr; | |
4166 | LOOP: vp1 = phi(vp0,vp2) | |
b8698a0f | 4167 | ... |
ebfd146a IR |
4168 | ... |
4169 | vp2 = vp1 + step | |
4170 | goto LOOP | |
b8698a0f | 4171 | |
ff802fa1 IR |
4172 | If there is an inner-loop nested in loop, then step (5) will also be |
4173 | applied, and an additional update in the inner-loop will be created: | |
ebfd146a IR |
4174 | |
4175 | vp0 = &base_addr; | |
4176 | LOOP: vp1 = phi(vp0,vp2) | |
4177 | ... | |
4178 | inner: vp3 = phi(vp1,vp4) | |
4179 | vp4 = vp3 + inner_step | |
4180 | if () goto inner | |
4181 | ... | |
4182 | vp2 = vp1 + step | |
4183 | if () goto LOOP */ | |
4184 | ||
920e8172 RS |
4185 | /* (2) Calculate the initial address of the aggregate-pointer, and set |
4186 | the aggregate-pointer to point to it before the loop. */ | |
ebfd146a | 4187 | |
356bbc4c | 4188 | /* Create: &(base[init_val+offset]) + byte_offset in the loop preheader. */
ebfd146a IR |
4189 | |
4190 | new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list, | |
356bbc4c | 4191 | offset, loop, byte_offset); |
ebfd146a IR |
4192 | if (new_stmt_list) |
4193 | { | |
a70d6342 IR |
4194 | if (pe) |
4195 | { | |
4196 | new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list); | |
4197 | gcc_assert (!new_bb); | |
4198 | } | |
4199 | else | |
1b29f05e | 4200 | gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT); |
ebfd146a IR |
4201 | } |
4202 | ||
4203 | *initial_address = new_temp; | |
4204 | ||
920e8172 | 4205 | /* Create: p = (aggr_type *) initial_base */ |
17fc049f | 4206 | if (TREE_CODE (new_temp) != SSA_NAME |
920e8172 | 4207 | || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp))) |
a70d6342 | 4208 | { |
920e8172 RS |
4209 | vec_stmt = gimple_build_assign (aggr_ptr, |
4210 | fold_convert (aggr_ptr_type, new_temp)); | |
4211 | aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt); | |
17fc049f RG |
4212 | /* Copy the points-to information if it exists. */ |
4213 | if (DR_PTR_INFO (dr)) | |
920e8172 RS |
4214 | duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr)); |
4215 | gimple_assign_set_lhs (vec_stmt, aggr_ptr_init); | |
17fc049f RG |
4216 | if (pe) |
4217 | { | |
4218 | new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt); | |
4219 | gcc_assert (!new_bb); | |
4220 | } | |
4221 | else | |
1b29f05e | 4222 | gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT); |
a70d6342 IR |
4223 | } |
4224 | else | |
920e8172 | 4225 | aggr_ptr_init = new_temp; |
ebfd146a | 4226 | |
920e8172 | 4227 | /* (3) Handle the updating of the aggregate-pointer inside the loop. |
ff802fa1 IR |
4228 | This is needed when ONLY_INIT is false, and also when AT_LOOP is the |
4229 | inner-loop nested in LOOP (during outer-loop vectorization). */ | |
ebfd146a | 4230 | |
a70d6342 | 4231 | /* No update in loop is required. */ |
b8698a0f | 4232 | if (only_init && (!loop_vinfo || at_loop == loop)) |
920e8172 | 4233 | aptr = aggr_ptr_init; |
ebfd146a IR |
4234 | else |
4235 | { | |
920e8172 | 4236 | /* The step of the aggregate pointer is the type size. */ |
08940f33 | 4237 | tree iv_step = TYPE_SIZE_UNIT (aggr_type); |
b8698a0f | 4238 | /* One exception to the above is when the scalar step of the load in |
ebfd146a IR |
4239 | LOOP is zero. In this case the step here is also zero. */ |
4240 | if (*inv_p) | |
08940f33 RB |
4241 | iv_step = size_zero_node; |
4242 | else if (tree_int_cst_sgn (step) == -1) | |
4243 | iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step); | |
ebfd146a IR |
4244 | |
4245 | standard_iv_increment_position (loop, &incr_gsi, &insert_after); | |
4246 | ||
920e8172 | 4247 | create_iv (aggr_ptr_init, |
08940f33 | 4248 | fold_convert (aggr_ptr_type, iv_step), |
920e8172 | 4249 | aggr_ptr, loop, &incr_gsi, insert_after, |
ebfd146a IR |
4250 | &indx_before_incr, &indx_after_incr); |
4251 | incr = gsi_stmt (incr_gsi); | |
a70d6342 | 4252 | set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL)); |
ebfd146a IR |
4253 | |
4254 | /* Copy the points-to information if it exists. */ | |
4255 | if (DR_PTR_INFO (dr)) | |
4256 | { | |
4257 | duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr)); | |
4258 | duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr)); | |
4259 | } | |
ebfd146a IR |
4260 | if (ptr_incr) |
4261 | *ptr_incr = incr; | |
4262 | ||
920e8172 | 4263 | aptr = indx_before_incr; |
ebfd146a IR |
4264 | } |
4265 | ||
4266 | if (!nested_in_vect_loop || only_init) | |
920e8172 | 4267 | return aptr; |
ebfd146a IR |
4268 | |
4269 | ||
920e8172 | 4270 | /* (4) Handle the updating of the aggregate-pointer inside the inner-loop |
ff802fa1 | 4271 | nested in LOOP, if it exists. */
ebfd146a IR |
4272 | |
4273 | gcc_assert (nested_in_vect_loop); | |
4274 | if (!only_init) | |
4275 | { | |
4276 | standard_iv_increment_position (containing_loop, &incr_gsi, | |
4277 | &insert_after); | |
920e8172 | 4278 | create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr, |
ebfd146a IR |
4279 | containing_loop, &incr_gsi, insert_after, &indx_before_incr, |
4280 | &indx_after_incr); | |
4281 | incr = gsi_stmt (incr_gsi); | |
a70d6342 | 4282 | set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL)); |
ebfd146a IR |
4283 | |
4284 | /* Copy the points-to information if it exists. */ | |
4285 | if (DR_PTR_INFO (dr)) | |
4286 | { | |
4287 | duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr)); | |
4288 | duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr)); | |
4289 | } | |
ebfd146a IR |
4290 | if (ptr_incr) |
4291 | *ptr_incr = incr; | |
4292 | ||
b8698a0f | 4293 | return indx_before_incr; |
ebfd146a IR |
4294 | } |
4295 | else | |
4296 | gcc_unreachable (); | |
4297 | } | |
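/* Aside: a standalone sketch (not GCC internals) of the OFFSET vs.
   BYTE_OFFSET distinction documented above for vect_create_data_ref_ptr.
   OFFSET is counted in elements and therefore scaled by the element
   size; BYTE_OFFSET is applied to the address as-is.  All names below
   are hypothetical.  */

#include <stdio.h>

static short a[32];

int
main (void)
{
  long init = 4, offset = 2, byte_offset = 4;
  short *p_elem = &a[init + offset];       /* &a[init] plus OFFSET elements  */
  short *p_byte = (short *) ((char *) &a[init] + byte_offset); /* raw bytes  */
  /* With sizeof (short) == 2, both point at a[6].  */
  printf ("%d\n", p_elem == p_byte);       /* prints 1 */
  return 0;
}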
4298 | ||
4299 | ||
4300 | /* Function bump_vector_ptr | |
4301 | ||
4302 | Increment a pointer (to a vector type) by vector-size. If requested, | |
b8698a0f | 4303 | i.e. if PTR-INCR is given, then also connect the new increment stmt |
ebfd146a IR |
4304 | to the existing def-use update-chain of the pointer, by modifying |
4305 | the PTR_INCR as illustrated below: | |
4306 | ||
4307 | The pointer def-use update-chain before this function: | |
4308 | DATAREF_PTR = phi (p_0, p_2) | |
4309 | .... | |
b8698a0f | 4310 | PTR_INCR: p_2 = DATAREF_PTR + step |
ebfd146a IR |
4311 | |
4312 | The pointer def-use update-chain after this function: | |
4313 | DATAREF_PTR = phi (p_0, p_2) | |
4314 | .... | |
4315 | NEW_DATAREF_PTR = DATAREF_PTR + BUMP | |
4316 | .... | |
4317 | PTR_INCR: p_2 = NEW_DATAREF_PTR + step | |
4318 | ||
4319 | Input: | |
b8698a0f | 4320 | DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated |
ebfd146a | 4321 | in the loop. |
b8698a0f | 4322 | PTR_INCR - optional. The stmt that updates the pointer in each iteration of |
ebfd146a | 4323 | the loop. The increment amount across iterations is expected |
b8698a0f | 4324 | to be vector_size. |
ebfd146a IR |
4325 | BSI - location where the new update stmt is to be placed. |
4326 | STMT - the original scalar memory-access stmt that is being vectorized. | |
4327 | BUMP - optional. The offset by which to bump the pointer. If not given, | |
4328 | the offset is assumed to be vector_size. | |
4329 | ||
4330 | Output: Return NEW_DATAREF_PTR as illustrated above. | |
b8698a0f | 4331 | |
ebfd146a IR |
4332 | */ |
4333 | ||
4334 | tree | |
4335 | bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi, | |
4336 | gimple stmt, tree bump) | |
4337 | { | |
4338 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4339 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); | |
4340 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
ebfd146a | 4341 | tree update = TYPE_SIZE_UNIT (vectype); |
538dd0b7 | 4342 | gassign *incr_stmt; |
ebfd146a IR |
4343 | ssa_op_iter iter; |
4344 | use_operand_p use_p; | |
4345 | tree new_dataref_ptr; | |
4346 | ||
4347 | if (bump) | |
4348 | update = bump; | |
b8698a0f | 4349 | |
070ecdfd RG |
4350 | new_dataref_ptr = copy_ssa_name (dataref_ptr, NULL); |
4351 | incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, new_dataref_ptr, | |
ebfd146a | 4352 | dataref_ptr, update); |
ebfd146a IR |
4353 | vect_finish_stmt_generation (stmt, incr_stmt, gsi); |
4354 | ||
4355 | /* Copy the points-to information if it exists. */ | |
4356 | if (DR_PTR_INFO (dr)) | |
128aaeed RB |
4357 | { |
4358 | duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr)); | |
644ffefd | 4359 | mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr)); |
128aaeed | 4360 | } |
ebfd146a IR |
4361 | |
4362 | if (!ptr_incr) | |
4363 | return new_dataref_ptr; | |
4364 | ||
4365 | /* Update the vector-pointer's cross-iteration increment. */ | |
4366 | FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE) | |
4367 | { | |
4368 | tree use = USE_FROM_PTR (use_p); | |
4369 | ||
4370 | if (use == dataref_ptr) | |
4371 | SET_USE (use_p, new_dataref_ptr); | |
4372 | else | |
4373 | gcc_assert (tree_int_cst_compare (use, update) == 0); | |
4374 | } | |
4375 | ||
4376 | return new_dataref_ptr; | |
4377 | } | |
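/* Aside: a minimal sketch (not GCC internals) of what the generated
   "bump" computes: NEW_DATAREF_PTR = DATAREF_PTR + BUMP, where BUMP
   defaults to the vector size in bytes, TYPE_SIZE_UNIT (vectype).  The
   v8hi typedef matches the example type used in the comments above; the
   function name is hypothetical.  */

typedef short v8hi __attribute__ ((vector_size (16)));

static v8hi *
bump_vector_ptr_sketch (v8hi *dataref_ptr)
{
  /* Advance by one whole vector (16 bytes for v8hi).  */
  return (v8hi *) ((char *) dataref_ptr + sizeof (v8hi));
}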
4378 | ||
4379 | ||
4380 | /* Function vect_create_destination_var. | |
4381 | ||
4382 | Create a new temporary of type VECTYPE. */ | |
4383 | ||
4384 | tree | |
4385 | vect_create_destination_var (tree scalar_dest, tree vectype) | |
4386 | { | |
4387 | tree vec_dest; | |
451dabda RB |
4388 | const char *name; |
4389 | char *new_name; | |
ebfd146a IR |
4390 | tree type; |
4391 | enum vect_var_kind kind; | |
4392 | ||
4393 | kind = vectype ? vect_simple_var : vect_scalar_var; | |
4394 | type = vectype ? vectype : TREE_TYPE (scalar_dest); | |
4395 | ||
4396 | gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME); | |
4397 | ||
451dabda RB |
4398 | name = get_name (scalar_dest); |
4399 | if (name) | |
4400 | asprintf (&new_name, "%s_%u", name, SSA_NAME_VERSION (scalar_dest)); | |
4401 | else | |
4402 | asprintf (&new_name, "_%u", SSA_NAME_VERSION (scalar_dest)); | |
ebfd146a | 4403 | vec_dest = vect_get_new_vect_var (type, kind, new_name); |
451dabda | 4404 | free (new_name); |
ebfd146a IR |
4405 | |
4406 | return vec_dest; | |
4407 | } | |
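/* Aside: a standalone sketch (not GCC internals) of the naming scheme
   above: the new temporary is named "<scalar name>_<SSA version>", or
   just "_<version>" when the scalar destination is anonymous.  asprintf
   is the same GNU extension the function itself uses.  */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  const char *name = "x";       /* stands in for get_name (scalar_dest)     */
  unsigned int version = 7;     /* stands in for SSA_NAME_VERSION (...)     */
  char *new_name;

  if (name)
    asprintf (&new_name, "%s_%u", name, version);
  else
    asprintf (&new_name, "_%u", version);
  printf ("%s\n", new_name);    /* prints x_7 */
  free (new_name);
  return 0;
}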
4408 | ||
0d0293ac | 4409 | /* Function vect_grouped_store_supported. |
ebfd146a | 4410 | |
e2c83630 RH |
4411 | Returns TRUE if interleave high and interleave low permutations |
4412 | are supported, and FALSE otherwise. */ | |
ebfd146a IR |
4413 | |
4414 | bool | |
0d0293ac | 4415 | vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count) |
ebfd146a | 4416 | { |
ef4bddc2 | 4417 | machine_mode mode = TYPE_MODE (vectype); |
b8698a0f | 4418 | |
e1377713 ES |
4419 | /* vect_permute_store_chain requires the group size to be equal to 3 or |
4420 | be a power of two. */ | |
4421 | if (count != 3 && exact_log2 (count) == -1) | |
b602d918 | 4422 | { |
73fbfcad | 4423 | if (dump_enabled_p ()) |
78c60e3d | 4424 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
e1377713 ES |
4425 | "the size of the group of accesses" |
4426 | " is not a power of 2 or not eqaul to 3\n"); | |
b602d918 RS |
4427 | return false; |
4428 | } | |
4429 | ||
e2c83630 | 4430 | /* Check that the permutation is supported. */ |
3fcc1b55 JJ |
4431 | if (VECTOR_MODE_P (mode)) |
4432 | { | |
4433 | unsigned int i, nelt = GET_MODE_NUNITS (mode); | |
4434 | unsigned char *sel = XALLOCAVEC (unsigned char, nelt); | |
e1377713 ES |
4435 | |
4436 | if (count == 3) | |
3fcc1b55 | 4437 | { |
e1377713 ES |
4438 | unsigned int j0 = 0, j1 = 0, j2 = 0; |
4439 | unsigned int i, j; | |
4440 | ||
4441 | for (j = 0; j < 3; j++) | |
4442 | { | |
4443 | int nelt0 = ((3 - j) * nelt) % 3; | |
4444 | int nelt1 = ((3 - j) * nelt + 1) % 3; | |
4445 | int nelt2 = ((3 - j) * nelt + 2) % 3; | |
4446 | for (i = 0; i < nelt; i++) | |
4447 | { | |
4448 | if (3 * i + nelt0 < nelt) | |
4449 | sel[3 * i + nelt0] = j0++; | |
4450 | if (3 * i + nelt1 < nelt) | |
4451 | sel[3 * i + nelt1] = nelt + j1++; | |
4452 | if (3 * i + nelt2 < nelt) | |
4453 | sel[3 * i + nelt2] = 0; | |
4454 | } | |
4455 | if (!can_vec_perm_p (mode, false, sel)) | |
4456 | { | |
4457 | if (dump_enabled_p ()) | |
4458 | dump_printf (MSG_MISSED_OPTIMIZATION, | |
4459 | "permutaion op not supported by target.\n"); | |
4460 | return false; | |
4461 | } | |
4462 | ||
4463 | for (i = 0; i < nelt; i++) | |
4464 | { | |
4465 | if (3 * i + nelt0 < nelt) | |
4466 | sel[3 * i + nelt0] = 3 * i + nelt0; | |
4467 | if (3 * i + nelt1 < nelt) | |
4468 | sel[3 * i + nelt1] = 3 * i + nelt1; | |
4469 | if (3 * i + nelt2 < nelt) | |
4470 | sel[3 * i + nelt2] = nelt + j2++; | |
4471 | } | |
4472 | if (!can_vec_perm_p (mode, false, sel)) | |
4473 | { | |
4474 | if (dump_enabled_p ()) | |
4475 | dump_printf (MSG_MISSED_OPTIMIZATION, | |
4476 | "permutaion op not supported by target.\n"); | |
4477 | return false; | |
4478 | } | |
4479 | } | |
4480 | return true; | |
3fcc1b55 | 4481 | } |
e1377713 | 4482 | else |
3fcc1b55 | 4483 | { |
e1377713 ES |
4484 | /* If length is not equal to 3 then only a power of 2 is supported. */
4485 | gcc_assert (exact_log2 (count) != -1); | |
4486 | ||
4487 | for (i = 0; i < nelt / 2; i++) | |
4488 | { | |
4489 | sel[i * 2] = i; | |
4490 | sel[i * 2 + 1] = i + nelt; | |
4491 | } | |
4492 | if (can_vec_perm_p (mode, false, sel)) | |
4493 | { | |
4494 | for (i = 0; i < nelt; i++) | |
4495 | sel[i] += nelt / 2; | |
4496 | if (can_vec_perm_p (mode, false, sel)) | |
4497 | return true; | |
4498 | } | |
3fcc1b55 JJ |
4499 | } |
4500 | } | |
ebfd146a | 4501 | |
73fbfcad | 4502 | if (dump_enabled_p ()) |
78c60e3d | 4503 | dump_printf (MSG_MISSED_OPTIMIZATION, |
e1377713 | 4504 | "permutaion op not supported by target.\n"); |
a6b3dfde | 4505 | return false; |
ebfd146a IR |
4506 | } |
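/* Aside: a standalone sketch (not GCC code) printing the "low" selector
   that the count == 3 branch above constructs, for nelt == 8.  The loop
   is transcribed from vect_grouped_store_supported; the "high" selector
   is built analogously by the second loop.  Slots left as 0 are the
   don't-care positions written as '*' in the comments further down.  */

#include <stdio.h>

int
main (void)
{
  unsigned int nelt = 8, sel[8];
  unsigned int i, j, j0 = 0, j1 = 0;

  for (j = 0; j < 3; j++)
    {
      int nelt0 = ((3 - j) * nelt) % 3;
      int nelt1 = ((3 - j) * nelt + 1) % 3;
      int nelt2 = ((3 - j) * nelt + 2) % 3;

      for (i = 0; i < nelt; i++)
        {
          if (3 * i + nelt0 < nelt)
            sel[3 * i + nelt0] = j0++;          /* from the first vector  */
          if (3 * i + nelt1 < nelt)
            sel[3 * i + nelt1] = nelt + j1++;   /* from the second vector */
          if (3 * i + nelt2 < nelt)
            sel[3 * i + nelt2] = 0;             /* don't-care slot        */
        }
      printf ("j = %u:", j);
      for (i = 0; i < nelt; i++)
        printf (" %u", sel[i]);
      printf ("\n");
    }
  return 0;
}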
4507 | ||
4508 | ||
272c6793 RS |
4509 | /* Return TRUE if vec_store_lanes is available for COUNT vectors of |
4510 | type VECTYPE. */ | |
4511 | ||
4512 | bool | |
4513 | vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count) | |
4514 | { | |
4515 | return vect_lanes_optab_supported_p ("vec_store_lanes", | |
4516 | vec_store_lanes_optab, | |
4517 | vectype, count); | |
4518 | } | |
4519 | ||
4520 | ||
ebfd146a IR |
4521 | /* Function vect_permute_store_chain. |
4522 | ||
4523 | Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be | |
e1377713 ES |
4524 | a power of 2 or equal to 3, generate interleave_high/low stmts to reorder |
4525 | the data correctly for the stores. Return the final references for stores | |
4526 | in RESULT_CHAIN. | |
ebfd146a IR |
4527 | |
4528 | E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8. | |
ff802fa1 IR |
4529 | The input is 4 vectors each containing 8 elements. We assign a number to |
4530 | each element, the input sequence is: | |
ebfd146a IR |
4531 | |
4532 | 1st vec: 0 1 2 3 4 5 6 7 | |
4533 | 2nd vec: 8 9 10 11 12 13 14 15 | |
b8698a0f | 4534 | 3rd vec: 16 17 18 19 20 21 22 23 |
ebfd146a IR |
4535 | 4th vec: 24 25 26 27 28 29 30 31 |
4536 | ||
4537 | The output sequence should be: | |
4538 | ||
4539 | 1st vec: 0 8 16 24 1 9 17 25 | |
4540 | 2nd vec: 2 10 18 26 3 11 19 27 | |
4541 | 3rd vec: 4 12 20 28 5 13 21 29
4542 | 4th vec: 6 14 22 30 7 15 23 31 | |
4543 | ||
4544 | i.e., we interleave the contents of the four vectors in their order. | |
4545 | ||
ff802fa1 | 4546 | We use interleave_high/low instructions to create such output. The input of |
ebfd146a | 4547 | each interleave_high/low operation is two vectors: |
b8698a0f L |
4548 | 1st vec 2nd vec |
4549 | 0 1 2 3 4 5 6 7 | |
4550 | the even elements of the result vector are obtained left-to-right from the | |
ff802fa1 | 4551 | high/low elements of the first vector. The odd elements of the result are |
ebfd146a IR |
4552 | obtained left-to-right from the high/low elements of the second vector. |
4553 | The output of interleave_high will be: 0 4 1 5 | |
4554 | and of interleave_low: 2 6 3 7 | |
4555 | ||
b8698a0f | 4556 | |
ff802fa1 | 4557 | The permutation is done in log LENGTH stages. In each stage interleave_high |
b8698a0f L |
4558 | and interleave_low stmts are created for each pair of vectors in DR_CHAIN, |
4559 | where the first argument is taken from the first half of DR_CHAIN and the | |
4560 | second argument from its second half.
4561 | In our example, | |
ebfd146a IR |
4562 | |
4563 | I1: interleave_high (1st vec, 3rd vec) | |
4564 | I2: interleave_low (1st vec, 3rd vec) | |
4565 | I3: interleave_high (2nd vec, 4th vec) | |
4566 | I4: interleave_low (2nd vec, 4th vec) | |
4567 | ||
4568 | The output for the first stage is: | |
4569 | ||
4570 | I1: 0 16 1 17 2 18 3 19 | |
4571 | I2: 4 20 5 21 6 22 7 23 | |
4572 | I3: 8 24 9 25 10 26 11 27 | |
4573 | I4: 12 28 13 29 14 30 15 31 | |
4574 | ||
4575 | The output of the second stage, i.e. the final result is: | |
4576 | ||
4577 | I1: 0 8 16 24 1 9 17 25 | |
4578 | I2: 2 10 18 26 3 11 19 27 | |
4579 | I3: 4 12 20 28 5 13 21 29
4580 | I4: 6 14 22 30 7 15 23 31. */ | |
b8698a0f | 4581 | |
b602d918 | 4582 | void |
9771b263 | 4583 | vect_permute_store_chain (vec<tree> dr_chain, |
b8698a0f | 4584 | unsigned int length, |
ebfd146a IR |
4585 | gimple stmt, |
4586 | gimple_stmt_iterator *gsi, | |
9771b263 | 4587 | vec<tree> *result_chain) |
ebfd146a | 4588 | { |
83d5977e | 4589 | tree vect1, vect2, high, low; |
ebfd146a IR |
4590 | gimple perm_stmt; |
4591 | tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); | |
3fcc1b55 | 4592 | tree perm_mask_low, perm_mask_high; |
e1377713 ES |
4593 | tree data_ref; |
4594 | tree perm3_mask_low, perm3_mask_high; | |
4595 | unsigned int i, n, log_length = exact_log2 (length); | |
e2c83630 | 4596 | unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype); |
3fcc1b55 | 4597 | unsigned char *sel = XALLOCAVEC (unsigned char, nelt); |
b8698a0f | 4598 | |
b6b9227d JJ |
4599 | result_chain->quick_grow (length); |
4600 | memcpy (result_chain->address (), dr_chain.address (), | |
4601 | length * sizeof (tree)); | |
ebfd146a | 4602 | |
e1377713 | 4603 | if (length == 3) |
3fcc1b55 | 4604 | { |
e1377713 | 4605 | unsigned int j0 = 0, j1 = 0, j2 = 0; |
e2c83630 | 4606 | |
e1377713 ES |
4607 | for (j = 0; j < 3; j++) |
4608 | { | |
4609 | int nelt0 = ((3 - j) * nelt) % 3; | |
4610 | int nelt1 = ((3 - j) * nelt + 1) % 3; | |
4611 | int nelt2 = ((3 - j) * nelt + 2) % 3; | |
3fcc1b55 | 4612 | |
e1377713 ES |
4613 | for (i = 0; i < nelt; i++) |
4614 | { | |
4615 | if (3 * i + nelt0 < nelt) | |
4616 | sel[3 * i + nelt0] = j0++; | |
4617 | if (3 * i + nelt1 < nelt) | |
4618 | sel[3 * i + nelt1] = nelt + j1++; | |
4619 | if (3 * i + nelt2 < nelt) | |
4620 | sel[3 * i + nelt2] = 0; | |
4621 | } | |
557be5a8 | 4622 | perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel); |
e1377713 ES |
4623 | |
4624 | for (i = 0; i < nelt; i++) | |
4625 | { | |
4626 | if (3 * i + nelt0 < nelt) | |
4627 | sel[3 * i + nelt0] = 3 * i + nelt0; | |
4628 | if (3 * i + nelt1 < nelt) | |
4629 | sel[3 * i + nelt1] = 3 * i + nelt1; | |
4630 | if (3 * i + nelt2 < nelt) | |
4631 | sel[3 * i + nelt2] = nelt + j2++; | |
4632 | } | |
557be5a8 | 4633 | perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel); |
e1377713 ES |
4634 | |
4635 | vect1 = dr_chain[0]; | |
4636 | vect2 = dr_chain[1]; | |
ebfd146a IR |
4637 | |
4638 | /* Create interleaving stmt: | |
e1377713 ES |
4639 | low = VEC_PERM_EXPR <vect1, vect2, |
4640 | {j, nelt, *, j + 1, nelt + j + 1, *, | |
4641 | j + 2, nelt + j + 2, *, ...}> */ | |
4642 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low"); | |
4643 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
4644 | vect1, vect2, | |
4645 | perm3_mask_low); | |
ebfd146a | 4646 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
ebfd146a | 4647 | |
e1377713 ES |
4648 | vect1 = data_ref; |
4649 | vect2 = dr_chain[2]; | |
ebfd146a | 4650 | /* Create interleaving stmt: |
e1377713 ES |
4651 | low = VEC_PERM_EXPR <vect1, vect2, |
4652 | {0, 1, nelt + j, 3, 4, nelt + j + 1, | |
4653 | 6, 7, nelt + j + 2, ...}> */ | |
4654 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high"); | |
4655 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
4656 | vect1, vect2, | |
4657 | perm3_mask_high); | |
ebfd146a | 4658 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
e1377713 | 4659 | (*result_chain)[j] = data_ref; |
ebfd146a | 4660 | } |
e1377713 ES |
4661 | } |
4662 | else | |
4663 | { | |
4664 | /* If length is not equal to 3 then only a power of 2 is supported. */
4665 | gcc_assert (exact_log2 (length) != -1); | |
4666 | ||
4667 | for (i = 0, n = nelt / 2; i < n; i++) | |
4668 | { | |
4669 | sel[i * 2] = i; | |
4670 | sel[i * 2 + 1] = i + nelt; | |
4671 | } | |
557be5a8 | 4672 | perm_mask_high = vect_gen_perm_mask_checked (vectype, sel); |
e1377713 ES |
4673 | |
4674 | for (i = 0; i < nelt; i++) | |
4675 | sel[i] += nelt / 2; | |
557be5a8 | 4676 | perm_mask_low = vect_gen_perm_mask_checked (vectype, sel); |
e1377713 ES |
4677 | |
4678 | for (i = 0, n = log_length; i < n; i++) | |
4679 | { | |
4680 | for (j = 0; j < length/2; j++) | |
4681 | { | |
4682 | vect1 = dr_chain[j]; | |
4683 | vect2 = dr_chain[j+length/2]; | |
4684 | ||
4685 | /* Create interleaving stmt: | |
4686 | high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, | |
4687 | ...}> */ | |
4688 | high = make_temp_ssa_name (vectype, NULL, "vect_inter_high"); | |
4689 | perm_stmt | |
4690 | = gimple_build_assign_with_ops (VEC_PERM_EXPR, high, | |
4691 | vect1, vect2, perm_mask_high); | |
4692 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
4693 | (*result_chain)[2*j] = high; | |
4694 | ||
4695 | /* Create interleaving stmt: | |
4696 | low = VEC_PERM_EXPR <vect1, vect2, | |
4697 | {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1, | |
4698 | ...}> */ | |
4699 | low = make_temp_ssa_name (vectype, NULL, "vect_inter_low"); | |
4700 | perm_stmt | |
4701 | = gimple_build_assign_with_ops (VEC_PERM_EXPR, low, | |
4702 | vect1, vect2, perm_mask_low); | |
4703 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
4704 | (*result_chain)[2*j+1] = low; | |
4705 | } | |
4706 | memcpy (dr_chain.address (), result_chain->address (), | |
4707 | length * sizeof (tree)); | |
4708 | } | |
ebfd146a | 4709 | } |
ebfd146a IR |
4710 | } |
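/* Aside: a standalone simulation (an illustrative sketch, not GCC code)
   of the two-stage interleave documented above for LENGTH == 4 and
   8 elements per vector.  interleave () models one interleave_high /
   interleave_low pair; running the program prints the final sequence
   shown in the comment.  */

#include <stdio.h>
#include <string.h>

#define NELT 8
#define LEN 4

static void
interleave (const int *v1, const int *v2, int *high, int *low)
{
  /* high takes the first halves of v1/v2, low the second halves.  */
  for (int i = 0; i < NELT / 2; i++)
    {
      high[2 * i] = v1[i];
      high[2 * i + 1] = v2[i];
      low[2 * i] = v1[NELT / 2 + i];
      low[2 * i + 1] = v2[NELT / 2 + i];
    }
}

int
main (void)
{
  int chain[LEN][NELT], result[LEN][NELT];

  for (int i = 0; i < LEN * NELT; i++)
    chain[i / NELT][i % NELT] = i;

  for (int stage = 0; stage < 2; stage++)      /* log2 (LEN) stages */
    {
      /* Pair vector j with vector j + LEN/2, as vect_permute_store_chain
         does, storing high at index 2*j and low at 2*j + 1.  */
      for (int j = 0; j < LEN / 2; j++)
        interleave (chain[j], chain[j + LEN / 2],
                    result[2 * j], result[2 * j + 1]);
      memcpy (chain, result, sizeof chain);
    }

  for (int j = 0; j < LEN; j++)
    {
      for (int i = 0; i < NELT; i++)
        printf ("%3d", chain[j][i]);
      printf ("\n");
    }
  return 0;
}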
4711 | ||
4712 | /* Function vect_setup_realignment | |
b8698a0f | 4713 | |
ebfd146a IR |
4714 | This function is called when vectorizing an unaligned load using |
4715 | the dr_explicit_realign[_optimized] scheme. | |
4716 | This function generates the following code at the loop prolog: | |
4717 | ||
4718 | p = initial_addr; | |
4719 | x msq_init = *(floor(p)); # prolog load | |
b8698a0f | 4720 | realignment_token = call target_builtin; |
ebfd146a IR |
4721 | loop: |
4722 | x msq = phi (msq_init, ---) | |
4723 | ||
b8698a0f | 4724 | The stmts marked with x are generated only for the case of |
ebfd146a IR |
4725 | dr_explicit_realign_optimized. |
4726 | ||
b8698a0f | 4727 | The code above sets up a new (vector) pointer, pointing to the first |
ebfd146a IR |
4728 | location accessed by STMT, and a "floor-aligned" load using that pointer. |
4729 | It also generates code to compute the "realignment-token" (if the relevant | |
4730 | target hook was defined), and creates a phi-node at the loop-header bb | |
4731 | whose arguments are the result of the prolog-load (created by this | |
4732 | function) and the result of a load that takes place in the loop (to be | |
4733 | created by the caller to this function). | |
4734 | ||
4735 | For the case of dr_explicit_realign_optimized: | |
b8698a0f | 4736 | The caller to this function uses the phi-result (msq) to create the |
ebfd146a IR |
4737 | realignment code inside the loop, and sets up the missing phi argument, |
4738 | as follows: | |
b8698a0f | 4739 | loop: |
ebfd146a IR |
4740 | msq = phi (msq_init, lsq) |
4741 | lsq = *(floor(p')); # load in loop | |
4742 | result = realign_load (msq, lsq, realignment_token); | |
4743 | ||
4744 | For the case of dr_explicit_realign: | |
4745 | loop: | |
4746 | msq = *(floor(p)); # load in loop | |
4747 | p' = p + (VS-1); | |
4748 | lsq = *(floor(p')); # load in loop | |
4749 | result = realign_load (msq, lsq, realignment_token); | |
4750 | ||
4751 | Input: | |
4752 | STMT - (scalar) load stmt to be vectorized. This load accesses | |
4753 | a memory location that may be unaligned. | |
4754 | BSI - place where new code is to be inserted. | |
4755 | ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes | |
b8698a0f L |
4756 | is used. |
4757 | ||
ebfd146a IR |
4758 | Output: |
4759 | REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load | |
4760 | target hook, if defined. | |
4761 | Return value - the result of the loop-header phi node. */ | |
4762 | ||
4763 | tree | |
4764 | vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi, | |
4765 | tree *realignment_token, | |
4766 | enum dr_alignment_support alignment_support_scheme, | |
4767 | tree init_addr, | |
4768 | struct loop **at_loop) | |
4769 | { | |
4770 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
4771 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
4772 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
20ede5c6 | 4773 | struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info); |
69f11a13 IR |
4774 | struct loop *loop = NULL; |
4775 | edge pe = NULL; | |
ebfd146a IR |
4776 | tree scalar_dest = gimple_assign_lhs (stmt); |
4777 | tree vec_dest; | |
4778 | gimple inc; | |
4779 | tree ptr; | |
4780 | tree data_ref; | |
ebfd146a IR |
4781 | basic_block new_bb; |
4782 | tree msq_init = NULL_TREE; | |
4783 | tree new_temp; | |
538dd0b7 | 4784 | gphi *phi_stmt; |
ebfd146a IR |
4785 | tree msq = NULL_TREE; |
4786 | gimple_seq stmts = NULL; | |
4787 | bool inv_p; | |
4788 | bool compute_in_loop = false; | |
69f11a13 | 4789 | bool nested_in_vect_loop = false; |
ebfd146a | 4790 | struct loop *containing_loop = (gimple_bb (stmt))->loop_father; |
69f11a13 IR |
4791 | struct loop *loop_for_initial_load = NULL; |
4792 | ||
4793 | if (loop_vinfo) | |
4794 | { | |
4795 | loop = LOOP_VINFO_LOOP (loop_vinfo); | |
4796 | nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt); | |
4797 | } | |
ebfd146a IR |
4798 | |
4799 | gcc_assert (alignment_support_scheme == dr_explicit_realign | |
4800 | || alignment_support_scheme == dr_explicit_realign_optimized); | |
4801 | ||
4802 | /* We need to generate three things: | |
4803 | 1. the misalignment computation | |
4804 | 2. the extra vector load (for the optimized realignment scheme). | |
4805 | 3. the phi node for the two vectors from which the realignment is | |
ff802fa1 | 4806 | done (for the optimized realignment scheme). */ |
ebfd146a IR |
4807 | |
4808 | /* 1. Determine where to generate the misalignment computation. | |
4809 | ||
4810 | If INIT_ADDR is NULL_TREE, this indicates that the misalignment | |
4811 | calculation will be generated by this function, outside the loop (in the | |
4812 | preheader). Otherwise, INIT_ADDR has already been computed for us by the
4813 | caller, inside the loop. | |
4814 | ||
4815 | Background: If the misalignment remains fixed throughout the iterations of | |
4816 | the loop, then both realignment schemes are applicable, and also the | |
4817 | misalignment computation can be done outside LOOP. This is because we are | |
4818 | vectorizing LOOP, and so the memory accesses in LOOP advance in steps that | |
4819 | are a multiple of VS (the Vector Size), and therefore the misalignment in | |
4820 | different vectorized LOOP iterations is always the same. | |
4821 | The problem arises only if the memory access is in an inner-loop nested | |
4822 | inside LOOP, which is now being vectorized using outer-loop vectorization. | |
4823 | This is the only case when the misalignment of the memory access may not | |
4824 | remain fixed throughout the iterations of the inner-loop (as explained in | |
4825 | detail in vect_supportable_dr_alignment). In this case, not only is the | |
4826 | optimized realignment scheme not applicable, but also the misalignment | |
4827 | computation (and generation of the realignment token that is passed to | |
4828 | REALIGN_LOAD) have to be done inside the loop. | |
4829 | ||
4830 | In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode | |
4831 | or not, which in turn determines if the misalignment is computed inside | |
4832 | the inner-loop, or outside LOOP. */ | |
4833 | ||
69f11a13 | 4834 | if (init_addr != NULL_TREE || !loop_vinfo) |
ebfd146a IR |
4835 | { |
4836 | compute_in_loop = true; | |
4837 | gcc_assert (alignment_support_scheme == dr_explicit_realign); | |
4838 | } | |
4839 | ||
4840 | ||
4841 | /* 2. Determine where to generate the extra vector load. | |
4842 | ||
4843 | For the optimized realignment scheme, instead of generating two vector | |
4844 | loads in each iteration, we generate a single extra vector load in the | |
4845 | preheader of the loop, and in each iteration reuse the result of the | |
4846 | vector load from the previous iteration. In case the memory access is in | |
4847 | an inner-loop nested inside LOOP, which is now being vectorized using | |
4848 | outer-loop vectorization, we need to determine whether this initial vector | |
4849 | load should be generated at the preheader of the inner-loop, or can be | |
4850 | generated at the preheader of LOOP. If the memory access has no evolution | |
4851 | in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has | |
4852 | to be generated inside LOOP (in the preheader of the inner-loop). */ | |
4853 | ||
4854 | if (nested_in_vect_loop) | |
4855 | { | |
4856 | tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info); | |
4857 | bool invariant_in_outerloop = | |
4858 | (tree_int_cst_compare (outerloop_step, size_zero_node) == 0); | |
4859 | loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner); | |
4860 | } | |
4861 | else | |
4862 | loop_for_initial_load = loop; | |
4863 | if (at_loop) | |
4864 | *at_loop = loop_for_initial_load; | |
4865 | ||
69f11a13 IR |
4866 | if (loop_for_initial_load) |
4867 | pe = loop_preheader_edge (loop_for_initial_load); | |
4868 | ||
ebfd146a IR |
4869 | /* 3. For the case of the optimized realignment, create the first vector |
4870 | load at the loop preheader. */ | |
4871 | ||
4872 | if (alignment_support_scheme == dr_explicit_realign_optimized) | |
4873 | { | |
4874 | /* Create msq_init = *(floor(p1)) in the loop preheader */ | |
538dd0b7 | 4875 | gassign *new_stmt; |
ebfd146a IR |
4876 | |
4877 | gcc_assert (!compute_in_loop); | |
ebfd146a | 4878 | vec_dest = vect_create_destination_var (scalar_dest, vectype); |
920e8172 RS |
4879 | ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load, |
4880 | NULL_TREE, &init_addr, NULL, &inc, | |
4881 | true, &inv_p); | |
070ecdfd | 4882 | new_temp = copy_ssa_name (ptr, NULL); |
75421dcd | 4883 | new_stmt = gimple_build_assign_with_ops |
070ecdfd | 4884 | (BIT_AND_EXPR, new_temp, ptr, |
75421dcd RG |
4885 | build_int_cst (TREE_TYPE (ptr), |
4886 | -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype))); | |
75421dcd RG |
4887 | new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); |
4888 | gcc_assert (!new_bb); | |
20ede5c6 RG |
4889 | data_ref |
4890 | = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp, | |
4891 | build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0)); | |
ebfd146a IR |
4892 | new_stmt = gimple_build_assign (vec_dest, data_ref); |
4893 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4894 | gimple_assign_set_lhs (new_stmt, new_temp); | |
69f11a13 IR |
4895 | if (pe) |
4896 | { | |
4897 | new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); | |
4898 | gcc_assert (!new_bb); | |
4899 | } | |
4900 | else | |
4901 | gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); | |
4902 | ||
ebfd146a IR |
4903 | msq_init = gimple_assign_lhs (new_stmt); |
4904 | } | |
4905 | ||
4906 | /* 4. Create realignment token using a target builtin, if available. | |
4907 | It is done either inside the containing loop, or before LOOP (as | |
4908 | determined above). */ | |
4909 | ||
4910 | if (targetm.vectorize.builtin_mask_for_load) | |
4911 | { | |
538dd0b7 | 4912 | gcall *new_stmt; |
ebfd146a IR |
4913 | tree builtin_decl; |
4914 | ||
4915 | /* Compute INIT_ADDR - the initial address accessed by this memref. */
69f11a13 | 4916 | if (!init_addr) |
ebfd146a IR |
4917 | { |
4918 | /* Generate the INIT_ADDR computation outside LOOP. */ | |
4919 | init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts, | |
4920 | NULL_TREE, loop); | |
69f11a13 IR |
4921 | if (loop) |
4922 | { | |
4923 | pe = loop_preheader_edge (loop); | |
4924 | new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); | |
4925 | gcc_assert (!new_bb); | |
4926 | } | |
4927 | else | |
4928 | gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); | |
ebfd146a IR |
4929 | } |
4930 | ||
4931 | builtin_decl = targetm.vectorize.builtin_mask_for_load (); | |
4932 | new_stmt = gimple_build_call (builtin_decl, 1, init_addr); | |
4933 | vec_dest = | |
4934 | vect_create_destination_var (scalar_dest, | |
4935 | gimple_call_return_type (new_stmt)); | |
4936 | new_temp = make_ssa_name (vec_dest, new_stmt); | |
4937 | gimple_call_set_lhs (new_stmt, new_temp); | |
4938 | ||
4939 | if (compute_in_loop) | |
4940 | gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); | |
4941 | else | |
4942 | { | |
4943 | /* Generate the misalignment computation outside LOOP. */ | |
4944 | pe = loop_preheader_edge (loop); | |
4945 | new_bb = gsi_insert_on_edge_immediate (pe, new_stmt); | |
4946 | gcc_assert (!new_bb); | |
4947 | } | |
4948 | ||
4949 | *realignment_token = gimple_call_lhs (new_stmt); | |
4950 | ||
4951 | /* The result of the CALL_EXPR to this builtin is determined from | |
4952 | the value of the parameter and no global variables are touched | |
4953 | which makes the builtin a "const" function. Requiring the | |
4954 | builtin to have the "const" attribute makes it unnecessary | |
4955 | to call mark_call_clobbered. */ | |
4956 | gcc_assert (TREE_READONLY (builtin_decl)); | |
4957 | } | |
4958 | ||
4959 | if (alignment_support_scheme == dr_explicit_realign) | |
4960 | return msq; | |
4961 | ||
4962 | gcc_assert (!compute_in_loop); | |
4963 | gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized); | |
4964 | ||
4965 | ||
4966 | /* 5. Create msq = phi <msq_init, lsq> in loop */ | |
4967 | ||
4968 | pe = loop_preheader_edge (containing_loop); | |
4969 | vec_dest = vect_create_destination_var (scalar_dest, vectype); | |
4970 | msq = make_ssa_name (vec_dest, NULL); | |
4971 | phi_stmt = create_phi_node (msq, containing_loop->header); | |
9e227d60 | 4972 | add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION); |
ebfd146a IR |
4973 | |
4974 | return msq; | |
4975 | } | |
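/* Aside: a scalar sketch (under assumed semantics, not GCC code) of the
   dr_explicit_realign idea above: two aligned loads that straddle an
   unaligned address, merged using the misalignment as the "realignment
   token".  VS and all names below are illustrative.  */

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VS 16                           /* vector size in bytes */

static void
realign_load (const uint8_t *p, uint8_t out[VS])
{
  uintptr_t addr = (uintptr_t) p;
  const uint8_t *msq_p = (const uint8_t *) (addr & ~(uintptr_t) (VS - 1));
  const uint8_t *lsq_p = (const uint8_t *) ((addr + VS - 1)
                                            & ~(uintptr_t) (VS - 1));
  unsigned shift = addr & (VS - 1);     /* the realignment token */
  uint8_t msq[VS], lsq[VS];

  memcpy (msq, msq_p, VS);              /* msq = *(floor (p))          */
  memcpy (lsq, lsq_p, VS);              /* lsq = *(floor (p + VS - 1)) */
  for (unsigned i = 0; i < VS; i++)     /* realign_load (msq, lsq, token) */
    out[i] = shift + i < VS ? msq[shift + i] : lsq[shift + i - VS];
}

int
main (void)
{
  _Alignas (VS) uint8_t buf[4 * VS];
  uint8_t out[VS];

  for (unsigned i = 0; i < sizeof buf; i++)
    buf[i] = i;
  realign_load (buf + 5, out);          /* misaligned by 5 bytes */
  for (unsigned i = 0; i < VS; i++)
    printf ("%u ", out[i]);             /* prints 5 6 ... 20 */
  printf ("\n");
  return 0;
}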
4976 | ||
4977 | ||
0d0293ac | 4978 | /* Function vect_grouped_load_supported. |
ebfd146a | 4979 | |
e2c83630 | 4980 | Returns TRUE if even and odd permutations are supported, |
ebfd146a IR |
4981 | and FALSE otherwise. */ |
4982 | ||
4983 | bool | |
0d0293ac | 4984 | vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count) |
ebfd146a | 4985 | { |
ef4bddc2 | 4986 | machine_mode mode = TYPE_MODE (vectype); |
ebfd146a | 4987 | |
2c23db6d ES |
4988 | /* vect_permute_load_chain requires the group size to be equal to 3 or |
4989 | be a power of two. */ | |
4990 | if (count != 3 && exact_log2 (count) == -1) | |
b602d918 | 4991 | { |
73fbfcad | 4992 | if (dump_enabled_p ()) |
78c60e3d | 4993 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2c23db6d ES |
4994 | "the size of the group of accesses" |
4995 | " is not a power of 2 or not equal to 3\n"); | |
b602d918 RS |
4996 | return false; |
4997 | } | |
4998 | ||
e2c83630 RH |
4999 | /* Check that the permutation is supported. */ |
5000 | if (VECTOR_MODE_P (mode)) | |
5001 | { | |
2c23db6d | 5002 | unsigned int i, j, nelt = GET_MODE_NUNITS (mode); |
e2c83630 | 5003 | unsigned char *sel = XALLOCAVEC (unsigned char, nelt); |
ebfd146a | 5004 | |
2c23db6d | 5005 | if (count == 3) |
e2c83630 | 5006 | { |
2c23db6d ES |
5007 | unsigned int k; |
5008 | for (k = 0; k < 3; k++) | |
5009 | { | |
5010 | for (i = 0; i < nelt; i++) | |
5011 | if (3 * i + k < 2 * nelt) | |
5012 | sel[i] = 3 * i + k; | |
5013 | else | |
5014 | sel[i] = 0; | |
5015 | if (!can_vec_perm_p (mode, false, sel)) | |
5016 | { | |
5017 | if (dump_enabled_p ()) | |
5018 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5019 | "shuffle of 3 loads is not supported by" | |
5020 | " target\n"); | |
5021 | return false; | |
5022 | } | |
5023 | for (i = 0, j = 0; i < nelt; i++) | |
5024 | if (3 * i + k < 2 * nelt) | |
5025 | sel[i] = i; | |
5026 | else | |
5027 | sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++); | |
5028 | if (!can_vec_perm_p (mode, false, sel)) | |
5029 | { | |
5030 | if (dump_enabled_p ()) | |
5031 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5032 | "shuffle of 3 loads is not supported by" | |
5033 | " target\n"); | |
5034 | return false; | |
5035 | } | |
5036 | } | |
5037 | return true; | |
5038 | } | |
5039 | else | |
5040 | { | |
5041 | /* If length is not equal to 3 then only a power of 2 is supported. */
5042 | gcc_assert (exact_log2 (count) != -1); | |
e2c83630 | 5043 | for (i = 0; i < nelt; i++) |
2c23db6d | 5044 | sel[i] = i * 2; |
e2c83630 | 5045 | if (can_vec_perm_p (mode, false, sel)) |
2c23db6d ES |
5046 | { |
5047 | for (i = 0; i < nelt; i++) | |
5048 | sel[i] = i * 2 + 1; | |
5049 | if (can_vec_perm_p (mode, false, sel)) | |
5050 | return true; | |
5051 | } | |
5052 | } | |
e2c83630 | 5053 | } |
ebfd146a | 5054 | |
73fbfcad | 5055 | if (dump_enabled_p ()) |
78c60e3d | 5056 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, |
2c23db6d | 5057 | "extract even/odd not supported by target\n"); |
a6b3dfde | 5058 | return false; |
ebfd146a IR |
5059 | } |
5060 | ||
272c6793 RS |
5061 | /* Return TRUE if vec_load_lanes is available for COUNT vectors of |
5062 | type VECTYPE. */ | |
5063 | ||
5064 | bool | |
5065 | vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count) | |
5066 | { | |
5067 | return vect_lanes_optab_supported_p ("vec_load_lanes", | |
5068 | vec_load_lanes_optab, | |
5069 | vectype, count); | |
5070 | } | |
ebfd146a IR |
5071 | |
5072 | /* Function vect_permute_load_chain. | |
5073 | ||
5074 | Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be | |
2c23db6d ES |
5075 | a power of 2 or equal to 3, generate extract_even/odd stmts to reorder |
5076 | the input data correctly. Return the final references for loads in | |
5077 | RESULT_CHAIN. | |
ebfd146a IR |
5078 | |
5079 | E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8. | |
5080 | The input is 4 vectors each containing 8 elements. We assign a number to each | |
5081 | element, the input sequence is: | |
5082 | ||
5083 | 1st vec: 0 1 2 3 4 5 6 7 | |
5084 | 2nd vec: 8 9 10 11 12 13 14 15 | |
b8698a0f | 5085 | 3rd vec: 16 17 18 19 20 21 22 23 |
ebfd146a IR |
5086 | 4th vec: 24 25 26 27 28 29 30 31 |
5087 | ||
5088 | The output sequence should be: | |
5089 | ||
5090 | 1st vec: 0 4 8 12 16 20 24 28 | |
5091 | 2nd vec: 1 5 9 13 17 21 25 29 | |
b8698a0f | 5092 | 3rd vec: 2 6 10 14 18 22 26 30 |
ebfd146a IR |
5093 | 4th vec: 3 7 11 15 19 23 27 31 |
5094 | ||
5095 | i.e., the first output vector should contain the first elements of each | |
5096 | interleaving group, etc. | |
5097 | ||
ff802fa1 IR |
5098 | We use extract_even/odd instructions to create such output. The input of |
5099 | each extract_even/odd operation is two vectors | |
b8698a0f L |
5100 | 1st vec 2nd vec |
5101 | 0 1 2 3 4 5 6 7 | |
ebfd146a | 5102 | |
ff802fa1 | 5103 | and the output is the vector of extracted even/odd elements. The output of |
ebfd146a IR |
5104 | extract_even will be: 0 2 4 6 |
5105 | and of extract_odd: 1 3 5 7 | |
5106 | ||
b8698a0f | 5107 | |
ff802fa1 IR |
5108 | The permutation is done in log LENGTH stages. In each stage extract_even |
5109 | and extract_odd stmts are created for each pair of vectors in DR_CHAIN in | |
5110 | their order. In our example, | |
ebfd146a IR |
5111 | |
5112 | E1: extract_even (1st vec, 2nd vec) | |
5113 | E2: extract_odd (1st vec, 2nd vec) | |
5114 | E3: extract_even (3rd vec, 4th vec) | |
5115 | E4: extract_odd (3rd vec, 4th vec) | |
5116 | ||
5117 | The output for the first stage will be: | |
5118 | ||
5119 | E1: 0 2 4 6 8 10 12 14 | |
5120 | E2: 1 3 5 7 9 11 13 15 | |
b8698a0f | 5121 | E3: 16 18 20 22 24 26 28 30 |
ebfd146a IR |
5122 | E4: 17 19 21 23 25 27 29 31 |
5123 | ||
5124 | In order to proceed and create the correct sequence for the next stage (or | |
b8698a0f L |
5125 | for the correct output, if the second stage is the last one, as in our |
5126 | example), we first put the output of extract_even operation and then the | |
ebfd146a IR |
5127 | output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN). |
5128 | The input for the second stage is: | |
5129 | ||
5130 | 1st vec (E1): 0 2 4 6 8 10 12 14 | |
b8698a0f L |
5131 | 2nd vec (E3): 16 18 20 22 24 26 28 30 |
5132 | 3rd vec (E2): 1 3 5 7 9 11 13 15 | |
ebfd146a IR |
5133 | 4th vec (E4): 17 19 21 23 25 27 29 31 |
5134 | ||
5135 | The output of the second stage: | |
5136 | ||
5137 | E1: 0 4 8 12 16 20 24 28 | |
5138 | E2: 2 6 10 14 18 22 26 30 | |
5139 | E3: 1 5 9 13 17 21 25 29 | |
5140 | E4: 3 7 11 15 19 23 27 31 | |
5141 | ||
5142 | And RESULT_CHAIN after reordering: | |
5143 | ||
5144 | 1st vec (E1): 0 4 8 12 16 20 24 28 | |
5145 | 2nd vec (E3): 1 5 9 13 17 21 25 29 | |
b8698a0f | 5146 | 3rd vec (E2): 2 6 10 14 18 22 26 30 |
ebfd146a IR |
5147 | 4th vec (E4): 3 7 11 15 19 23 27 31. */ |
5148 | ||
b602d918 | 5149 | static void |
9771b263 | 5150 | vect_permute_load_chain (vec<tree> dr_chain, |
b8698a0f | 5151 | unsigned int length, |
ebfd146a IR |
5152 | gimple stmt, |
5153 | gimple_stmt_iterator *gsi, | |
9771b263 | 5154 | vec<tree> *result_chain) |
ebfd146a | 5155 | { |
83d5977e | 5156 | tree data_ref, first_vect, second_vect; |
e2c83630 | 5157 | tree perm_mask_even, perm_mask_odd; |
2c23db6d | 5158 | tree perm3_mask_low, perm3_mask_high; |
ebfd146a IR |
5159 | gimple perm_stmt; |
5160 | tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); | |
e2c83630 RH |
5161 | unsigned int i, j, log_length = exact_log2 (length); |
5162 | unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype); | |
5163 | unsigned char *sel = XALLOCAVEC (unsigned char, nelt); | |
ebfd146a | 5164 | |
3f292312 JJ |
5165 | result_chain->quick_grow (length); |
5166 | memcpy (result_chain->address (), dr_chain.address (), | |
5167 | length * sizeof (tree)); | |
e2c83630 | 5168 | |
2c23db6d | 5169 | if (length == 3) |
ebfd146a | 5170 | { |
2c23db6d | 5171 | unsigned int k; |
ebfd146a | 5172 | |
2c23db6d ES |
5173 | for (k = 0; k < 3; k++) |
5174 | { | |
5175 | for (i = 0; i < nelt; i++) | |
5176 | if (3 * i + k < 2 * nelt) | |
5177 | sel[i] = 3 * i + k; | |
5178 | else | |
5179 | sel[i] = 0; | |
557be5a8 | 5180 | perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel); |
2c23db6d ES |
5181 | |
5182 | for (i = 0, j = 0; i < nelt; i++) | |
5183 | if (3 * i + k < 2 * nelt) | |
5184 | sel[i] = i; | |
5185 | else | |
5186 | sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++); | |
5187 | ||
557be5a8 | 5188 | perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel); |
2c23db6d ES |
5189 | |
5190 | first_vect = dr_chain[0]; | |
5191 | second_vect = dr_chain[1]; | |
5192 | ||
5193 | /* Create interleaving stmt (low part of): | |
5194 | low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5195 | ...}> */ | |
f598c55c | 5196 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low"); |
73804b12 RG |
5197 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, |
5198 | first_vect, second_vect, | |
2c23db6d | 5199 | perm3_mask_low); |
ebfd146a | 5200 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
b8698a0f | 5201 | |
2c23db6d ES |
5202 | /* Create interleaving stmt (high part of): |
5203 | high = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5204 | ...}> */ | |
5205 | first_vect = data_ref; | |
5206 | second_vect = dr_chain[2]; | |
f598c55c | 5207 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high"); |
73804b12 RG |
5208 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, |
5209 | first_vect, second_vect, | |
2c23db6d | 5210 | perm3_mask_high); |
ebfd146a | 5211 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); |
2c23db6d | 5212 | (*result_chain)[k] = data_ref; |
ebfd146a | 5213 | } |
ebfd146a | 5214 | } |
2c23db6d ES |
5215 | else |
5216 | { | |
5217 | /* If length is not equal to 3 then only a power of 2 is supported. */
5218 | gcc_assert (exact_log2 (length) != -1); | |
5219 | ||
5220 | for (i = 0; i < nelt; ++i) | |
5221 | sel[i] = i * 2; | |
557be5a8 | 5222 | perm_mask_even = vect_gen_perm_mask_checked (vectype, sel); |
2c23db6d ES |
5223 | |
5224 | for (i = 0; i < nelt; ++i) | |
5225 | sel[i] = i * 2 + 1; | |
557be5a8 | 5226 | perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel); |
ebfd146a | 5227 | |
2c23db6d ES |
5228 | for (i = 0; i < log_length; i++) |
5229 | { | |
5230 | for (j = 0; j < length; j += 2) | |
5231 | { | |
5232 | first_vect = dr_chain[j]; | |
5233 | second_vect = dr_chain[j+1]; | |
5234 | ||
5235 | /* data_ref = permute_even (first_data_ref, second_data_ref); */ | |
5236 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even"); | |
5237 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5238 | first_vect, second_vect, | |
5239 | perm_mask_even); | |
5240 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5241 | (*result_chain)[j/2] = data_ref; | |
5242 | ||
5243 | /* data_ref = permute_odd (first_data_ref, second_data_ref); */ | |
5244 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd"); | |
5245 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5246 | first_vect, second_vect, | |
5247 | perm_mask_odd); | |
5248 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5249 | (*result_chain)[j/2+length/2] = data_ref; | |
5250 | } | |
5251 | memcpy (dr_chain.address (), result_chain->address (), | |
5252 | length * sizeof (tree)); | |
5253 | } | |
5254 | } | |
5255 | } | |
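/* Aside: a standalone simulation (an illustrative sketch, not GCC code)
   of the two-stage extract_even/extract_odd scheme documented above for
   LENGTH == 4 and 8 elements per vector, including the E1/E3/E2/E4
   placement of the intermediate results.  Running it prints the
   RESULT_CHAIN shown in the comment.  */

#include <stdio.h>
#include <string.h>

#define NELT 8
#define LEN 4

static void
extract (const int *v1, const int *v2, int *even, int *odd)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      even[i] = v1[2 * i];
      even[NELT / 2 + i] = v2[2 * i];
      odd[i] = v1[2 * i + 1];
      odd[NELT / 2 + i] = v2[2 * i + 1];
    }
}

int
main (void)
{
  int chain[LEN][NELT], result[LEN][NELT];

  for (int i = 0; i < LEN * NELT; i++)
    chain[i / NELT][i % NELT] = i;

  for (int stage = 0; stage < 2; stage++)      /* log2 (LEN) stages */
    {
      /* Even parts land in the first half of the chain, odd parts in
         the second half, exactly as in vect_permute_load_chain.  */
      for (int j = 0; j < LEN; j += 2)
        extract (chain[j], chain[j + 1],
                 result[j / 2], result[j / 2 + LEN / 2]);
      memcpy (chain, result, sizeof chain);
    }

  for (int j = 0; j < LEN; j++)
    {
      for (int i = 0; i < NELT; i++)
        printf ("%3d", chain[j][i]);
      printf ("\n");
    }
  return 0;
}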
ebfd146a | 5256 | |
f7917029 ES |
5257 | /* Function vect_shift_permute_load_chain. |
5258 | ||
5259 | Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate a
5260 | sequence of stmts to reorder the input data accordingly.
5261 | Return the final references for loads in RESULT_CHAIN.
5262 | Return true if successful, false otherwise.
5263 | ||
5264 | E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8. | |
5265 | The input is 3 vectors each containing 8 elements. We assign a | |
5266 | number to each element, the input sequence is: | |
5267 | ||
5268 | 1st vec: 0 1 2 3 4 5 6 7 | |
5269 | 2nd vec: 8 9 10 11 12 13 14 15 | |
5270 | 3rd vec: 16 17 18 19 20 21 22 23 | |
5271 | ||
5272 | The output sequence should be: | |
5273 | ||
5274 | 1st vec: 0 3 6 9 12 15 18 21 | |
5275 | 2nd vec: 1 4 7 10 13 16 19 22 | |
5276 | 3rd vec: 2 5 8 11 14 17 20 23 | |
5277 | ||
5278 | We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output. | |
5279 | ||
5280 | First we shuffle all 3 vectors to get the correct element order:
5281 | ||
5282 | 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5) | |
5283 | 2nd vec: ( 8 11 14) ( 9 12 15) (10 13) | |
5284 | 3rd vec: (16 19 22) (17 20 23) (18 21) | |
5285 | ||
5286 | Next we unite and shift the vectors 3 times:
5287 | ||
5288 | 1st step: | |
5289 | shift right by 6 the concatenation of: | |
5290 | "1st vec" and "2nd vec" | |
5291 | ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13) | |
5292 | "2nd vec" and "3rd vec" | |
5293 | ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21) | |
5294 | "3rd vec" and "1st vec" | |
5295 | (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5) | |
5296 | | New vectors | | |
5297 | ||
5298 | So that now new vectors are: | |
5299 | ||
5300 | 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15) | |
5301 | 2nd vec: (10 13) (16 19 22) (17 20 23) | |
5302 | 3rd vec: (18 21) ( 0 3 6) ( 1 4 7) | |
5303 | ||
5304 | 2nd step: | |
5305 | shift right by 5 the concatenation of: | |
5306 | "1st vec" and "3rd vec" | |
5307 | ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7) | |
5308 | "2nd vec" and "1st vec" | |
5309 | (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15) | |
5310 | "3rd vec" and "2nd vec" | |
5311 | (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23) | |
5312 | | New vectors | | |
5313 | ||
5314 | So that now new vectors are: | |
5315 | ||
5316 | 1st vec: ( 9 12 15) (18 21) ( 0 3 6) | |
5317 | 2nd vec: (17 20 23) ( 2 5) ( 8 11 14) | |
5318 | 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY | |
5319 | ||
5320 | 3rd step: | |
5321 | shift right by 5 the concatenation of: | |
5322 | "1st vec" and "1st vec" | |
5323 | ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6) | |
5324 | shift right by 3 the concatenation of: | |
5325 | "2nd vec" and "2nd vec" | |
5326 | (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14) | |
5327 | | New vectors | | |
5328 | ||
5329 | So that now all vectors are READY: | |
5330 | 1st vec: ( 0 3 6) ( 9 12 15) (18 21) | |
5331 | 2nd vec: ( 2 5) ( 8 11 14) (17 20 23) | |
5332 | 3rd vec: ( 1 4 7) (10 13) (16 19 22) | |
5333 | ||
5334 | This algorithm is faster than the one in vect_permute_load_chain if:
5335 | 1. "shift of a concatenation" is faster than a general permutation.
5336 | This is usually so.
5337 | 2. The TARGET machine can't execute vector instructions in parallel.
5338 | This is because each step of the algorithm depends on the previous one.
5339 | The algorithm in vect_permute_load_chain is much more parallel.
5340 | ||
5341 | The algorithm is applicable only for LOAD CHAIN LENGTH less than VF. | |
5342 | */ | |
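/* Aside: a standalone sketch (not GCC code) printing the "right order"
   selector used below for the length == 3 case, computed for nelt == 8.
   The loop body is transcribed from the function below; the k++ at the
   end of each iteration is inferred from the documented result
   {0 3 6 1 4 7 2 5}, since the function body continues past this
   excerpt.  */

#include <stdio.h>

int
main (void)
{
  unsigned int nelt = 8, sel[8];
  unsigned int k = 0, l = 0;

  for (unsigned int i = 0; i < nelt; i++)
    {
      if (3 * k + (l % 3) >= nelt)
        {
          k = 0;
          l += 3 - (nelt % 3);
        }
      sel[i] = 3 * k + (l % 3);
      k++;                      /* assumed continuation of the loop body */
    }
  for (unsigned int i = 0; i < nelt; i++)
    printf ("%u ", sel[i]);     /* prints 0 3 6 1 4 7 2 5 */
  printf ("\n");
  return 0;
}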
5343 | ||
5344 | static bool | |
5345 | vect_shift_permute_load_chain (vec<tree> dr_chain, | |
5346 | unsigned int length, | |
5347 | gimple stmt, | |
5348 | gimple_stmt_iterator *gsi, | |
5349 | vec<tree> *result_chain) | |
5350 | { | |
5351 | tree vect[3], vect_shift[3], data_ref, first_vect, second_vect; | |
5352 | tree perm2_mask1, perm2_mask2, perm3_mask; | |
5353 | tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask; | |
5354 | gimple perm_stmt; | |
5355 | ||
5356 | tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)); | |
5357 | unsigned int i; | |
5358 | unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype); | |
5359 | unsigned char *sel = XALLOCAVEC (unsigned char, nelt); | |
5360 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
5361 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
5362 | ||
5363 | result_chain->quick_grow (length); | |
5364 | memcpy (result_chain->address (), dr_chain.address (), | |
5365 | length * sizeof (tree)); | |
5366 | ||
af4c011e | 5367 | if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4) |
f7917029 | 5368 | { |
af4c011e | 5369 | unsigned int j, log_length = exact_log2 (length); |
f7917029 ES |
5370 | for (i = 0; i < nelt / 2; ++i) |
5371 | sel[i] = i * 2; | |
5372 | for (i = 0; i < nelt / 2; ++i) | |
5373 | sel[nelt / 2 + i] = i * 2 + 1; | |
5374 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5375 | { | |
5376 | if (dump_enabled_p ()) | |
5377 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5378 | "shuffle of 2 fields structure is not \ | |
5379 | supported by target\n"); | |
5380 | return false; | |
5381 | } | |
557be5a8 | 5382 | perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5383 | |
5384 | for (i = 0; i < nelt / 2; ++i) | |
5385 | sel[i] = i * 2 + 1; | |
5386 | for (i = 0; i < nelt / 2; ++i) | |
5387 | sel[nelt / 2 + i] = i * 2; | |
5388 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5389 | { | |
5390 | if (dump_enabled_p ()) | |
5391 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5392 | "shuffle of 2 fields structure is not \ | |
5393 | supported by target\n"); | |
5394 | return false; | |
5395 | } | |
557be5a8 | 5396 | perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5397 | |
5398 | /* Generating permutation constant to shift all elements. | |
5399 | For vector length 8 it is {4 5 6 7 8 9 10 11}. */ | |
5400 | for (i = 0; i < nelt; i++) | |
5401 | sel[i] = nelt / 2 + i; | |
5402 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5403 | { | |
5404 | if (dump_enabled_p ()) | |
5405 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5406 | "shift permutation is not supported by target\n"); | |
5407 | return false; | |
5408 | } | |
557be5a8 | 5409 | shift1_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5410 | |
5411 | /* Generating permutation constant to select vector from 2. | |
5412 | For vector length 8 it is {0 1 2 3 12 13 14 15}. */ | |
5413 | for (i = 0; i < nelt / 2; i++) | |
5414 | sel[i] = i; | |
5415 | for (i = nelt / 2; i < nelt; i++) | |
5416 | sel[i] = nelt + i; | |
5417 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5418 | { | |
5419 | if (dump_enabled_p ()) | |
5420 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5421 | "select is not supported by target\n"); | |
5422 | return false; | |
5423 | } | |
557be5a8 | 5424 | select_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 | 5425 | |
af4c011e ES |
5426 | for (i = 0; i < log_length; i++) |
5427 | { | |
5428 | for (j = 0; j < length; j += 2) | |
5429 | { | |
5430 | first_vect = dr_chain[j]; | |
5431 | second_vect = dr_chain[j + 1]; | |
5432 | ||
5433 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2"); | |
5434 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5435 | first_vect, first_vect, | |
5436 | perm2_mask1); | |
5437 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5438 | vect[0] = data_ref; | |
5439 | ||
5440 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2"); | |
5441 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5442 | second_vect, second_vect, | |
5443 | perm2_mask2); | |
5444 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5445 | vect[1] = data_ref; | |
f7917029 | 5446 | |
af4c011e ES |
5447 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift"); |
5448 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5449 | vect[0], vect[1], | |
5450 | shift1_mask); | |
5451 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5452 | (*result_chain)[j/2 + length/2] = data_ref; | |
5453 | ||
5454 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_select"); | |
5455 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5456 | vect[0], vect[1], | |
5457 | select_mask); | |
5458 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5459 | (*result_chain)[j/2] = data_ref; | |
5460 | } | |
5461 | memcpy (dr_chain.address (), result_chain->address (), | |
5462 | length * sizeof (tree)); | |
5463 | } | |
f7917029 ES |
5464 | return true; |
5465 | } | |
5466 | if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2) | |
5467 | { | |
5468 | unsigned int k = 0, l = 0; | |
5469 | ||
5470 | /* Generating permutation constant to get all elements in the right order. | |
5471 | For vector length 8 it is {0 3 6 1 4 7 2 5}. */ | |
5472 | for (i = 0; i < nelt; i++) | |
5473 | { | |
5474 | if (3 * k + (l % 3) >= nelt) | |
5475 | { | |
5476 | k = 0; | |
5477 | l += (3 - (nelt % 3)); | |
5478 | } | |
5479 | sel[i] = 3 * k + (l % 3); | |
5480 | k++; | |
5481 | } | |
5482 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5483 | { | |
5484 | if (dump_enabled_p ()) | |
5485 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5486 | "shuffle of 3 fields structure is not \ | |
5487 | supported by target\n"); | |
5488 | return false; | |
5489 | } | |
557be5a8 | 5490 | perm3_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5491 | |
5492 | /* Generating permutation constant to shift all elements. | |
5493 | For vector length 8 it is {6 7 8 9 10 11 12 13}. */ | |
5494 | for (i = 0; i < nelt; i++) | |
5495 | sel[i] = 2 * (nelt / 3) + (nelt % 3) + i; | |
5496 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5497 | { | |
5498 | if (dump_enabled_p ()) | |
5499 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5500 | "shift permutation is not supported by target\n"); | |
5501 | return false; | |
5502 | } | |
557be5a8 | 5503 | shift1_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5504 | |
5505 | /* Generating permutation constant to shift all elements. | |
5506 | For vector length 8 it is {5 6 7 8 9 10 11 12}. */ | |
5507 | for (i = 0; i < nelt; i++) | |
5508 | sel[i] = 2 * (nelt / 3) + 1 + i; | |
5509 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5510 | { | |
5511 | if (dump_enabled_p ()) | |
5512 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5513 | "shift permutation is not supported by target\n"); | |
5514 | return false; | |
5515 | } | |
557be5a8 | 5516 | shift2_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5517 | |
5518 | /* Generating permutation constant to shift all elements. | |
5519 | For vector length 8 it is {3 4 5 6 7 8 9 10}. */ | |
5520 | for (i = 0; i < nelt; i++) | |
5521 | sel[i] = (nelt / 3) + (nelt % 3) / 2 + i; | |
5522 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5523 | { | |
5524 | if (dump_enabled_p ()) | |
5525 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5526 | "shift permutation is not supported by target\n"); | |
5527 | return false; | |
5528 | } | |
557be5a8 | 5529 | shift3_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5530 | |
5531 | /* Generating permutation constant to shift all elements. | |
5532 | For vector length 8 it is {5 6 7 8 9 10 11 12}. */ | |
5533 | for (i = 0; i < nelt; i++) | |
5534 | sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i; | |
5535 | if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel)) | |
5536 | { | |
5537 | if (dump_enabled_p ()) | |
5538 | dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, | |
5539 | "shift permutation is not supported by target\n"); | |
5540 | return false; | |
5541 | } | |
557be5a8 | 5542 | shift4_mask = vect_gen_perm_mask_checked (vectype, sel); |
f7917029 ES |
5543 | |
5544 | for (k = 0; k < 3; k++) | |
5545 | { | |
f598c55c | 5546 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3"); |
f7917029 ES |
5547 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, |
5548 | dr_chain[k], dr_chain[k], | |
5549 | perm3_mask); | |
5550 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5551 | vect[k] = data_ref; | |
5552 | } | |
5553 | ||
5554 | for (k = 0; k < 3; k++) | |
5555 | { | |
5556 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1"); | |
5557 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5558 | vect[k % 3], | |
5559 | vect[(k + 1) % 3], | |
5560 | shift1_mask); | |
5561 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5562 | vect_shift[k] = data_ref; | |
5563 | } | |
5564 | ||
5565 | for (k = 0; k < 3; k++) | |
5566 | { | |
5567 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2"); | |
5568 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5569 | vect_shift[(4 - k) % 3], | |
5570 | vect_shift[(3 - k) % 3], | |
5571 | shift2_mask); | |
5572 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5573 | vect[k] = data_ref; | |
5574 | } | |
5575 | ||
5576 | (*result_chain)[3 - (nelt % 3)] = vect[2]; | |
5577 | ||
5578 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3"); | |
5579 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5580 | vect[0], vect[0], | |
5581 | shift3_mask); | |
5582 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5583 | (*result_chain)[nelt % 3] = data_ref; | |
5584 | ||
5585 | data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4"); | |
5586 | perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, | |
5587 | vect[1], vect[1], | |
5588 | shift4_mask); | |
5589 | vect_finish_stmt_generation (stmt, perm_stmt, gsi); | |
5590 | (*result_chain)[0] = data_ref; | |
5591 | return true; | |
5592 | } | |
5593 | return false; | |
5594 | } | |
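For concreteness, the masks computed by the power-of-two branch above work out to the following constants when NELT == 8 (an editor's illustration; the _8-suffixed names are hypothetical):

/* Even/odd gather of a vector with itself: {evens, odds} and {odds, evens}.  */
static const unsigned char perm2_mask1_8[8] = { 0, 2, 4, 6, 1, 3, 5, 7 };
static const unsigned char perm2_mask2_8[8] = { 1, 3, 5, 7, 0, 2, 4, 6 };
/* Shift the concatenation right by NELT / 2.  */
static const unsigned char shift1_mask_8[8] = { 4, 5, 6, 7, 8, 9, 10, 11 };
/* Low half of the first operand, high half of the second.  */
static const unsigned char select_mask_8[8] = { 0, 1, 2, 3, 12, 13, 14, 15 };

Each iteration of the j-loop thus sends the even-indexed elements of a vector pair to (*result_chain)[j/2] (via the select mask) and the odd-indexed elements to (*result_chain)[j/2 + length/2] (via the shift mask); log2(length) such sweeps complete the stride-LENGTH de-interleave.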
5595 | ||
0d0293ac | 5596 | /* Function vect_transform_grouped_load. |
ebfd146a IR |
5597 | |
5598 | Given a chain of input interleaved data-refs (in DR_CHAIN), build statements | |
5599 | to perform their permutation and assign the resulting vectorized statements to | |
5600 | the scalar statements. | |
5601 | */ | |
5602 | ||
b602d918 | 5603 | void |
9771b263 | 5604 | vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size, |
ebfd146a IR |
5605 | gimple_stmt_iterator *gsi) |
5606 | { | |
ef4bddc2 | 5607 | machine_mode mode; |
6e1aa848 | 5608 | vec<tree> result_chain = vNULL; |
ebfd146a | 5609 | |
b8698a0f L |
5610 | /* DR_CHAIN contains input data-refs that are a part of the interleaving. |
5611 | RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted | |
ebfd146a | 5612 | vectors that are ready for vector computation. */ | |
9771b263 | 5613 | result_chain.create (size); |
f7917029 ES |
5614 | |
5615 | /* If the reassociation width for the vector type is 2 or greater, the target | |
5616 | machine can execute 2 or more vector instructions in parallel.  Otherwise | |
5617 | try to get the chain for the load group using vect_shift_permute_load_chain. */ | |
5618 | mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt))); | |
5619 | if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1 | |
5620 | || !vect_shift_permute_load_chain (dr_chain, size, stmt, | |
5621 | gsi, &result_chain)) | |
5622 | vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain); | |
0d0293ac | 5623 | vect_record_grouped_load_vectors (stmt, result_chain); |
9771b263 | 5624 | result_chain.release (); |
272c6793 RS |
5625 | } |
5626 | ||
0d0293ac | 5627 | /* RESULT_CHAIN contains the output of a group of grouped loads that were |
272c6793 RS |
5628 | generated as part of the vectorization of STMT. Assign the statement |
5629 | for each vector to the associated scalar statement. */ | |
5630 | ||
5631 | void | |
9771b263 | 5632 | vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain) |
272c6793 | 5633 | { |
e14c1050 | 5634 | gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)); |
272c6793 RS |
5635 | gimple next_stmt, new_stmt; |
5636 | unsigned int i, gap_count; | |
5637 | tree tmp_data_ref; | |
ebfd146a | 5638 | |
b8698a0f L |
5639 | /* Put a permuted data-ref in the VECTORIZED_STMT field. |
5640 | Since we scan the chain starting from its first node, their order | |
ebfd146a IR |
5641 | corresponds to the order of data-refs in RESULT_CHAIN. */ | |
5642 | next_stmt = first_stmt; | |
5643 | gap_count = 1; | |
9771b263 | 5644 | FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref) |
ebfd146a IR |
5645 | { |
5646 | if (!next_stmt) | |
5647 | break; | |
5648 | ||
ff802fa1 IR |
5649 | /* Skip the gaps. Loads created for the gaps will be removed by the dead | |
5650 | code elimination pass later. No need to check for the first stmt in | |
ebfd146a | 5651 | the group, since it always exists. |
e14c1050 IR |
5652 | GROUP_GAP is the number of steps in elements from the previous |
5653 | access (if there is no gap GROUP_GAP is 1). We skip loads that | |
ff802fa1 | 5654 | correspond to the gaps. */ |
b8698a0f | 5655 | if (next_stmt != first_stmt |
e14c1050 | 5656 | && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt))) |
ebfd146a IR |
5657 | { |
5658 | gap_count++; | |
5659 | continue; | |
5660 | } | |
5661 | ||
5662 | while (next_stmt) | |
5663 | { | |
5664 | new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref); | |
5665 | /* We assume that if VEC_STMT is not NULL, this is a case of multiple | |
5666 | copies, and we put the new vector statement in the first available | |
5667 | RELATED_STMT. */ | |
5668 | if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt))) | |
5669 | STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt; | |
5670 | else | |
5671 | { | |
e14c1050 | 5672 | if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt))) |
ebfd146a IR |
5673 | { |
5674 | gimple prev_stmt = | |
5675 | STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)); | |
5676 | gimple rel_stmt = | |
5677 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)); | |
5678 | while (rel_stmt) | |
5679 | { | |
5680 | prev_stmt = rel_stmt; | |
b8698a0f | 5681 | rel_stmt = |
ebfd146a IR |
5682 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt)); |
5683 | } | |
5684 | ||
b8698a0f | 5685 | STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) = |
ebfd146a IR |
5686 | new_stmt; |
5687 | } | |
5688 | } | |
5689 | ||
e14c1050 | 5690 | next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); |
ebfd146a IR |
5691 | gap_count = 1; |
5692 | /* If NEXT_STMT accesses the same DR as the previous statement, | |
5693 | put the same TMP_DATA_REF as its vectorized statement; otherwise | |
5694 | get the next data-ref from RESULT_CHAIN. */ | |
e14c1050 | 5695 | if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt))) |
ebfd146a IR |
5696 | break; |
5697 | } | |
5698 | } | |
ebfd146a IR |
5699 | } |
5700 | ||
5701 | /* Function vect_force_dr_alignment_p. | |
5702 | ||
5703 | Returns whether the alignment of a DECL can be forced to be aligned | |
5704 | on ALIGNMENT bit boundary. */ | |
5705 | ||
b8698a0f | 5706 | bool |
ebfd146a IR |
5707 | vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment) |
5708 | { | |
5709 | if (TREE_CODE (decl) != VAR_DECL) | |
5710 | return false; | |
5711 | ||
6192fa79 JH |
5712 | /* With -fno-toplevel-reorder we may have already output the constant. */ |
5713 | if (TREE_ASM_WRITTEN (decl)) | |
5714 | return false; | |
5715 | ||
5716 | /* Constant pool entries may be shared and not properly merged by LTO. */ | |
5717 | if (DECL_IN_CONSTANT_POOL (decl)) | |
5718 | return false; | |
ebfd146a | 5719 | |
6ad386b7 JH |
5720 | if (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl)) |
5721 | { | |
5722 | symtab_node *snode; | |
5723 | ||
5724 | /* We cannot change alignment of symbols that may bind to symbols | |
5725 | in other translation units that may contain a definition with lower | |
5726 | alignment. */ | |
5727 | if (!decl_binds_to_current_def_p (decl)) | |
5728 | return false; | |
5729 | ||
5730 | /* When compiling a partition, be sure the symbol is not output by another | |
5731 | partition. */ | |
d52f5295 | 5732 | snode = symtab_node::get (decl); |
6ad386b7 JH |
5733 | if (flag_ltrans |
5734 | && (snode->in_other_partition | |
d52f5295 | 5735 | || snode->get_partitioning_class () == SYMBOL_DUPLICATE)) |
6ad386b7 JH |
5736 | return false; |
5737 | } | |
ebfd146a | 5738 | |
f89dcfd8 RG |
5739 | /* Do not override the alignment as specified by the ABI when the "used" | |
5740 | attribute is set. */ | |
5741 | if (DECL_PRESERVE_P (decl)) | |
af4d0d91 RG |
5742 | return false; |
5743 | ||
79e02217 JJ |
5744 | /* Do not override explicit alignment set by the user when an explicit |
5745 | section name is also used. This is a common idiom used by many | |
5746 | software projects. */ | |
24d047a3 | 5747 | if (TREE_STATIC (decl) |
f961457f | 5748 | && DECL_SECTION_NAME (decl) != NULL |
d52f5295 | 5749 | && !symtab_node::get (decl)->implicit_section) |
79e02217 JJ |
5750 | return false; |
5751 | ||
6ad386b7 JH |
5752 | /* If the symbol is an alias, we need to check that the target is OK. */ | |
5753 | if (TREE_STATIC (decl)) | |
5754 | { | |
d52f5295 | 5755 | tree target = symtab_node::get (decl)->ultimate_alias_target ()->decl; |
6ad386b7 JH |
5756 | if (target != decl) |
5757 | { | |
5758 | if (DECL_PRESERVE_P (target)) | |
5759 | return false; | |
5760 | decl = target; | |
5761 | } | |
5762 | } | |
5763 | ||
ebfd146a IR |
5764 | if (TREE_STATIC (decl)) |
5765 | return (alignment <= MAX_OFILE_ALIGNMENT); | |
5766 | else | |
5767 | return (alignment <= MAX_STACK_ALIGNMENT); | |
5768 | } | |
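A typical use of this predicate is to raise a decl's alignment before vectorizing accesses to it. A minimal sketch, assuming BASE is the decl underlying the data reference and VECTYPE the chosen vector type (both names stand in for the caller's context):

  if (vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
    {
      /* Safe to force: align the decl to the vector type and mark the
         alignment as explicitly set.  */
      DECL_ALIGN (base) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base) = 1;
    }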
5769 | ||
ebfd146a | 5770 | |
720f5239 IR |
5771 | /* Return whether the data reference DR is supported with respect to its |
5772 | alignment. | |
5773 | If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even | |
5774 | if it is aligned, i.e., check if it is possible to vectorize it with different | |
ebfd146a IR |
5775 | alignment. */ |
5776 | ||
5777 | enum dr_alignment_support | |
720f5239 IR |
5778 | vect_supportable_dr_alignment (struct data_reference *dr, |
5779 | bool check_aligned_accesses) | |
ebfd146a IR |
5780 | { |
5781 | gimple stmt = DR_STMT (dr); | |
5782 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
5783 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
ef4bddc2 | 5784 | machine_mode mode = TYPE_MODE (vectype); |
a70d6342 IR |
5785 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
5786 | struct loop *vect_loop = NULL; | |
5787 | bool nested_in_vect_loop = false; | |
ebfd146a | 5788 | |
720f5239 | 5789 | if (aligned_access_p (dr) && !check_aligned_accesses) |
ebfd146a IR |
5790 | return dr_aligned; |
5791 | ||
5ce9450f JJ |
5792 | /* For now assume all conditional loads/stores support unaligned |
5793 | access without any special code. */ | |
5794 | if (is_gimple_call (stmt) | |
5795 | && gimple_call_internal_p (stmt) | |
5796 | && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD | |
5797 | || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)) | |
5798 | return dr_unaligned_supported; | |
5799 | ||
69f11a13 IR |
5800 | if (loop_vinfo) |
5801 | { | |
5802 | vect_loop = LOOP_VINFO_LOOP (loop_vinfo); | |
5803 | nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt); | |
5804 | } | |
a70d6342 | 5805 | |
ebfd146a IR |
5806 | /* Possibly unaligned access. */ |
5807 | ||
5808 | /* We can choose between using the implicit realignment scheme (generating | |
5809 | a misaligned_move stmt) and the explicit realignment scheme (generating | |
ff802fa1 IR |
5810 | aligned loads with a REALIGN_LOAD). There are two variants of the | |
5811 | explicit realignment scheme: optimized and unoptimized. | |
ebfd146a IR |
5812 | We can optimize the realignment only if the step between consecutive |
5813 | vector loads is equal to the vector size. Since the vector memory | |
5814 | accesses advance in steps of VS (Vector Size) in the vectorized loop, it | |
5815 | is guaranteed that the misalignment amount remains the same throughout the | |
5816 | execution of the vectorized loop. Therefore, we can create the | |
5817 | "realignment token" (the permutation mask that is passed to REALIGN_LOAD) | |
5818 | at the loop preheader. | |
5819 | ||
5820 | However, in the case of outer-loop vectorization, when vectorizing a | |
5821 | memory access in the inner-loop nested within the LOOP that is now being | |
5822 | vectorized, while it is guaranteed that the misalignment of the | |
5823 | vectorized memory access will remain the same in different outer-loop | |
5825 | iterations, it is *not* guaranteed that it will remain the same throughout | |
5825 | the execution of the inner-loop. This is because the inner-loop advances | |
5826 | with the original scalar step (and not in steps of VS). If the inner-loop | |
5827 | step happens to be a multiple of VS, then the misalignment remains fixed | |
5828 | and we can use the optimized realignment scheme. For example: | |
5829 | ||
5830 | for (i=0; i<N; i++) | |
5831 | for (j=0; j<M; j++) | |
5832 | s += a[i+j]; | |
5833 | ||
5834 | When vectorizing the i-loop in the above example, the step between | |
5835 | consecutive vector loads is 1, and so the misalignment does not remain | |
5836 | fixed across the execution of the inner-loop, and the realignment cannot | |
5837 | be optimized (as illustrated in the following pseudo vectorized loop): | |
5838 | ||
5839 | for (i=0; i<N; i+=4) | |
5840 | for (j=0; j<M; j++){ | |
5841 | vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...} | |
5842 | // when j is {0,1,2,3,4,5,6,7,...} respectively. | |
5843 | // (assuming that we start from an aligned address). | |
5844 | } | |
5845 | ||
5846 | We therefore have to use the unoptimized realignment scheme: | |
5847 | ||
5848 | for (i=0; i<N; i+=4) | |
5849 | for (j=k; j<M; j+=4) | |
5850 | vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming | |
5851 | // that the misalignment of the initial address is | |
5852 | // 0). | |
5853 | ||
5854 | The loop can then be vectorized as follows: | |
5855 | ||
5856 | for (k=0; k<4; k++){ | |
5857 | rt = get_realignment_token (&vp[k]); | |
5858 | for (i=0; i<N; i+=4){ | |
5859 | v1 = vp[i+k]; | |
5860 | for (j=k; j<M; j+=4){ | |
5861 | v2 = vp[i+j+VS-1]; | |
5862 | va = REALIGN_LOAD <v1,v2,rt>; | |
5863 | vs += va; | |
5864 | v1 = v2; | |
5865 | } | |
5866 | } | |
5867 | } */ | |
5868 | ||
5869 | if (DR_IS_READ (dr)) | |
5870 | { | |
0601d0cf RE |
5871 | bool is_packed = false; |
5872 | tree type = (TREE_TYPE (DR_REF (dr))); | |
5873 | ||
947131ba | 5874 | if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing |
ebfd146a IR |
5875 | && (!targetm.vectorize.builtin_mask_for_load |
5876 | || targetm.vectorize.builtin_mask_for_load ())) | |
5877 | { | |
5878 | tree vectype = STMT_VINFO_VECTYPE (stmt_info); | |
69f11a13 IR |
5879 | if ((nested_in_vect_loop |
5880 | && (TREE_INT_CST_LOW (DR_STEP (dr)) | |
5881 | != GET_MODE_SIZE (TYPE_MODE (vectype)))) | |
5882 | || !loop_vinfo) | |
ebfd146a IR |
5883 | return dr_explicit_realign; |
5884 | else | |
5885 | return dr_explicit_realign_optimized; | |
5886 | } | |
0601d0cf | 5887 | if (!known_alignment_for_access_p (dr)) |
4c9bcf89 | 5888 | is_packed = not_size_aligned (DR_REF (dr)); |
b8698a0f | 5889 | |
afb119be RB |
5890 | if ((TYPE_USER_ALIGN (type) && !is_packed) |
5891 | || targetm.vectorize. | |
5892 | support_vector_misalignment (mode, type, | |
5893 | DR_MISALIGNMENT (dr), is_packed)) | |
ebfd146a IR |
5894 | /* Can't software pipeline the loads, but can at least do them. */ |
5895 | return dr_unaligned_supported; | |
5896 | } | |
0601d0cf RE |
5897 | else |
5898 | { | |
5899 | bool is_packed = false; | |
5900 | tree type = (TREE_TYPE (DR_REF (dr))); | |
ebfd146a | 5901 | |
0601d0cf | 5902 | if (!known_alignment_for_access_p (dr)) |
4c9bcf89 | 5903 | is_packed = not_size_aligned (DR_REF (dr)); |
b8698a0f | 5904 | |
afb119be RB |
5905 | if ((TYPE_USER_ALIGN (type) && !is_packed) |
5906 | || targetm.vectorize. | |
5907 | support_vector_misalignment (mode, type, | |
5908 | DR_MISALIGNMENT (dr), is_packed)) | |
0601d0cf RE |
5909 | return dr_unaligned_supported; |
5910 | } | |
b8698a0f | 5911 | |
ebfd146a IR |
5912 | /* Unsupported. */ |
5913 | return dr_unaligned_unsupported; | |
5914 | } |
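Callers act on this classification when generating the access. A minimal sketch of such a dispatch (the work in each arm is elided and illustrative only):

  switch (vect_supportable_dr_alignment (dr, false))
    {
    case dr_aligned:
      /* Emit a plain aligned vector access.  */
      break;
    case dr_unaligned_supported:
      /* The target handles the misaligned access directly.  */
      break;
    case dr_explicit_realign:
    case dr_explicit_realign_optimized:
      /* Emit the REALIGN_LOAD scheme described in the comment above.  */
      break;
    case dr_unaligned_unsupported:
      /* The access cannot be vectorized with this alignment.  */
      break;
    default:
      gcc_unreachable ();
    }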