]>
Commit | Line | Data |
---|---|---|
fb85abff | 1 | /* Vectorizer |
fbd26352 | 2 | Copyright (C) 2003-2019 Free Software Foundation, Inc. |
c91e8223 | 3 | Contributed by Dorit Naishlos <dorit@il.ibm.com> |
4 | ||
5 | This file is part of GCC. | |
6 | ||
7 | GCC is free software; you can redistribute it and/or modify it under | |
8 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 9 | Software Foundation; either version 3, or (at your option) any later |
c91e8223 | 10 | version. |
11 | ||
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ | |
c91e8223 | 20 | |
21 | #ifndef GCC_TREE_VECTORIZER_H | |
22 | #define GCC_TREE_VECTORIZER_H | |
23 | ||
a477acc5 | 24 | typedef struct _stmt_vec_info *stmt_vec_info; |
1cb23a78 | 25 | |
fb85abff | 26 | #include "tree-data-ref.h" |
4f372c2c | 27 | #include "tree-hash-traits.h" |
f4ac3f3e | 28 | #include "target.h" |
fb85abff | 29 | |
c91e8223 | 30 | /* Used for naming of new temporaries. */ |
31 | enum vect_var_kind { | |
32 | vect_simple_var, | |
ea8f3370 | 33 | vect_pointer_var, |
dab48979 | 34 | vect_scalar_var, |
35 | vect_mask_var | |
c91e8223 | 36 | }; |
37 | ||
4a61a337 | 38 | /* Defines type of operation. */ |
c91e8223 | 39 | enum operation_type { |
40 | unary_op = 1, | |
4a61a337 | 41 | binary_op, |
42 | ternary_op | |
c91e8223 | 43 | }; |
44 | ||
1a9b4618 | 45 | /* Define type of available alignment support. */ |
46 | enum dr_alignment_support { | |
47 | dr_unaligned_unsupported, | |
48 | dr_unaligned_supported, | |
b0eb8c66 | 49 | dr_explicit_realign, |
50 | dr_explicit_realign_optimized, | |
1a9b4618 | 51 | dr_aligned |
52 | }; | |
53 | ||
ce10738f | 54 | /* Define type of def-use cross-iteration cycle. */ |
e12906b9 | 55 | enum vect_def_type { |
bc620c5c | 56 | vect_uninitialized_def = 0, |
f083cd24 | 57 | vect_constant_def = 1, |
58 | vect_external_def, | |
59 | vect_internal_def, | |
e12906b9 | 60 | vect_induction_def, |
61 | vect_reduction_def, | |
7aa0d350 | 62 | vect_double_reduction_def, |
ade2ac53 | 63 | vect_nested_cycle, |
e12906b9 | 64 | vect_unknown_def_type |
65 | }; | |
66 | ||
d09d8733 | 67 | /* Define type of reduction. */ |
68 | enum vect_reduction_type { | |
69 | TREE_CODE_REDUCTION, | |
b4552064 | 70 | COND_REDUCTION, |
56fb8e9d | 71 | INTEGER_INDUC_COND_REDUCTION, |
3bf95150 | 72 | CONST_COND_REDUCTION, |
73 | ||
74 | /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop | |
75 | to implement: | |
76 | ||
77 | for (int i = 0; i < VF; ++i) | |
78 | res = cond[i] ? val[i] : res; */ | |
d77809a4 | 79 | EXTRACT_LAST_REDUCTION, |
80 | ||
81 | /* Use a folding reduction within the loop to implement: | |
82 | ||
83 | for (int i = 0; i < VF; ++i) | |
84 | res = res OP val[i]; | |
85 | ||
86 | (with no reassocation). */ | |
87 | FOLD_LEFT_REDUCTION | |
d09d8733 | 88 | }; |
89 | ||
07be02da | 90 | #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ |
91 | || ((D) == vect_double_reduction_def) \ | |
92 | || ((D) == vect_nested_cycle)) | |
93 | ||
4db2b577 | 94 | /* Structure to encapsulate information about a group of like |
95 | instructions to be presented to the target cost model. */ | |
6dc50383 | 96 | struct stmt_info_for_cost { |
4db2b577 | 97 | int count; |
98 | enum vect_cost_for_stmt kind; | |
c863e35b | 99 | enum vect_cost_model_location where; |
1aeaa139 | 100 | stmt_vec_info stmt_info; |
4db2b577 | 101 | int misalign; |
6dc50383 | 102 | }; |
4db2b577 | 103 | |
f1f41a6c | 104 | typedef vec<stmt_info_for_cost> stmt_vector_for_cost; |
4db2b577 | 105 | |
4f372c2c | 106 | /* Maps base addresses to an innermost_loop_behavior that gives the maximum |
107 | known alignment for that base. */ | |
108 | typedef hash_map<tree_operand_hash, | |
109 | innermost_loop_behavior *> vec_base_alignments; | |
110 | ||
c6895939 | 111 | /************************************************************************ |
112 | SLP | |
113 | ************************************************************************/ | |
40bcc7c2 | 114 | typedef struct _slp_tree *slp_tree; |
c6895939 | 115 | |
b0f64919 | 116 | /* A computation tree of an SLP instance. Each node corresponds to a group of |
c6895939 | 117 | stmts to be packed in a SIMD stmt. */ |
40bcc7c2 | 118 | struct _slp_tree { |
b0f64919 | 119 | /* Nodes that contain def-stmts of this node statements operands. */ |
40bcc7c2 | 120 | vec<slp_tree> children; |
c6895939 | 121 | /* A group of scalar stmts to be vectorized together. */ |
06bb64b8 | 122 | vec<stmt_vec_info> stmts; |
678e3d6e | 123 | /* Load permutation relative to the stores, NULL if there is no |
124 | permutation. */ | |
125 | vec<unsigned> load_permutation; | |
c6895939 | 126 | /* Vectorized stmt/s. */ |
dc1fb456 | 127 | vec<stmt_vec_info> vec_stmts; |
48e1416a | 128 | /* Number of vector stmts that are created to replace the group of scalar |
129 | stmts. It is calculated during the transformation phase as the number of | |
130 | scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF | |
c6895939 | 131 | divided by vector size. */ |
132 | unsigned int vec_stmts_size; | |
f1c1105c | 133 | /* Reference count in the SLP graph. */ |
134 | unsigned int refcnt; | |
66e30248 | 135 | /* Whether the scalar computations use two different operators. */ |
136 | bool two_operators; | |
6d37c111 | 137 | /* The DEF type of this node. */ |
138 | enum vect_def_type def_type; | |
40bcc7c2 | 139 | }; |
c6895939 | 140 | |
141 | ||
142 | /* SLP instance is a sequence of stmts in a loop that can be packed into | |
143 | SIMD stmts. */ | |
144 | typedef struct _slp_instance { | |
145 | /* The root of SLP tree. */ | |
146 | slp_tree root; | |
147 | ||
148 | /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ | |
149 | unsigned int group_size; | |
150 | ||
151 | /* The unrolling factor required to vectorized this SLP instance. */ | |
d75596cd | 152 | poly_uint64 unrolling_factor; |
c6895939 | 153 | |
a0515226 | 154 | /* The group of nodes that contain loads of this SLP instance. */ |
f1f41a6c | 155 | vec<slp_tree> loads; |
6154acba | 156 | |
157 | /* The SLP node containing the reduction PHIs. */ | |
158 | slp_tree reduc_phis; | |
c6895939 | 159 | } *slp_instance; |
160 | ||
c6895939 | 161 | |
162 | /* Access Functions. */ | |
163 | #define SLP_INSTANCE_TREE(S) (S)->root | |
164 | #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size | |
165 | #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor | |
a0515226 | 166 | #define SLP_INSTANCE_LOADS(S) (S)->loads |
c6895939 | 167 | |
b0f64919 | 168 | #define SLP_TREE_CHILDREN(S) (S)->children |
c6895939 | 169 | #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts |
170 | #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts | |
171 | #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size | |
678e3d6e | 172 | #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation |
66e30248 | 173 | #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators |
6d37c111 | 174 | #define SLP_TREE_DEF_TYPE(S) (S)->def_type |
b0f64919 | 175 | |
b0f64919 | 176 | |
0822b158 | 177 | |
f68a7726 | 178 | /* Describes two objects whose addresses must be unequal for the vectorized |
179 | loop to be valid. */ | |
180 | typedef std::pair<tree, tree> vec_object_pair; | |
181 | ||
e85b4a5e | 182 | /* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. |
183 | UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ | |
184 | struct vec_lower_bound { | |
185 | vec_lower_bound () {} | |
186 | vec_lower_bound (tree e, bool u, poly_uint64 m) | |
187 | : expr (e), unsigned_p (u), min_value (m) {} | |
188 | ||
189 | tree expr; | |
190 | bool unsigned_p; | |
191 | poly_uint64 min_value; | |
192 | }; | |
193 | ||
a99aba41 | 194 | /* Vectorizer state shared between different analyses like vector sizes |
195 | of the same CFG region. */ | |
196 | struct vec_info_shared { | |
197 | vec_info_shared(); | |
198 | ~vec_info_shared(); | |
199 | ||
200 | void save_datarefs(); | |
201 | void check_datarefs(); | |
202 | ||
203 | /* All data references. Freed by free_data_refs, so not an auto_vec. */ | |
204 | vec<data_reference_p> datarefs; | |
205 | vec<data_reference> datarefs_copy; | |
206 | ||
207 | /* The loop nest in which the data dependences are computed. */ | |
208 | auto_vec<loop_p> loop_nest; | |
209 | ||
210 | /* All data dependences. Freed by free_dependence_relations, so not | |
211 | an auto_vec. */ | |
212 | vec<ddr_p> ddrs; | |
213 | }; | |
214 | ||
e2c5c678 | 215 | /* Vectorizer state common between loop and basic-block vectorization. */ |
216 | struct vec_info { | |
e15e8a2a | 217 | enum vec_kind { bb, loop }; |
218 | ||
a99aba41 | 219 | vec_info (vec_kind, void *, vec_info_shared *); |
e15e8a2a | 220 | ~vec_info (); |
221 | ||
04b2391d | 222 | stmt_vec_info add_stmt (gimple *); |
03c0d666 | 223 | stmt_vec_info lookup_stmt (gimple *); |
9cfd4e76 | 224 | stmt_vec_info lookup_def (tree); |
aaac0b10 | 225 | stmt_vec_info lookup_single_use (tree); |
db72d3bf | 226 | struct dr_vec_info *lookup_dr (data_reference *); |
5f02ee72 | 227 | void move_dr (stmt_vec_info, stmt_vec_info); |
f525c1af | 228 | void remove_stmt (stmt_vec_info); |
a5071338 | 229 | void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *); |
04b2391d | 230 | |
e15e8a2a | 231 | /* The type of vectorization. */ |
232 | vec_kind kind; | |
e2c5c678 | 233 | |
a99aba41 | 234 | /* Shared vectorizer state. */ |
235 | vec_info_shared *shared; | |
236 | ||
d8ef42d0 | 237 | /* The mapping of GIMPLE UID to stmt_vec_info. */ |
1cb23a78 | 238 | vec<stmt_vec_info> stmt_vec_infos; |
d8ef42d0 | 239 | |
e2c5c678 | 240 | /* All SLP instances. */ |
e15e8a2a | 241 | auto_vec<slp_instance> slp_instances; |
e2c5c678 | 242 | |
4f372c2c | 243 | /* Maps base addresses to an innermost_loop_behavior that gives the maximum |
244 | known alignment for that base. */ | |
245 | vec_base_alignments base_alignments; | |
246 | ||
e2c5c678 | 247 | /* All interleaving chains of stores, represented by the first |
248 | stmt in the chain. */ | |
14dca1d8 | 249 | auto_vec<stmt_vec_info> grouped_stores; |
e2c5c678 | 250 | |
251 | /* Cost data used by the target cost model. */ | |
252 | void *target_cost_data; | |
c626a338 | 253 | |
254 | private: | |
255 | stmt_vec_info new_stmt_vec_info (gimple *stmt); | |
256 | void set_vinfo_for_stmt (gimple *, stmt_vec_info); | |
257 | void free_stmt_vec_infos (); | |
258 | void free_stmt_vec_info (stmt_vec_info); | |
e2c5c678 | 259 | }; |
260 | ||
261 | struct _loop_vec_info; | |
262 | struct _bb_vec_info; | |
263 | ||
264 | template<> | |
265 | template<> | |
266 | inline bool | |
267 | is_a_helper <_loop_vec_info *>::test (vec_info *i) | |
268 | { | |
269 | return i->kind == vec_info::loop; | |
270 | } | |
271 | ||
272 | template<> | |
273 | template<> | |
274 | inline bool | |
275 | is_a_helper <_bb_vec_info *>::test (vec_info *i) | |
276 | { | |
277 | return i->kind == vec_info::bb; | |
278 | } | |
279 | ||
3e871d4d | 280 | |
60b29a7e | 281 | /* In general, we can divide the vector statements in a vectorized loop |
282 | into related groups ("rgroups") and say that for each rgroup there is | |
283 | some nS such that the rgroup operates on nS values from one scalar | |
284 | iteration followed by nS values from the next. That is, if VF is the | |
285 | vectorization factor of the loop, the rgroup operates on a sequence: | |
286 | ||
287 | (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) | |
288 | ||
289 | where (i,j) represents a scalar value with index j in a scalar | |
290 | iteration with index i. | |
291 | ||
292 | [ We use the term "rgroup" to emphasise that this grouping isn't | |
293 | necessarily the same as the grouping of statements used elsewhere. | |
294 | For example, if we implement a group of scalar loads using gather | |
295 | loads, we'll use a separate gather load for each scalar load, and | |
296 | thus each gather load will belong to its own rgroup. ] | |
297 | ||
298 | In general this sequence will occupy nV vectors concatenated | |
299 | together. If these vectors have nL lanes each, the total number | |
300 | of scalar values N is given by: | |
301 | ||
302 | N = nS * VF = nV * nL | |
303 | ||
304 | None of nS, VF, nV and nL are required to be a power of 2. nS and nV | |
305 | are compile-time constants but VF and nL can be variable (if the target | |
306 | supports variable-length vectors). | |
307 | ||
308 | In classical vectorization, each iteration of the vector loop would | |
309 | handle exactly VF iterations of the original scalar loop. However, | |
310 | in a fully-masked loop, a particular iteration of the vector loop | |
311 | might handle fewer than VF iterations of the scalar loop. The vector | |
312 | lanes that correspond to iterations of the scalar loop are said to be | |
313 | "active" and the other lanes are said to be "inactive". | |
314 | ||
315 | In a fully-masked loop, many rgroups need to be masked to ensure that | |
316 | they have no effect for the inactive lanes. Each such rgroup needs a | |
317 | sequence of booleans in the same order as above, but with each (i,j) | |
318 | replaced by a boolean that indicates whether iteration i is active. | |
319 | This sequence occupies nV vector masks that again have nL lanes each. | |
320 | Thus the mask sequence as a whole consists of VF independent booleans | |
321 | that are each repeated nS times. | |
322 | ||
323 | We make the simplifying assumption that if a sequence of nV masks is | |
324 | suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by | |
325 | VIEW_CONVERTing it. This holds for all current targets that support | |
326 | fully-masked loops. For example, suppose the scalar loop is: | |
327 | ||
328 | float *f; | |
329 | double *d; | |
330 | for (int i = 0; i < n; ++i) | |
331 | { | |
332 | f[i * 2 + 0] += 1.0f; | |
333 | f[i * 2 + 1] += 2.0f; | |
334 | d[i] += 3.0; | |
335 | } | |
336 | ||
337 | and suppose that vectors have 256 bits. The vectorized f accesses | |
338 | will belong to one rgroup and the vectorized d access to another: | |
339 | ||
340 | f rgroup: nS = 2, nV = 1, nL = 8 | |
341 | d rgroup: nS = 1, nV = 1, nL = 4 | |
342 | VF = 4 | |
343 | ||
344 | [ In this simple example the rgroups do correspond to the normal | |
345 | SLP grouping scheme. ] | |
346 | ||
347 | If only the first three lanes are active, the masks we need are: | |
348 | ||
349 | f rgroup: 1 1 | 1 1 | 1 1 | 0 0 | |
350 | d rgroup: 1 | 1 | 1 | 0 | |
351 | ||
352 | Here we can use a mask calculated for f's rgroup for d's, but not | |
353 | vice versa. | |
354 | ||
355 | Thus for each value of nV, it is enough to provide nV masks, with the | |
356 | mask being calculated based on the highest nL (or, equivalently, based | |
357 | on the highest nS) required by any rgroup with that nV. We therefore | |
358 | represent the entire collection of masks as a two-level table, with the | |
359 | first level being indexed by nV - 1 (since nV == 0 doesn't exist) and | |
360 | the second being indexed by the mask index 0 <= i < nV. */ | |
361 | ||
362 | /* The masks needed by rgroups with nV vectors, according to the | |
363 | description above. */ | |
364 | struct rgroup_masks { | |
365 | /* The largest nS for all rgroups that use these masks. */ | |
366 | unsigned int max_nscalars_per_iter; | |
367 | ||
368 | /* The type of mask to use, based on the highest nS recorded above. */ | |
369 | tree mask_type; | |
370 | ||
371 | /* A vector of nV masks, in iteration order. */ | |
372 | vec<tree> masks; | |
373 | }; | |
374 | ||
375 | typedef auto_vec<rgroup_masks> vec_loop_masks; | |
376 | ||
4e58562d | 377 | /*-----------------------------------------------------------------*/ |
378 | /* Info on vectorized loops. */ | |
379 | /*-----------------------------------------------------------------*/ | |
e2c5c678 | 380 | typedef struct _loop_vec_info : public vec_info { |
a99aba41 | 381 | _loop_vec_info (struct loop *, vec_info_shared *); |
e15e8a2a | 382 | ~_loop_vec_info (); |
4e58562d | 383 | |
384 | /* The loop to which this info struct refers to. */ | |
385 | struct loop *loop; | |
386 | ||
387 | /* The loop basic blocks. */ | |
388 | basic_block *bbs; | |
389 | ||
796f6cba | 390 | /* Number of latch executions. */ |
391 | tree num_itersm1; | |
4e58562d | 392 | /* Number of iterations. */ |
393 | tree num_iters; | |
796f6cba | 394 | /* Number of iterations of the original loop. */ |
be53c6d4 | 395 | tree num_iters_unchanged; |
d5e80d93 | 396 | /* Condition under which this loop is analyzed and versioned. */ |
397 | tree num_iters_assumptions; | |
4e58562d | 398 | |
f92474f8 | 399 | /* Threshold of number of iterations below which vectorization will not be |
004a94a5 | 400 | performed. It is calculated from MIN_PROFITABLE_ITERS and |
401 | PARAM_MIN_VECT_LOOP_BOUND. */ | |
402 | unsigned int th; | |
403 | ||
7456a7ea | 404 | /* When applying loop versioning, the vector form should only be used |
405 | if the number of scalar iterations is >= this value, on top of all | |
406 | the other requirements. Ignored when loop versioning is not being | |
407 | used. */ | |
408 | poly_uint64 versioning_threshold; | |
409 | ||
4e58562d | 410 | /* Unrolling factor */ |
d75596cd | 411 | poly_uint64 vectorization_factor; |
4e58562d | 412 | |
4a85c0b1 | 413 | /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR |
414 | if there is no particular limit. */ | |
415 | unsigned HOST_WIDE_INT max_vectorization_factor; | |
416 | ||
60b29a7e | 417 | /* The masks that a fully-masked loop should use to avoid operating |
418 | on inactive scalars. */ | |
419 | vec_loop_masks masks; | |
420 | ||
6753a4bf | 421 | /* If we are using a loop mask to align memory addresses, this variable |
422 | contains the number of vector elements that we should skip in the | |
423 | first iteration of the vector loop (i.e. the number of leading | |
424 | elements that should be false in the first mask). */ | |
425 | tree mask_skip_niters; | |
426 | ||
60b29a7e | 427 | /* Type of the variables to use in the WHILE_ULT call for fully-masked |
428 | loops. */ | |
429 | tree mask_compare_type; | |
430 | ||
1d86b8dc | 431 | /* For #pragma omp simd if (x) loops the x expression. If constant 0, |
432 | the loop should not be vectorized, if constant non-zero, simd_if_cond | |
433 | shouldn't be set and loop vectorized normally, if SSA_NAME, the loop | |
434 | should be versioned on that condition, using scalar loop if the condition | |
435 | is false and vectorized loop otherwise. */ | |
436 | tree simd_if_cond; | |
437 | ||
ef871d99 | 438 | /* Type of the IV to use in the WHILE_ULT call for fully-masked |
439 | loops. */ | |
440 | tree iv_type; | |
441 | ||
4e58562d | 442 | /* Unknown DRs according to which loop was peeled. */ |
ec5bf0fb | 443 | struct dr_vec_info *unaligned_dr; |
4e58562d | 444 | |
39b8f742 | 445 | /* peeling_for_alignment indicates whether peeling for alignment will take |
446 | place, and what the peeling factor should be: | |
447 | peeling_for_alignment = X means: | |
448 | If X=0: Peeling for alignment will not be applied. | |
449 | If X>0: Peel first X iterations. | |
450 | If X=-1: Generate a runtime test to calculate the number of iterations | |
451 | to be peeled, using the dataref recorded in the field | |
452 | unaligned_dr. */ | |
453 | int peeling_for_alignment; | |
4e58562d | 454 | |
25e3c2e8 | 455 | /* The mask used to check the alignment of pointers or arrays. */ |
456 | int ptr_mask; | |
457 | ||
45b13dc3 | 458 | /* Data Dependence Relations defining address ranges that are candidates |
459 | for a run-time aliasing check. */ | |
e15e8a2a | 460 | auto_vec<ddr_p> may_alias_ddrs; |
45b13dc3 | 461 | |
8a7b0f48 | 462 | /* Data Dependence Relations defining address ranges together with segment |
463 | lengths from which the run-time aliasing check is built. */ | |
e15e8a2a | 464 | auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs; |
8a7b0f48 | 465 | |
f68a7726 | 466 | /* Check that the addresses of each pair of objects is unequal. */ |
e15e8a2a | 467 | auto_vec<vec_object_pair> check_unequal_addrs; |
f68a7726 | 468 | |
e85b4a5e | 469 | /* List of values that are required to be nonzero. This is used to check |
470 | whether things like "x[i * n] += 1;" are safe and eventually gets added | |
471 | to the checks for lower bounds below. */ | |
472 | auto_vec<tree> check_nonzero; | |
473 | ||
474 | /* List of values that need to be checked for a minimum value. */ | |
475 | auto_vec<vec_lower_bound> lower_bounds; | |
476 | ||
25e3c2e8 | 477 | /* Statements in the loop that have data references that are candidates for a |
478 | runtime (loop versioning) misalignment check. */ | |
ab98e625 | 479 | auto_vec<stmt_vec_info> may_misalign_stmts; |
25e3c2e8 | 480 | |
eefa05c8 | 481 | /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ |
f4649a92 | 482 | auto_vec<stmt_vec_info> reductions; |
0822b158 | 483 | |
39a5d6b1 | 484 | /* All reduction chains in the loop, represented by the first |
485 | stmt in the chain. */ | |
14dca1d8 | 486 | auto_vec<stmt_vec_info> reduction_chains; |
39a5d6b1 | 487 | |
2a9a3444 | 488 | /* Cost vector for a single scalar iteration. */ |
e15e8a2a | 489 | auto_vec<stmt_info_for_cost> scalar_cost_vec; |
2a9a3444 | 490 | |
f404501a | 491 | /* Map of IV base/step expressions to inserted name in the preheader. */ |
492 | hash_map<tree_operand_hash, tree> *ivexpr_map; | |
493 | ||
487798e2 | 494 | /* The unrolling factor needed to SLP the loop. In case of that pure SLP is |
495 | applied to the loop, i.e., no unrolling is needed, this is 1. */ | |
d75596cd | 496 | poly_uint64 slp_unrolling_factor; |
487798e2 | 497 | |
2a9a3444 | 498 | /* Cost of a single scalar iteration. */ |
499 | int single_scalar_iteration_cost; | |
500 | ||
487798e2 | 501 | /* Is the loop vectorizable? */ |
502 | bool vectorizable; | |
503 | ||
60b29a7e | 504 | /* Records whether we still have the option of using a fully-masked loop. */ |
505 | bool can_fully_mask_p; | |
506 | ||
507 | /* True if have decided to use a fully-masked loop. */ | |
508 | bool fully_masked_p; | |
509 | ||
ee612634 | 510 | /* When we have grouped data accesses with gaps, we may introduce invalid |
a4ee7fac | 511 | memory accesses. We peel the last iteration of the loop to prevent |
512 | this. */ | |
513 | bool peeling_for_gaps; | |
514 | ||
36f39b2e | 515 | /* When the number of iterations is not a multiple of the vector size |
516 | we need to peel off iterations at the end to form an epilogue loop. */ | |
517 | bool peeling_for_niter; | |
518 | ||
ba69439f | 519 | /* Reductions are canonicalized so that the last operand is the reduction |
520 | operand. If this places a constant into RHS1, this decanonicalizes | |
521 | GIMPLE for other phases, so we must track when this has occurred and | |
522 | fix it up. */ | |
523 | bool operands_swapped; | |
524 | ||
c7a8722c | 525 | /* True if there are no loop carried data dependencies in the loop. |
526 | If loop->safelen <= 1, then this is always true, either the loop | |
527 | didn't have any loop carried data dependencies, or the loop is being | |
528 | vectorized guarded with some runtime alias checks, or couldn't | |
529 | be vectorized at all, but then this field shouldn't be used. | |
530 | For loop->safelen >= 2, the user has asserted that there are no | |
531 | backward dependencies, but there still could be loop carried forward | |
532 | dependencies in such loops. This flag will be false if normal | |
533 | vectorizer data dependency analysis would fail or require versioning | |
534 | for alias, but because of loop->safelen >= 2 it has been vectorized | |
535 | even without versioning for alias. E.g. in: | |
536 | #pragma omp simd | |
537 | for (int i = 0; i < m; i++) | |
538 | a[i] = a[i + k] * c; | |
539 | (or #pragma simd or #pragma ivdep) we can vectorize this and it will | |
540 | DTRT even for k > 0 && k < m, but without safelen we would not | |
541 | vectorize this, so this field would be false. */ | |
542 | bool no_data_dependencies; | |
543 | ||
487798e2 | 544 | /* Mark loops having masked stores. */ |
545 | bool has_mask_store; | |
546 | ||
c71d3c24 | 547 | /* If if-conversion versioned this loop before conversion, this is the |
548 | loop version without if-conversion. */ | |
549 | struct loop *scalar_loop; | |
550 | ||
5b631e09 | 551 | /* For loops being epilogues of already vectorized loops |
552 | this points to the original vectorized loop. Otherwise NULL. */ | |
553 | _loop_vec_info *orig_loop_info; | |
554 | ||
4e58562d | 555 | } *loop_vec_info; |
556 | ||
25e3c2e8 | 557 | /* Access Functions. */ |
10095225 | 558 | #define LOOP_VINFO_LOOP(L) (L)->loop |
559 | #define LOOP_VINFO_BBS(L) (L)->bbs | |
796f6cba | 560 | #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 |
10095225 | 561 | #define LOOP_VINFO_NITERS(L) (L)->num_iters |
796f6cba | 562 | /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after |
563 | prologue peeling retain total unchanged scalar loop iterations for | |
564 | cost model. */ | |
10095225 | 565 | #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged |
d5e80d93 | 566 | #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions |
004a94a5 | 567 | #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th |
7456a7ea | 568 | #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold |
10095225 | 569 | #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable |
60b29a7e | 570 | #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p |
571 | #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p | |
10095225 | 572 | #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor |
4a85c0b1 | 573 | #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor |
60b29a7e | 574 | #define LOOP_VINFO_MASKS(L) (L)->masks |
6753a4bf | 575 | #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters |
60b29a7e | 576 | #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type |
ef871d99 | 577 | #define LOOP_VINFO_MASK_IV_TYPE(L) (L)->iv_type |
10095225 | 578 | #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask |
a99aba41 | 579 | #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest |
580 | #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs | |
581 | #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs | |
10095225 | 582 | #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) |
313a5120 | 583 | #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment |
10095225 | 584 | #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr |
585 | #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts | |
10095225 | 586 | #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs |
8a7b0f48 | 587 | #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs |
f68a7726 | 588 | #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs |
e85b4a5e | 589 | #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero |
590 | #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds | |
ee612634 | 591 | #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores |
10095225 | 592 | #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances |
c6895939 | 593 | #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor |
eefa05c8 | 594 | #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions |
39a5d6b1 | 595 | #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains |
4db2b577 | 596 | #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data |
a4ee7fac | 597 | #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps |
ba69439f | 598 | #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped |
313a5120 | 599 | #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter |
c7a8722c | 600 | #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies |
c71d3c24 | 601 | #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop |
cfd9ca84 | 602 | #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store |
2a9a3444 | 603 | #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec |
604 | #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost | |
5b631e09 | 605 | #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info |
1d86b8dc | 606 | #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond |
4e58562d | 607 | |
d5e80d93 | 608 | #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ |
72ffab3c | 609 | ((L)->may_misalign_stmts.length () > 0) |
d5e80d93 | 610 | #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ |
f68a7726 | 611 | ((L)->comp_alias_ddrs.length () > 0 \ |
e85b4a5e | 612 | || (L)->check_unequal_addrs.length () > 0 \ |
613 | || (L)->lower_bounds.length () > 0) | |
d5e80d93 | 614 | #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ |
615 | (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) | |
1d86b8dc | 616 | #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ |
617 | (LOOP_VINFO_SIMD_IF_COND (L)) | |
d5e80d93 | 618 | #define LOOP_REQUIRES_VERSIONING(L) \ |
619 | (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ | |
620 | || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ | |
1d86b8dc | 621 | || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ |
622 | || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) | |
33bbe730 | 623 | |
10095225 | 624 | #define LOOP_VINFO_NITERS_KNOWN_P(L) \ |
313a5120 | 625 | (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) |
4e58562d | 626 | |
5b631e09 | 627 | #define LOOP_VINFO_EPILOGUE_P(L) \ |
628 | (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) | |
629 | ||
4a85c0b1 | 630 | #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ |
631 | (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) | |
5b631e09 | 632 | |
ed9370cc | 633 | /* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL |
634 | value signifies success, and a NULL value signifies failure, supporting | |
635 | propagating an opt_problem * describing the failure back up the call | |
636 | stack. */ | |
637 | typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info; | |
638 | ||
221e9a92 | 639 | static inline loop_vec_info |
640 | loop_vec_info_for_loop (struct loop *loop) | |
641 | { | |
642 | return (loop_vec_info) loop->aux; | |
643 | } | |
644 | ||
e2c5c678 | 645 | typedef struct _bb_vec_info : public vec_info |
646 | { | |
a99aba41 | 647 | _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); |
e15e8a2a | 648 | ~_bb_vec_info (); |
649 | ||
37545e54 | 650 | basic_block bb; |
4c7587f5 | 651 | gimple_stmt_iterator region_begin; |
652 | gimple_stmt_iterator region_end; | |
37545e54 | 653 | } *bb_vec_info; |
654 | ||
4db2b577 | 655 | #define BB_VINFO_BB(B) (B)->bb |
656 | #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores | |
657 | #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances | |
a99aba41 | 658 | #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs |
659 | #define BB_VINFO_DDRS(B) (B)->shared->ddrs | |
4db2b577 | 660 | #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data |
37545e54 | 661 | |
662 | static inline bb_vec_info | |
663 | vec_info_for_bb (basic_block bb) | |
664 | { | |
665 | return (bb_vec_info) bb->aux; | |
666 | } | |
667 | ||
/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
/* The kind of vectorizable operation a statement was classified as;
   drives which vectorizable_* routine handles it.  */
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};
689 | ||
/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  /* The def is not used inside the loop or basic block at all.  */
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  /* The def is used directly inside the loop or basic block.  */
  vect_used_in_scope
};
714 | ||
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different. By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
736 | ||
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
744 | ||
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};
c6895939 | 784 | |
/* Vectorizer-specific information attached to a data reference.  */
struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  /* The decl whose alignment should be raised when BASE_MISALIGNED.  */
  tree base_decl;
};
799 | ||
f1168a33 | 800 | typedef struct data_reference *dr_p; |
f1168a33 | 801 | |
/* Vectorizer bookkeeping attached to each scalar statement that takes
   part in the analysis/transformation of a loop or basic-block region.  */
struct _stmt_vec_info {

  /* The kind of vectorizable operation this statement was classified as
     (see enum stmt_vec_info_type).  */
  enum stmt_vec_info_type type;

  /* Indicates whether this stmts is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom)  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers to.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;


  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
     at most one such data-ref.  */

  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen. We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load. For consecutive loads, GAP
     is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized. e.g, the increment
     of the loop induction variable and computation of array indexes. relevant
     indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;

  /* True if this is only suitable for SLP vectorization.  */
  bool slp_vect_only_p;
};
c91e8223 | 958 | |
/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};
991 | ||
/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
e2c5c678 | 995 | inline loop_vec_info |
996 | STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo) | |
997 | { | |
998 | if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo)) | |
999 | return loop_vinfo; | |
1000 | return NULL; | |
1001 | } | |
1002 | inline bb_vec_info | |
1003 | STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo) | |
1004 | { | |
1005 | if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo)) | |
1006 | return bb_vinfo; | |
1007 | return NULL; | |
1008 | } | |
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

/* Accessors for the data-ref description relative to the vector loop
   (see the dr_wrt_vec_loop field).  */
#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)	(S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)	(S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)	(S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)		(S)->reduc_def
#define STMT_VINFO_SLP_VECT_ONLY(S)     (S)->slp_vect_only_p

/* DR_GROUP_* apply only to statements with a data reference (the assert
   checks dr_aux.dr is set); REDUC_GROUP_* apply only to statements
   without one.  */
#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS 3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */

#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))
1097 | ||
ecc42a77 | 1098 | static inline bool |
1099 | nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info) | |
1100 | { | |
1101 | return (loop->inner | |
1102 | && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father)); | |
1103 | } | |
1104 | ||
282bf14c | 1105 | /* Return TRUE if a statement represented by STMT_INFO is a part of a |
1106 | pattern. */ | |
1107 | ||
213448e9 | 1108 | static inline bool |
1109 | is_pattern_stmt_p (stmt_vec_info stmt_info) | |
1110 | { | |
e05b01ad | 1111 | return stmt_info->pattern_stmt_p; |
213448e9 | 1112 | } |
1113 | ||
4a59791f | 1114 | /* If STMT_INFO is a pattern statement, return the statement that it |
1115 | replaces, otherwise return STMT_INFO itself. */ | |
1116 | ||
1117 | inline stmt_vec_info | |
1118 | vect_orig_stmt (stmt_vec_info stmt_info) | |
1119 | { | |
1120 | if (is_pattern_stmt_p (stmt_info)) | |
1121 | return STMT_VINFO_RELATED_STMT (stmt_info); | |
1122 | return stmt_info; | |
1123 | } | |
1124 | ||
eeab9fc5 | 1125 | /* Return the later statement between STMT1_INFO and STMT2_INFO. */ |
1126 | ||
1127 | static inline stmt_vec_info | |
1128 | get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) | |
1129 | { | |
1130 | if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt) | |
1131 | > gimple_uid (vect_orig_stmt (stmt2_info)->stmt)) | |
1132 | return stmt1_info; | |
1133 | else | |
1134 | return stmt2_info; | |
1135 | } | |
1136 | ||
0b7ea3a9 | 1137 | /* If STMT_INFO has been replaced by a pattern statement, return the |
1138 | replacement statement, otherwise return STMT_INFO itself. */ | |
1139 | ||
1140 | inline stmt_vec_info | |
1141 | vect_stmt_to_vectorize (stmt_vec_info stmt_info) | |
1142 | { | |
1143 | if (STMT_VINFO_IN_PATTERN_P (stmt_info)) | |
1144 | return STMT_VINFO_RELATED_STMT (stmt_info); | |
1145 | return stmt_info; | |
1146 | } | |
1147 | ||
282bf14c | 1148 | /* Return true if BB is a loop header. */ |
1149 | ||
221e9a92 | 1150 | static inline bool |
1151 | is_loop_header_bb_p (basic_block bb) | |
1152 | { | |
1153 | if (bb == (bb->loop_father)->header) | |
1154 | return true; | |
e95895ef | 1155 | gcc_checking_assert (EDGE_COUNT (bb->preds) == 1); |
221e9a92 | 1156 | return false; |
1157 | } | |
1158 | ||
/* Return pow2 (X), i.e. 2 raised to the non-negative power X.  */

static inline int
vect_pow2 (int x)
{
  int result = 1;
  while (x-- > 0)
    result *= 2;
  return result;
}
84a15e8f | 1171 | |
f97dec81 | 1172 | /* Alias targetm.vectorize.builtin_vectorization_cost. */ |
1173 | ||
1174 | static inline int | |
1175 | builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, | |
1176 | tree vectype, int misalign) | |
1177 | { | |
1178 | return targetm.vectorize.builtin_vectorization_cost (type_of_cost, | |
1179 | vectype, misalign); | |
1180 | } | |
1181 | ||
f4ac3f3e | 1182 | /* Get cost by calling cost target builtin. */ |
1183 | ||
1184 | static inline | |
1185 | int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost) | |
1186 | { | |
f97dec81 | 1187 | return builtin_vectorization_cost (type_of_cost, NULL, 0); |
f4ac3f3e | 1188 | } |
1189 | ||
4db2b577 | 1190 | /* Alias targetm.vectorize.init_cost. */ |
1191 | ||
1192 | static inline void * | |
1193 | init_cost (struct loop *loop_info) | |
1194 | { | |
1195 | return targetm.vectorize.init_cost (loop_info); | |
1196 | } | |
1197 | ||
/* Dump a single recorded statement cost to the given stream; implemented
   in the vectorizer source, used by add_stmt_cost below.  */
extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
			    stmt_vec_info, int, unsigned,
			    enum vect_cost_model_location);
c863e35b | 1201 | |
4db2b577 | 1202 | /* Alias targetm.vectorize.add_stmt_cost. */ |
1203 | ||
1204 | static inline unsigned | |
1205 | add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, | |
f97dec81 | 1206 | stmt_vec_info stmt_info, int misalign, |
1207 | enum vect_cost_model_location where) | |
4db2b577 | 1208 | { |
524665d0 | 1209 | unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind, |
1210 | stmt_info, misalign, where); | |
c863e35b | 1211 | if (dump_file && (dump_flags & TDF_DETAILS)) |
524665d0 | 1212 | dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign, |
1213 | cost, where); | |
1214 | return cost; | |
4db2b577 | 1215 | } |
1216 | ||
/* Alias targetm.vectorize.finish_cost.  Writes the accumulated prologue,
   body and epilogue costs through the three out-parameters.  */

static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}
1225 | ||
/* Alias targetm.vectorize.destroy_cost_data.  Releases DATA obtained
   from init_cost.  */

static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}
1233 | ||
c863e35b | 1234 | inline void |
1235 | add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec) | |
1236 | { | |
1237 | stmt_info_for_cost *cost; | |
1238 | unsigned i; | |
1239 | FOR_EACH_VEC_ELT (*cost_vec, i, cost) | |
1aeaa139 | 1240 | add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info, |
c863e35b | 1241 | cost->misalign, cost->where); |
1242 | } | |
1243 | ||
/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
/* Sentinel values for dr_vec_info::misalignment: analysis ran but could
   not determine the misalignment, vs. analysis never ran at all.  */
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)
1249 | ||
23e1875f | 1250 | inline void |
abc9513d | 1251 | set_dr_misalignment (dr_vec_info *dr_info, int val) |
23e1875f | 1252 | { |
abc9513d | 1253 | dr_info->misalignment = val; |
23e1875f | 1254 | } |
1255 | ||
1256 | inline int | |
abc9513d | 1257 | dr_misalignment (dr_vec_info *dr_info) |
23e1875f | 1258 | { |
abc9513d | 1259 | int misalign = dr_info->misalignment; |
a99aba41 | 1260 | gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED); |
1261 | return misalign; | |
23e1875f | 1262 | } |
c91e8223 | 1263 | |
/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)
282bf14c | 1274 | |
c91e8223 | 1275 | static inline bool |
abc9513d | 1276 | aligned_access_p (dr_vec_info *dr_info) |
c91e8223 | 1277 | { |
abc9513d | 1278 | return (DR_MISALIGNMENT (dr_info) == 0); |
c91e8223 | 1279 | } |
1280 | ||
282bf14c | 1281 | /* Return TRUE if the alignment of the data access is known, and FALSE |
1282 | otherwise. */ | |
1283 | ||
c91e8223 | 1284 | static inline bool |
abc9513d | 1285 | known_alignment_for_access_p (dr_vec_info *dr_info) |
c91e8223 | 1286 | { |
abc9513d | 1287 | return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN); |
c91e8223 | 1288 | } |
1289 | ||
aec313e5 | 1290 | /* Return the minimum alignment in bytes that the vectorized version |
abc9513d | 1291 | of DR_INFO is guaranteed to have. */ |
aec313e5 | 1292 | |
1293 | static inline unsigned int | |
abc9513d | 1294 | vect_known_alignment_in_bytes (dr_vec_info *dr_info) |
aec313e5 | 1295 | { |
abc9513d | 1296 | if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN) |
1297 | return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr))); | |
1298 | if (DR_MISALIGNMENT (dr_info) == 0) | |
e092c20e | 1299 | return known_alignment (DR_TARGET_ALIGNMENT (dr_info)); |
abc9513d | 1300 | return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info); |
aec313e5 | 1301 | } |
1302 | ||
abc9513d | 1303 | /* Return the behavior of DR_INFO with respect to the vectorization context |
9e879814 | 1304 | (which for outer loop vectorization might not be the behavior recorded |
abc9513d | 1305 | in DR_INFO itself). */ |
9e879814 | 1306 | |
1307 | static inline innermost_loop_behavior * | |
abc9513d | 1308 | vect_dr_behavior (dr_vec_info *dr_info) |
9e879814 | 1309 | { |
abc9513d | 1310 | stmt_vec_info stmt_info = dr_info->stmt; |
9e879814 | 1311 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); |
1312 | if (loop_vinfo == NULL | |
a73182ff | 1313 | || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info)) |
abc9513d | 1314 | return &DR_INNERMOST (dr_info->dr); |
9e879814 | 1315 | else |
1316 | return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info); | |
1317 | } | |
1dbf9bd1 | 1318 | |
1319 | /* Return true if the vect cost model is unlimited. */ | |
1320 | static inline bool | |
3e398f5b | 1321 | unlimited_cost_model (loop_p loop) |
1dbf9bd1 | 1322 | { |
4c73695b | 1323 | if (loop != NULL && loop->force_vectorize |
3e398f5b | 1324 | && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) |
1325 | return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; | |
1326 | return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); | |
1dbf9bd1 | 1327 | } |
1328 | ||
6753a4bf | 1329 | /* Return true if the loop described by LOOP_VINFO is fully-masked and |
1330 | if the first iteration should use a partial mask in order to achieve | |
1331 | alignment. */ | |
1332 | ||
1333 | static inline bool | |
1334 | vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo) | |
1335 | { | |
1336 | return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) | |
1337 | && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); | |
1338 | } | |
1339 | ||
d75596cd | 1340 | /* Return the number of vectors of type VECTYPE that are needed to get |
1341 | NUNITS elements. NUNITS should be based on the vectorization factor, | |
1342 | so it is always a known multiple of the number of elements in VECTYPE. */ | |
1343 | ||
1344 | static inline unsigned int | |
1345 | vect_get_num_vectors (poly_uint64 nunits, tree vectype) | |
1346 | { | |
1347 | return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant (); | |
1348 | } | |
1349 | ||
4eb17cb6 | 1350 | /* Return the number of copies needed for loop vectorization when |
1351 | a statement operates on vectors of type VECTYPE. This is the | |
1352 | vectorization factor divided by the number of elements in | |
1353 | VECTYPE and is always known at compile time. */ | |
1354 | ||
1355 | static inline unsigned int | |
1356 | vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype) | |
1357 | { | |
d75596cd | 1358 | return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype); |
1359 | } | |
1360 | ||
1361 | /* Update maximum unit count *MAX_NUNITS so that it accounts for | |
1362 | the number of units in vector type VECTYPE. *MAX_NUNITS can be 1 | |
1363 | if we haven't yet recorded any vector types. */ | |
1364 | ||
1365 | static inline void | |
1366 | vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype) | |
1367 | { | |
1368 | /* All unit counts have the form current_vector_size * X for some | |
1369 | rational X, so two unit sizes must have a common multiple. | |
1370 | Everything is a multiple of the initial value of 1. */ | |
1371 | poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); | |
1372 | *max_nunits = force_common_multiple (*max_nunits, nunits); | |
1373 | } | |
1374 | ||
1375 | /* Return the vectorization factor that should be used for costing | |
1376 | purposes while vectorizing the loop described by LOOP_VINFO. | |
1377 | Pick a reasonable estimate if the vectorization factor isn't | |
1378 | known at compile time. */ | |
1379 | ||
1380 | static inline unsigned int | |
1381 | vect_vf_for_cost (loop_vec_info loop_vinfo) | |
1382 | { | |
1383 | return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); | |
4eb17cb6 | 1384 | } |
1385 | ||
09de8b78 | 1386 | /* Estimate the number of elements in VEC_TYPE for costing purposes. |
1387 | Pick a reasonable estimate if the exact number isn't known at | |
1388 | compile time. */ | |
1389 | ||
1390 | static inline unsigned int | |
1391 | vect_nunits_for_cost (tree vec_type) | |
1392 | { | |
1393 | return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type)); | |
1394 | } | |
1395 | ||
60b29a7e | 1396 | /* Return the maximum possible vectorization factor for LOOP_VINFO. */ |
1397 | ||
1398 | static inline unsigned HOST_WIDE_INT | |
1399 | vect_max_vf (loop_vec_info loop_vinfo) | |
1400 | { | |
1401 | unsigned HOST_WIDE_INT vf; | |
1402 | if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)) | |
1403 | return vf; | |
1404 | return MAX_VECTORIZATION_FACTOR; | |
1405 | } | |
1406 | ||
abc9513d | 1407 | /* Return the size of the value accessed by unvectorized data reference |
1408 | DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated | |
1409 | for the associated gimple statement, since that guarantees that DR_INFO | |
1410 | accesses either a scalar or a scalar equivalent. ("Scalar equivalent" | |
1411 | here includes things like V1SI, which can be vectorized in the same way | |
33482edf | 1412 | as a plain SI.) */ |
1413 | ||
1414 | inline unsigned int | |
abc9513d | 1415 | vect_get_scalar_dr_size (dr_vec_info *dr_info) |
33482edf | 1416 | { |
abc9513d | 1417 | return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr)))); |
33482edf | 1418 | } |
1419 | ||
c309657f | 1420 | /* Source location + hotness information. */ |
1421 | extern dump_user_location_t vect_location; | |
fb85abff | 1422 | |
/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)
88f6eb8f | 1437 | |
/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations never
   get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  /* Destructor only: the reset work happens when the object leaves
     scope (defined in tree-vectorizer.c).  */
  ~auto_purge_vect_location ();
};
1455 | ||
c91e8223 | 1456 | /*-----------------------------------------------------------------*/ |
1457 | /* Function prototypes. */ | |
1458 | /*-----------------------------------------------------------------*/ | |
1459 | ||
48e1416a | 1460 | /* Simple loop peeling and versioning utilities for vectorizer's purposes - |
fb85abff | 1461 | in tree-vect-loop-manip.c. */ |
60b29a7e | 1462 | extern void vect_set_loop_condition (struct loop *, loop_vec_info, |
1463 | tree, tree, tree, bool); | |
1f1872fd | 1464 | extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge); |
c71d3c24 | 1465 | struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, |
1466 | struct loop *, edge); | |
44245620 | 1467 | struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool, |
1468 | poly_uint64); | |
5b631e09 | 1469 | extern struct loop *vect_do_peeling (loop_vec_info, tree, tree, |
cde959e7 | 1470 | tree *, tree *, tree *, int, bool, bool); |
6753a4bf | 1471 | extern void vect_prepare_for_masked_peels (loop_vec_info); |
c309657f | 1472 | extern dump_user_location_t find_loop_location (struct loop *); |
fb85abff | 1473 | extern bool vect_can_advance_ivs_p (loop_vec_info); |
c91e8223 | 1474 | |
fb85abff | 1475 | /* In tree-vect-stmts.c. */ |
3106770a | 1476 | extern poly_uint64 current_vector_size; |
f2983e95 | 1477 | extern tree get_vectype_for_scalar_type (tree); |
41b4a935 | 1478 | extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64); |
dab48979 | 1479 | extern tree get_mask_type_for_scalar_type (tree); |
b334cbba | 1480 | extern tree get_same_sized_vectype (tree, tree); |
60b29a7e | 1481 | extern bool vect_get_loop_mask_type (loop_vec_info); |
bf8b3614 | 1482 | extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, |
bfa5bad6 | 1483 | stmt_vec_info * = NULL, gimple ** = NULL); |
bf8b3614 | 1484 | extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, |
bfa5bad6 | 1485 | tree *, stmt_vec_info * = NULL, |
1486 | gimple ** = NULL); | |
ecc42a77 | 1487 | extern bool supportable_widening_operation (enum tree_code, stmt_vec_info, |
1488 | tree, tree, enum tree_code *, | |
42acab1c | 1489 | enum tree_code *, int *, |
1490 | vec<tree> *); | |
b334cbba | 1491 | extern bool supportable_narrowing_operation (enum tree_code, tree, tree, |
1492 | enum tree_code *, | |
f1f41a6c | 1493 | int *, vec<tree> *); |
4db2b577 | 1494 | extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, |
f97dec81 | 1495 | enum vect_cost_for_stmt, stmt_vec_info, |
1496 | int, enum vect_cost_model_location); | |
ecc42a77 | 1497 | extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *); |
1498 | extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *, | |
585ed623 | 1499 | gimple_stmt_iterator *); |
ed9370cc | 1500 | extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info); |
ecc42a77 | 1501 | extern tree vect_get_store_rhs (stmt_vec_info); |
1502 | extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type); | |
1503 | extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL); | |
1504 | extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *, | |
44b24fa0 | 1505 | vec<tree> *, slp_tree); |
c0dd122a | 1506 | extern void vect_get_vec_defs_for_stmt_copy (vec_info *, |
44b24fa0 | 1507 | vec<tree> *, vec<tree> *); |
ecc42a77 | 1508 | extern tree vect_init_vector (stmt_vec_info, tree, tree, |
fb85abff | 1509 | gimple_stmt_iterator *); |
c0dd122a | 1510 | extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree); |
ecc42a77 | 1511 | extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *, |
9632f098 | 1512 | slp_tree, slp_instance); |
ecc42a77 | 1513 | extern void vect_remove_stores (stmt_vec_info); |
ed9370cc | 1514 | extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree, |
1515 | slp_instance, stmt_vector_for_cost *); | |
ecc42a77 | 1516 | extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *, |
98acf890 | 1517 | stmt_vec_info *, bool, slp_tree, |
c863e35b | 1518 | stmt_vector_for_cost *); |
2fbb03c0 | 1519 | extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *, |
1520 | stmt_vec_info *, slp_tree, | |
1521 | stmt_vector_for_cost *); | |
1ce0a2db | 1522 | extern void vect_get_load_cost (stmt_vec_info, int, bool, |
4db2b577 | 1523 | unsigned int *, unsigned int *, |
f97dec81 | 1524 | stmt_vector_for_cost *, |
1525 | stmt_vector_for_cost *, bool); | |
1ce0a2db | 1526 | extern void vect_get_store_cost (stmt_vec_info, int, |
4db2b577 | 1527 | unsigned int *, stmt_vector_for_cost *); |
45eea33f | 1528 | extern bool vect_supportable_shift (enum tree_code, tree); |
25eb7c31 | 1529 | extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &); |
1530 | extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &); | |
cfd9ca84 | 1531 | extern void optimize_mask_stores (struct loop*); |
60b29a7e | 1532 | extern gcall *vect_gen_while (tree, tree, tree); |
6753a4bf | 1533 | extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree); |
ed9370cc | 1534 | extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *, |
1535 | tree *); | |
1536 | extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info); | |
48e1416a | 1537 | |
fb85abff | 1538 | /* In tree-vect-data-refs.c. */ |
e092c20e | 1539 | extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64); |
fb85abff | 1540 | extern enum dr_alignment_support vect_supportable_dr_alignment |
abc9513d | 1541 | (dr_vec_info *, bool); |
ecc42a77 | 1542 | extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *, |
fb85abff | 1543 | HOST_WIDE_INT *); |
ed9370cc | 1544 | extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); |
c256513d | 1545 | extern bool vect_slp_analyze_instance_dependence (slp_instance); |
ed9370cc | 1546 | extern opt_result vect_enhance_data_refs_alignment (loop_vec_info); |
1547 | extern opt_result vect_analyze_data_refs_alignment (loop_vec_info); | |
1548 | extern opt_result vect_verify_datarefs_alignment (loop_vec_info); | |
2f6fec15 | 1549 | extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); |
ed9370cc | 1550 | extern opt_result vect_analyze_data_ref_accesses (vec_info *); |
1551 | extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info); | |
1d2c127d | 1552 | extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int, |
1553 | signop, int, internal_fn *, tree *); | |
ecc42a77 | 1554 | extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, |
cf60da07 | 1555 | gather_scatter_info *); |
ed9370cc | 1556 | extern opt_result vect_find_stmt_data_reference (loop_p, gimple *, |
1557 | vec<data_reference_p> *); | |
1558 | extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *); | |
4f372c2c | 1559 | extern void vect_record_base_alignments (vec_info *); |
ecc42a77 | 1560 | extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree, |
bd5ba09f | 1561 | tree *, gimple_stmt_iterator *, |
3c8b7bc7 | 1562 | gimple **, bool, |
1f9a3b5c | 1563 | tree = NULL_TREE, tree = NULL_TREE); |
ecc42a77 | 1564 | extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, |
1565 | stmt_vec_info, tree); | |
1c4c7e32 | 1566 | extern void vect_copy_ref_info (tree, tree); |
fb85abff | 1567 | extern tree vect_create_destination_var (tree, tree); |
ee612634 | 1568 | extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); |
2dd8e84c | 1569 | extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); |
bc691ae4 | 1570 | extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); |
2dd8e84c | 1571 | extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); |
ecc42a77 | 1572 | extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info, |
f1f41a6c | 1573 | gimple_stmt_iterator *, vec<tree> *); |
ecc42a77 | 1574 | extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *, |
1575 | tree *, enum dr_alignment_support, tree, | |
fb85abff | 1576 | struct loop **); |
ecc42a77 | 1577 | extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int, |
fb85abff | 1578 | gimple_stmt_iterator *); |
ecc42a77 | 1579 | extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>); |
fb85abff | 1580 | extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); |
23ffec42 | 1581 | extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, |
1582 | const char * = NULL); | |
ecc42a77 | 1583 | extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *, |
9e879814 | 1584 | tree, tree = NULL_TREE); |
fb85abff | 1585 | |
1586 | /* In tree-vect-loop.c. */ | |
1587 | /* FORNOW: Used in tree-parloops.c. */ | |
f4649a92 | 1588 | extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info, |
1589 | bool *, bool); | |
ef871d99 | 1590 | extern widest_int vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo); |
5051abaf | 1591 | /* Used in gimple-loop-interchange.c. */ |
c309657f | 1592 | extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree, |
5051abaf | 1593 | enum tree_code); |
fb85abff | 1594 | /* Drive for loop analysis stage. */ |
ed9370cc | 1595 | extern opt_loop_vec_info vect_analyze_loop (struct loop *, |
1596 | loop_vec_info, | |
1597 | vec_info_shared *); | |
3a815241 | 1598 | extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL); |
cde959e7 | 1599 | extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, |
1600 | tree *, bool); | |
60b29a7e | 1601 | extern tree vect_halve_mask_nunits (tree); |
1602 | extern tree vect_double_mask_nunits (tree); | |
1603 | extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, | |
1604 | unsigned int, tree); | |
1605 | extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, | |
1606 | unsigned int, tree, unsigned int); | |
1607 | ||
fb85abff | 1608 | /* Drive for loop transformation stage. */ |
5b631e09 | 1609 | extern struct loop *vect_transform_loop (loop_vec_info); |
ed9370cc | 1610 | extern opt_loop_vec_info vect_analyze_loop_form (struct loop *, |
1611 | vec_info_shared *); | |
ecc42a77 | 1612 | extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *, |
435515db | 1613 | slp_tree, int, stmt_vec_info *, |
c863e35b | 1614 | stmt_vector_for_cost *); |
ecc42a77 | 1615 | extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *, |
435515db | 1616 | stmt_vec_info *, slp_tree, slp_instance, |
c863e35b | 1617 | stmt_vector_for_cost *); |
ecc42a77 | 1618 | extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *, |
435515db | 1619 | stmt_vec_info *, slp_tree, |
c863e35b | 1620 | stmt_vector_for_cost *); |
ecc42a77 | 1621 | extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *); |
fec8b6d0 | 1622 | extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); |
7a66d0cf | 1623 | extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, |
1624 | stmt_vector_for_cost *, | |
f97dec81 | 1625 | stmt_vector_for_cost *, |
1626 | stmt_vector_for_cost *); | |
f404501a | 1627 | extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree); |
4a61a337 | 1628 | |
fb85abff | 1629 | /* In tree-vect-slp.c. */ |
2068679d | 1630 | extern void vect_free_slp_instance (slp_instance, bool); |
678e3d6e | 1631 | extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , |
d75596cd | 1632 | gimple_stmt_iterator *, poly_uint64, |
1633 | slp_instance, bool, unsigned *); | |
1c57101b | 1634 | extern bool vect_slp_analyze_operations (vec_info *); |
02e9bec2 | 1635 | extern void vect_schedule_slp (vec_info *); |
ed9370cc | 1636 | extern opt_result vect_analyze_slp (vec_info *, unsigned); |
bc937a44 | 1637 | extern bool vect_make_slp_decision (loop_vec_info); |
fb85abff | 1638 | extern void vect_detect_hybrid_slp (loop_vec_info); |
4f0d4cce | 1639 | extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *); |
0a08c1bc | 1640 | extern bool vect_slp_bb (basic_block); |
3d9c962c | 1641 | extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree); |
ecc42a77 | 1642 | extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info); |
633af029 | 1643 | extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode, |
1644 | unsigned int * = NULL, | |
1645 | tree * = NULL, tree * = NULL); | |
1646 | extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>, | |
1647 | unsigned int, vec<tree> &); | |
ecc42a77 | 1648 | extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info); |
fb85abff | 1649 | |
1650 | /* In tree-vect-patterns.c. */ | |
4a61a337 | 1651 | /* Pattern recognition functions. |
1652 | Additional pattern recognition functions can (and will) be added | |
1653 | in the future. */ | |
e2c5c678 | 1654 | void vect_pattern_recog (vec_info *); |
4a61a337 | 1655 | |
10230637 | 1656 | /* In tree-vectorizer.c. */ |
1657 | unsigned vectorize_loops (void); | |
d5e80d93 | 1658 | void vect_free_loop_info_assumptions (struct loop *); |
0decb676 | 1659 | gimple *vect_loop_vectorized_call (struct loop *, gcond **cond = NULL); |
1660 | ||
c91e8223 | 1661 | |
1662 | #endif /* GCC_TREE_VECTORIZER_H */ |