/* Vectorizer
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
fb85abff | 27 | |
/* Kinds of temporaries the vectorizer creates; used when naming them.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};
35 | ||
/* Arity of a vectorizable operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
42 | ||
/* Kinds of alignment support available for a data reference.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};
51 | ||
/* Classification of a def with respect to cross-iteration cycles.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};
64 | ||
/* Kind of reduction detected in a loop.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION
};
72 | ||
/* True iff D is a def type that takes part in a vectorizable
   cross-iteration cycle (reduction, double reduction or nested cycle).  */
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))
76 | ||
4db2b577 | 77 | /* Structure to encapsulate information about a group of like |
78 | instructions to be presented to the target cost model. */ | |
6dc50383 | 79 | struct stmt_info_for_cost { |
4db2b577 | 80 | int count; |
81 | enum vect_cost_for_stmt kind; | |
42acab1c | 82 | gimple *stmt; |
4db2b577 | 83 | int misalign; |
6dc50383 | 84 | }; |
4db2b577 | 85 | |
f1f41a6c | 86 | typedef vec<stmt_info_for_cost> stmt_vector_for_cost; |
4db2b577 | 87 | |
4f372c2c | 88 | /* Maps base addresses to an innermost_loop_behavior that gives the maximum |
89 | known alignment for that base. */ | |
90 | typedef hash_map<tree_operand_hash, | |
91 | innermost_loop_behavior *> vec_base_alignments; | |
92 | ||
c6895939 | 93 | /************************************************************************ |
94 | SLP | |
95 | ************************************************************************/ | |
40bcc7c2 | 96 | typedef struct _slp_tree *slp_tree; |
c6895939 | 97 | |
b0f64919 | 98 | /* A computation tree of an SLP instance. Each node corresponds to a group of |
c6895939 | 99 | stmts to be packed in a SIMD stmt. */ |
40bcc7c2 | 100 | struct _slp_tree { |
b0f64919 | 101 | /* Nodes that contain def-stmts of this node statements operands. */ |
40bcc7c2 | 102 | vec<slp_tree> children; |
c6895939 | 103 | /* A group of scalar stmts to be vectorized together. */ |
42acab1c | 104 | vec<gimple *> stmts; |
678e3d6e | 105 | /* Load permutation relative to the stores, NULL if there is no |
106 | permutation. */ | |
107 | vec<unsigned> load_permutation; | |
c6895939 | 108 | /* Vectorized stmt/s. */ |
42acab1c | 109 | vec<gimple *> vec_stmts; |
48e1416a | 110 | /* Number of vector stmts that are created to replace the group of scalar |
111 | stmts. It is calculated during the transformation phase as the number of | |
112 | scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF | |
c6895939 | 113 | divided by vector size. */ |
114 | unsigned int vec_stmts_size; | |
66e30248 | 115 | /* Whether the scalar computations use two different operators. */ |
116 | bool two_operators; | |
6d37c111 | 117 | /* The DEF type of this node. */ |
118 | enum vect_def_type def_type; | |
40bcc7c2 | 119 | }; |
c6895939 | 120 | |
121 | ||
122 | /* SLP instance is a sequence of stmts in a loop that can be packed into | |
123 | SIMD stmts. */ | |
124 | typedef struct _slp_instance { | |
125 | /* The root of SLP tree. */ | |
126 | slp_tree root; | |
127 | ||
128 | /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ | |
129 | unsigned int group_size; | |
130 | ||
131 | /* The unrolling factor required to vectorized this SLP instance. */ | |
132 | unsigned int unrolling_factor; | |
133 | ||
a0515226 | 134 | /* The group of nodes that contain loads of this SLP instance. */ |
f1f41a6c | 135 | vec<slp_tree> loads; |
6154acba | 136 | |
137 | /* The SLP node containing the reduction PHIs. */ | |
138 | slp_tree reduc_phis; | |
c6895939 | 139 | } *slp_instance; |
140 | ||
c6895939 | 141 | |
/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type
b0f64919 | 155 | |
b0f64919 | 156 | |
0822b158 | 157 | |
f68a7726 | 158 | /* Describes two objects whose addresses must be unequal for the vectorized |
159 | loop to be valid. */ | |
160 | typedef std::pair<tree, tree> vec_object_pair; | |
161 | ||
e2c5c678 | 162 | /* Vectorizer state common between loop and basic-block vectorization. */ |
163 | struct vec_info { | |
e15e8a2a | 164 | enum vec_kind { bb, loop }; |
165 | ||
166 | vec_info (vec_kind, void *); | |
167 | ~vec_info (); | |
168 | ||
169 | /* The type of vectorization. */ | |
170 | vec_kind kind; | |
e2c5c678 | 171 | |
172 | /* All SLP instances. */ | |
e15e8a2a | 173 | auto_vec<slp_instance> slp_instances; |
e2c5c678 | 174 | |
e15e8a2a | 175 | /* All data references. Freed by free_data_refs, so not an auto_vec. */ |
e2c5c678 | 176 | vec<data_reference_p> datarefs; |
177 | ||
4f372c2c | 178 | /* Maps base addresses to an innermost_loop_behavior that gives the maximum |
179 | known alignment for that base. */ | |
180 | vec_base_alignments base_alignments; | |
181 | ||
e15e8a2a | 182 | /* All data dependences. Freed by free_dependence_relations, so not |
183 | an auto_vec. */ | |
e2c5c678 | 184 | vec<ddr_p> ddrs; |
185 | ||
186 | /* All interleaving chains of stores, represented by the first | |
187 | stmt in the chain. */ | |
e15e8a2a | 188 | auto_vec<gimple *> grouped_stores; |
e2c5c678 | 189 | |
190 | /* Cost data used by the target cost model. */ | |
191 | void *target_cost_data; | |
192 | }; | |
193 | ||
194 | struct _loop_vec_info; | |
195 | struct _bb_vec_info; | |
196 | ||
197 | template<> | |
198 | template<> | |
199 | inline bool | |
200 | is_a_helper <_loop_vec_info *>::test (vec_info *i) | |
201 | { | |
202 | return i->kind == vec_info::loop; | |
203 | } | |
204 | ||
205 | template<> | |
206 | template<> | |
207 | inline bool | |
208 | is_a_helper <_bb_vec_info *>::test (vec_info *i) | |
209 | { | |
210 | return i->kind == vec_info::bb; | |
211 | } | |
212 | ||
3e871d4d | 213 | |
4e58562d | 214 | /*-----------------------------------------------------------------*/ |
215 | /* Info on vectorized loops. */ | |
216 | /*-----------------------------------------------------------------*/ | |
e2c5c678 | 217 | typedef struct _loop_vec_info : public vec_info { |
e15e8a2a | 218 | _loop_vec_info (struct loop *); |
219 | ~_loop_vec_info (); | |
4e58562d | 220 | |
221 | /* The loop to which this info struct refers to. */ | |
222 | struct loop *loop; | |
223 | ||
224 | /* The loop basic blocks. */ | |
225 | basic_block *bbs; | |
226 | ||
796f6cba | 227 | /* Number of latch executions. */ |
228 | tree num_itersm1; | |
4e58562d | 229 | /* Number of iterations. */ |
230 | tree num_iters; | |
796f6cba | 231 | /* Number of iterations of the original loop. */ |
be53c6d4 | 232 | tree num_iters_unchanged; |
d5e80d93 | 233 | /* Condition under which this loop is analyzed and versioned. */ |
234 | tree num_iters_assumptions; | |
4e58562d | 235 | |
004a94a5 | 236 | /* Threshold of number of iterations below which vectorzation will not be |
237 | performed. It is calculated from MIN_PROFITABLE_ITERS and | |
238 | PARAM_MIN_VECT_LOOP_BOUND. */ | |
239 | unsigned int th; | |
240 | ||
4e58562d | 241 | /* Unrolling factor */ |
242 | int vectorization_factor; | |
243 | ||
244 | /* Unknown DRs according to which loop was peeled. */ | |
245 | struct data_reference *unaligned_dr; | |
246 | ||
39b8f742 | 247 | /* peeling_for_alignment indicates whether peeling for alignment will take |
248 | place, and what the peeling factor should be: | |
249 | peeling_for_alignment = X means: | |
250 | If X=0: Peeling for alignment will not be applied. | |
251 | If X>0: Peel first X iterations. | |
252 | If X=-1: Generate a runtime test to calculate the number of iterations | |
253 | to be peeled, using the dataref recorded in the field | |
254 | unaligned_dr. */ | |
255 | int peeling_for_alignment; | |
4e58562d | 256 | |
25e3c2e8 | 257 | /* The mask used to check the alignment of pointers or arrays. */ |
258 | int ptr_mask; | |
259 | ||
a8af2e86 | 260 | /* The loop nest in which the data dependences are computed. */ |
e15e8a2a | 261 | auto_vec<loop_p> loop_nest; |
a8af2e86 | 262 | |
45b13dc3 | 263 | /* Data Dependence Relations defining address ranges that are candidates |
264 | for a run-time aliasing check. */ | |
e15e8a2a | 265 | auto_vec<ddr_p> may_alias_ddrs; |
45b13dc3 | 266 | |
8a7b0f48 | 267 | /* Data Dependence Relations defining address ranges together with segment |
268 | lengths from which the run-time aliasing check is built. */ | |
e15e8a2a | 269 | auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs; |
8a7b0f48 | 270 | |
f68a7726 | 271 | /* Check that the addresses of each pair of objects is unequal. */ |
e15e8a2a | 272 | auto_vec<vec_object_pair> check_unequal_addrs; |
f68a7726 | 273 | |
25e3c2e8 | 274 | /* Statements in the loop that have data references that are candidates for a |
275 | runtime (loop versioning) misalignment check. */ | |
e15e8a2a | 276 | auto_vec<gimple *> may_misalign_stmts; |
25e3c2e8 | 277 | |
eefa05c8 | 278 | /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ |
e15e8a2a | 279 | auto_vec<gimple *> reductions; |
0822b158 | 280 | |
39a5d6b1 | 281 | /* All reduction chains in the loop, represented by the first |
282 | stmt in the chain. */ | |
e15e8a2a | 283 | auto_vec<gimple *> reduction_chains; |
39a5d6b1 | 284 | |
2a9a3444 | 285 | /* Cost vector for a single scalar iteration. */ |
e15e8a2a | 286 | auto_vec<stmt_info_for_cost> scalar_cost_vec; |
2a9a3444 | 287 | |
487798e2 | 288 | /* The unrolling factor needed to SLP the loop. In case of that pure SLP is |
289 | applied to the loop, i.e., no unrolling is needed, this is 1. */ | |
290 | unsigned slp_unrolling_factor; | |
291 | ||
2a9a3444 | 292 | /* Cost of a single scalar iteration. */ |
293 | int single_scalar_iteration_cost; | |
294 | ||
487798e2 | 295 | /* Is the loop vectorizable? */ |
296 | bool vectorizable; | |
297 | ||
ee612634 | 298 | /* When we have grouped data accesses with gaps, we may introduce invalid |
a4ee7fac | 299 | memory accesses. We peel the last iteration of the loop to prevent |
300 | this. */ | |
301 | bool peeling_for_gaps; | |
302 | ||
36f39b2e | 303 | /* When the number of iterations is not a multiple of the vector size |
304 | we need to peel off iterations at the end to form an epilogue loop. */ | |
305 | bool peeling_for_niter; | |
306 | ||
ba69439f | 307 | /* Reductions are canonicalized so that the last operand is the reduction |
308 | operand. If this places a constant into RHS1, this decanonicalizes | |
309 | GIMPLE for other phases, so we must track when this has occurred and | |
310 | fix it up. */ | |
311 | bool operands_swapped; | |
312 | ||
c7a8722c | 313 | /* True if there are no loop carried data dependencies in the loop. |
314 | If loop->safelen <= 1, then this is always true, either the loop | |
315 | didn't have any loop carried data dependencies, or the loop is being | |
316 | vectorized guarded with some runtime alias checks, or couldn't | |
317 | be vectorized at all, but then this field shouldn't be used. | |
318 | For loop->safelen >= 2, the user has asserted that there are no | |
319 | backward dependencies, but there still could be loop carried forward | |
320 | dependencies in such loops. This flag will be false if normal | |
321 | vectorizer data dependency analysis would fail or require versioning | |
322 | for alias, but because of loop->safelen >= 2 it has been vectorized | |
323 | even without versioning for alias. E.g. in: | |
324 | #pragma omp simd | |
325 | for (int i = 0; i < m; i++) | |
326 | a[i] = a[i + k] * c; | |
327 | (or #pragma simd or #pragma ivdep) we can vectorize this and it will | |
328 | DTRT even for k > 0 && k < m, but without safelen we would not | |
329 | vectorize this, so this field would be false. */ | |
330 | bool no_data_dependencies; | |
331 | ||
487798e2 | 332 | /* Mark loops having masked stores. */ |
333 | bool has_mask_store; | |
334 | ||
c71d3c24 | 335 | /* If if-conversion versioned this loop before conversion, this is the |
336 | loop version without if-conversion. */ | |
337 | struct loop *scalar_loop; | |
338 | ||
5b631e09 | 339 | /* For loops being epilogues of already vectorized loops |
340 | this points to the original vectorized loop. Otherwise NULL. */ | |
341 | _loop_vec_info *orig_loop_info; | |
342 | ||
4e58562d | 343 | } *loop_vec_info; |
344 | ||
/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)	   (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0 \
   || (L)->check_unequal_addrs.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_VECT_FACTOR(L) \
  (LOOP_VINFO_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
405 | ||
221e9a92 | 406 | static inline loop_vec_info |
407 | loop_vec_info_for_loop (struct loop *loop) | |
408 | { | |
409 | return (loop_vec_info) loop->aux; | |
410 | } | |
411 | ||
412 | static inline bool | |
42acab1c | 413 | nested_in_vect_loop_p (struct loop *loop, gimple *stmt) |
221e9a92 | 414 | { |
48e1416a | 415 | return (loop->inner |
75a70cf9 | 416 | && (loop->inner == (gimple_bb (stmt))->loop_father)); |
221e9a92 | 417 | } |
418 | ||
e2c5c678 | 419 | typedef struct _bb_vec_info : public vec_info |
420 | { | |
e15e8a2a | 421 | _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator); |
422 | ~_bb_vec_info (); | |
423 | ||
37545e54 | 424 | basic_block bb; |
4c7587f5 | 425 | gimple_stmt_iterator region_begin; |
426 | gimple_stmt_iterator region_end; | |
37545e54 | 427 | } *bb_vec_info; |
428 | ||
/* Access functions for bb_vec_info.  */
#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->datarefs
#define BB_VINFO_DDRS(B)             (B)->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
37545e54 | 435 | |
436 | static inline bb_vec_info | |
437 | vec_info_for_bb (basic_block bb) | |
438 | { | |
439 | return (bb_vec_info) bb->aux; | |
440 | } | |
441 | ||
/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};
463 | ||
/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};
488 | ||
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
510 | ||
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};
c6895939 | 550 | |
f1168a33 | 551 | typedef struct data_reference *dr_p; |
f1168a33 | 552 | |
c91e8223 | 553 | typedef struct _stmt_vec_info { |
554 | ||
555 | enum stmt_vec_info_type type; | |
556 | ||
609c710b | 557 | /* Indicates whether this stmts is part of a computation whose result is |
558 | used outside the loop. */ | |
559 | bool live; | |
560 | ||
561 | /* Stmt is part of some pattern (computation idiom) */ | |
562 | bool in_pattern_p; | |
563 | ||
487798e2 | 564 | /* Is this statement vectorizable or should it be skipped in (partial) |
565 | vectorization. */ | |
566 | bool vectorizable; | |
567 | ||
c91e8223 | 568 | /* The stmt to which this info struct refers to. */ |
42acab1c | 569 | gimple *stmt; |
c91e8223 | 570 | |
e2c5c678 | 571 | /* The vec_info with respect to which STMT is vectorized. */ |
572 | vec_info *vinfo; | |
c91e8223 | 573 | |
b334cbba | 574 | /* The vector type to be used for the LHS of this statement. */ |
c91e8223 | 575 | tree vectype; |
576 | ||
577 | /* The vectorized version of the stmt. */ | |
42acab1c | 578 | gimple *vectorized_stmt; |
c91e8223 | 579 | |
580 | ||
16ed3c2c | 581 | /* The following is relevant only for stmts that contain a non-scalar |
48e1416a | 582 | data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have |
16ed3c2c | 583 | at most one such data-ref. */ |
c91e8223 | 584 | |
b0eb8c66 | 585 | /* Information about the data-ref (access function, etc), |
586 | relative to the inner-most containing loop. */ | |
c91e8223 | 587 | struct data_reference *data_ref_info; |
588 | ||
b0eb8c66 | 589 | /* Information about the data-ref relative to this loop |
590 | nest (the loop that is being considered for vectorization). */ | |
9e879814 | 591 | innermost_loop_behavior dr_wrt_vec_loop; |
b0eb8c66 | 592 | |
559260b3 | 593 | /* For loop PHI nodes, the base and evolution part of it. This makes sure |
86faead7 | 594 | this information is still available in vect_update_ivs_after_vectorizer |
595 | where we may not be able to re-analyze the PHI nodes evolution as | |
596 | peeling for the prologue loop can make it unanalyzable. The evolution | |
559260b3 | 597 | part is still correct after peeling, but the base may have changed from |
598 | the version here. */ | |
599 | tree loop_phi_evolution_base_unchanged; | |
86faead7 | 600 | tree loop_phi_evolution_part; |
601 | ||
48e1416a | 602 | /* Used for various bookkeeping purposes, generally holding a pointer to |
603 | some other stmt S that is in some way "related" to this stmt. | |
4a61a337 | 604 | Current use of this field is: |
48e1416a | 605 | If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is |
606 | true): S is the "pattern stmt" that represents (and replaces) the | |
607 | sequence of stmts that constitutes the pattern. Similarly, the | |
608 | related_stmt of the "pattern stmt" points back to this stmt (which is | |
609 | the last stmt in the original sequence of stmts that constitutes the | |
4a61a337 | 610 | pattern). */ |
42acab1c | 611 | gimple *related_stmt; |
4a61a337 | 612 | |
18937389 | 613 | /* Used to keep a sequence of def stmts of a pattern stmt if such exists. */ |
614 | gimple_seq pattern_def_seq; | |
45eea33f | 615 | |
f1168a33 | 616 | /* List of datarefs that are known to have the same alignment as the dataref |
617 | of this stmt. */ | |
f1f41a6c | 618 | vec<dr_p> same_align_refs; |
f1168a33 | 619 | |
295327ab | 620 | /* Selected SIMD clone's function info. First vector element |
621 | is SIMD clone's function decl, followed by a pair of trees (base + step) | |
622 | for linear arguments (pair of NULLs for other arguments). */ | |
623 | vec<tree> simd_clone_info; | |
d09768a4 | 624 | |
e12906b9 | 625 | /* Classify the def of this stmt. */ |
626 | enum vect_def_type def_type; | |
627 | ||
609c710b | 628 | /* Whether the stmt is SLPed, loop-based vectorized, or both. */ |
629 | enum slp_vect_type slp_type; | |
630 | ||
21009880 | 631 | /* Interleaving and reduction chains info. */ |
632 | /* First element in the group. */ | |
42acab1c | 633 | gimple *first_element; |
21009880 | 634 | /* Pointer to the next element in the group. */ |
42acab1c | 635 | gimple *next_element; |
21009880 | 636 | /* For data-refs, in case that two or more stmts share data-ref, this is the |
637 | pointer to the previously detected stmt with the same dr. */ | |
42acab1c | 638 | gimple *same_dr_stmt; |
21009880 | 639 | /* The size of the group. */ |
6b8dbb53 | 640 | unsigned int size; |
641 | /* For stores, number of stores from this group seen. We vectorize the last | |
642 | one. */ | |
643 | unsigned int store_count; | |
644 | /* For loads only, the gap from the previous load. For consecutive loads, GAP | |
645 | is 1. */ | |
646 | unsigned int gap; | |
609c710b | 647 | |
a8cf7702 | 648 | /* The minimum negative dependence distance this stmt participates in |
649 | or zero if none. */ | |
650 | unsigned int min_neg_dist; | |
651 | ||
609c710b | 652 | /* Not all stmts in the loop need to be vectorized. e.g, the increment |
653 | of the loop induction variable and computation of array indexes. relevant | |
654 | indicates whether the stmt needs to be vectorized. */ | |
655 | enum vect_relevant relevant; | |
867c03eb | 656 | |
0bd6d857 | 657 | /* For loads if this is a gather, for stores if this is a scatter. */ |
658 | bool gather_scatter_p; | |
e1c75243 | 659 | |
660 | /* True if this is an access with loop-invariant stride. */ | |
661 | bool strided_p; | |
3d483a94 | 662 | |
487798e2 | 663 | /* For both loads and stores. */ |
664 | bool simd_lane_access_p; | |
665 | ||
85b53a1f | 666 | /* Classifies how the load or store is going to be implemented |
667 | for loop vectorization. */ | |
668 | vect_memory_access_type memory_access_type; | |
669 | ||
d09d8733 | 670 | /* For reduction loops, this is the type of reduction. */ |
671 | enum vect_reduction_type v_reduc_type; | |
672 | ||
834a2c29 | 673 | /* For CONST_COND_REDUCTION, record the reduc code. */ |
674 | enum tree_code const_cond_reduc_code; | |
675 | ||
119a8852 | 676 | /* On a reduction PHI the reduction type as detected by |
677 | vect_force_simple_reduction. */ | |
678 | enum vect_reduction_type reduc_type; | |
679 | ||
44b24fa0 | 680 | /* On a reduction PHI the def returned by vect_force_simple_reduction. |
681 | On the def returned by vect_force_simple_reduction the | |
682 | corresponding PHI. */ | |
119a8852 | 683 | gimple *reduc_def; |
684 | ||
0d85be19 | 685 | /* The number of scalar stmt references from active SLP instances. */ |
686 | unsigned int num_slp_uses; | |
c91e8223 | 687 | } *stmt_vec_info; |
688 | ||
cf60da07 | 689 | /* Information about a gather/scatter call. */ |
690 | struct gather_scatter_info { | |
691 | /* The FUNCTION_DECL for the built-in gather/scatter function. */ | |
692 | tree decl; | |
693 | ||
694 | /* The loop-invariant base value. */ | |
695 | tree base; | |
696 | ||
697 | /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ | |
698 | tree offset; | |
699 | ||
700 | /* Each offset element should be multiplied by this amount before | |
701 | being added to the base. */ | |
702 | int scale; | |
703 | ||
704 | /* The definition type for the vectorized offset. */ | |
705 | enum vect_def_type offset_dt; | |
706 | ||
707 | /* The type of the vectorized offset. */ | |
708 | tree offset_vectype; | |
709 | }; | |
710 | ||
c91e8223 | 711 | /* Access Functions. */ |
6b8dbb53 | 712 | #define STMT_VINFO_TYPE(S) (S)->type |
713 | #define STMT_VINFO_STMT(S) (S)->stmt | |
e2c5c678 | 714 | inline loop_vec_info |
715 | STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo) | |
716 | { | |
717 | if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo)) | |
718 | return loop_vinfo; | |
719 | return NULL; | |
720 | } | |
721 | inline bb_vec_info | |
722 | STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo) | |
723 | { | |
724 | if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo)) | |
725 | return bb_vinfo; | |
726 | return NULL; | |
727 | } | |
6b8dbb53 | 728 | #define STMT_VINFO_RELEVANT(S) (S)->relevant |
729 | #define STMT_VINFO_LIVE_P(S) (S)->live | |
730 | #define STMT_VINFO_VECTYPE(S) (S)->vectype | |
731 | #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt | |
6ea6a380 | 732 | #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable |
6b8dbb53 | 733 | #define STMT_VINFO_DATA_REF(S) (S)->data_ref_info |
0bd6d857 | 734 | #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p |
e1c75243 | 735 | #define STMT_VINFO_STRIDED_P(S) (S)->strided_p |
85b53a1f | 736 | #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type |
3d483a94 | 737 | #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p |
d09d8733 | 738 | #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type |
834a2c29 | 739 | #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code |
b0eb8c66 | 740 | |
9e879814 | 741 | #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop |
742 | #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address | |
743 | #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init | |
744 | #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset | |
745 | #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step | |
a5456a6d | 746 | #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment |
747 | #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ | |
748 | (S)->dr_wrt_vec_loop.base_misalignment | |
a7e05ef2 | 749 | #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ |
750 | (S)->dr_wrt_vec_loop.offset_alignment | |
668dd7dc | 751 | #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ |
752 | (S)->dr_wrt_vec_loop.step_alignment | |
b0eb8c66 | 753 | |
6b8dbb53 | 754 | #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p |
755 | #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt | |
18937389 | 756 | #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq |
6b8dbb53 | 757 | #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs |
295327ab | 758 | #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info |
6b8dbb53 | 759 | #define STMT_VINFO_DEF_TYPE(S) (S)->def_type |
21009880 | 760 | #define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element |
761 | #define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element | |
762 | #define STMT_VINFO_GROUP_SIZE(S) (S)->size | |
763 | #define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count | |
764 | #define STMT_VINFO_GROUP_GAP(S) (S)->gap | |
765 | #define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt | |
ee612634 | 766 | #define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info) |
559260b3 | 767 | #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged |
86faead7 | 768 | #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part |
a8cf7702 | 769 | #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist |
0d85be19 | 770 | #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses |
119a8852 | 771 | #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type |
772 | #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def | |
21009880 | 773 | |
774 | #define GROUP_FIRST_ELEMENT(S) (S)->first_element | |
775 | #define GROUP_NEXT_ELEMENT(S) (S)->next_element | |
776 | #define GROUP_SIZE(S) (S)->size | |
777 | #define GROUP_STORE_COUNT(S) (S)->store_count | |
778 | #define GROUP_GAP(S) (S)->gap | |
779 | #define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt | |
c91e8223 | 780 | |
f083cd24 | 781 | #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) |
867c03eb | 782 | |
c6895939 | 783 | #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) |
784 | #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) | |
785 | #define STMT_SLP_TYPE(S) (S)->slp_type | |
786 | ||
23e1875f | 787 | struct dataref_aux { |
23e1875f | 788 | int misalignment; |
9dd88d41 | 789 | /* If true the alignment of base_decl needs to be increased. */ |
790 | bool base_misaligned; | |
9dd88d41 | 791 | tree base_decl; |
23e1875f | 792 | }; |
793 | ||
9dd88d41 | 794 | #define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux) |
795 | ||
0822b158 | 796 | #define VECT_MAX_COST 1000 |
797 | ||
862bb3cd | 798 | /* The maximum number of intermediate steps required in multi-step type |
799 | conversion. */ | |
800 | #define MAX_INTERM_CVT_STEPS 3 | |
801 | ||
1706116d | 802 | /* The maximum vectorization factor supported by any target (V64QI). */ |
803 | #define MAX_VECTORIZATION_FACTOR 64 | |
91a74fc6 | 804 | |
69fcaae3 | 805 | /* Nonzero if TYPE represents a (scalar) boolean type or type |
806 | in the middle-end compatible with it (unsigned precision 1 integral | |
807 | types). Used to determine which types should be vectorized as | |
808 | VECTOR_BOOLEAN_TYPE_P. */ | |
809 | ||
810 | #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ | |
811 | (TREE_CODE (TYPE) == BOOLEAN_TYPE \ | |
812 | || ((TREE_CODE (TYPE) == INTEGER_TYPE \ | |
813 | || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ | |
814 | && TYPE_PRECISION (TYPE) == 1 \ | |
815 | && TYPE_UNSIGNED (TYPE))) | |
816 | ||
3702cf13 | 817 | extern vec<stmt_vec_info> stmt_vec_info_vec; |
75a70cf9 | 818 | |
819 | void init_stmt_vec_info_vec (void); | |
820 | void free_stmt_vec_info_vec (void); | |
821 | ||
282bf14c | 822 | /* Return a stmt_vec_info corresponding to STMT. */ |
823 | ||
75a70cf9 | 824 | static inline stmt_vec_info |
42acab1c | 825 | vinfo_for_stmt (gimple *stmt) |
c91e8223 | 826 | { |
e15e8a2a | 827 | int uid = gimple_uid (stmt); |
828 | if (uid <= 0) | |
75a70cf9 | 829 | return NULL; |
830 | ||
3702cf13 | 831 | return stmt_vec_info_vec[uid - 1]; |
c91e8223 | 832 | } |
833 | ||
282bf14c | 834 | /* Set vectorizer information INFO for STMT. */ |
835 | ||
75a70cf9 | 836 | static inline void |
42acab1c | 837 | set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info) |
c91e8223 | 838 | { |
75a70cf9 | 839 | unsigned int uid = gimple_uid (stmt); |
840 | if (uid == 0) | |
841 | { | |
e95895ef | 842 | gcc_checking_assert (info); |
f1f41a6c | 843 | uid = stmt_vec_info_vec.length () + 1; |
75a70cf9 | 844 | gimple_set_uid (stmt, uid); |
3702cf13 | 845 | stmt_vec_info_vec.safe_push (info); |
75a70cf9 | 846 | } |
847 | else | |
fc5f8be0 | 848 | { |
849 | gcc_checking_assert (info == NULL); | |
850 | stmt_vec_info_vec[uid - 1] = info; | |
851 | } | |
c91e8223 | 852 | } |
853 | ||
282bf14c | 854 | /* Return the earlier statement between STMT1 and STMT2. */ |
855 | ||
42acab1c | 856 | static inline gimple * |
857 | get_earlier_stmt (gimple *stmt1, gimple *stmt2) | |
bdc89b8f | 858 | { |
859 | unsigned int uid1, uid2; | |
860 | ||
861 | if (stmt1 == NULL) | |
862 | return stmt2; | |
863 | ||
864 | if (stmt2 == NULL) | |
865 | return stmt1; | |
866 | ||
867 | uid1 = gimple_uid (stmt1); | |
868 | uid2 = gimple_uid (stmt2); | |
869 | ||
870 | if (uid1 == 0 || uid2 == 0) | |
871 | return NULL; | |
872 | ||
f1f41a6c | 873 | gcc_checking_assert (uid1 <= stmt_vec_info_vec.length () |
874 | && uid2 <= stmt_vec_info_vec.length ()); | |
bdc89b8f | 875 | |
876 | if (uid1 < uid2) | |
877 | return stmt1; | |
878 | else | |
879 | return stmt2; | |
880 | } | |
881 | ||
282bf14c | 882 | /* Return the later statement between STMT1 and STMT2. */ |
883 | ||
42acab1c | 884 | static inline gimple * |
885 | get_later_stmt (gimple *stmt1, gimple *stmt2) | |
d4b21757 | 886 | { |
887 | unsigned int uid1, uid2; | |
888 | ||
889 | if (stmt1 == NULL) | |
890 | return stmt2; | |
891 | ||
892 | if (stmt2 == NULL) | |
893 | return stmt1; | |
894 | ||
895 | uid1 = gimple_uid (stmt1); | |
896 | uid2 = gimple_uid (stmt2); | |
897 | ||
898 | if (uid1 == 0 || uid2 == 0) | |
899 | return NULL; | |
900 | ||
f1f41a6c | 901 | gcc_assert (uid1 <= stmt_vec_info_vec.length ()); |
902 | gcc_assert (uid2 <= stmt_vec_info_vec.length ()); | |
d4b21757 | 903 | |
904 | if (uid1 > uid2) | |
905 | return stmt1; | |
906 | else | |
907 | return stmt2; | |
908 | } | |
909 | ||
282bf14c | 910 | /* Return TRUE if a statement represented by STMT_INFO is a part of a |
911 | pattern. */ | |
912 | ||
213448e9 | 913 | static inline bool |
914 | is_pattern_stmt_p (stmt_vec_info stmt_info) | |
915 | { | |
42acab1c | 916 | gimple *related_stmt; |
213448e9 | 917 | stmt_vec_info related_stmt_info; |
918 | ||
919 | related_stmt = STMT_VINFO_RELATED_STMT (stmt_info); | |
920 | if (related_stmt | |
921 | && (related_stmt_info = vinfo_for_stmt (related_stmt)) | |
922 | && STMT_VINFO_IN_PATTERN_P (related_stmt_info)) | |
923 | return true; | |
924 | ||
925 | return false; | |
926 | } | |
927 | ||
282bf14c | 928 | /* Return true if BB is a loop header. */ |
929 | ||
221e9a92 | 930 | static inline bool |
931 | is_loop_header_bb_p (basic_block bb) | |
932 | { | |
933 | if (bb == (bb->loop_father)->header) | |
934 | return true; | |
e95895ef | 935 | gcc_checking_assert (EDGE_COUNT (bb->preds) == 1); |
221e9a92 | 936 | return false; |
937 | } | |
938 | ||
/* Return pow2 (X), i.e. 1 shifted left X times; X <= 0 yields 1, as
   the previous loop formulation did.  Exponents here are small
   (e.g. multi-step conversion counts), so int never overflows.  */

static inline int
vect_pow2 (int x)
{
  /* Replaces an O(x) multiply loop with a single shift.  */
  return x <= 0 ? 1 : 1 << x;
}
84a15e8f | 951 | |
f97dec81 | 952 | /* Alias targetm.vectorize.builtin_vectorization_cost. */ |
953 | ||
954 | static inline int | |
955 | builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, | |
956 | tree vectype, int misalign) | |
957 | { | |
958 | return targetm.vectorize.builtin_vectorization_cost (type_of_cost, | |
959 | vectype, misalign); | |
960 | } | |
961 | ||
f4ac3f3e | 962 | /* Get cost by calling cost target builtin. */ |
963 | ||
964 | static inline | |
965 | int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost) | |
966 | { | |
f97dec81 | 967 | return builtin_vectorization_cost (type_of_cost, NULL, 0); |
f4ac3f3e | 968 | } |
969 | ||
4db2b577 | 970 | /* Alias targetm.vectorize.init_cost. */ |
971 | ||
972 | static inline void * | |
973 | init_cost (struct loop *loop_info) | |
974 | { | |
975 | return targetm.vectorize.init_cost (loop_info); | |
976 | } | |
977 | ||
978 | /* Alias targetm.vectorize.add_stmt_cost. */ | |
979 | ||
980 | static inline unsigned | |
981 | add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, | |
f97dec81 | 982 | stmt_vec_info stmt_info, int misalign, |
983 | enum vect_cost_model_location where) | |
4db2b577 | 984 | { |
985 | return targetm.vectorize.add_stmt_cost (data, count, kind, | |
f97dec81 | 986 | stmt_info, misalign, where); |
4db2b577 | 987 | } |
988 | ||
989 | /* Alias targetm.vectorize.finish_cost. */ | |
990 | ||
f97dec81 | 991 | static inline void |
992 | finish_cost (void *data, unsigned *prologue_cost, | |
993 | unsigned *body_cost, unsigned *epilogue_cost) | |
4db2b577 | 994 | { |
f97dec81 | 995 | targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost); |
4db2b577 | 996 | } |
997 | ||
998 | /* Alias targetm.vectorize.destroy_cost_data. */ | |
999 | ||
1000 | static inline void | |
1001 | destroy_cost_data (void *data) | |
1002 | { | |
1003 | targetm.vectorize.destroy_cost_data (data); | |
1004 | } | |
1005 | ||
c91e8223 | 1006 | /*-----------------------------------------------------------------*/ |
1007 | /* Info on data references alignment. */ | |
1008 | /*-----------------------------------------------------------------*/ | |
23e1875f | 1009 | inline void |
1010 | set_dr_misalignment (struct data_reference *dr, int val) | |
1011 | { | |
9dd88d41 | 1012 | dataref_aux *data_aux = DR_VECT_AUX (dr); |
23e1875f | 1013 | |
1014 | if (!data_aux) | |
1015 | { | |
1016 | data_aux = XCNEW (dataref_aux); | |
1017 | dr->aux = data_aux; | |
1018 | } | |
1019 | ||
1020 | data_aux->misalignment = val; | |
1021 | } | |
1022 | ||
1023 | inline int | |
1024 | dr_misalignment (struct data_reference *dr) | |
1025 | { | |
9dd88d41 | 1026 | return DR_VECT_AUX (dr)->misalignment; |
23e1875f | 1027 | } |
c91e8223 | 1028 | |
39b8f742 | 1029 | /* Reflects actual alignment of first access in the vectorized loop, |
1030 | taking into account peeling/versioning if applied. */ | |
23e1875f | 1031 | #define DR_MISALIGNMENT(DR) dr_misalignment (DR) |
1032 | #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) | |
df8e9f7a | 1033 | #define DR_MISALIGNMENT_UNKNOWN (-1) |
c91e8223 | 1034 | |
282bf14c | 1035 | /* Return TRUE if the data access is aligned, and FALSE otherwise. */ |
1036 | ||
c91e8223 | 1037 | static inline bool |
1038 | aligned_access_p (struct data_reference *data_ref_info) | |
1039 | { | |
1040 | return (DR_MISALIGNMENT (data_ref_info) == 0); | |
1041 | } | |
1042 | ||
282bf14c | 1043 | /* Return TRUE if the alignment of the data access is known, and FALSE |
1044 | otherwise. */ | |
1045 | ||
c91e8223 | 1046 | static inline bool |
39b8f742 | 1047 | known_alignment_for_access_p (struct data_reference *data_ref_info) |
c91e8223 | 1048 | { |
df8e9f7a | 1049 | return (DR_MISALIGNMENT (data_ref_info) != DR_MISALIGNMENT_UNKNOWN); |
c91e8223 | 1050 | } |
1051 | ||
9e879814 | 1052 | /* Return the behavior of DR with respect to the vectorization context |
1053 | (which for outer loop vectorization might not be the behavior recorded | |
1054 | in DR itself). */ | |
1055 | ||
1056 | static inline innermost_loop_behavior * | |
1057 | vect_dr_behavior (data_reference *dr) | |
1058 | { | |
1059 | gimple *stmt = DR_STMT (dr); | |
1060 | stmt_vec_info stmt_info = vinfo_for_stmt (stmt); | |
1061 | loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); | |
1062 | if (loop_vinfo == NULL | |
1063 | || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt)) | |
1064 | return &DR_INNERMOST (dr); | |
1065 | else | |
1066 | return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info); | |
1067 | } | |
1dbf9bd1 | 1068 | |
1069 | /* Return true if the vect cost model is unlimited. */ | |
1070 | static inline bool | |
3e398f5b | 1071 | unlimited_cost_model (loop_p loop) |
1dbf9bd1 | 1072 | { |
4c73695b | 1073 | if (loop != NULL && loop->force_vectorize |
3e398f5b | 1074 | && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) |
1075 | return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; | |
1076 | return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); | |
1dbf9bd1 | 1077 | } |
1078 | ||
4eb17cb6 | 1079 | /* Return the number of copies needed for loop vectorization when |
1080 | a statement operates on vectors of type VECTYPE. This is the | |
1081 | vectorization factor divided by the number of elements in | |
1082 | VECTYPE and is always known at compile time. */ | |
1083 | ||
1084 | static inline unsigned int | |
1085 | vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype) | |
1086 | { | |
1087 | gcc_checking_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
1088 | % TYPE_VECTOR_SUBPARTS (vectype) == 0); | |
1089 | return (LOOP_VINFO_VECT_FACTOR (loop_vinfo) | |
1090 | / TYPE_VECTOR_SUBPARTS (vectype)); | |
1091 | } | |
1092 | ||
7bd765d4 | 1093 | /* Source location */ |
36f39b2e | 1094 | extern source_location vect_location; |
fb85abff | 1095 | |
c91e8223 | 1096 | /*-----------------------------------------------------------------*/ |
1097 | /* Function prototypes. */ | |
1098 | /*-----------------------------------------------------------------*/ | |
1099 | ||
48e1416a | 1100 | /* Simple loop peeling and versioning utilities for vectorizer's purposes - |
fb85abff | 1101 | in tree-vect-loop-manip.c. */ |
f2983e95 | 1102 | extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree); |
1f1872fd | 1103 | extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge); |
c71d3c24 | 1104 | struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, |
1105 | struct loop *, edge); | |
e7430948 | 1106 | extern void vect_loop_versioning (loop_vec_info, unsigned int, bool); |
5b631e09 | 1107 | extern struct loop *vect_do_peeling (loop_vec_info, tree, tree, |
1108 | tree *, int, bool, bool); | |
36f39b2e | 1109 | extern source_location find_loop_location (struct loop *); |
fb85abff | 1110 | extern bool vect_can_advance_ivs_p (loop_vec_info); |
c91e8223 | 1111 | |
fb85abff | 1112 | /* In tree-vect-stmts.c. */ |
c4740c5d | 1113 | extern unsigned int current_vector_size; |
f2983e95 | 1114 | extern tree get_vectype_for_scalar_type (tree); |
dab48979 | 1115 | extern tree get_mask_type_for_scalar_type (tree); |
b334cbba | 1116 | extern tree get_same_sized_vectype (tree, tree); |
5cc2ea45 | 1117 | extern bool vect_is_simple_use (tree, vec_info *, gimple **, |
1118 | enum vect_def_type *); | |
1119 | extern bool vect_is_simple_use (tree, vec_info *, gimple **, | |
1120 | enum vect_def_type *, tree *); | |
42acab1c | 1121 | extern bool supportable_widening_operation (enum tree_code, gimple *, tree, |
1122 | tree, enum tree_code *, | |
1123 | enum tree_code *, int *, | |
1124 | vec<tree> *); | |
b334cbba | 1125 | extern bool supportable_narrowing_operation (enum tree_code, tree, tree, |
1126 | enum tree_code *, | |
f1f41a6c | 1127 | int *, vec<tree> *); |
e2c5c678 | 1128 | extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *); |
42acab1c | 1129 | extern void free_stmt_vec_info (gimple *stmt); |
fb85abff | 1130 | extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *, |
9bf6e01f | 1131 | int, stmt_vector_for_cost *, |
f97dec81 | 1132 | stmt_vector_for_cost *); |
85b53a1f | 1133 | extern void vect_model_store_cost (stmt_vec_info, int, vect_memory_access_type, |
4db2b577 | 1134 | enum vect_def_type, slp_tree, |
f97dec81 | 1135 | stmt_vector_for_cost *, |
4db2b577 | 1136 | stmt_vector_for_cost *); |
85b53a1f | 1137 | extern void vect_model_load_cost (stmt_vec_info, int, vect_memory_access_type, |
1138 | slp_tree, stmt_vector_for_cost *, | |
4db2b577 | 1139 | stmt_vector_for_cost *); |
1140 | extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, | |
f97dec81 | 1141 | enum vect_cost_for_stmt, stmt_vec_info, |
1142 | int, enum vect_cost_model_location); | |
42acab1c | 1143 | extern void vect_finish_stmt_generation (gimple *, gimple *, |
fb85abff | 1144 | gimple_stmt_iterator *); |
1145 | extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info); | |
707ba526 | 1146 | extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type); |
dab48979 | 1147 | extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL); |
44b24fa0 | 1148 | extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *, |
1149 | vec<tree> *, slp_tree); | |
1150 | extern void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *, | |
1151 | vec<tree> *, vec<tree> *); | |
42acab1c | 1152 | extern tree vect_init_vector (gimple *, tree, tree, |
fb85abff | 1153 | gimple_stmt_iterator *); |
1154 | extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree); | |
42acab1c | 1155 | extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *, |
fb85abff | 1156 | bool *, slp_tree, slp_instance); |
42acab1c | 1157 | extern void vect_remove_stores (gimple *); |
6154acba | 1158 | extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance); |
42acab1c | 1159 | extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *, |
1160 | gimple **, tree, int, slp_tree); | |
0822b158 | 1161 | extern void vect_get_load_cost (struct data_reference *, int, bool, |
4db2b577 | 1162 | unsigned int *, unsigned int *, |
f97dec81 | 1163 | stmt_vector_for_cost *, |
1164 | stmt_vector_for_cost *, bool); | |
4db2b577 | 1165 | extern void vect_get_store_cost (struct data_reference *, int, |
1166 | unsigned int *, stmt_vector_for_cost *); | |
45eea33f | 1167 | extern bool vect_supportable_shift (enum tree_code, tree); |
282dc861 | 1168 | extern tree vect_gen_perm_mask_any (tree, vec_perm_indices); |
1169 | extern tree vect_gen_perm_mask_checked (tree, vec_perm_indices); | |
cfd9ca84 | 1170 | extern void optimize_mask_stores (struct loop*); |
48e1416a | 1171 | |
fb85abff | 1172 | /* In tree-vect-data-refs.c. */ |
1173 | extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int); | |
1174 | extern enum dr_alignment_support vect_supportable_dr_alignment | |
0822b158 | 1175 | (struct data_reference *, bool); |
42acab1c | 1176 | extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *, |
fb85abff | 1177 | HOST_WIDE_INT *); |
68f15e9d | 1178 | extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *); |
c256513d | 1179 | extern bool vect_slp_analyze_instance_dependence (slp_instance); |
fb85abff | 1180 | extern bool vect_enhance_data_refs_alignment (loop_vec_info); |
2f6fec15 | 1181 | extern bool vect_analyze_data_refs_alignment (loop_vec_info); |
1182 | extern bool vect_verify_datarefs_alignment (loop_vec_info); | |
1183 | extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); | |
e2c5c678 | 1184 | extern bool vect_analyze_data_ref_accesses (vec_info *); |
fb85abff | 1185 | extern bool vect_prune_runtime_alias_test_list (loop_vec_info); |
cf60da07 | 1186 | extern bool vect_check_gather_scatter (gimple *, loop_vec_info, |
1187 | gather_scatter_info *); | |
0a08c1bc | 1188 | extern bool vect_analyze_data_refs (vec_info *, int *); |
4f372c2c | 1189 | extern void vect_record_base_alignments (vec_info *); |
42acab1c | 1190 | extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree, |
bd5ba09f | 1191 | tree *, gimple_stmt_iterator *, |
42acab1c | 1192 | gimple **, bool, bool *, |
1ec61bbd | 1193 | tree = NULL_TREE); |
42acab1c | 1194 | extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *, |
1195 | tree); | |
fb85abff | 1196 | extern tree vect_create_destination_var (tree, tree); |
ee612634 | 1197 | extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); |
94b7b4dd | 1198 | extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT); |
bc691ae4 | 1199 | extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); |
94b7b4dd | 1200 | extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT); |
42acab1c | 1201 | extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *, |
f1f41a6c | 1202 | gimple_stmt_iterator *, vec<tree> *); |
42acab1c | 1203 | extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *, |
48e1416a | 1204 | enum dr_alignment_support, tree, |
fb85abff | 1205 | struct loop **); |
42acab1c | 1206 | extern void vect_transform_grouped_load (gimple *, vec<tree> , int, |
fb85abff | 1207 | gimple_stmt_iterator *); |
42acab1c | 1208 | extern void vect_record_grouped_load_vectors (gimple *, vec<tree> ); |
fb85abff | 1209 | extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); |
23ffec42 | 1210 | extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, |
1211 | const char * = NULL); | |
42acab1c | 1212 | extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *, |
9e879814 | 1213 | tree, tree = NULL_TREE); |
fb85abff | 1214 | |
1215 | /* In tree-vect-loop.c. */ | |
1216 | /* FORNOW: Used in tree-parloops.c. */ | |
119a8852 | 1217 | extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *, |
42acab1c | 1218 | bool *, bool); |
fb85abff | 1219 | /* Drive for loop analysis stage. */ |
5b631e09 | 1220 | extern loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info); |
3a815241 | 1221 | extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL); |
6c6a3430 | 1222 | extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, bool); |
fb85abff | 1223 | /* Drive for loop transformation stage. */ |
5b631e09 | 1224 | extern struct loop *vect_transform_loop (loop_vec_info); |
cb7f680b | 1225 | extern loop_vec_info vect_analyze_loop_form (struct loop *); |
42acab1c | 1226 | extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *, |
75aae5b4 | 1227 | slp_tree, int, gimple **); |
42acab1c | 1228 | extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *, |
6154acba | 1229 | gimple **, slp_tree, slp_instance); |
5cc7beaa | 1230 | extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, |
1231 | gimple **, slp_tree); | |
42acab1c | 1232 | extern tree get_initial_def_for_reduction (gimple *, tree, tree *); |
fb85abff | 1233 | extern int vect_min_worthwhile_factor (enum tree_code); |
7a66d0cf | 1234 | extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, |
1235 | stmt_vector_for_cost *, | |
f97dec81 | 1236 | stmt_vector_for_cost *, |
1237 | stmt_vector_for_cost *); | |
4a61a337 | 1238 | |
fb85abff | 1239 | /* In tree-vect-slp.c. */ |
1240 | extern void vect_free_slp_instance (slp_instance); | |
678e3d6e | 1241 | extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , |
48e1416a | 1242 | gimple_stmt_iterator *, int, |
37c39def | 1243 | slp_instance, bool, unsigned *); |
31bf2c9a | 1244 | extern bool vect_slp_analyze_operations (vec<slp_instance> slp_instances, |
1245 | void *); | |
e2c5c678 | 1246 | extern bool vect_schedule_slp (vec_info *); |
1247 | extern bool vect_analyze_slp (vec_info *, unsigned); | |
bc937a44 | 1248 | extern bool vect_make_slp_decision (loop_vec_info); |
fb85abff | 1249 | extern void vect_detect_hybrid_slp (loop_vec_info); |
4f0d4cce | 1250 | extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *); |
0a08c1bc | 1251 | extern bool vect_slp_bb (basic_block); |
77d241ed | 1252 | extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree); |
75aae5b4 | 1253 | extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info); |
fb85abff | 1254 | |
1255 | /* In tree-vect-patterns.c. */ | |
4a61a337 | 1256 | /* Pattern recognition functions. |
1257 | Additional pattern recognition functions can (and will) be added | |
1258 | in the future. */ | |
42acab1c | 1259 | typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *); |
959c4b00 | 1260 | #define NUM_PATTERNS 14 |
e2c5c678 | 1261 | void vect_pattern_recog (vec_info *); |
4a61a337 | 1262 | |
10230637 | 1263 | /* In tree-vectorizer.c. */ |
1264 | unsigned vectorize_loops (void); | |
4c7587f5 | 1265 | bool vect_stmt_in_region_p (vec_info *, gimple *); |
d5e80d93 | 1266 | void vect_free_loop_info_assumptions (struct loop *); |
c91e8223 | 1267 | |
1268 | #endif /* GCC_TREE_VECTORIZER_H */ |