fb85abff 1/* Vectorizer
d353bf18 2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
c91e8223 3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
8c4c00c1 9Software Foundation; either version 3, or (at your option) any later
c91e8223 10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
8c4c00c1 18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
c91e8223 20
21#ifndef GCC_TREE_VECTORIZER_H
22#define GCC_TREE_VECTORIZER_H
23
fb85abff 24#include "tree-data-ref.h"
f4ac3f3e 25#include "target.h"
fb85abff 26
c91e8223 27/* Used for naming of new temporaries. */
28enum vect_var_kind {
29 vect_simple_var,
ea8f3370 30 vect_pointer_var,
31 vect_scalar_var
c91e8223 32};
33
4a61a337 34/* Defines type of operation. */
c91e8223 35enum operation_type {
36 unary_op = 1,
4a61a337 37 binary_op,
38 ternary_op
c91e8223 39};
40
1a9b4618 41/* Define type of available alignment support. */
42enum dr_alignment_support {
43 dr_unaligned_unsupported,
44 dr_unaligned_supported,
b0eb8c66 45 dr_explicit_realign,
46 dr_explicit_realign_optimized,
1a9b4618 47 dr_aligned
48};
49
ce10738f 50/* Define type of def-use cross-iteration cycle. */
e12906b9 51enum vect_def_type {
bc620c5c 52 vect_uninitialized_def = 0,
f083cd24 53 vect_constant_def = 1,
54 vect_external_def,
55 vect_internal_def,
e12906b9 56 vect_induction_def,
57 vect_reduction_def,
7aa0d350 58 vect_double_reduction_def,
ade2ac53 59 vect_nested_cycle,
e12906b9 60 vect_unknown_def_type
61};
62
07be02da 63#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
64 || ((D) == vect_double_reduction_def) \
65 || ((D) == vect_nested_cycle))
66
4db2b577 67/* Structure to encapsulate information about a group of like
68 instructions to be presented to the target cost model. */
6dc50383 69struct stmt_info_for_cost {
4db2b577 70 int count;
71 enum vect_cost_for_stmt kind;
42acab1c 72 gimple *stmt;
4db2b577 73 int misalign;
6dc50383 74};
4db2b577 75
f1f41a6c 76typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
4db2b577 77
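/* A minimal usage sketch (illustrative, not part of the original
   interface; the function name is hypothetical): accumulating one cost
   entry so it can later be submitted to the target cost model.  */

static inline void
example_record_scalar_stmt_cost (stmt_vector_for_cost *costs, gimple *stmt)
{
  stmt_info_for_cost entry;
  entry.count = 1;
  entry.kind = scalar_stmt;     /* From enum vect_cost_for_stmt.  */
  entry.stmt = stmt;
  entry.misalign = 0;
  costs->safe_push (entry);     /* vec<> reallocates as needed.  */
}
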
c6895939 78/************************************************************************
79 SLP
80 ************************************************************************/
40bcc7c2 81typedef struct _slp_tree *slp_tree;
c6895939 82
b0f64919 83/* A computation tree of an SLP instance. Each node corresponds to a group of
c6895939 84 stmts to be packed in a SIMD stmt. */
40bcc7c2 85struct _slp_tree {
b0f64919 86 /* Nodes that contain the def-stmts of the operands of this node's stmts. */
40bcc7c2 87 vec<slp_tree> children;
c6895939 88 /* A group of scalar stmts to be vectorized together. */
42acab1c 89 vec<gimple *> stmts;
678e3d6e 90 /* Load permutation relative to the stores, NULL if there is no
91 permutation. */
92 vec<unsigned> load_permutation;
c6895939 93 /* Vectorized stmt/s. */
42acab1c 94 vec<gimple *> vec_stmts;
48e1416a 95 /* Number of vector stmts that are created to replace the group of scalar
96 stmts. It is calculated during the transformation phase as the number of
97 scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
c6895939 98 divided by vector size. */
99 unsigned int vec_stmts_size;
66e30248 100 /* Whether the scalar computations use two different operators. */
101 bool two_operators;
40bcc7c2 102};
c6895939 103
104
 105/* An SLP instance is a sequence of stmts in a loop that can be packed into
106 SIMD stmts. */
107typedef struct _slp_instance {
108 /* The root of SLP tree. */
109 slp_tree root;
110
111 /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */
112 unsigned int group_size;
113
 114 /* The unrolling factor required to vectorize this SLP instance. */
115 unsigned int unrolling_factor;
116
a0515226 117 /* The group of nodes that contain loads of this SLP instance. */
f1f41a6c 118 vec<slp_tree> loads;
c6895939 119} *slp_instance;
120
c6895939 121
122/* Access Functions. */
123#define SLP_INSTANCE_TREE(S) (S)->root
124#define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size
125#define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor
a0515226 126#define SLP_INSTANCE_LOADS(S) (S)->loads
c6895939 127
b0f64919 128#define SLP_TREE_CHILDREN(S) (S)->children
c6895939 129#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
130#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
131#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
678e3d6e 132#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
66e30248 133#define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators
b0f64919 134
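/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): recursively walking an SLP tree
   through the accessors above, here to count all scalar stmts packed in
   the tree.  */

static inline unsigned
example_slp_tree_count_scalar_stmts (slp_tree node)
{
  unsigned n = SLP_TREE_SCALAR_STMTS (node).length ();
  slp_tree child;
  unsigned i;
  /* The children hold the def-stmts of this node's operands.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    n += example_slp_tree_count_scalar_stmts (child);
  return n;
}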
b0f64919 135
0822b158 136
8a7b0f48 137/* This struct is used to store information about a data reference,
43d14b66 138 including the data ref itself, the access offset (calculated by summing its
139 offset and init) and the segment length for aliasing checks.
140 This is used to merge alias checks. */
8a7b0f48 141
43d14b66 142struct dr_with_seg_len
8a7b0f48 143{
43d14b66 144 dr_with_seg_len (data_reference_p d, tree len)
145 : dr (d),
146 offset (size_binop (PLUS_EXPR, DR_OFFSET (d), DR_INIT (d))),
147 seg_len (len) {}
8a7b0f48 148
43d14b66 149 data_reference_p dr;
8a7b0f48 150 tree offset;
151 tree seg_len;
152};
153
43d14b66 154/* This struct contains two dr_with_seg_len objects with aliasing data
8a7b0f48 155 refs. Two comparisons are generated from them. */
156
43d14b66 157struct dr_with_seg_len_pair_t
8a7b0f48 158{
43d14b66 159 dr_with_seg_len_pair_t (const dr_with_seg_len& d1,
160 const dr_with_seg_len& d2)
8a7b0f48 161 : first (d1), second (d2) {}
162
43d14b66 163 dr_with_seg_len first;
164 dr_with_seg_len second;
8a7b0f48 165};
166
167
3e871d4d 168
e2c5c678 169/* Vectorizer state common between loop and basic-block vectorization. */
170struct vec_info {
171 enum { bb, loop } kind;
172
173 /* All SLP instances. */
174 vec<slp_instance> slp_instances;
175
176 /* All data references. */
177 vec<data_reference_p> datarefs;
178
179 /* All data dependences. */
180 vec<ddr_p> ddrs;
181
182 /* All interleaving chains of stores, represented by the first
183 stmt in the chain. */
184 vec<gimple *> grouped_stores;
185
186 /* Cost data used by the target cost model. */
187 void *target_cost_data;
188};
189
190struct _loop_vec_info;
191struct _bb_vec_info;
192
193template<>
194template<>
195inline bool
196is_a_helper <_loop_vec_info *>::test (vec_info *i)
197{
198 return i->kind == vec_info::loop;
199}
200
201template<>
202template<>
203inline bool
204is_a_helper <_bb_vec_info *>::test (vec_info *i)
205{
206 return i->kind == vec_info::bb;
207}
208
3e871d4d 209
4e58562d 210/*-----------------------------------------------------------------*/
211/* Info on vectorized loops. */
212/*-----------------------------------------------------------------*/
e2c5c678 213typedef struct _loop_vec_info : public vec_info {
4e58562d 214
 215 /* The loop to which this info struct refers. */
216 struct loop *loop;
217
218 /* The loop basic blocks. */
219 basic_block *bbs;
220
796f6cba 221 /* Number of latch executions. */
222 tree num_itersm1;
4e58562d 223 /* Number of iterations. */
224 tree num_iters;
796f6cba 225 /* Number of iterations of the original loop. */
be53c6d4 226 tree num_iters_unchanged;
4e58562d 227
004a94a5 228 /* Threshold on the number of iterations below which vectorization will not be
229 performed. It is calculated from MIN_PROFITABLE_ITERS and
230 PARAM_MIN_VECT_LOOP_BOUND. */
231 unsigned int th;
232
4e58562d 233 /* Is the loop vectorizable? */
234 bool vectorizable;
235
236 /* Unrolling factor */
237 int vectorization_factor;
238
 239 /* The data reference of unknown alignment according to which the loop was peeled. */
240 struct data_reference *unaligned_dr;
241
39b8f742 242 /* peeling_for_alignment indicates whether peeling for alignment will take
243 place, and what the peeling factor should be:
244 peeling_for_alignment = X means:
245 If X=0: Peeling for alignment will not be applied.
246 If X>0: Peel first X iterations.
247 If X=-1: Generate a runtime test to calculate the number of iterations
248 to be peeled, using the dataref recorded in the field
249 unaligned_dr. */
250 int peeling_for_alignment;
4e58562d 251
25e3c2e8 252 /* The mask used to check the alignment of pointers or arrays. */
253 int ptr_mask;
254
a8af2e86 255 /* The loop nest in which the data dependences are computed. */
f1f41a6c 256 vec<loop_p> loop_nest;
a8af2e86 257
45b13dc3 258 /* Data Dependence Relations defining address ranges that are candidates
259 for a run-time aliasing check. */
f1f41a6c 260 vec<ddr_p> may_alias_ddrs;
45b13dc3 261
8a7b0f48 262 /* Data Dependence Relations defining address ranges together with segment
263 lengths from which the run-time aliasing check is built. */
43d14b66 264 vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
8a7b0f48 265
25e3c2e8 266 /* Statements in the loop that have data references that are candidates for a
267 runtime (loop versioning) misalignment check. */
42acab1c 268 vec<gimple *> may_misalign_stmts;
25e3c2e8 269
48e1416a 270 /* The unrolling factor needed to SLP the loop. If pure SLP is applied to
c6895939 271 the loop, i.e., no unrolling is needed, this is 1. */
272 unsigned slp_unrolling_factor;
eefa05c8 273
274 /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
42acab1c 275 vec<gimple *> reductions;
0822b158 276
39a5d6b1 277 /* All reduction chains in the loop, represented by the first
278 stmt in the chain. */
42acab1c 279 vec<gimple *> reduction_chains;
39a5d6b1 280
2a9a3444 281 /* Cost vector for a single scalar iteration. */
282 vec<stmt_info_for_cost> scalar_cost_vec;
283
284 /* Cost of a single scalar iteration. */
285 int single_scalar_iteration_cost;
286
ee612634 287 /* When we have grouped data accesses with gaps, we may introduce invalid
a4ee7fac 288 memory accesses. We peel the last iteration of the loop to prevent
289 this. */
290 bool peeling_for_gaps;
291
36f39b2e 292 /* When the number of iterations is not a multiple of the vector size
293 we need to peel off iterations at the end to form an epilogue loop. */
294 bool peeling_for_niter;
295
ba69439f 296 /* Reductions are canonicalized so that the last operand is the reduction
297 operand. If this places a constant into RHS1, this decanonicalizes
298 GIMPLE for other phases, so we must track when this has occurred and
299 fix it up. */
300 bool operands_swapped;
301
c7a8722c 302 /* True if there are no loop carried data dependencies in the loop.
303 If loop->safelen <= 1, then this is always true, either the loop
304 didn't have any loop carried data dependencies, or the loop is being
305 vectorized guarded with some runtime alias checks, or couldn't
306 be vectorized at all, but then this field shouldn't be used.
307 For loop->safelen >= 2, the user has asserted that there are no
308 backward dependencies, but there still could be loop carried forward
309 dependencies in such loops. This flag will be false if normal
310 vectorizer data dependency analysis would fail or require versioning
311 for alias, but because of loop->safelen >= 2 it has been vectorized
312 even without versioning for alias. E.g. in:
313 #pragma omp simd
314 for (int i = 0; i < m; i++)
315 a[i] = a[i + k] * c;
316 (or #pragma simd or #pragma ivdep) we can vectorize this and it will
317 DTRT even for k > 0 && k < m, but without safelen we would not
318 vectorize this, so this field would be false. */
319 bool no_data_dependencies;
320
c71d3c24 321 /* If if-conversion versioned this loop before conversion, this is the
322 loop version without if-conversion. */
323 struct loop *scalar_loop;
324
4e58562d 325} *loop_vec_info;
326
25e3c2e8 327/* Access Functions. */
10095225 328#define LOOP_VINFO_LOOP(L) (L)->loop
329#define LOOP_VINFO_BBS(L) (L)->bbs
796f6cba 330#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
10095225 331#define LOOP_VINFO_NITERS(L) (L)->num_iters
796f6cba 332/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
 333 prologue peeling, retain the total unchanged scalar loop iteration count
 334 for the cost model. */
10095225 335#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
004a94a5 336#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
10095225 337#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
338#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
339#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
a8af2e86 340#define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest
10095225 341#define LOOP_VINFO_DATAREFS(L) (L)->datarefs
342#define LOOP_VINFO_DDRS(L) (L)->ddrs
343#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
313a5120 344#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
10095225 345#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
346#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
10095225 347#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
8a7b0f48 348#define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
ee612634 349#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
10095225 350#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
c6895939 351#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
eefa05c8 352#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
39a5d6b1 353#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
4db2b577 354#define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data
a4ee7fac 355#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
ba69439f 356#define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped
313a5120 357#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
c7a8722c 358#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
c71d3c24 359#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
2a9a3444 360#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
361#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
4e58562d 362
10095225 363#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
72ffab3c 364 ((L)->may_misalign_stmts.length () > 0)
10095225 365#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
72ffab3c 366 ((L)->may_alias_ddrs.length () > 0)
33bbe730 367
10095225 368#define LOOP_VINFO_NITERS_KNOWN_P(L) \
313a5120 369 (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
4e58562d 370
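/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): typical queries made through the
   accessors above once loop analysis has recorded its results.  */

static inline bool
example_loop_may_need_epilogue (loop_vec_info loop_vinfo)
{
  /* With a compile-time iteration count, an epilogue loop is usually
     needed when the count is not a multiple of the vectorization
     factor (peeling for gaps/alignment can change this).  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    return (LOOP_VINFO_INT_NITERS (loop_vinfo)
            % LOOP_VINFO_VECT_FACTOR (loop_vinfo)) != 0;
  /* Otherwise rely on what the analysis recorded.  */
  return LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo);
}
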
221e9a92 371static inline loop_vec_info
372loop_vec_info_for_loop (struct loop *loop)
373{
374 return (loop_vec_info) loop->aux;
375}
376
377static inline bool
42acab1c 378nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
221e9a92 379{
48e1416a 380 return (loop->inner
75a70cf9 381 && (loop->inner == (gimple_bb (stmt))->loop_father));
221e9a92 382}
383
e2c5c678 384typedef struct _bb_vec_info : public vec_info
385{
37545e54 386 basic_block bb;
37545e54 387} *bb_vec_info;
388
4db2b577 389#define BB_VINFO_BB(B) (B)->bb
390#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
391#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
392#define BB_VINFO_DATAREFS(B) (B)->datarefs
393#define BB_VINFO_DDRS(B) (B)->ddrs
394#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
37545e54 395
396static inline bb_vec_info
397vec_info_for_bb (basic_block bb)
398{
399 return (bb_vec_info) bb->aux;
400}
401
c91e8223 402/*-----------------------------------------------------------------*/
403/* Info on vectorized defs. */
404/*-----------------------------------------------------------------*/
405enum stmt_vec_info_type {
406 undef_vec_info_type = 0,
407 load_vec_info_type,
408 store_vec_info_type,
09e31a48 409 shift_vec_info_type,
c91e8223 410 op_vec_info_type,
22c2f6bd 411 call_vec_info_type,
d09768a4 412 call_simd_clone_vec_info_type,
e9705e7f 413 assignment_vec_info_type,
ea8f3370 414 condition_vec_info_type,
c6c91d61 415 reduc_vec_info_type,
6fada017 416 induc_vec_info_type,
c6c91d61 417 type_promotion_vec_info_type,
9d8bf4aa 418 type_demotion_vec_info_type,
221e9a92 419 type_conversion_vec_info_type,
420 loop_exit_ctrl_vec_info_type
c6c91d61 421};
422
48e1416a 423/* Indicates whether/how a variable is used in the scope of loop/basic
f083cd24 424 block. */
c6c91d61 425enum vect_relevant {
f083cd24 426 vect_unused_in_scope = 0,
ade2ac53 427 /* The def is in the inner loop, and the use is in the outer loop, and the
428 use is a reduction stmt. */
221e9a92 429 vect_used_in_outer_by_reduction,
ade2ac53 430 /* The def is in the inner loop, and the use is in the outer loop (and is
431 not part of reduction). */
221e9a92 432 vect_used_in_outer,
bfe8bfe9 433
 434 /* Defs that feed computations that end up (only) in a reduction. These
48e1416a 435 defs may be used by non-reduction stmts, but eventually, any
 436 computations/values that are affected by these defs are used to compute
 437 a reduction (i.e. don't get stored to memory, for example). We use this
 438 to identify computations whose evaluation order we are allowed to
bfe8bfe9 439 change. */
c6c91d61 440 vect_used_by_reduction,
bfe8bfe9 441
48e1416a 442 vect_used_in_scope
c91e8223 443};
444
c6895939 445/* The type of vectorization that can be applied to the stmt: regular loop-based
446 vectorization; pure SLP - the stmt is a part of SLP instances and does not
447 have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
 448 a part of an SLP instance and also must be loop-based vectorized, since it has
48e1416a 449 uses outside SLP sequences.
450
451 In the loop context the meanings of pure and hybrid SLP are slightly
452 different. By saying that pure SLP is applied to the loop, we mean that we
453 exploit only intra-iteration parallelism in the loop; i.e., the loop can be
454 vectorized without doing any conceptual unrolling, cause we don't pack
455 together stmts from different iterations, only within a single iteration.
456 Loop hybrid SLP means that we exploit both intra-iteration and
c6895939 457 inter-iteration parallelism (e.g., number of elements in the vector is 4
48e1416a 458 and the slp-group-size is 2, in which case we don't have enough parallelism
459 within an iteration, so we obtain the rest of the parallelism from subsequent
c6895939 460 iterations by unrolling the loop by 2). */
48e1416a 461enum slp_vect_type {
c6895939 462 loop_vect = 0,
463 pure_slp,
464 hybrid
465};
466
467
f1168a33 468typedef struct data_reference *dr_p;
f1168a33 469
c91e8223 470typedef struct _stmt_vec_info {
471
472 enum stmt_vec_info_type type;
473
609c710b 474 /* Indicates whether this stmt is part of a computation whose result is
475 used outside the loop. */
476 bool live;
477
478 /* Stmt is part of some pattern (computation idiom) */
479 bool in_pattern_p;
480
c91e8223 481 /* The stmt to which this info struct refers. */
42acab1c 482 gimple *stmt;
c91e8223 483
e2c5c678 484 /* The vec_info with respect to which STMT is vectorized. */
485 vec_info *vinfo;
c91e8223 486
b334cbba 487 /* The vector type to be used for the LHS of this statement. */
c91e8223 488 tree vectype;
489
490 /* The vectorized version of the stmt. */
42acab1c 491 gimple *vectorized_stmt;
c91e8223 492
493
494 /** The following is relevant only for stmts that contain a non-scalar
48e1416a 495 data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
c91e8223 496 at most one such data-ref. **/
497
b0eb8c66 498 /* Information about the data-ref (access function, etc),
499 relative to the inner-most containing loop. */
c91e8223 500 struct data_reference *data_ref_info;
501
b0eb8c66 502 /* Information about the data-ref relative to this loop
503 nest (the loop that is being considered for vectorization). */
504 tree dr_base_address;
505 tree dr_init;
506 tree dr_offset;
507 tree dr_step;
508 tree dr_aligned_to;
509
86faead7 510 /* For loop PHI nodes, the evolution part of it. This makes sure
511 this information is still available in vect_update_ivs_after_vectorizer
 512 where we may not be able to re-analyze the PHI node's evolution, as
513 peeling for the prologue loop can make it unanalyzable. The evolution
514 part is still correct though. */
515 tree loop_phi_evolution_part;
516
48e1416a 517 /* Used for various bookkeeping purposes, generally holding a pointer to
518 some other stmt S that is in some way "related" to this stmt.
4a61a337 519 Current use of this field is:
48e1416a 520 If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
521 true): S is the "pattern stmt" that represents (and replaces) the
522 sequence of stmts that constitutes the pattern. Similarly, the
523 related_stmt of the "pattern stmt" points back to this stmt (which is
524 the last stmt in the original sequence of stmts that constitutes the
4a61a337 525 pattern). */
42acab1c 526 gimple *related_stmt;
4a61a337 527
18937389 528 /* Used to keep a sequence of def stmts of a pattern stmt if such exists. */
529 gimple_seq pattern_def_seq;
45eea33f 530
f1168a33 531 /* List of datarefs that are known to have the same alignment as the dataref
532 of this stmt. */
f1f41a6c 533 vec<dr_p> same_align_refs;
f1168a33 534
295327ab 535 /* Selected SIMD clone's function info. First vector element
536 is SIMD clone's function decl, followed by a pair of trees (base + step)
537 for linear arguments (pair of NULLs for other arguments). */
538 vec<tree> simd_clone_info;
d09768a4 539
e12906b9 540 /* Classify the def of this stmt. */
541 enum vect_def_type def_type;
542
609c710b 543 /* Whether the stmt is SLPed, loop-based vectorized, or both. */
544 enum slp_vect_type slp_type;
545
21009880 546 /* Interleaving and reduction chains info. */
547 /* First element in the group. */
42acab1c 548 gimple *first_element;
21009880 549 /* Pointer to the next element in the group. */
42acab1c 550 gimple *next_element;
21009880 551 /* For data-refs, in case two or more stmts share a data-ref, this is the
552 pointer to the previously detected stmt with the same dr. */
42acab1c 553 gimple *same_dr_stmt;
21009880 554 /* The size of the group. */
6b8dbb53 555 unsigned int size;
556 /* For stores, number of stores from this group seen. We vectorize the last
557 one. */
558 unsigned int store_count;
559 /* For loads only, the gap from the previous load. For consecutive loads, GAP
560 is 1. */
561 unsigned int gap;
609c710b 562
a8cf7702 563 /* The minimum negative dependence distance this stmt participates in
564 or zero if none. */
565 unsigned int min_neg_dist;
566
609c710b 567 /* Not all stmts in the loop need to be vectorized, e.g., the increment
 568 of the loop induction variable and computation of array indexes. RELEVANT
 569 indicates whether the stmt needs to be vectorized. */
570 enum vect_relevant relevant;
867c03eb 571
6ea6a380 572 /* Is this statement vectorizable or should it be skipped in (partial)
573 vectorization. */
574 bool vectorizable;
16dfb112 575
0bd6d857 576 /* True for loads if this is a gather, for stores if this is a scatter. */
577 bool gather_scatter_p;
e1c75243 578
579 /* True if this is an access with loop-invariant stride. */
580 bool strided_p;
3d483a94 581
582 /* For both loads and stores. */
583 bool simd_lane_access_p;
c91e8223 584} *stmt_vec_info;
585
586/* Access Functions. */
6b8dbb53 587#define STMT_VINFO_TYPE(S) (S)->type
588#define STMT_VINFO_STMT(S) (S)->stmt
e2c5c678 589inline loop_vec_info
590STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
591{
592 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
593 return loop_vinfo;
594 return NULL;
595}
596inline bb_vec_info
597STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
598{
599 if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
600 return bb_vinfo;
601 return NULL;
602}
6b8dbb53 603#define STMT_VINFO_RELEVANT(S) (S)->relevant
604#define STMT_VINFO_LIVE_P(S) (S)->live
605#define STMT_VINFO_VECTYPE(S) (S)->vectype
606#define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt
6ea6a380 607#define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
6b8dbb53 608#define STMT_VINFO_DATA_REF(S) (S)->data_ref_info
0bd6d857 609#define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
e1c75243 610#define STMT_VINFO_STRIDED_P(S) (S)->strided_p
3d483a94 611#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
b0eb8c66 612
613#define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_base_address
614#define STMT_VINFO_DR_INIT(S) (S)->dr_init
615#define STMT_VINFO_DR_OFFSET(S) (S)->dr_offset
616#define STMT_VINFO_DR_STEP(S) (S)->dr_step
617#define STMT_VINFO_DR_ALIGNED_TO(S) (S)->dr_aligned_to
618
6b8dbb53 619#define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
620#define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
18937389 621#define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
6b8dbb53 622#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs
295327ab 623#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
6b8dbb53 624#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
21009880 625#define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element
626#define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element
627#define STMT_VINFO_GROUP_SIZE(S) (S)->size
628#define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count
629#define STMT_VINFO_GROUP_GAP(S) (S)->gap
630#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
ee612634 631#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
86faead7 632#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
a8cf7702 633#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
21009880 634
635#define GROUP_FIRST_ELEMENT(S) (S)->first_element
636#define GROUP_NEXT_ELEMENT(S) (S)->next_element
637#define GROUP_SIZE(S) (S)->size
638#define GROUP_STORE_COUNT(S) (S)->store_count
639#define GROUP_GAP(S) (S)->gap
640#define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
c91e8223 641
f083cd24 642#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
867c03eb 643
c6895939 644#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
645#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
646#define STMT_SLP_TYPE(S) (S)->slp_type
647
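/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): how the slp_type of a statement is
   typically consulted.  Pure SLP statements are handled entirely by the
   SLP code, while loop_vect and hybrid statements also go through the
   loop-based path.  */

static inline bool
example_stmt_needs_loop_based_vectorization (stmt_vec_info stmt_info)
{
  return STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info);
}
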
23e1875f 648struct dataref_aux {
23e1875f 649 int misalignment;
9dd88d41 650 /* If true the alignment of base_decl needs to be increased. */
651 bool base_misaligned;
652 /* If true we know the base is at least vector element alignment aligned. */
653 bool base_element_aligned;
654 tree base_decl;
23e1875f 655};
656
9dd88d41 657#define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)
658
0822b158 659#define VECT_MAX_COST 1000
660
862bb3cd 661/* The maximum number of intermediate steps required in multi-step type
662 conversion. */
663#define MAX_INTERM_CVT_STEPS 3
664
1706116d 665/* The maximum vectorization factor supported by any target (V64QI). */
666#define MAX_VECTORIZATION_FACTOR 64
91a74fc6 667
3702cf13 668extern vec<stmt_vec_info> stmt_vec_info_vec;
75a70cf9 669
670void init_stmt_vec_info_vec (void);
671void free_stmt_vec_info_vec (void);
672
282bf14c 673/* Return a stmt_vec_info corresponding to STMT. */
674
75a70cf9 675static inline stmt_vec_info
42acab1c 676vinfo_for_stmt (gimple *stmt)
c91e8223 677{
75a70cf9 678 unsigned int uid = gimple_uid (stmt);
679 if (uid == 0)
680 return NULL;
681
3702cf13 682 return stmt_vec_info_vec[uid - 1];
c91e8223 683}
684
282bf14c 685/* Set vectorizer information INFO for STMT. */
686
75a70cf9 687static inline void
42acab1c 688set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
c91e8223 689{
75a70cf9 690 unsigned int uid = gimple_uid (stmt);
691 if (uid == 0)
692 {
e95895ef 693 gcc_checking_assert (info);
f1f41a6c 694 uid = stmt_vec_info_vec.length () + 1;
75a70cf9 695 gimple_set_uid (stmt, uid);
3702cf13 696 stmt_vec_info_vec.safe_push (info);
75a70cf9 697 }
698 else
3702cf13 699 stmt_vec_info_vec[uid - 1] = info;
c91e8223 700}
701
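/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): walking an interleaving group via
   the GROUP_* accessors, using vinfo_for_stmt to step from one member to
   the next.  */

static inline unsigned
example_count_group_members (gimple *first_stmt)
{
  unsigned n = 0;
  for (gimple *s = first_stmt; s; s = GROUP_NEXT_ELEMENT (vinfo_for_stmt (s)))
    n++;
  return n;
}
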
282bf14c 702/* Return the earlier statement between STMT1 and STMT2. */
703
42acab1c 704static inline gimple *
705get_earlier_stmt (gimple *stmt1, gimple *stmt2)
bdc89b8f 706{
707 unsigned int uid1, uid2;
708
709 if (stmt1 == NULL)
710 return stmt2;
711
712 if (stmt2 == NULL)
713 return stmt1;
714
715 uid1 = gimple_uid (stmt1);
716 uid2 = gimple_uid (stmt2);
717
718 if (uid1 == 0 || uid2 == 0)
719 return NULL;
720
f1f41a6c 721 gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
722 && uid2 <= stmt_vec_info_vec.length ());
bdc89b8f 723
724 if (uid1 < uid2)
725 return stmt1;
726 else
727 return stmt2;
728}
729
282bf14c 730/* Return the later statement between STMT1 and STMT2. */
731
42acab1c 732static inline gimple *
733get_later_stmt (gimple *stmt1, gimple *stmt2)
d4b21757 734{
735 unsigned int uid1, uid2;
736
737 if (stmt1 == NULL)
738 return stmt2;
739
740 if (stmt2 == NULL)
741 return stmt1;
742
743 uid1 = gimple_uid (stmt1);
744 uid2 = gimple_uid (stmt2);
745
746 if (uid1 == 0 || uid2 == 0)
747 return NULL;
748
f1f41a6c 749 gcc_assert (uid1 <= stmt_vec_info_vec.length ());
750 gcc_assert (uid2 <= stmt_vec_info_vec.length ());
d4b21757 751
752 if (uid1 > uid2)
753 return stmt1;
754 else
755 return stmt2;
756}
757
282bf14c 758/* Return TRUE if a statement represented by STMT_INFO is a part of a
759 pattern. */
760
213448e9 761static inline bool
762is_pattern_stmt_p (stmt_vec_info stmt_info)
763{
42acab1c 764 gimple *related_stmt;
213448e9 765 stmt_vec_info related_stmt_info;
766
767 related_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
768 if (related_stmt
769 && (related_stmt_info = vinfo_for_stmt (related_stmt))
770 && STMT_VINFO_IN_PATTERN_P (related_stmt_info))
771 return true;
772
773 return false;
774}
775
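/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): when a statement has been replaced
   by a pattern, the pattern statement recorded in RELATED_STMT is what
   the vectorizer actually operates on.  */

static inline gimple *
example_stmt_to_vectorize (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  if (stmt_info && STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt;
}
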
282bf14c 776/* Return true if BB is a loop header. */
777
221e9a92 778static inline bool
779is_loop_header_bb_p (basic_block bb)
780{
781 if (bb == (bb->loop_father)->header)
782 return true;
e95895ef 783 gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
221e9a92 784 return false;
785}
786
282bf14c 787/* Return pow2 (X). */
788
862bb3cd 789static inline int
790vect_pow2 (int x)
791{
792 int i, res = 1;
793
794 for (i = 0; i < x; i++)
795 res *= 2;
796
797 return res;
798}
84a15e8f 799
f97dec81 800/* Alias targetm.vectorize.builtin_vectorization_cost. */
801
802static inline int
803builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
804 tree vectype, int misalign)
805{
806 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
807 vectype, misalign);
808}
809
f4ac3f3e 810/* Get the cost by calling the target's cost builtin. */
811
812static inline
813int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
814{
f97dec81 815 return builtin_vectorization_cost (type_of_cost, NULL, 0);
f4ac3f3e 816}
817
4db2b577 818/* Alias targetm.vectorize.init_cost. */
819
820static inline void *
821init_cost (struct loop *loop_info)
822{
823 return targetm.vectorize.init_cost (loop_info);
824}
825
826/* Alias targetm.vectorize.add_stmt_cost. */
827
828static inline unsigned
829add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
f97dec81 830 stmt_vec_info stmt_info, int misalign,
831 enum vect_cost_model_location where)
4db2b577 832{
833 return targetm.vectorize.add_stmt_cost (data, count, kind,
f97dec81 834 stmt_info, misalign, where);
4db2b577 835}
836
837/* Alias targetm.vectorize.finish_cost. */
838
f97dec81 839static inline void
840finish_cost (void *data, unsigned *prologue_cost,
841 unsigned *body_cost, unsigned *epilogue_cost)
4db2b577 842{
f97dec81 843 targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
4db2b577 844}
845
846/* Alias targetm.vectorize.destroy_cost_data. */
847
848static inline void
849destroy_cost_data (void *data)
850{
851 targetm.vectorize.destroy_cost_data (data);
852}
853
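/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): the usual lifecycle of the target
   cost-model hooks wrapped above - create the cost data, add statement
   costs during analysis, then read back the totals and destroy it.  */

static inline unsigned
example_cost_model_lifecycle (struct loop *loop_info)
{
  void *data = init_cost (loop_info);
  unsigned prologue_cost, body_cost, epilogue_cost;

  /* During analysis, each statement contributes its cost; passing a
     NULL stmt_info keeps this sketch self-contained.  */
  add_stmt_cost (data, 1, vector_stmt, NULL, 0, vect_body);

  finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
  destroy_cost_data (data);
  return prologue_cost + body_cost + epilogue_cost;
}
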
c91e8223 854/*-----------------------------------------------------------------*/
855/* Info on data references alignment. */
856/*-----------------------------------------------------------------*/
23e1875f 857inline void
858set_dr_misalignment (struct data_reference *dr, int val)
859{
9dd88d41 860 dataref_aux *data_aux = DR_VECT_AUX (dr);
23e1875f 861
862 if (!data_aux)
863 {
864 data_aux = XCNEW (dataref_aux);
865 dr->aux = data_aux;
866 }
867
868 data_aux->misalignment = val;
869}
870
871inline int
872dr_misalignment (struct data_reference *dr)
873{
9dd88d41 874 return DR_VECT_AUX (dr)->misalignment;
23e1875f 875}
c91e8223 876
39b8f742 877/* Reflects actual alignment of first access in the vectorized loop,
878 taking into account peeling/versioning if applied. */
23e1875f 879#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
880#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
c91e8223 881
282bf14c 882/* Return TRUE if the data access is aligned, and FALSE otherwise. */
883
c91e8223 884static inline bool
885aligned_access_p (struct data_reference *data_ref_info)
886{
887 return (DR_MISALIGNMENT (data_ref_info) == 0);
888}
889
282bf14c 890/* Return TRUE if the alignment of the data access is known, and FALSE
891 otherwise. */
892
c91e8223 893static inline bool
39b8f742 894known_alignment_for_access_p (struct data_reference *data_ref_info)
c91e8223 895{
39b8f742 896 return (DR_MISALIGNMENT (data_ref_info) != -1);
c91e8223 897}
898
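/* A minimal sketch (illustrative, not part of the original interface;
   the function name is hypothetical): how the two predicates above
   combine to describe a data reference's alignment state.  */

static inline const char *
example_describe_dr_alignment (struct data_reference *dr)
{
  if (!known_alignment_for_access_p (dr))
    return "misalignment unknown";   /* DR_MISALIGNMENT (dr) == -1.  */
  if (aligned_access_p (dr))
    return "aligned";                /* DR_MISALIGNMENT (dr) == 0.  */
  return "known misalignment";       /* Peeling may be able to fix this.  */
}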
1dbf9bd1 899
900/* Return true if the vect cost model is unlimited. */
901static inline bool
3e398f5b 902unlimited_cost_model (loop_p loop)
1dbf9bd1 903{
4c73695b 904 if (loop != NULL && loop->force_vectorize
3e398f5b 905 && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
906 return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
907 return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
1dbf9bd1 908}
909
7bd765d4 910/* Source location */
36f39b2e 911extern source_location vect_location;
fb85abff 912
c91e8223 913/*-----------------------------------------------------------------*/
914/* Function prototypes. */
915/*-----------------------------------------------------------------*/
916
48e1416a 917/* Simple loop peeling and versioning utilities for vectorizer's purposes -
fb85abff 918 in tree-vect-loop-manip.c. */
f2983e95 919extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
1f1872fd 920extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
c71d3c24 921struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
922 struct loop *, edge);
e7430948 923extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
782fd1d1 924extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
e7430948 925 unsigned int, bool);
782fd1d1 926extern void vect_do_peeling_for_alignment (loop_vec_info, tree,
927 unsigned int, bool);
36f39b2e 928extern source_location find_loop_location (struct loop *);
fb85abff 929extern bool vect_can_advance_ivs_p (loop_vec_info);
c91e8223 930
fb85abff 931/* In tree-vect-stmts.c. */
c4740c5d 932extern unsigned int current_vector_size;
f2983e95 933extern tree get_vectype_for_scalar_type (tree);
b334cbba 934extern tree get_same_sized_vectype (tree, tree);
5cc2ea45 935extern bool vect_is_simple_use (tree, vec_info *, gimple **,
936 enum vect_def_type *);
937extern bool vect_is_simple_use (tree, vec_info *, gimple **,
938 enum vect_def_type *, tree *);
42acab1c 939extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
940 tree, enum tree_code *,
941 enum tree_code *, int *,
942 vec<tree> *);
b334cbba 943extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
944 enum tree_code *,
f1f41a6c 945 int *, vec<tree> *);
e2c5c678 946extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *);
42acab1c 947extern void free_stmt_vec_info (gimple *stmt);
1a91d914 948extern tree vectorizable_function (gcall *, tree, tree);
fb85abff 949extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
f97dec81 950 stmt_vector_for_cost *,
951 stmt_vector_for_cost *);
94b7b4dd 952extern void vect_model_store_cost (stmt_vec_info, int, bool,
4db2b577 953 enum vect_def_type, slp_tree,
f97dec81 954 stmt_vector_for_cost *,
4db2b577 955 stmt_vector_for_cost *);
956extern void vect_model_load_cost (stmt_vec_info, int, bool, slp_tree,
f97dec81 957 stmt_vector_for_cost *,
4db2b577 958 stmt_vector_for_cost *);
959extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
f97dec81 960 enum vect_cost_for_stmt, stmt_vec_info,
961 int, enum vect_cost_model_location);
42acab1c 962extern void vect_finish_stmt_generation (gimple *, gimple *,
fb85abff 963 gimple_stmt_iterator *);
964extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
5cc2ea45 965extern tree vect_get_vec_def_for_operand (tree, gimple *);
42acab1c 966extern tree vect_init_vector (gimple *, tree, tree,
fb85abff 967 gimple_stmt_iterator *);
968extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
42acab1c 969extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
fb85abff 970 bool *, slp_tree, slp_instance);
42acab1c 971extern void vect_remove_stores (gimple *);
972extern bool vect_analyze_stmt (gimple *, bool *, slp_tree);
973extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
974 gimple **, tree, int, slp_tree);
0822b158 975extern void vect_get_load_cost (struct data_reference *, int, bool,
4db2b577 976 unsigned int *, unsigned int *,
f97dec81 977 stmt_vector_for_cost *,
978 stmt_vector_for_cost *, bool);
4db2b577 979extern void vect_get_store_cost (struct data_reference *, int,
980 unsigned int *, stmt_vector_for_cost *);
45eea33f 981extern bool vect_supportable_shift (enum tree_code, tree);
42acab1c 982extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
f1f41a6c 983 vec<tree> *, slp_tree, int);
0761848d 984extern tree vect_gen_perm_mask_any (tree, const unsigned char *);
985extern tree vect_gen_perm_mask_checked (tree, const unsigned char *);
48e1416a 986
fb85abff 987/* In tree-vect-data-refs.c. */
988extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
989extern enum dr_alignment_support vect_supportable_dr_alignment
0822b158 990 (struct data_reference *, bool);
42acab1c 991extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
fb85abff 992 HOST_WIDE_INT *);
68f15e9d 993extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
994extern bool vect_slp_analyze_data_ref_dependences (bb_vec_info);
fb85abff 995extern bool vect_enhance_data_refs_alignment (loop_vec_info);
e2c5c678 996extern bool vect_analyze_data_refs_alignment (vec_info *);
997extern bool vect_verify_datarefs_alignment (vec_info *);
998extern bool vect_analyze_data_ref_accesses (vec_info *);
fb85abff 999extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
42acab1c 1000extern tree vect_check_gather_scatter (gimple *, loop_vec_info, tree *, tree *,
0bd6d857 1001 int *);
e2c5c678 1002extern bool vect_analyze_data_refs (vec_info *, int *, unsigned *);
42acab1c 1003extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
bd5ba09f 1004 tree *, gimple_stmt_iterator *,
42acab1c 1005 gimple **, bool, bool *,
1ec61bbd 1006 tree = NULL_TREE);
42acab1c 1007extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
1008 tree);
fb85abff 1009extern tree vect_create_destination_var (tree, tree);
ee612634 1010extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
94b7b4dd 1011extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
ee612634 1012extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
94b7b4dd 1013extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
42acab1c 1014extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *,
f1f41a6c 1015 gimple_stmt_iterator *, vec<tree> *);
42acab1c 1016extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *,
48e1416a 1017 enum dr_alignment_support, tree,
fb85abff 1018 struct loop **);
42acab1c 1019extern void vect_transform_grouped_load (gimple *, vec<tree> , int,
fb85abff 1020 gimple_stmt_iterator *);
42acab1c 1021extern void vect_record_grouped_load_vectors (gimple *, vec<tree> );
fb85abff 1022extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
42acab1c 1023extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
1ec61bbd 1024 tree, struct loop *,
1025 tree = NULL_TREE);
fb85abff 1026
1027/* In tree-vect-loop.c. */
1028/* FORNOW: Used in tree-parloops.c. */
1029extern void destroy_loop_vec_info (loop_vec_info, bool);
42acab1c 1030extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *, bool,
1031 bool *, bool);
fb85abff 1032/* Drive for loop analysis stage. */
f2983e95 1033extern loop_vec_info vect_analyze_loop (struct loop *);
fb85abff 1034/* Drive for loop transformation stage. */
1035extern void vect_transform_loop (loop_vec_info);
cb7f680b 1036extern loop_vec_info vect_analyze_loop_form (struct loop *);
42acab1c 1037extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
1038 gimple **);
1039extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
1040 gimple **, slp_tree);
1041extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, gimple **);
1042extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
fb85abff 1043extern int vect_min_worthwhile_factor (enum tree_code);
7a66d0cf 1044extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
1045 stmt_vector_for_cost *,
f97dec81 1046 stmt_vector_for_cost *,
1047 stmt_vector_for_cost *);
4a61a337 1048
fb85abff 1049/* In tree-vect-slp.c. */
1050extern void vect_free_slp_instance (slp_instance);
678e3d6e 1051extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
48e1416a 1052 gimple_stmt_iterator *, int,
fb85abff 1053 slp_instance, bool);
31bf2c9a 1054extern bool vect_slp_analyze_operations (vec<slp_instance> slp_instances,
1055 void *);
e2c5c678 1056extern bool vect_schedule_slp (vec_info *);
1057extern bool vect_analyze_slp (vec_info *, unsigned);
bc937a44 1058extern bool vect_make_slp_decision (loop_vec_info);
fb85abff 1059extern void vect_detect_hybrid_slp (loop_vec_info);
f1f41a6c 1060extern void vect_get_slp_defs (vec<tree> , slp_tree,
7f7695a7 1061 vec<vec<tree> > *, int);
b0f64919 1062
36f39b2e 1063extern source_location find_bb_location (basic_block);
37545e54 1064extern bb_vec_info vect_slp_analyze_bb (basic_block);
1065extern void vect_slp_transform_bb (basic_block);
fb85abff 1066
1067/* In tree-vect-patterns.c. */
4a61a337 1068/* Pattern recognition functions.
1069 Additional pattern recognition functions can (and will) be added
1070 in the future. */
42acab1c 1071typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
8f282ed2 1072#define NUM_PATTERNS 13
e2c5c678 1073void vect_pattern_recog (vec_info *);
4a61a337 1074
10230637 1075/* In tree-vectorizer.c. */
1076unsigned vectorize_loops (void);
e2c5c678 1077void vect_destroy_datarefs (vec_info *);
c91e8223 1078
1079#endif /* GCC_TREE_VECTORIZER_H */