/* Vectorizer
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

typedef struct _stmt_vec_info *stmt_vec_info;

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};
/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};
/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))
/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  enum vect_cost_model_location where;
  stmt_vec_info stmt_info;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;
/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;
/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node's statements' operands.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by the vector size.  */
  unsigned int vec_stmts_size;
  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};
/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of the SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;
/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type
/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;
/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};
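
/* For example, a requirement that abs (n) >= 16, where n is known to be
   nonnegative (so abs (n) == n), would be recorded as
   vec_lower_bound (n, true, 16).  */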
/* Vectorizer state shared between different analyses like vector sizes
   of the same CFG region.  */
struct vec_info_shared {
  vec_info_shared ();
  ~vec_info_shared ();

  void save_datarefs ();
  void check_datarefs ();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};
/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};
struct _loop_vec_info;
struct _bb_vec_info;

template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number
   of scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
         f[i * 2 + 0] += 1.0f;
         f[i * 2 + 1] += 2.0f;
         d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4

   [ In this simple example the rgroups do correspond to the normal
     SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:   1 |   1 |   1 |   0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */
/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;
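
/* For the 256-bit f/d example above, this table would contain a single
   rgroup_masks entry, since both rgroups have nV == 1.  A sketch of its
   contents (illustrative only):

     masks[0].max_nscalars_per_iter = 2;  // f's nS, the highest for nV == 1
     masks[0].mask_type = <8-lane boolean vector type>;  // sized for f
     masks[0].masks = <the one mask used in each iteration>;

   d's 4-lane masks are then obtained by VIEW_CONVERTing the 8-lane masks,
   per the simplifying assumption described above.  */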
/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;

  /* Number of iterations.  */
  tree num_iters;

  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;

  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;
  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;
  /* Unrolling factor.  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;
  /* For #pragma omp simd if (x) loops the x expression.  If constant 0,
     the loop should not be vectorized; if constant non-zero, simd_if_cond
     shouldn't be set and the loop vectorized normally; if an SSA_NAME, the
     loop should be versioned on that condition, using the scalar loop if
     the condition is false and the vectorized loop otherwise.  */
  tree simd_if_cond;

  /* Type of the IV to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree iv_type;

  /* Unknown DRs according to which loop was peeled.  */
  struct dr_vec_info *unaligned_dr;
  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
       If X=0: Peeling for alignment will not be applied.
       If X>0: Peel first X iterations.
       If X=-1: Generate a runtime test to calculate the number of iterations
                to be peeled, using the dataref recorded in the field
                unaligned_dr.  */
  int peeling_for_alignment;
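
  /* For example (illustrative numbers only): with a 16-byte target
     alignment and 4-byte elements whose first access is 8 bytes past an
     aligned boundary, peeling_for_alignment would be (16 - 8) / 4 = 2,
     since peeling two scalar iterations makes the remaining accesses
     aligned.  */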
  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;
  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* Map of OpenMP "omp simd array" scan variables to corresponding
     rhs of the store of the initializer.  */
  hash_map<tree, tree> *scan_map;

  /* The unrolling factor needed to SLP the loop.  In case pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;
  /* Is the loop vectorizable?  */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if we have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:

       #pragma omp simd
       for (int i = 0; i < m; i++)
         a[i] = a[i + k] * c;

     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;
  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* Queued scaling factor for the scalar loop.  */
  profile_probability scalar_loop_scaling;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;
} *loop_vec_info;
/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_MASK_IV_TYPE(L)         (L)->iv_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L)  (L)->scalar_loop_scaling
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info
#define LOOP_VINFO_SIMD_IF_COND(L)         (L)->simd_if_cond
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)  \
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)      \
  ((L)->comp_alias_ddrs.length () > 0              \
   || (L)->check_unequal_addrs.length () > 0       \
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)     \
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \
  (LOOP_VINFO_SIMD_IF_COND (L))
#define LOOP_REQUIRES_VERSIONING(L)                \
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)      \
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)       \
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L)      \
   || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L) \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}
typedef struct _bb_vec_info : public vec_info
{
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}
/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};
/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};
struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;
struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;

  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to
     have at most one such data-ref.  */

  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;

  /* For stores, number of stores from this group seen.  We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads,
     GAP is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized, e.g., the increment
     of the loop induction variable and computation of array indexes.
     relevant indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  unsigned simd_lane_access_p : 3;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;

  /* True if this is only suitable for SLP vectorization.  */
  bool slp_vect_only_p;
};
/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};
/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)         (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)         (S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)           (S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)            (S)->reduc_def
#define STMT_VINFO_SLP_VECT_ONLY(S)        (S)->slp_vect_only_p

#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type
#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS 3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */

#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE             \
   || ((TREE_CODE (TYPE) == INTEGER_TYPE        \
        || TREE_CODE (TYPE) == ENUMERAL_TYPE)   \
       && TYPE_PRECISION (TYPE) == 1            \
       && TYPE_UNSIGNED (TYPE)))
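
/* For example, BOOLEAN_TYPE always satisfies this predicate, as does the
   unsigned 1-bit integral type used for bit-fields such as
   "unsigned int b : 1;"; a plain 32-bit int does not, since its precision
   is not 1.  */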
static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
          && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}
/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}
/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}
/* Return the later statement between STMT1_INFO and STMT2_INFO.  */

static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}
/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}
/* Return true if BB is a loop header.  */

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}
/* Return pow2 (X).  */

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}
/* Alias targetm.vectorize.builtin_vectorization_cost.  */

static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                            tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       vectype, misalign);
}
/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}
/* Alias targetm.vectorize.init_cost.  */

static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}
extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
                            stmt_vec_info, int, unsigned,
                            enum vect_cost_model_location);
/* Alias targetm.vectorize.add_stmt_cost.  */

static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
               stmt_vec_info stmt_info, int misalign,
               enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
                                                   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
                    cost, where);
  return cost;
}
/* Alias targetm.vectorize.finish_cost.  */

static inline void
finish_cost (void *data, unsigned *prologue_cost,
             unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost,
                                 epilogue_cost);
}
/* Alias targetm.vectorize.destroy_cost_data.  */

static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}
inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
                   cost->misalign, cost->where);
}
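
/* A typical use of the cost hooks above, as a sketch (the surrounding
   analysis and the stmt_info value are elided):

     void *data = init_cost (loop);
     add_stmt_cost (data, 1, scalar_store, stmt_info, 0, vect_body);
     unsigned prologue_cost, body_cost, epilogue_cost;
     finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
     destroy_cost_data (data);  */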
/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

static inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

static inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)
/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */

static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}
/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */

static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
/* Return the behavior of DR_INFO with respect to the vectorization context
   (which for outer loop vectorization might not be the behavior recorded
   in DR_INFO itself).  */

static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
  stmt_vec_info stmt_info = dr_info->stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (loop_vinfo == NULL
      || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
    return &DR_INNERMOST (dr_info->dr);
  else
    return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}
/* Return true if the vect cost model is unlimited.  */

static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}
/* Return true if the loop described by LOOP_VINFO is fully-masked and
   if the first iteration should use a partial mask in order to achieve
   alignment.  */

static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
  return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
          && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}
/* Return the number of vectors of type VECTYPE that are needed to get
   NUNITS elements.  NUNITS should be based on the vectorization factor,
   so it is always a known multiple of the number of elements in VECTYPE.  */

static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
  return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}

/* Return the number of copies needed for loop vectorization when
   a statement operates on vectors of type VECTYPE.  This is the
   vectorization factor divided by the number of elements in
   VECTYPE and is always known at compile time.  */

static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
  return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}
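
/* For example, with a vectorization factor of 8 and a 4-element vector
   type such as V4SI, vect_get_num_copies returns 8 / 4 == 2: each scalar
   stmt needs two vector stmts per vector loop iteration.  */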
/* Update maximum unit count *MAX_NUNITS so that it accounts for
   the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
   if we haven't yet recorded any vector types.  */

static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
  /* All unit counts have the form current_vector_size * X for some
     rational X, so two unit sizes must have a common multiple.
     Everything is a multiple of the initial value of 1.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  *max_nunits = force_common_multiple (*max_nunits, nunits);
}
/* Return the vectorization factor that should be used for costing
   purposes while vectorizing the loop described by LOOP_VINFO.
   Pick a reasonable estimate if the vectorization factor isn't
   known at compile time.  */

static inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
  return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}

/* Estimate the number of elements in VEC_TYPE for costing purposes.
   Pick a reasonable estimate if the exact number isn't known at
   compile time.  */

static inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
  return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
}
/* Return the maximum possible vectorization factor for LOOP_VINFO.  */

static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return vf;
  return MAX_VECTORIZATION_FACTOR;
}
/* Return the size of the value accessed by unvectorized data reference
   DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
   for the associated gimple statement, since that guarantees that DR_INFO
   accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
   here includes things like V1SI, which can be vectorized in the same way
   as a plain SI.)  */

inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
  return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}
/* Source location + hotness information.  */
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)
/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations never
   get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  ~auto_purge_vect_location ();
};
/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void vect_set_loop_condition (struct loop *, loop_vec_info,
                                     tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
                                                     struct loop *, edge);
struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
                                   poly_uint64);
extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
                                     tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c.  */
extern poly_uint64 current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
                                stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
                                tree *, stmt_vec_info * = NULL,
                                gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
                                            tree, tree, enum tree_code *,
                                            enum tree_code *, int *,
                                            vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
                                             enum tree_code *, int *,
                                             vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
                                  enum vect_cost_for_stmt, stmt_vec_info,
                                  int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
                                                  gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
                               vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
                                             vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
                              gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
                                 slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
                                     slp_instance, stmt_vector_for_cost *);
extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, bool, slp_tree,
                                    stmt_vector_for_cost *);
extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *,
                                stmt_vec_info *, slp_tree,
                                stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
                                unsigned int *, unsigned int *,
                                stmt_vector_for_cost *,
                                stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
                                 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (struct loop *);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
                                                  tree *);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);
/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
                                   (dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
                                           HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info,
                                                     unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
                                      signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
                                       gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
                                                 vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
                                      tree *, gimple_stmt_iterator *,
                                      gimple **, bool,
                                      tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
                             stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree>, unsigned int, stmt_vec_info,
                                      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
                                    tree *, enum dr_alignment_support, tree,
                                    struct loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree>, int,
                                         gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
                                   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
                                                  tree, tree = NULL_TREE);
/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info,
                                                  bool *, bool);
extern widest_int vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo);
/* Used in gimple-loop-interchange.c.  */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
                                  enum tree_code);
/* Drive for loop analysis stage.  */
extern opt_loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info,
                                            vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
                                         tree *, bool);
extern tree vect_halve_mask_nunits (tree);
extern tree vect_double_mask_nunits (tree);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
                                   unsigned int, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
                                unsigned int, tree, unsigned int);
/* Drive for loop transformation stage.  */
extern struct loop *vect_transform_loop (loop_vec_info);
extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
                                                 vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
                                         slp_tree, int, stmt_vec_info *,
                                         stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, slp_tree, slp_instance,
                                    stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, slp_tree,
                                    stmt_vector_for_cost *);
extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
                                        stmt_vector_for_cost *,
                                        stmt_vector_for_cost *,
                                        stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
                                          gimple_stmt_iterator *, poly_uint64,
                                          slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
                                            unsigned int * = NULL,
                                            tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
                                      unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (struct loop *);
gimple *vect_loop_vectorized_call (struct loop *, gcond **cond = NULL);

#endif  /* GCC_TREE_VECTORIZER_H  */