/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "target.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"
#include "cgraph.h"
/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "builtins.h"

/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>\n", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}
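
/* Usage sketch (editorial illustration, not part of the original sources):
   the load/store-lanes support queries elsewhere in the vectorizer reduce
   to calls such as

     vect_lanes_optab_supported_p ("vec_load_lanes", vec_load_lanes_optab,
                                   vectype, count);

   which succeed only if an integer mode of
   count * GET_MODE_BITSIZE (TYPE_MODE (vectype)) bits exists and the
   target implements the corresponding conversion optab for that
   <array mode, vector mode> pair.  */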


/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.  If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}
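
/* Illustrative example (editorial addition, not in the original sources):
   for a promotion statement such as

     short s = ...;
     int i = (int) s;        /- gimple: i_1 = (int) s_2 -/

   the result type is 'int' (4 bytes) but the rhs type is 'short' (2 bytes),
   so the function returns 'short' with *LHS_SIZE_UNIT == 4 and
   *RHS_SIZE_UNIT == 2; it is the smaller rhs type that ends up determining
   the vectorization factor.  */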


/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
      dump_printf (MSG_NOTE, "\n");
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not supported when optimizing"
                         " for size.\n");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for outer-loops.\n");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for non-constant "
                         "step\n");
      return false;
    }

  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
  return true;
}
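
/* Illustrative example (editorial addition, not in the original sources):
   for

     void f (int *p, int *q, int n)
     {
       for (int i = 0; i < n; i++)
         p[i] = q[i] + 1;
     }

   the dependence between p[i] and q[i] is unknown at compile time, so the
   DDR is pushed onto LOOP_VINFO_MAY_ALIAS_DDRS here and the loop is later
   versioned with a run-time check that the two accessed ranges do not
   overlap.  */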


/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  Return FALSE when the dependence can
   instead be handled by a run-time alias check (versioning for alias).
   Adjust *MAX_VF according to the data dependence.  */

static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* In loop analysis all data references should be vectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    gcc_unreachable ();

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb
      || (DR_IS_READ (dra) && DR_IS_READ (drb)))
    return false;

  /* Even if we have an anti-dependence then, as the vectorized loop covers at
     least two scalar iterations, there is always also a true dependence.
     As the vectorizer does not re-order loads and stores we can ignore
     the anti-dependence if TBAA can disambiguate both DRs similar to the
     case with known negative distance anti-dependences (positive
     distance anti-dependences would violate TBAA constraints).  */
  if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
       || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
      && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
                                 get_alias_set (DR_REF (drb))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "can't determine dependence between ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  /* Known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "bad dist vector for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "bad dist vector for ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance = %d.\n", dist);

      if (dist == 0)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "dependence distance == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
              dump_printf (MSG_NOTE, "\n");
            }

          /* When we perform grouped accesses and perform implicit CSE
             by detecting equal accesses and doing disambiguation with
             runtime alias tests like for
                .. = a[i];
                .. = a[i+1];
                a[i] = ..;
                a[i+1] = ..;
                *p = ..;
                .. = a[i];
                .. = a[i+1];
             where we will end up loading { a[i], a[i+1] } once, make
             sure that inserting group loads before the first load and
             stores after the last store will do the right thing.
             Similar for groups like
                a[i] = ...;
                ... = a[i];
                a[i+1] = ...;
             where loads from the group interleave with the store.  */
          if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
              || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
            {
              gimple earlier_stmt;
              earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
              if (DR_IS_WRITE
                    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "READ_WRITE dependence in interleaving."
                                     "\n");
                  return true;
                }
            }

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "dependence distance negative.\n");
          /* Record a negative dependence distance to later limit the
             amount of stmt copying / unrolling we can perform.
             Only need to handle read-after-write dependence.  */
          if (DR_IS_READ (drb)
              && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
                  || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
            STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "adjusting maximal vectorization factor to %i\n",
                             *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "dependence distance >= VF.\n");
          continue;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized, possible dependence "
                           "between data-refs ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return true;
    }

  return false;
}
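
/* Worked example (editorial illustration, not part of the original sources):
   in

     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + 1;

   the distance vector for the two a[] accesses is 3, so the loop above
   reduces *MAX_VF to 3: vectorizing with a factor of at most 3 keeps each
   vector iteration from reading an element written in the same vector
   iteration.  A distance of 0 (a[i] read and written in the same scalar
   iteration) is handled separately and does not limit the factor.  */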

/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */

bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_dependences ===\n");

  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
  if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
                                &LOOP_VINFO_DDRS (loop_vinfo),
                                LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
    return false;

  FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}


/* Function vect_slp_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB that would prevent basic-block (SLP)
   vectorization.  Return FALSE if the accesses are independent.  */

static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  /* We need to check dependences of statements marked as unvectorizable
     as well, they still can prohibit vectorization.  */

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb)
    return false;

  /* Read-read is OK.  */
  if (DR_IS_READ (dra) && DR_IS_READ (drb))
    return false;

  /* If dra and drb are part of the same interleaving chain consider
     them independent.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
          == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
    }
  else if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "determined dependence between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
      dump_printf (MSG_NOTE, "\n");
    }

  /* We do not vectorize basic blocks with write-write dependencies.  */
  if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
    return true;

  /* If we have a read-write dependence check that the load is before the store.
     When we vectorize basic blocks, vector load can be only before
     corresponding scalar load, and vector store can be only after its
     corresponding scalar store.  So the order of the accesses is preserved in
     case the load is before the store.  */
  gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    {
      /* That only holds for load-store pairs taking part in vectorization.  */
      if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
          && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
        return false;
    }

  return true;
}


/* Function vect_slp_analyze_data_ref_dependences.

   Examine all the data references in the basic-block, and make sure there
   do not exist any data dependences between them.  */

bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
  struct data_dependence_relation *ddr;
  unsigned int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_data_ref_dependences ===\n");

  if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
                                &BB_VINFO_DDRS (bb_vinfo),
                                vNULL, true))
    return false;

  FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
    if (vect_slp_analyze_data_ref_dependence (ddr))
      return false;

  return true;
}


/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data reference
      cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */

static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign = NULL_TREE;
  tree aligned_to;
  unsigned HOST_WIDE_INT alignment;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_compute_data_ref_alignment:\n");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  /* Strided accesses perform only component accesses, misalignment information
     is irrelevant for them.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return true;

  if (tree_fits_shwi_p (DR_STEP (dr)))
    misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);

      if (tree_fits_shwi_p (step)
          && tree_to_shwi (step) % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "inner step divides the vector-size.\n");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "inner step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  /* Similarly we can only use base and misalignment information relative to
     an innermost loop if the misalignment stays the same throughout the
     execution of the loop.  As above, this is the case if the stride of
     the dataref evenly divides by the vector size.  */
  else
    {
      tree step = DR_STEP (dr);
      unsigned vf = loop ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1;

      if (tree_fits_shwi_p (step)
          && ((tree_to_shwi (step) * vf)
              % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  alignment = TYPE_ALIGN_UNIT (vectype);

  if ((compare_tree_int (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown alignment for access: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return true;
    }

  /* To look at alignment of the base we have to preserve an inner MEM_REF
     as that carries alignment information of the actual access.  */
  base = ref;
  while (handled_component_p (base))
    base = TREE_OPERAND (base, 0);
  if (TREE_CODE (base) == MEM_REF)
    base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
                   build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));

  if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Strip an inner MEM_REF to a bare decl if possible.  */
      if (TREE_CODE (base) == MEM_REF
          && integer_zerop (TREE_OPERAND (base, 1))
          && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
        base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);

      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "can't force alignment of ref: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
              dump_printf (MSG_NOTE, "\n");
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
          dump_printf (MSG_NOTE, "\n");
        }

      ((dataref_aux *)dr->aux)->base_decl = base;
      ((dataref_aux *)dr->aux)->base_misaligned = true;
    }

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
  if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  SET_DR_MISALIGNMENT (dr,
                       wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
    }

  return true;
}
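
/* Worked example (editorial illustration, not part of the original sources):
   given

     int a[256] __attribute__((aligned(16)));
     for (i = 0; i < n; i++)
       ... = a[i + 1];

   with a 16-byte V4SI vectype, DR_BASE_ADDRESS is &a, DR_INIT is 4 and the
   base is 16-byte aligned, so the code above records
   DR_MISALIGNMENT == 4 mod 16 == 4 bytes.  If the base alignment or the
   constant start offset cannot be determined, the misalignment stays -1
   ("unknown").  */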


/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}


/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  vec<dr_p> same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
  SET_DR_MISALIGNMENT (dr, -1);
}
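
/* Worked example (editorial illustration, not part of the original sources):
   suppose DR_PEEL is misaligned by 8 bytes and DR by 4 bytes, both with
   4-byte elements and a 16-byte vectype.  Peeling NPEEL == 2 iterations
   aligns DR_PEEL, and DR's misalignment becomes
   (4 + 2 * 4) & 15 == 12 bytes.  For a negative step the peeled iterations
   move the access downwards in memory, hence the -npeel * dr_size term
   instead.  */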


/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */

bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      /* Strided accesses perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDED_P (stmt_info)
          && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (dump_enabled_p ())
            {
              if (DR_IS_READ (dr))
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned load.");
              else
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned "
                                 "store.");

              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dr));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Vectorizing an unaligned access.\n");
    }
  return true;
}

/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    return true;

  return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
          > get_object_alignment (exp));
}

/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          dump_printf (MSG_NOTE,
                       ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unknown misalignment, is_packed = %d\n", is_packed);
      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}
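
/* Illustrative example (editorial addition, not part of the original
   sources): a V4SI access misaligned by 8 bytes can reach 16-byte alignment
   after peeling two scalar iterations ((16 - 8) / 4 == 2), and since
   8 % 4 == 0 the function returns true.  A misalignment of 2 bytes is not a
   multiple of the 4-byte element size, so no number of whole scalar
   iterations can fix it and the function returns false.  */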
1032
720f5239
IR
1033
1034/* Calculate the cost of the memory access represented by DR. */
1035
92345349 1036static void
720f5239
IR
1037vect_get_data_access_cost (struct data_reference *dr,
1038 unsigned int *inside_cost,
92345349
BS
1039 unsigned int *outside_cost,
1040 stmt_vector_for_cost *body_cost_vec)
720f5239
IR
1041{
1042 gimple stmt = DR_STMT (dr);
1043 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1044 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1045 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1046 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1047 int ncopies = vf / nunits;
720f5239 1048
38eec4c6 1049 if (DR_IS_READ (dr))
92345349
BS
1050 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1051 NULL, body_cost_vec, false);
720f5239 1052 else
92345349 1053 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
720f5239 1054
73fbfcad 1055 if (dump_enabled_p ())
78c60e3d
SS
1056 dump_printf_loc (MSG_NOTE, vect_location,
1057 "vect_get_data_access_cost: inside_cost = %d, "
e645e942 1058 "outside_cost = %d.\n", *inside_cost, *outside_cost);
720f5239
IR
1059}
1060
1061
720f5239
IR
1062/* Insert DR into peeling hash table with NPEEL as key. */
1063
1064static void
1065vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1066 int npeel)
1067{
1068 struct _vect_peel_info elem, *slot;
bf190e8d 1069 _vect_peel_info **new_slot;
720f5239
IR
1070 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1071
1072 elem.npeel = npeel;
c203e8a7 1073 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
720f5239
IR
1074 if (slot)
1075 slot->count++;
1076 else
1077 {
1078 slot = XNEW (struct _vect_peel_info);
1079 slot->npeel = npeel;
1080 slot->dr = dr;
1081 slot->count = 1;
c203e8a7
TS
1082 new_slot
1083 = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
720f5239
IR
1084 *new_slot = slot;
1085 }
1086
8b5e1202
SO
1087 if (!supportable_dr_alignment
1088 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
720f5239
IR
1089 slot->count += VECT_MAX_COST;
1090}
1091
1092
1093/* Traverse peeling hash table to find peeling option that aligns maximum
1094 number of data accesses. */
1095
bf190e8d
LC
1096int
1097vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1098 _vect_peel_extended_info *max)
720f5239 1099{
bf190e8d 1100 vect_peel_info elem = *slot;
720f5239 1101
44542f8e
IR
1102 if (elem->count > max->peel_info.count
1103 || (elem->count == max->peel_info.count
1104 && max->peel_info.npeel > elem->npeel))
720f5239
IR
1105 {
1106 max->peel_info.npeel = elem->npeel;
1107 max->peel_info.count = elem->count;
1108 max->peel_info.dr = elem->dr;
1109 }
1110
1111 return 1;
1112}
1113
1114
ff802fa1
IR
1115/* Traverse peeling hash table and calculate cost for each peeling option.
1116 Find the one with the lowest cost. */
720f5239 1117
bf190e8d
LC
1118int
1119vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1120 _vect_peel_extended_info *min)
720f5239 1121{
bf190e8d 1122 vect_peel_info elem = *slot;
720f5239
IR
1123 int save_misalignment, dummy;
1124 unsigned int inside_cost = 0, outside_cost = 0, i;
1125 gimple stmt = DR_STMT (elem->dr);
1126 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1127 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
9771b263 1128 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
720f5239 1129 struct data_reference *dr;
92345349 1130 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
92345349 1131
9771b263
DN
1132 prologue_cost_vec.create (2);
1133 body_cost_vec.create (2);
1134 epilogue_cost_vec.create (2);
720f5239 1135
9771b263 1136 FOR_EACH_VEC_ELT (datarefs, i, dr)
720f5239
IR
1137 {
1138 stmt = DR_STMT (dr);
1139 stmt_info = vinfo_for_stmt (stmt);
1140 /* For interleaving, only the alignment of the first access
1141 matters. */
0d0293ac 1142 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
e14c1050 1143 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
720f5239
IR
1144 continue;
1145
1146 save_misalignment = DR_MISALIGNMENT (dr);
1147 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
92345349
BS
1148 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1149 &body_cost_vec);
720f5239
IR
1150 SET_DR_MISALIGNMENT (dr, save_misalignment);
1151 }
1152
696814ed
RB
1153 outside_cost += vect_get_known_peeling_cost
1154 (loop_vinfo, elem->npeel, &dummy,
6d098c57
RB
1155 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1156 &prologue_cost_vec, &epilogue_cost_vec);
92345349
BS
1157
1158 /* Prologue and epilogue costs are added to the target model later.
1159 These costs depend only on the scalar iteration cost, the
1160 number of peeling iterations finally chosen, and the number of
1161 misaligned statements. So discard the information found here. */
9771b263
DN
1162 prologue_cost_vec.release ();
1163 epilogue_cost_vec.release ();
720f5239
IR
1164
1165 if (inside_cost < min->inside_cost
1166 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1167 {
1168 min->inside_cost = inside_cost;
1169 min->outside_cost = outside_cost;
9771b263 1170 min->body_cost_vec.release ();
92345349 1171 min->body_cost_vec = body_cost_vec;
720f5239
IR
1172 min->peel_info.dr = elem->dr;
1173 min->peel_info.npeel = elem->npeel;
1174 }
92345349 1175 else
9771b263 1176 body_cost_vec.release ();
720f5239
IR
1177
1178 return 1;
1179}
1180
1181
1182/* Choose best peeling option by traversing peeling hash table and either
1183 choosing an option with the lowest cost (if cost model is enabled) or the
1184 option that aligns as many accesses as possible. */
1185
1186static struct data_reference *
1187vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
c3e7ee41 1188 unsigned int *npeel,
92345349 1189 stmt_vector_for_cost *body_cost_vec)
720f5239
IR
1190{
1191 struct _vect_peel_extended_info res;
1192
1193 res.peel_info.dr = NULL;
c3284718 1194 res.body_cost_vec = stmt_vector_for_cost ();
720f5239 1195
8b5e1202 1196 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
720f5239
IR
1197 {
1198 res.inside_cost = INT_MAX;
1199 res.outside_cost = INT_MAX;
bf190e8d 1200 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
c203e8a7
TS
1201 ->traverse <_vect_peel_extended_info *,
1202 vect_peeling_hash_get_lowest_cost> (&res);
720f5239
IR
1203 }
1204 else
1205 {
1206 res.peel_info.count = 0;
bf190e8d 1207 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
c203e8a7
TS
1208 ->traverse <_vect_peel_extended_info *,
1209 vect_peeling_hash_get_most_frequent> (&res);
720f5239
IR
1210 }
1211
1212 *npeel = res.peel_info.npeel;
92345349 1213 *body_cost_vec = res.body_cost_vec;
720f5239
IR
1214 return res.peel_info.dr;
1215}
1216
1217
ebfd146a
IR
1218/* Function vect_enhance_data_refs_alignment
1219
1220 This pass will use loop versioning and loop peeling in order to enhance
1221 the alignment of data references in the loop.
1222
1223 FOR NOW: we assume that whatever versioning/peeling takes place, only the
ff802fa1 1224 original loop is to be vectorized. Any other loops that are created by
ebfd146a 1225 the transformations performed in this pass - are not supposed to be
ff802fa1 1226 vectorized. This restriction will be relaxed.
ebfd146a
IR
1227
1228 This pass will require a cost model to guide it whether to apply peeling
ff802fa1 1229 or versioning or a combination of the two. For example, the scheme that
ebfd146a
IR
1230 intel uses when given a loop with several memory accesses, is as follows:
1231 choose one memory access ('p') which alignment you want to force by doing
ff802fa1 1232 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
ebfd146a
IR
1233 other accesses are not necessarily aligned, or (2) use loop versioning to
1234 generate one loop in which all accesses are aligned, and another loop in
1235 which only 'p' is necessarily aligned.
1236
1237 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1238 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
1239 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1240
ff802fa1 1241 Devising a cost model is the most critical aspect of this work. It will
ebfd146a 1242 guide us on which access to peel for, whether to use loop versioning, how
ff802fa1 1243 many versions to create, etc. The cost model will probably consist of
ebfd146a
IR
1244 generic considerations as well as target specific considerations (on
1245 powerpc for example, misaligned stores are more painful than misaligned
1246 loads).
1247
1248 Here are the general steps involved in alignment enhancements:
1249
1250 -- original loop, before alignment analysis:
1251 for (i=0; i<N; i++){
1252 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1253 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1254 }
1255
1256 -- After vect_compute_data_refs_alignment:
1257 for (i=0; i<N; i++){
1258 x = q[i]; # DR_MISALIGNMENT(q) = 3
1259 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1260 }
1261
1262 -- Possibility 1: we do loop versioning:
1263 if (p is aligned) {
1264 for (i=0; i<N; i++){ # loop 1A
1265 x = q[i]; # DR_MISALIGNMENT(q) = 3
1266 p[i] = y; # DR_MISALIGNMENT(p) = 0
1267 }
1268 }
1269 else {
1270 for (i=0; i<N; i++){ # loop 1B
1271 x = q[i]; # DR_MISALIGNMENT(q) = 3
1272 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1273 }
1274 }
1275
1276 -- Possibility 2: we do loop peeling:
1277 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1278 x = q[i];
1279 p[i] = y;
1280 }
1281 for (i = 3; i < N; i++){ # loop 2A
1282 x = q[i]; # DR_MISALIGNMENT(q) = 0
1283 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1284 }
1285
1286 -- Possibility 3: combination of loop peeling and versioning:
1287 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1288 x = q[i];
1289 p[i] = y;
1290 }
1291 if (p is aligned) {
1292 for (i = 3; i<N; i++){ # loop 3A
1293 x = q[i]; # DR_MISALIGNMENT(q) = 0
1294 p[i] = y; # DR_MISALIGNMENT(p) = 0
1295 }
1296 }
1297 else {
1298 for (i = 3; i<N; i++){ # loop 3B
1299 x = q[i]; # DR_MISALIGNMENT(q) = 0
1300 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1301 }
1302 }
1303
ff802fa1 1304 These loops are later passed to loop_transform to be vectorized. The
ebfd146a
IR
1305 vectorizer will use the alignment information to guide the transformation
1306 (whether to generate regular loads/stores, or with special handling for
1307 misalignment). */
1308
1309bool
1310vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1311{
9771b263 1312 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
ebfd146a
IR
1313 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1314 enum dr_alignment_support supportable_dr_alignment;
720f5239 1315 struct data_reference *dr0 = NULL, *first_store = NULL;
ebfd146a 1316 struct data_reference *dr;
720f5239 1317 unsigned int i, j;
ebfd146a
IR
1318 bool do_peeling = false;
1319 bool do_versioning = false;
1320 bool stat;
1321 gimple stmt;
1322 stmt_vec_info stmt_info;
720f5239
IR
1323 unsigned int npeel = 0;
1324 bool all_misalignments_unknown = true;
1325 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1326 unsigned possible_npeel_number = 1;
1327 tree vectype;
1328 unsigned int nelements, mis, same_align_drs_max = 0;
c3284718 1329 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
ebfd146a 1330
73fbfcad 1331 if (dump_enabled_p ())
78c60e3d 1332 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1333 "=== vect_enhance_data_refs_alignment ===\n");
ebfd146a
IR
1334
1335 /* While cost model enhancements are expected in the future, the high level
1336 view of the code at this time is as follows:
1337
673beced
RE
1338 A) If there is a misaligned access then see if peeling to align
1339 this access can make all data references satisfy
8f439681
RE
1340 vect_supportable_dr_alignment. If so, update data structures
1341 as needed and return true.
ebfd146a
IR
1342
1343 B) If peeling wasn't possible and there is a data reference with an
1344 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1345 then see if loop versioning checks can be used to make all data
1346 references satisfy vect_supportable_dr_alignment. If so, update
1347 data structures as needed and return true.
1348
1349 C) If neither peeling nor versioning were successful then return false if
1350 any data reference does not satisfy vect_supportable_dr_alignment.
1351
1352 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1353
1354 Note, Possibility 3 above (which is peeling and versioning together) is not
1355 being done at this time. */
1356
1357 /* (1) Peeling to force alignment. */
1358
1359 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1360 Considerations:
1361 + How many accesses will become aligned due to the peeling
1362 - How many accesses will become unaligned due to the peeling,
1363 and the cost of misaligned accesses.
b8698a0f 1364 - The cost of peeling (the extra runtime checks, the increase
720f5239 1365 in code size). */
ebfd146a 1366
9771b263 1367 FOR_EACH_VEC_ELT (datarefs, i, dr)
ebfd146a
IR
1368 {
1369 stmt = DR_STMT (dr);
1370 stmt_info = vinfo_for_stmt (stmt);
1371
38eec4c6 1372 if (!STMT_VINFO_RELEVANT_P (stmt_info))
39becbac
RG
1373 continue;
1374
ebfd146a
IR
1375 /* For interleaving, only the alignment of the first access
1376 matters. */
0d0293ac 1377 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
e14c1050 1378 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
ebfd146a
IR
1379 continue;
1380
39becbac
RG
1381 /* For invariant accesses there is nothing to enhance. */
1382 if (integer_zerop (DR_STEP (dr)))
1383 continue;
1384
f2e2a985 1385 /* Strided accesses perform only component accesses, alignment is
319e6439 1386 irrelevant for them. */
f2e2a985 1387 if (STMT_VINFO_STRIDED_P (stmt_info)
7b5fc413 1388 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
319e6439
RG
1389 continue;
1390
720f5239
IR
1391 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1392 do_peeling = vector_alignment_reachable_p (dr);
1393 if (do_peeling)
ebfd146a 1394 {
720f5239
IR
1395 if (known_alignment_for_access_p (dr))
1396 {
1397 unsigned int npeel_tmp;
d8ba5b19
RG
1398 bool negative = tree_int_cst_compare (DR_STEP (dr),
1399 size_zero_node) < 0;
720f5239
IR
1400
1401 /* Save info about DR in the hash table. */
c203e8a7
TS
1402 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1403 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1404 = new hash_table<peel_info_hasher> (1);
720f5239
IR
1405
1406 vectype = STMT_VINFO_VECTYPE (stmt_info);
1407 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1408 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1409 TREE_TYPE (DR_REF (dr))));
d8ba5b19 1410 npeel_tmp = (negative
8b8bba2d
RG
1411 ? (mis - nelements) : (nelements - mis))
1412 & (nelements - 1);
720f5239
IR
1413
1414 /* For multiple types, it is possible that the bigger type access
ff802fa1 1415 will have more than one peeling option. E.g., a loop with two
720f5239 1416 types: one of size (vector size / 4), and the other one of
ff802fa1 1417 size (vector size / 8). Vectorization factor will 8. If both
720f5239 1418 access are misaligned by 3, the first one needs one scalar
ff802fa1 1419 iteration to be aligned, and the second one needs 5. But the
720f5239
IR
1420 the first one will be aligned also by peeling 5 scalar
1421 iterations, and in that case both accesses will be aligned.
1422 Hence, except for the immediate peeling amount, we also want
1423 to try to add full vector size, while we don't exceed
1424 vectorization factor.
1425 We do this automtically for cost model, since we calculate cost
1426 for every peeling option. */
8b5e1202 1427 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
91ff1504
RB
1428 {
1429 if (STMT_SLP_TYPE (stmt_info))
1430 possible_npeel_number
1431 = (vf * GROUP_SIZE (stmt_info)) / nelements;
1432 else
1433 possible_npeel_number = vf / nelements;
1434 }
720f5239
IR
1435
1436 /* Handle the aligned case. We may decide to align some other
1437 access, making DR unaligned. */
1438 if (DR_MISALIGNMENT (dr) == 0)
1439 {
1440 npeel_tmp = 0;
8b5e1202 1441 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
720f5239
IR
1442 possible_npeel_number++;
1443 }
1444
1445 for (j = 0; j < possible_npeel_number; j++)
1446 {
720f5239
IR
1447 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1448 npeel_tmp += nelements;
1449 }
1450
1451 all_misalignments_unknown = false;
1452 /* Data-ref that was chosen for the case that all the
1453 misalignments are unknown is not relevant anymore, since we
1454 have a data-ref with known alignment. */
1455 dr0 = NULL;
1456 }
1457 else
1458 {
4ba5ea11
RB
1459 /* If we don't know any misalignment values, we prefer
1460 peeling for data-ref that has the maximum number of data-refs
720f5239
IR
1461 with the same alignment, unless the target prefers to align
1462 stores over load. */
1463 if (all_misalignments_unknown)
1464 {
4ba5ea11
RB
1465 unsigned same_align_drs
1466 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1467 if (!dr0
1468 || same_align_drs_max < same_align_drs)
720f5239 1469 {
4ba5ea11 1470 same_align_drs_max = same_align_drs;
720f5239
IR
1471 dr0 = dr;
1472 }
4ba5ea11
RB
1473 /* For data-refs with the same number of related
1474 accesses prefer the one where the misalign
1475 computation will be invariant in the outermost loop. */
1476 else if (same_align_drs_max == same_align_drs)
1477 {
1478 struct loop *ivloop0, *ivloop;
1479 ivloop0 = outermost_invariant_loop_for_expr
1480 (loop, DR_BASE_ADDRESS (dr0));
1481 ivloop = outermost_invariant_loop_for_expr
1482 (loop, DR_BASE_ADDRESS (dr));
1483 if ((ivloop && !ivloop0)
1484 || (ivloop && ivloop0
1485 && flow_loop_nested_p (ivloop, ivloop0)))
1486 dr0 = dr;
1487 }
720f5239 1488
b0af49c4 1489 if (!first_store && DR_IS_WRITE (dr))
720f5239
IR
1490 first_store = dr;
1491 }
1492
1493 /* If there are both known and unknown misaligned accesses in the
1494 loop, we choose peeling amount according to the known
1495 accesses. */
720f5239
IR
1496 if (!supportable_dr_alignment)
1497 {
1498 dr0 = dr;
b0af49c4 1499 if (!first_store && DR_IS_WRITE (dr))
720f5239
IR
1500 first_store = dr;
1501 }
1502 }
1503 }
1504 else
1505 {
1506 if (!aligned_access_p (dr))
1507 {
73fbfcad 1508 if (dump_enabled_p ())
e645e942
TJ
1509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1510 "vector alignment may not be reachable\n");
720f5239
IR
1511 break;
1512 }
1513 }
ebfd146a
IR
1514 }
1515
afb119be
RB
1516 /* Check if we can possibly peel the loop. */
1517 if (!vect_can_advance_ivs_p (loop_vinfo)
ebfd146a
IR
1518 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1519 do_peeling = false;
1520
b1aef01e
RB
1521 if (do_peeling
1522 && all_misalignments_unknown
720f5239
IR
1523 && vect_supportable_dr_alignment (dr0, false))
1524 {
720f5239
IR
1525 /* Check whether the target prefers to align stores over loads, i.e., if
1526 misaligned stores are more expensive than misaligned loads (taking
1527 drs with same alignment into account). */
1528 if (first_store && DR_IS_READ (dr0))
1529 {
1530 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1531 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1532 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1533 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
9771b263
DN
1534 stmt_vector_for_cost dummy;
1535 dummy.create (2);
92345349
BS
1536
1537 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1538 &dummy);
1539 vect_get_data_access_cost (first_store, &store_inside_cost,
1540 &store_outside_cost, &dummy);
720f5239 1541
9771b263 1542 dummy.release ();
720f5239
IR
1543
1544 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1545 aligning the load DR0). */
1546 load_inside_penalty = store_inside_cost;
1547 load_outside_penalty = store_outside_cost;
9771b263
DN
1548 for (i = 0;
1549 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1550 DR_STMT (first_store))).iterate (i, &dr);
720f5239
IR
1551 i++)
1552 if (DR_IS_READ (dr))
1553 {
1554 load_inside_penalty += load_inside_cost;
1555 load_outside_penalty += load_outside_cost;
1556 }
1557 else
1558 {
1559 load_inside_penalty += store_inside_cost;
1560 load_outside_penalty += store_outside_cost;
1561 }
1562
1563 /* Calculate the penalty for leaving DR0 unaligned (by
1564 aligning the FIRST_STORE). */
1565 store_inside_penalty = load_inside_cost;
1566 store_outside_penalty = load_outside_cost;
9771b263
DN
1567 for (i = 0;
1568 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1569 DR_STMT (dr0))).iterate (i, &dr);
720f5239
IR
1570 i++)
1571 if (DR_IS_READ (dr))
1572 {
1573 store_inside_penalty += load_inside_cost;
1574 store_outside_penalty += load_outside_cost;
1575 }
1576 else
1577 {
1578 store_inside_penalty += store_inside_cost;
1579 store_outside_penalty += store_outside_cost;
1580 }
1581
1582 if (load_inside_penalty > store_inside_penalty
1583 || (load_inside_penalty == store_inside_penalty
1584 && load_outside_penalty > store_outside_penalty))
1585 dr0 = first_store;
1586 }
1587
1588 /* In case there are only loads with different unknown misalignments, use
476c1280
RB
1589 peeling only if it may help to align other accesses in the loop or
1590 if it may help improve load bandwidth when we'd end up using
1591 unaligned loads. */
1592 tree dr0_vt = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr0)));
9771b263
DN
1593 if (!first_store
1594 && !STMT_VINFO_SAME_ALIGN_REFS (
1595 vinfo_for_stmt (DR_STMT (dr0))).length ()
476c1280
RB
1596 && (vect_supportable_dr_alignment (dr0, false)
1597 != dr_unaligned_supported
1598 || (builtin_vectorization_cost (vector_load, dr0_vt, 0)
1599 == builtin_vectorization_cost (unaligned_load, dr0_vt, -1))))
720f5239
IR
1600 do_peeling = false;
1601 }
1602
1603 if (do_peeling && !dr0)
1604 {
1605 /* Peeling is possible, but every data access is supported even when
1606 unaligned. So we try to choose the best possible peeling amount. */
1607
1608 /* We should get here only if there are drs with known misalignment. */
1609 gcc_assert (!all_misalignments_unknown);
1610
1611 /* Choose the best peeling from the hash table. */
c3e7ee41 1612 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
92345349 1613 &body_cost_vec);
720f5239
IR
1614 if (!dr0 || !npeel)
1615 do_peeling = false;
1616 }
1617
ebfd146a
IR
1618 if (do_peeling)
1619 {
720f5239
IR
1620 stmt = DR_STMT (dr0);
1621 stmt_info = vinfo_for_stmt (stmt);
1622 vectype = STMT_VINFO_VECTYPE (stmt_info);
1623 nelements = TYPE_VECTOR_SUBPARTS (vectype);
ebfd146a
IR
1624
1625 if (known_alignment_for_access_p (dr0))
1626 {
d8ba5b19
RG
1627 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1628 size_zero_node) < 0;
720f5239
IR
1629 if (!npeel)
1630 {
1631 /* Since it's known at compile time, compute the number of
1632 iterations in the peeled loop (the peeling factor) for use in
1633 updating DR_MISALIGNMENT values. The peeling factor is the
1634 vectorization factor minus the misalignment as an element
1635 count. */
1636 mis = DR_MISALIGNMENT (dr0);
1637 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
8b8bba2d
RG
1638 npeel = ((negative ? mis - nelements : nelements - mis)
1639 & (nelements - 1));
720f5239 1640 }
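/* A worked example with hypothetical numbers: a misalignment of 8 bytes
   with 4-byte elements gives mis = 2 elements; with nelements = 4 and a
   positive step, npeel = (4 - 2) & 3 = 2, i.e. peeling two scalar
   iterations brings DR0 to an aligned address.  */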
ebfd146a 1641
b8698a0f 1642 /* For interleaved data access every iteration accesses all the
ebfd146a
IR
1643 members of the group, therefore we divide the number of iterations
1644 by the group size. */
b8698a0f 1645 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
0d0293ac 1646 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
e14c1050 1647 npeel /= GROUP_SIZE (stmt_info);
ebfd146a 1648
73fbfcad 1649 if (dump_enabled_p ())
78c60e3d 1650 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1651 "Try peeling by %d\n", npeel);
ebfd146a
IR
1652 }
1653
1654 /* Ensure that all data refs can be vectorized after the peel. */
9771b263 1655 FOR_EACH_VEC_ELT (datarefs, i, dr)
ebfd146a
IR
1656 {
1657 int save_misalignment;
1658
1659 if (dr == dr0)
1660 continue;
1661
1662 stmt = DR_STMT (dr);
1663 stmt_info = vinfo_for_stmt (stmt);
1664 /* For interleaving, only the alignment of the first access
1665 matters. */
0d0293ac 1666 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
e14c1050 1667 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
ebfd146a
IR
1668 continue;
1669
f2e2a985 1670 /* Strided accesses perform only component accesses, alignment is
319e6439 1671 irrelevant for them. */
f2e2a985 1672 if (STMT_VINFO_STRIDED_P (stmt_info)
7b5fc413 1673 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
319e6439
RG
1674 continue;
1675
ebfd146a
IR
1676 save_misalignment = DR_MISALIGNMENT (dr);
1677 vect_update_misalignment_for_peel (dr, dr0, npeel);
720f5239 1678 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
ebfd146a 1679 SET_DR_MISALIGNMENT (dr, save_misalignment);
b8698a0f 1680
ebfd146a
IR
1681 if (!supportable_dr_alignment)
1682 {
1683 do_peeling = false;
1684 break;
1685 }
1686 }
1687
720f5239
IR
1688 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1689 {
1690 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1691 if (!stat)
1692 do_peeling = false;
1693 else
c7e62a26 1694 {
9771b263 1695 body_cost_vec.release ();
c7e62a26
RG
1696 return stat;
1697 }
720f5239
IR
1698 }
1699
476c1280 1700 /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
4f17aa0b
XDL
1701 if (do_peeling)
1702 {
1703 unsigned max_allowed_peel
1704 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1705 if (max_allowed_peel != (unsigned)-1)
1706 {
1707 unsigned max_peel = npeel;
1708 if (max_peel == 0)
1709 {
1710 gimple dr_stmt = DR_STMT (dr0);
1711 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1712 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1713 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1714 }
1715 if (max_peel > max_allowed_peel)
1716 {
1717 do_peeling = false;
1718 if (dump_enabled_p ())
1719 dump_printf_loc (MSG_NOTE, vect_location,
1720 "Disable peeling, max peels reached: %d\n", max_peel);
1721 }
1722 }
1723 }
1724
476c1280
RB
1725 /* Cost model #2 - if peeling may result in a remaining loop not
1726 iterating enough to be vectorized then do not peel. */
1727 if (do_peeling
1728 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1729 {
1730 unsigned max_peel
1731 = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel;
1732 if (LOOP_VINFO_INT_NITERS (loop_vinfo)
1733 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel)
1734 do_peeling = false;
1735 }
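/* Hypothetical instance of the check above: with a vectorization factor
   of 4 and an unknown peel amount, max_peel = 3; a loop known to run 6
   iterations fails the test (6 < 4 + 3), so peeling is disabled because
   the remaining loop could not be vectorized anyway.  */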
1736
ebfd146a
IR
1737 if (do_peeling)
1738 {
1739 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1740 If the misalignment of DR_i is identical to that of dr0 then set
1741 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1742 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1743 by the peeling factor times the element size of DR_i (MOD the
1744 vectorization factor times the size). Otherwise, the
1745 misalignment of DR_i must be set to unknown. */
9771b263 1746 FOR_EACH_VEC_ELT (datarefs, i, dr)
ebfd146a
IR
1747 if (dr != dr0)
1748 vect_update_misalignment_for_peel (dr, dr0, npeel);
1749
1750 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
720f5239 1751 if (npeel)
15e693cc 1752 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
720f5239 1753 else
15e693cc
RB
1754 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1755 = DR_MISALIGNMENT (dr0);
ebfd146a 1756 SET_DR_MISALIGNMENT (dr0, 0);
73fbfcad 1757 if (dump_enabled_p ())
78c60e3d
SS
1758 {
1759 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1760 "Alignment of access forced using peeling.\n");
78c60e3d 1761 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1762 "Peeling for alignment will be applied.\n");
78c60e3d 1763 }
62c00445
RB
1764 /* The inside-loop cost will be accounted for in vectorizable_load
1765 and vectorizable_store correctly with adjusted alignments.
1766 Drop the body_cost_vec on the floor here. */
1767 body_cost_vec.release ();
c3e7ee41 1768
a70d6342 1769 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
ebfd146a
IR
1770 gcc_assert (stat);
1771 return stat;
1772 }
1773 }
1774
9771b263 1775 body_cost_vec.release ();
ebfd146a
IR
1776
1777 /* (2) Versioning to force alignment. */
1778
1779 /* Try versioning if:
d6d11272
XDL
1780 1) the loop is optimized for speed, and
1781 2) there is at least one unsupported misaligned data ref with an unknown
ebfd146a 1782 misalignment, and
d6d11272
XDL
1783 3) all misaligned data refs with a known misalignment are supported, and
1784 4) the number of runtime alignment checks is within reason. */
ebfd146a 1785
b8698a0f 1786 do_versioning =
d6d11272 1787 optimize_loop_nest_for_speed_p (loop)
ebfd146a
IR
1788 && (!loop->inner); /* FORNOW */
1789
1790 if (do_versioning)
1791 {
9771b263 1792 FOR_EACH_VEC_ELT (datarefs, i, dr)
ebfd146a
IR
1793 {
1794 stmt = DR_STMT (dr);
1795 stmt_info = vinfo_for_stmt (stmt);
1796
1797 /* For interleaving, only the alignment of the first access
1798 matters. */
1799 if (aligned_access_p (dr)
0d0293ac 1800 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
e14c1050 1801 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
ebfd146a
IR
1802 continue;
1803
f2e2a985 1804 if (STMT_VINFO_STRIDED_P (stmt_info))
7b5fc413
RB
1805 {
1806 /* Strided loads perform only component accesses, alignment is
1807 irrelevant for them. */
1808 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1809 continue;
1810 do_versioning = false;
1811 break;
1812 }
319e6439 1813
720f5239 1814 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
ebfd146a
IR
1815
1816 if (!supportable_dr_alignment)
1817 {
1818 gimple stmt;
1819 int mask;
1820 tree vectype;
1821
1822 if (known_alignment_for_access_p (dr)
9771b263 1823 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
ebfd146a
IR
1824 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1825 {
1826 do_versioning = false;
1827 break;
1828 }
1829
1830 stmt = DR_STMT (dr);
1831 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1832 gcc_assert (vectype);
b8698a0f 1833
ebfd146a
IR
1834 /* The rightmost bits of an aligned address must be zeros.
1835 Construct the mask needed for this test. For example,
1836 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1837 mask must be 15 = 0xf. */
1838 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1839
1840 /* FORNOW: use the same mask to test all potentially unaligned
1841 references in the loop. The vectorizer currently supports
1842 a single vector size, see the reference to
1843 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1844 vectorization factor is computed. */
1845 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1846 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1847 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
9771b263
DN
1848 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1849 DR_STMT (dr));
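/* Sketch (hedged) of the runtime test that the recorded mask and
   statements feed into when the loop is later versioned: the aligned
   version is taken only if an expression roughly of the form
   ((uintptr_t) addr_1 | ... | (uintptr_t) addr_n) & mask
   evaluates to zero, e.g. with mask = 15 every recorded address must
   be 16-byte aligned.  */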
ebfd146a
IR
1850 }
1851 }
b8698a0f 1852
ebfd146a 1853 /* Versioning requires at least one misaligned data reference. */
e9dbe7bb 1854 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
ebfd146a
IR
1855 do_versioning = false;
1856 else if (!do_versioning)
9771b263 1857 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
ebfd146a
IR
1858 }
1859
1860 if (do_versioning)
1861 {
9771b263 1862 vec<gimple> may_misalign_stmts
ebfd146a
IR
1863 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1864 gimple stmt;
1865
1866 /* It can now be assumed that the data references in the statements
1867 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1868 of the loop being vectorized. */
9771b263 1869 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
ebfd146a
IR
1870 {
1871 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1872 dr = STMT_VINFO_DATA_REF (stmt_info);
1873 SET_DR_MISALIGNMENT (dr, 0);
73fbfcad 1874 if (dump_enabled_p ())
e645e942
TJ
1875 dump_printf_loc (MSG_NOTE, vect_location,
1876 "Alignment of access forced using versioning.\n");
ebfd146a
IR
1877 }
1878
73fbfcad 1879 if (dump_enabled_p ())
e645e942
TJ
1880 dump_printf_loc (MSG_NOTE, vect_location,
1881 "Versioning for alignment will be applied.\n");
ebfd146a
IR
1882
1883 /* Peeling and versioning can't be done together at this time. */
1884 gcc_assert (! (do_peeling && do_versioning));
1885
a70d6342 1886 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
ebfd146a
IR
1887 gcc_assert (stat);
1888 return stat;
1889 }
1890
1891 /* This point is reached if neither peeling nor versioning is being done. */
1892 gcc_assert (! (do_peeling || do_versioning));
1893
a70d6342 1894 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
ebfd146a
IR
1895 return stat;
1896}
1897
1898
777e1f09
RG
1899/* Function vect_find_same_alignment_drs.
1900
1901 Update group and alignment relations according to the chosen
1902 vectorization factor. */
1903
1904static void
1905vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1906 loop_vec_info loop_vinfo)
1907{
1908 unsigned int i;
1909 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1910 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1911 struct data_reference *dra = DDR_A (ddr);
1912 struct data_reference *drb = DDR_B (ddr);
1913 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1914 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1915 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1916 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1917 lambda_vector dist_v;
1918 unsigned int loop_depth;
1919
1920 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1921 return;
1922
720f5239 1923 if (dra == drb)
777e1f09
RG
1924 return;
1925
1926 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1927 return;
1928
1929 /* Loop-based vectorization and known data dependence. */
1930 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1931 return;
1932
46241ea9
RG
1933 /* Data-dependence analysis reports a distance vector of zero
1934 for data-references that overlap only in the first iteration
1935 but have different sign step (see PR45764).
1936 So as a sanity check require equal DR_STEP. */
1937 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1938 return;
1939
777e1f09 1940 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
9771b263 1941 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
777e1f09
RG
1942 {
1943 int dist = dist_v[loop_depth];
1944
73fbfcad 1945 if (dump_enabled_p ())
78c60e3d 1946 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1947 "dependence distance = %d.\n", dist);
777e1f09
RG
1948
1949 /* Same loop iteration. */
1950 if (dist == 0
1951 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1952 {
1953 /* Two references with distance zero have the same alignment. */
9771b263
DN
1954 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1955 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
73fbfcad 1956 if (dump_enabled_p ())
777e1f09 1957 {
e645e942
TJ
1958 dump_printf_loc (MSG_NOTE, vect_location,
1959 "accesses have the same alignment.\n");
78c60e3d 1960 dump_printf (MSG_NOTE,
e645e942 1961 "dependence distance modulo vf == 0 between ");
78c60e3d
SS
1962 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1963 dump_printf (MSG_NOTE, " and ");
1964 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
e645e942 1965 dump_printf (MSG_NOTE, "\n");
777e1f09
RG
1966 }
1967 }
1968 }
1969}
1970
1971
ebfd146a
IR
1972/* Function vect_analyze_data_refs_alignment
1973
1974 Analyze the alignment of the data-references in the loop.
1975 Return FALSE if a data reference is found that cannot be vectorized. */
1976
1977bool
b8698a0f 1978vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
a70d6342 1979 bb_vec_info bb_vinfo)
ebfd146a 1980{
73fbfcad 1981 if (dump_enabled_p ())
78c60e3d 1982 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 1983 "=== vect_analyze_data_refs_alignment ===\n");
ebfd146a 1984
777e1f09
RG
1985 /* Mark groups of data references with same alignment using
1986 data dependence information. */
1987 if (loop_vinfo)
1988 {
9771b263 1989 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
777e1f09
RG
1990 struct data_dependence_relation *ddr;
1991 unsigned int i;
1992
9771b263 1993 FOR_EACH_VEC_ELT (ddrs, i, ddr)
777e1f09
RG
1994 vect_find_same_alignment_drs (ddr, loop_vinfo);
1995 }
1996
a70d6342 1997 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
ebfd146a 1998 {
73fbfcad 1999 if (dump_enabled_p ())
e645e942
TJ
2000 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2001 "not vectorized: can't calculate alignment "
2002 "for data ref.\n");
ebfd146a
IR
2003 return false;
2004 }
2005
2006 return true;
2007}
2008
2009
0d0293ac
MM
2010/* Analyze groups of accesses: check that DR belongs to a group of
2011 accesses of legal size, step, etc. Detect gaps, single element
2012 interleaving, and other special cases. Set grouped access info.
ebfd146a
IR
2013 Collect groups of strided stores for further use in SLP analysis. */
2014
2015static bool
2016vect_analyze_group_access (struct data_reference *dr)
2017{
2018 tree step = DR_STEP (dr);
2019 tree scalar_type = TREE_TYPE (DR_REF (dr));
2020 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2021 gimple stmt = DR_STMT (dr);
2022 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2023 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 2024 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7b5fc413 2025 HOST_WIDE_INT dr_step = -1;
0d0293ac 2026 HOST_WIDE_INT groupsize, last_accessed_element = 1;
ebfd146a 2027 bool slp_impossible = false;
deaf836c
IR
2028 struct loop *loop = NULL;
2029
2030 if (loop_vinfo)
2031 loop = LOOP_VINFO_LOOP (loop_vinfo);
ebfd146a 2032
0d0293ac
MM
2033 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2034 size of the interleaving group (including gaps). */
7b5fc413
RB
2035 if (tree_fits_shwi_p (step))
2036 {
2037 dr_step = tree_to_shwi (step);
2038 groupsize = absu_hwi (dr_step) / type_size;
2039 }
2040 else
2041 groupsize = 0;
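/* Hypothetical example: a constant step of 32 bytes over 4-byte
   elements gives groupsize = 32 / 4 = 8, i.e. each iteration strides
   over an interleaving group of eight elements, gaps included.  */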
ebfd146a
IR
2042
2043 /* A non-consecutive access is possible only if it is part of an interleaving group. */
e14c1050 2044 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
ebfd146a
IR
2045 {
2046 /* Check if this DR is part of an interleaving group, and is a single
2047 element of the group that is accessed in the loop. */
b8698a0f 2048
ebfd146a
IR
2049 /* Gaps are supported only for loads. STEP must be a multiple of the type
2050 size. The size of the group must be a power of 2. */
2051 if (DR_IS_READ (dr)
2052 && (dr_step % type_size) == 0
0d0293ac
MM
2053 && groupsize > 0
2054 && exact_log2 (groupsize) != -1)
ebfd146a 2055 {
e14c1050 2056 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
0d0293ac 2057 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
73fbfcad 2058 if (dump_enabled_p ())
ebfd146a 2059 {
e645e942
TJ
2060 dump_printf_loc (MSG_NOTE, vect_location,
2061 "Detected single element interleaving ");
78c60e3d
SS
2062 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2063 dump_printf (MSG_NOTE, " step ");
2064 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
e645e942 2065 dump_printf (MSG_NOTE, "\n");
ebfd146a 2066 }
48df3fa6
IR
2067
2068 if (loop_vinfo)
2069 {
73fbfcad 2070 if (dump_enabled_p ())
78c60e3d 2071 dump_printf_loc (MSG_NOTE, vect_location,
e645e942
TJ
2072 "Data access with gaps requires scalar "
2073 "epilogue loop\n");
deaf836c
IR
2074 if (loop->inner)
2075 {
73fbfcad 2076 if (dump_enabled_p ())
78c60e3d
SS
2077 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2078 "Peeling for outer loop is not"
e645e942 2079 " supported\n");
deaf836c
IR
2080 return false;
2081 }
2082
2083 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
48df3fa6
IR
2084 }
2085
ebfd146a
IR
2086 return true;
2087 }
4b5caab7 2088
73fbfcad 2089 if (dump_enabled_p ())
4b5caab7 2090 {
78c60e3d 2091 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
2092 "not consecutive access ");
2093 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2094 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4b5caab7
IR
2095 }
2096
2097 if (bb_vinfo)
2098 {
2099 /* Mark the statement as unvectorizable. */
2100 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2101 return true;
2102 }
78c60e3d 2103
ebfd146a
IR
2104 return false;
2105 }
2106
e14c1050 2107 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
ebfd146a
IR
2108 {
2109 /* First stmt in the interleaving chain. Check the chain. */
e14c1050 2110 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
ebfd146a 2111 struct data_reference *data_ref = dr;
df398a37 2112 unsigned int count = 1;
ebfd146a
IR
2113 tree prev_init = DR_INIT (data_ref);
2114 gimple prev = stmt;
08940f33 2115 HOST_WIDE_INT diff, gaps = 0;
ebfd146a
IR
2116
2117 while (next)
2118 {
ff802fa1
IR
2119 /* Skip same data-refs. In case that two or more stmts share
2120 data-ref (supported only for loads), we vectorize only the first
2121 stmt, and the rest get their vectorized loads from the first
2122 one. */
ebfd146a
IR
2123 if (!tree_int_cst_compare (DR_INIT (data_ref),
2124 DR_INIT (STMT_VINFO_DATA_REF (
2125 vinfo_for_stmt (next)))))
2126 {
b0af49c4 2127 if (DR_IS_WRITE (data_ref))
ebfd146a 2128 {
73fbfcad 2129 if (dump_enabled_p ())
e645e942
TJ
2130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2131 "Two store stmts share the same dr.\n");
ebfd146a
IR
2132 return false;
2133 }
2134
ebfd146a 2135 /* For load use the same data-ref load. */
e14c1050 2136 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
ebfd146a
IR
2137
2138 prev = next;
e14c1050 2139 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
ebfd146a
IR
2140 continue;
2141 }
48df3fa6 2142
ebfd146a 2143 prev = next;
08940f33 2144 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
ebfd146a 2145
08940f33
RB
2146 /* All group members have the same STEP by construction. */
2147 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
ebfd146a 2148
ebfd146a
IR
2149 /* Check that the distance between two accesses is equal to the type
2150 size. Otherwise, we have gaps. */
2151 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2152 - TREE_INT_CST_LOW (prev_init)) / type_size;
2153 if (diff != 1)
2154 {
2155 /* FORNOW: SLP of accesses with gaps is not supported. */
2156 slp_impossible = true;
b0af49c4 2157 if (DR_IS_WRITE (data_ref))
ebfd146a 2158 {
73fbfcad 2159 if (dump_enabled_p ())
e645e942
TJ
2160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2161 "interleaved store with gaps\n");
ebfd146a
IR
2162 return false;
2163 }
4da39468
IR
2164
2165 gaps += diff - 1;
ebfd146a
IR
2166 }
2167
48df3fa6
IR
2168 last_accessed_element += diff;
2169
ebfd146a 2170 /* Store the gap from the previous member of the group. If there is no
e14c1050
IR
2171 gap in the access, GROUP_GAP is always 1. */
2172 GROUP_GAP (vinfo_for_stmt (next)) = diff;
ebfd146a
IR
2173
2174 prev_init = DR_INIT (data_ref);
e14c1050 2175 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
ebfd146a
IR
2176 /* Count the number of data-refs in the chain. */
2177 count++;
2178 }
2179
7b5fc413
RB
2180 if (groupsize == 0)
2181 groupsize = count + gaps;
ebfd146a 2182
7b5fc413 2183 /* Check that the size of the interleaving is equal to count for stores,
ebfd146a 2184 i.e., that there are no gaps. */
e004aa11
RB
2185 if (groupsize != count
2186 && !DR_IS_READ (dr))
ebfd146a 2187 {
e004aa11
RB
2188 if (dump_enabled_p ())
2189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2190 "interleaved store with gaps\n");
2191 return false;
2192 }
2193
2194 /* If there is a gap after the last load in the group it is the
2195 difference between the groupsize and the last accessed
2196 element.
2197 When there is no gap, this difference should be 0. */
2198 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
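/* E.g. (hypothetical), a group of size 8 whose last accessed element is
   the 6th leaves GROUP_GAP = 8 - 6 = 2 elements after the group.  */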
ebfd146a 2199
0d0293ac 2200 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
73fbfcad 2201 if (dump_enabled_p ())
e004aa11
RB
2202 {
2203 dump_printf_loc (MSG_NOTE, vect_location,
2204 "Detected interleaving of size %d starting with ",
2205 (int)groupsize);
2206 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2207 if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
2208 dump_printf_loc (MSG_NOTE, vect_location,
2209 "There is a gap of %d elements after the group\n",
2210 (int)GROUP_GAP (vinfo_for_stmt (stmt)));
2211 }
ebfd146a 2212
b8698a0f 2213 /* SLP: create an SLP data structure for every interleaving group of
ebfd146a 2214 stores for further analysis in vect_analyse_slp. */
b0af49c4 2215 if (DR_IS_WRITE (dr) && !slp_impossible)
a70d6342
IR
2216 {
2217 if (loop_vinfo)
9771b263 2218 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
a70d6342 2219 if (bb_vinfo)
9771b263 2220 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
a70d6342 2221 }
48df3fa6 2222
91ff1504
RB
2223 /* If there is a gap at the end of the group or the group size cannot
2224 be made a multiple of the vector element count then we access excess
2225 elements in the last iteration and thus need to peel that off. */
2226 if (loop_vinfo
2227 && (groupsize - last_accessed_element > 0
2228 || exact_log2 (groupsize) == -1))
2229
48df3fa6 2230 {
73fbfcad 2231 if (dump_enabled_p ())
78c60e3d 2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942
TJ
2233 "Data access with gaps requires scalar "
2234 "epilogue loop\n");
deaf836c
IR
2235 if (loop->inner)
2236 {
73fbfcad 2237 if (dump_enabled_p ())
e645e942
TJ
2238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2239 "Peeling for outer loop is not supported\n");
deaf836c
IR
2240 return false;
2241 }
2242
2243 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
48df3fa6 2244 }
ebfd146a
IR
2245 }
2246
2247 return true;
2248}
2249
2250
2251/* Analyze the access pattern of the data-reference DR.
2252 In case of non-consecutive accesses call vect_analyze_group_access() to
0d0293ac 2253 analyze groups of accesses. */
ebfd146a
IR
2254
2255static bool
2256vect_analyze_data_ref_access (struct data_reference *dr)
2257{
2258 tree step = DR_STEP (dr);
2259 tree scalar_type = TREE_TYPE (DR_REF (dr));
2260 gimple stmt = DR_STMT (dr);
2261 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2262 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342 2263 struct loop *loop = NULL;
ebfd146a 2264
a70d6342
IR
2265 if (loop_vinfo)
2266 loop = LOOP_VINFO_LOOP (loop_vinfo);
b8698a0f 2267
a70d6342 2268 if (loop_vinfo && !step)
ebfd146a 2269 {
73fbfcad 2270 if (dump_enabled_p ())
e645e942
TJ
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "bad data-ref access in loop\n");
ebfd146a
IR
2273 return false;
2274 }
2275
c134cf2a 2276 /* Allow loads with zero step in inner-loop vectorization. */
319e6439 2277 if (loop_vinfo && integer_zerop (step))
39becbac
RG
2278 {
2279 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
c134cf2a
YR
2280 if (!nested_in_vect_loop_p (loop, stmt))
2281 return DR_IS_READ (dr);
2282 /* Allow references with zero step for outer loops marked
2283 with pragma omp simd only; this guarantees the absence of
2284 loop-carried dependencies between inner loop iterations. */
2285 if (!loop->force_vectorize)
6e8dad05
RB
2286 {
2287 if (dump_enabled_p ())
2288 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 2289 "zero step in inner loop of nest\n");
6e8dad05
RB
2290 return false;
2291 }
39becbac 2292 }
ebfd146a 2293
a70d6342 2294 if (loop && nested_in_vect_loop_p (loop, stmt))
ebfd146a
IR
2295 {
2296 /* Interleaved accesses are not yet supported within outer-loop
2297 vectorization for references in the inner-loop. */
e14c1050 2298 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
ebfd146a
IR
2299
2300 /* For the rest of the analysis we use the outer-loop step. */
2301 step = STMT_VINFO_DR_STEP (stmt_info);
319e6439 2302 if (integer_zerop (step))
ebfd146a 2303 {
73fbfcad 2304 if (dump_enabled_p ())
78c60e3d 2305 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 2306 "zero step in outer loop.\n");
ebfd146a 2307 if (DR_IS_READ (dr))
b8698a0f 2308 return true;
ebfd146a
IR
2309 else
2310 return false;
2311 }
2312 }
2313
2314 /* Consecutive? */
319e6439 2315 if (TREE_CODE (step) == INTEGER_CST)
ebfd146a 2316 {
319e6439
RG
2317 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2318 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2319 || (dr_step < 0
2320 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2321 {
2322 /* Mark that it is not interleaving. */
2323 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2324 return true;
2325 }
ebfd146a
IR
2326 }
2327
a70d6342 2328 if (loop && nested_in_vect_loop_p (loop, stmt))
ebfd146a 2329 {
73fbfcad 2330 if (dump_enabled_p ())
78c60e3d 2331 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 2332 "grouped access in outer loop.\n");
ebfd146a
IR
2333 return false;
2334 }
2335
7b5fc413 2336
319e6439
RG
2337 /* Assume this is a DR handled by non-constant strided load case. */
2338 if (TREE_CODE (step) != INTEGER_CST)
f2e2a985 2339 return (STMT_VINFO_STRIDED_P (stmt_info)
7b5fc413
RB
2340 && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
2341 || vect_analyze_group_access (dr)));
319e6439 2342
ebfd146a
IR
2343 /* Not consecutive access - check if it's a part of interleaving group. */
2344 return vect_analyze_group_access (dr);
2345}
2346
839c74bc
CH
2347
2348
2349/* A helper function used in the comparator function to sort data
2350 references. T1 and T2 are two data references to be compared.
2351 The function returns -1, 0, or 1. */
2352
2353static int
2354compare_tree (tree t1, tree t2)
2355{
2356 int i, cmp;
2357 enum tree_code code;
2358 char tclass;
2359
2360 if (t1 == t2)
2361 return 0;
2362 if (t1 == NULL)
2363 return -1;
2364 if (t2 == NULL)
2365 return 1;
2366
2367
2368 if (TREE_CODE (t1) != TREE_CODE (t2))
2369 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2370
2371 code = TREE_CODE (t1);
2372 switch (code)
2373 {
2374 /* For const values, we can just use hash values for comparisons. */
2375 case INTEGER_CST:
2376 case REAL_CST:
2377 case FIXED_CST:
2378 case STRING_CST:
2379 case COMPLEX_CST:
2380 case VECTOR_CST:
2381 {
2382 hashval_t h1 = iterative_hash_expr (t1, 0);
2383 hashval_t h2 = iterative_hash_expr (t2, 0);
2384 if (h1 != h2)
2385 return h1 < h2 ? -1 : 1;
2386 break;
2387 }
2388
2389 case SSA_NAME:
2390 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2391 if (cmp != 0)
2392 return cmp;
2393
2394 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2395 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2396 break;
2397
2398 default:
2399 tclass = TREE_CODE_CLASS (code);
2400
2401 /* For var-decl, we could compare their UIDs. */
2402 if (tclass == tcc_declaration)
2403 {
2404 if (DECL_UID (t1) != DECL_UID (t2))
2405 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2406 break;
2407 }
2408
2409 /* For expressions with operands, compare their operands recursively. */
2410 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2411 {
2412 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2413 if (cmp != 0)
2414 return cmp;
2415 }
2416 }
2417
2418 return 0;
2419}
2420
2421
5abe1e05
RB
2422/* Compare two data-references DRA and DRB to sort them into chunks
2423 suitable for grouping. */
2424
2425static int
2426dr_group_sort_cmp (const void *dra_, const void *drb_)
2427{
2428 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2429 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
5abe1e05
RB
2430 int cmp;
2431
2432 /* Stabilize sort. */
2433 if (dra == drb)
2434 return 0;
2435
2436 /* Ordering of DRs according to base. */
2437 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2438 {
839c74bc
CH
2439 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2440 if (cmp != 0)
2441 return cmp;
5abe1e05
RB
2442 }
2443
2444 /* And according to DR_OFFSET. */
2445 if (!dr_equal_offsets_p (dra, drb))
2446 {
839c74bc
CH
2447 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2448 if (cmp != 0)
2449 return cmp;
5abe1e05
RB
2450 }
2451
2452 /* Put reads before writes. */
2453 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2454 return DR_IS_READ (dra) ? -1 : 1;
2455
2456 /* Then sort after access size. */
2457 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2458 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2459 {
839c74bc
CH
2460 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2461 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2462 if (cmp != 0)
2463 return cmp;
5abe1e05
RB
2464 }
2465
2466 /* And after step. */
2467 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2468 {
839c74bc
CH
2469 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2470 if (cmp != 0)
2471 return cmp;
5abe1e05
RB
2472 }
2473
2474 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2475 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2476 if (cmp == 0)
2477 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2478 return cmp;
2479}
ebfd146a
IR
2480
2481/* Function vect_analyze_data_ref_accesses.
2482
2483 Analyze the access pattern of all the data references in the loop.
2484
2485 FORNOW: the only access pattern that is considered vectorizable is a
2486 simple step 1 (consecutive) access.
2487
2488 FORNOW: handle only arrays and pointer accesses. */
2489
2490bool
a70d6342 2491vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
ebfd146a
IR
2492{
2493 unsigned int i;
9771b263 2494 vec<data_reference_p> datarefs;
ebfd146a
IR
2495 struct data_reference *dr;
2496
73fbfcad 2497 if (dump_enabled_p ())
78c60e3d 2498 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 2499 "=== vect_analyze_data_ref_accesses ===\n");
ebfd146a 2500
a70d6342
IR
2501 if (loop_vinfo)
2502 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2503 else
2504 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2505
5abe1e05
RB
2506 if (datarefs.is_empty ())
2507 return true;
2508
2509 /* Sort the array of datarefs to make building the interleaving chains
3d54b29d
JJ
2510 linear. Don't modify the original vector's order, it is needed for
2511 determining what dependencies are reversed. */
2512 vec<data_reference_p> datarefs_copy = datarefs.copy ();
75509ba2 2513 datarefs_copy.qsort (dr_group_sort_cmp);
5abe1e05
RB
2514
2515 /* Build the interleaving chains. */
3d54b29d 2516 for (i = 0; i < datarefs_copy.length () - 1;)
5abe1e05 2517 {
3d54b29d 2518 data_reference_p dra = datarefs_copy[i];
5abe1e05
RB
2519 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2520 stmt_vec_info lastinfo = NULL;
3d54b29d 2521 for (i = i + 1; i < datarefs_copy.length (); ++i)
5abe1e05 2522 {
3d54b29d 2523 data_reference_p drb = datarefs_copy[i];
5abe1e05
RB
2524 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2525
2526 /* ??? Imperfect sorting (non-compatible types, non-modulo
2527 accesses, same accesses) can lead to a group being artificially
2528 split here as we don't just skip over those. If it really
2529 matters we can push those to a worklist and re-iterate
2530 over them. Then we can just skip ahead to the next DR here. */
2531
2532 /* Check that the data-refs have same first location (except init)
61331c48
JJ
2533 and they are both either store or load (not load and store,
2534 not masked loads or stores). */
5abe1e05
RB
2535 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2536 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2537 DR_BASE_ADDRESS (drb), 0)
61331c48
JJ
2538 || !dr_equal_offsets_p (dra, drb)
2539 || !gimple_assign_single_p (DR_STMT (dra))
2540 || !gimple_assign_single_p (DR_STMT (drb)))
5abe1e05
RB
2541 break;
2542
7b5fc413 2543 /* Check that the data-refs have the same constant size. */
5abe1e05
RB
2544 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2545 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
cc269bb6
RS
2546 if (!tree_fits_uhwi_p (sza)
2547 || !tree_fits_uhwi_p (szb)
7b5fc413
RB
2548 || !tree_int_cst_equal (sza, szb))
2549 break;
2550
2551 /* Check that the data-refs have the same step. */
2552 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
5abe1e05
RB
2553 break;
2554
2555 /* Do not place the same access in the interleaving chain twice. */
2556 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2557 break;
2558
2559 /* Check the types are compatible.
2560 ??? We don't distinguish this during sorting. */
2561 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2562 TREE_TYPE (DR_REF (drb))))
2563 break;
2564
2565 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2566 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2567 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2568 gcc_assert (init_a < init_b);
2569
2570 /* If init_b == init_a + the size of the type * k, we have an
2571 interleaving, and DRA is accessed before DRB. */
eb1ce453 2572 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
5abe1e05
RB
2573 if ((init_b - init_a) % type_size_a != 0)
2574 break;
2575
78a8b26c
RB
2576 /* If we have a store, the accesses are adjacent. This splits
2577 groups into chunks we support (we don't support vectorization
2578 of stores with gaps). */
2579 if (!DR_IS_READ (dra)
2580 && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
2581 (DR_INIT (datarefs_copy[i-1]))
2582 != type_size_a))
2583 break;
2584
7b5fc413
RB
2585 /* If the step (when nonzero and constant) is greater than the
2586 difference between the data-refs' inits, this splits groups into
2587 suitable sizes. */
2588 if (tree_fits_shwi_p (DR_STEP (dra)))
2589 {
2590 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2591 if (step != 0 && step <= (init_b - init_a))
2592 break;
2593 }
5abe1e05
RB
2594
2595 if (dump_enabled_p ())
2596 {
2597 dump_printf_loc (MSG_NOTE, vect_location,
2598 "Detected interleaving ");
2599 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2600 dump_printf (MSG_NOTE, " and ");
2601 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
e645e942 2602 dump_printf (MSG_NOTE, "\n");
5abe1e05
RB
2603 }
2604
2605 /* Link the found element into the group list. */
2606 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2607 {
2608 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2609 lastinfo = stmtinfo_a;
2610 }
2611 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2612 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2613 lastinfo = stmtinfo_b;
2614 }
2615 }
2616
3d54b29d 2617 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
4b5caab7
IR
2618 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2619 && !vect_analyze_data_ref_access (dr))
ebfd146a 2620 {
73fbfcad 2621 if (dump_enabled_p ())
e645e942
TJ
2622 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2623 "not vectorized: complicated access pattern.\n");
4b5caab7
IR
2624
2625 if (bb_vinfo)
2626 {
2627 /* Mark the statement as not vectorizable. */
2628 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2629 continue;
2630 }
2631 else
3d54b29d
JJ
2632 {
2633 datarefs_copy.release ();
2634 return false;
2635 }
ebfd146a
IR
2636 }
2637
3d54b29d 2638 datarefs_copy.release ();
ebfd146a
IR
2639 return true;
2640}
2641
a05a89fa 2642
93bdc3ed 2643/* Operator == between two dr_with_seg_len objects.
a05a89fa
CH
2644
2645 This equality operator is used to make sure two data refs
2646 are the same, so that we will consider combining the
2647 aliasing checks of those two pairs of data-dependent data
2648 refs. */
2649
2650static bool
93bdc3ed
CH
2651operator == (const dr_with_seg_len& d1,
2652 const dr_with_seg_len& d2)
a05a89fa 2653{
93bdc3ed
CH
2654 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2655 DR_BASE_ADDRESS (d2.dr), 0)
2656 && compare_tree (d1.offset, d2.offset) == 0
2657 && compare_tree (d1.seg_len, d2.seg_len) == 0;
a05a89fa
CH
2658}
2659
93bdc3ed 2660/* Function comp_dr_with_seg_len_pair.
a05a89fa 2661
93bdc3ed 2662 Comparison function for sorting objects of dr_with_seg_len_pair_t
a05a89fa
CH
2663 so that we can combine aliasing checks in one scan. */
2664
2665static int
93bdc3ed 2666comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
a05a89fa 2667{
93bdc3ed
CH
2668 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2669 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2670
2671 const dr_with_seg_len &p11 = p1->first,
2672 &p12 = p1->second,
2673 &p21 = p2->first,
2674 &p22 = p2->second;
2675
2676 /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2677 if a and c have the same base address and step, and b and d have the same
2678 address and step. Therefore, if either a&c or b&d doesn't have the same
2679 address and step, we don't care about the order of those two pairs after sorting. */
2680 int comp_res;
2681
2682 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2683 DR_BASE_ADDRESS (p21.dr))) != 0)
a05a89fa 2684 return comp_res;
93bdc3ed
CH
2685 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2686 DR_BASE_ADDRESS (p22.dr))) != 0)
2687 return comp_res;
2688 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2689 return comp_res;
2690 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2691 return comp_res;
2692 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2693 return comp_res;
2694 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
a05a89fa 2695 return comp_res;
a05a89fa
CH
2696
2697 return 0;
2698}
2699
a05a89fa
CH
2700/* Function vect_vfa_segment_size.
2701
2702 Create an expression that computes the size of the segment
2703 that will be accessed for a data reference. The function takes into
2704 account that realignment loads may access one more vector.
2705
2706 Input:
2707 DR: The data reference.
2708 LENGTH_FACTOR: segment length to consider.
2709
2710 Return an expression whose value is the size of segment which will be
2711 accessed by DR. */
2712
2713static tree
2714vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2715{
2716 tree segment_length;
2717
2718 if (integer_zerop (DR_STEP (dr)))
2719 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2720 else
2721 segment_length = size_binop (MULT_EXPR,
93bdc3ed
CH
2722 fold_convert (sizetype, DR_STEP (dr)),
2723 fold_convert (sizetype, length_factor));
a05a89fa
CH
2724
2725 if (vect_supportable_dr_alignment (dr, false)
93bdc3ed 2726 == dr_explicit_realign_optimized)
a05a89fa
CH
2727 {
2728 tree vector_size = TYPE_SIZE_UNIT
2729 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2730
2731 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2732 }
2733 return segment_length;
2734}
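/* Hypothetical instance of the computation above: a DR with a 4-byte
   step and length_factor equal to a vectorization factor of 8 gets a
   32-byte segment; if the access uses the optimized explicit
   realignment scheme, one extra vector (e.g. 16 bytes) is added.  */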
2735
ebfd146a
IR
2736/* Function vect_prune_runtime_alias_test_list.
2737
2738 Prune a list of ddrs to be tested at run-time by versioning for alias.
a05a89fa 2739 Merge several alias checks into one if possible.
ebfd146a
IR
2740 Return FALSE if the resulting list of ddrs is longer than allowed by
2741 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2742
2743bool
2744vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2745{
a05a89fa 2746 vec<ddr_p> may_alias_ddrs =
ebfd146a 2747 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
93bdc3ed 2748 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
a05a89fa
CH
2749 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2750 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2751 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2752
2753 ddr_p ddr;
2754 unsigned int i;
2755 tree length_factor;
ebfd146a 2756
73fbfcad 2757 if (dump_enabled_p ())
78c60e3d 2758 dump_printf_loc (MSG_NOTE, vect_location,
e645e942 2759 "=== vect_prune_runtime_alias_test_list ===\n");
ebfd146a 2760
a05a89fa
CH
2761 if (may_alias_ddrs.is_empty ())
2762 return true;
2763
2764 /* Basically, for each pair of dependent data refs store_ptr_0
2765 and load_ptr_0, we create an expression:
2766
2767 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2768 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2769
2770 for aliasing checks. However, in some cases we can decrease
2771 the number of checks by combining two checks into one. For
2772 example, suppose we have another pair of data refs store_ptr_0
2773 and load_ptr_1, and if the following condition is satisfied:
2774
2775 load_ptr_0 < load_ptr_1 &&
2776 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2777
2778 (this condition means, in each iteration of the vectorized loop,
2779 the accessed memory of store_ptr_0 cannot be between the memory
2780 of load_ptr_0 and load_ptr_1.)
2781
2782 we can then use only the following expression to finish the
2783 aliasing checks between store_ptr_0 & load_ptr_0 and
2784 store_ptr_0 & load_ptr_1:
2785
2786 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2787 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
2788
2789 Note that we only consider that load_ptr_0 and load_ptr_1 have the
2790 same basic address. */
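/* A concrete (hypothetical) instance: if load_ptr_1 = load_ptr_0 + 16,
   load_segment_length_0 = 16 and store_segment_length_0 = 16, then
   16 - 16 = 0 < 16 holds, so the two checks against store_ptr_0
   collapse into the single range test above, spanning from load_ptr_0
   up to load_ptr_1 + load_segment_length_1.  */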
2791
2792 comp_alias_ddrs.create (may_alias_ddrs.length ());
2793
2794 /* First, we collect all data ref pairs for aliasing checks. */
2795 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
ebfd146a 2796 {
a05a89fa
CH
2797 struct data_reference *dr_a, *dr_b;
2798 gimple dr_group_first_a, dr_group_first_b;
2799 tree segment_length_a, segment_length_b;
2800 gimple stmt_a, stmt_b;
2801
2802 dr_a = DDR_A (ddr);
2803 stmt_a = DR_STMT (DDR_A (ddr));
2804 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2805 if (dr_group_first_a)
2806 {
2807 stmt_a = dr_group_first_a;
2808 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2809 }
ebfd146a 2810
a05a89fa
CH
2811 dr_b = DDR_B (ddr);
2812 stmt_b = DR_STMT (DDR_B (ddr));
2813 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2814 if (dr_group_first_b)
2815 {
2816 stmt_b = dr_group_first_b;
2817 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2818 }
ebfd146a 2819
a05a89fa
CH
2820 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2821 length_factor = scalar_loop_iters;
2822 else
2823 length_factor = size_int (vect_factor);
2824 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2825 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2826
93bdc3ed
CH
2827 dr_with_seg_len_pair_t dr_with_seg_len_pair
2828 (dr_with_seg_len (dr_a, segment_length_a),
2829 dr_with_seg_len (dr_b, segment_length_b));
2830
2831 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
9310366b 2832 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
a05a89fa
CH
2833
2834 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2835 }
2836
2837 /* Second, we sort the collected data ref pairs so that we can scan
2838 them once to combine all possible aliasing checks. */
93bdc3ed 2839 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
ebfd146a 2840
a05a89fa
CH
2841 /* Third, we scan the sorted dr pairs and check if we can combine
2842 alias checks of two neighbouring dr pairs. */
2843 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2844 {
2845 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
93bdc3ed
CH
2846 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2847 *dr_b1 = &comp_alias_ddrs[i-1].second,
2848 *dr_a2 = &comp_alias_ddrs[i].first,
2849 *dr_b2 = &comp_alias_ddrs[i].second;
a05a89fa
CH
2850
2851 /* Remove duplicate data ref pairs. */
2852 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2853 {
2854 if (dump_enabled_p ())
ebfd146a 2855 {
a05a89fa
CH
2856 dump_printf_loc (MSG_NOTE, vect_location,
2857 "found equal ranges ");
2858 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2859 DR_REF (dr_a1->dr));
2860 dump_printf (MSG_NOTE, ", ");
2861 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2862 DR_REF (dr_b1->dr));
2863 dump_printf (MSG_NOTE, " and ");
2864 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2865 DR_REF (dr_a2->dr));
2866 dump_printf (MSG_NOTE, ", ");
2867 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2868 DR_REF (dr_b2->dr));
2869 dump_printf (MSG_NOTE, "\n");
ebfd146a 2870 }
a05a89fa
CH
2871
2872 comp_alias_ddrs.ordered_remove (i--);
2873 continue;
ebfd146a 2874 }
b8698a0f 2875
a05a89fa
CH
2876 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2877 {
2878 /* We consider the case that DR_B1 and DR_B2 are the same memref,
2879 and DR_A1 and DR_A2 are two consecutive memrefs. */
2880 if (*dr_a1 == *dr_a2)
2881 {
9310366b
UB
2882 std::swap (dr_a1, dr_b1);
2883 std::swap (dr_a2, dr_b2);
a05a89fa
CH
2884 }
2885
93bdc3ed
CH
2886 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2887 DR_BASE_ADDRESS (dr_a2->dr),
2888 0)
9541ffee
RS
2889 || !tree_fits_shwi_p (dr_a1->offset)
2890 || !tree_fits_shwi_p (dr_a2->offset))
a05a89fa
CH
2891 continue;
2892
eb1ce453
KZ
2893 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2894 - tree_to_shwi (dr_a1->offset));
a05a89fa
CH
2895
2896
2897 /* Now we check if the following condition is satisfied:
2898
2899 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2900
2901 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2902 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2903 have to make a best estimate. We can get the minimum value
2904 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2905 then either of the following two conditions can guarantee the
2906 one above:
2907
2908 1: DIFF <= MIN_SEG_LEN_B
2909 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2910
2911 */
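/* Hypothetical numbers: DIFF = 4 with MIN_SEG_LEN_B = 16 satisfies
   condition 1 (4 <= 16); DIFF = 20 with SEGMENT_LENGTH_A = 16
   satisfies condition 2 (20 - 16 = 4 < 16). In either case the pairs
   are merged below and dr_a1->seg_len becomes dr_a2->seg_len + DIFF.  */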
2912
807e902e
KZ
2913 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2914 ? tree_to_shwi (dr_b1->seg_len)
2915 : vect_factor);
a05a89fa
CH
2916
2917 if (diff <= min_seg_len_b
807e902e
KZ
2918 || (tree_fits_shwi_p (dr_a1->seg_len)
2919 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
a05a89fa 2920 {
d55d9ed0
RB
2921 if (dump_enabled_p ())
2922 {
2923 dump_printf_loc (MSG_NOTE, vect_location,
2924 "merging ranges for ");
2925 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2926 DR_REF (dr_a1->dr));
2927 dump_printf (MSG_NOTE, ", ");
2928 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2929 DR_REF (dr_b1->dr));
2930 dump_printf (MSG_NOTE, " and ");
2931 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2932 DR_REF (dr_a2->dr));
2933 dump_printf (MSG_NOTE, ", ");
2934 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2935 DR_REF (dr_b2->dr));
2936 dump_printf (MSG_NOTE, "\n");
2937 }
2938
a05a89fa
CH
2939 dr_a1->seg_len = size_binop (PLUS_EXPR,
2940 dr_a2->seg_len, size_int (diff));
2941 comp_alias_ddrs.ordered_remove (i--);
2942 }
2943 }
ebfd146a
IR
2944 }
2945
d55d9ed0
RB
2946 dump_printf_loc (MSG_NOTE, vect_location,
2947 "improved number of alias checks from %d to %d\n",
2948 may_alias_ddrs.length (), comp_alias_ddrs.length ());
a05a89fa
CH
2949 if ((int) comp_alias_ddrs.length () >
2950 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
d55d9ed0 2951 return false;
ebfd146a
IR
2952
2953 return true;
2954}
2955
aec7ae7d
JJ
2956/* Check whether a non-affine read in stmt is suitable for gather load
2957 and if so, return a builtin decl for that operation. */
2958
2959tree
2960vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2961 tree *offp, int *scalep)
2962{
2963 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2964 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2965 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2966 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2967 tree offtype = NULL_TREE;
2968 tree decl, base, off;
ef4bddc2 2969 machine_mode pmode;
aec7ae7d
JJ
2970 int punsignedp, pvolatilep;
2971
5ce9450f
JJ
2972 base = DR_REF (dr);
2973 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2974 see if we can use the def stmt of the address. */
2975 if (is_gimple_call (stmt)
2976 && gimple_call_internal_p (stmt)
2977 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2978 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2979 && TREE_CODE (base) == MEM_REF
2980 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
2981 && integer_zerop (TREE_OPERAND (base, 1))
2982 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
2983 {
2984 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
2985 if (is_gimple_assign (def_stmt)
2986 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
2987 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
2988 }
2989
aec7ae7d
JJ
2990 /* The gather builtins need address of the form
2991 loop_invariant + vector * {1, 2, 4, 8}
2992 or
2993 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
2994 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
2995 of loop invariants/SSA_NAMEs defined in the loop, with casts,
2996 multiplications and additions in it. To get a vector, we need
2997 a single SSA_NAME that will be defined in the loop and will
2998 contain everything that is not loop invariant and that can be
2999 vectorized. The following code attempts to find such a preexisting
3000 SSA_NAME OFF and put the loop invariants into a tree BASE
3001 that can be gimplified before the loop. */
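/* For illustration (hypothetical source code): for a read such as
   p[k[i]] with 4-byte elements, the code below would typically leave
   BASE = p plus any constant offset, OFF = the SSA_NAME holding k[i]
   (possibly sign-extended), and SCALE = 4, matching the
   loop_invariant + vector * {1, 2, 4, 8} form described above.  */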
5ce9450f 3002 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
b3ecff82 3003 &pmode, &punsignedp, &pvolatilep, false);
aec7ae7d
JJ
3004 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3005
3006 if (TREE_CODE (base) == MEM_REF)
3007 {
3008 if (!integer_zerop (TREE_OPERAND (base, 1)))
3009 {
3010 if (off == NULL_TREE)
3011 {
807e902e
KZ
3012 offset_int moff = mem_ref_offset (base);
3013 off = wide_int_to_tree (sizetype, moff);
aec7ae7d
JJ
3014 }
3015 else
3016 off = size_binop (PLUS_EXPR, off,
3017 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3018 }
3019 base = TREE_OPERAND (base, 0);
3020 }
3021 else
3022 base = build_fold_addr_expr (base);
3023
3024 if (off == NULL_TREE)
3025 off = size_zero_node;
3026
3027 /* If base is not loop invariant, then if off is 0 we start with just
3028 the constant offset in the loop-invariant BASE and continue with base
3029 as OFF; otherwise give up.
3030 We could handle that case by gimplifying the addition of base + off
3031 into some SSA_NAME and use that as off, but for now punt. */
3032 if (!expr_invariant_in_loop_p (loop, base))
3033 {
3034 if (!integer_zerop (off))
3035 return NULL_TREE;
3036 off = base;
3037 base = size_int (pbitpos / BITS_PER_UNIT);
3038 }
3039 /* Otherwise put base + constant offset into the loop invariant BASE
3040 and continue with OFF. */
3041 else
3042 {
3043 base = fold_convert (sizetype, base);
3044 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3045 }
3046
3047 /* OFF at this point may be either a SSA_NAME or some tree expression
3048 from get_inner_reference. Try to peel off loop invariants from it
3049 into BASE as long as possible. */
3050 STRIP_NOPS (off);
3051 while (offtype == NULL_TREE)
3052 {
3053 enum tree_code code;
3054 tree op0, op1, add = NULL_TREE;
3055
3056 if (TREE_CODE (off) == SSA_NAME)
3057 {
3058 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3059
3060 if (expr_invariant_in_loop_p (loop, off))
3061 return NULL_TREE;
3062
3063 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3064 break;
3065
3066 op0 = gimple_assign_rhs1 (def_stmt);
3067 code = gimple_assign_rhs_code (def_stmt);
3068 op1 = gimple_assign_rhs2 (def_stmt);
3069 }
3070 else
3071 {
3072 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3073 return NULL_TREE;
3074 code = TREE_CODE (off);
3075 extract_ops_from_tree (off, &code, &op0, &op1);
3076 }
3077 switch (code)
3078 {
3079 case POINTER_PLUS_EXPR:
3080 case PLUS_EXPR:
3081 if (expr_invariant_in_loop_p (loop, op0))
3082 {
3083 add = op0;
3084 off = op1;
3085 do_add:
3086 add = fold_convert (sizetype, add);
3087 if (scale != 1)
3088 add = size_binop (MULT_EXPR, add, size_int (scale));
3089 base = size_binop (PLUS_EXPR, base, add);
3090 continue;
3091 }
3092 if (expr_invariant_in_loop_p (loop, op1))
3093 {
3094 add = op1;
3095 off = op0;
3096 goto do_add;
3097 }
3098 break;
3099 case MINUS_EXPR:
3100 if (expr_invariant_in_loop_p (loop, op1))
3101 {
3102 add = fold_convert (sizetype, op1);
3103 add = size_binop (MINUS_EXPR, size_zero_node, add);
3104 off = op0;
3105 goto do_add;
3106 }
3107 break;
3108 case MULT_EXPR:
9541ffee 3109 if (scale == 1 && tree_fits_shwi_p (op1))
aec7ae7d 3110 {
9439e9a1 3111 scale = tree_to_shwi (op1);
aec7ae7d
JJ
3112 off = op0;
3113 continue;
3114 }
3115 break;
3116 case SSA_NAME:
3117 off = op0;
3118 continue;
3119 CASE_CONVERT:
3120 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3121 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3122 break;
3123 if (TYPE_PRECISION (TREE_TYPE (op0))
3124 == TYPE_PRECISION (TREE_TYPE (off)))
3125 {
3126 off = op0;
3127 continue;
3128 }
3129 if (TYPE_PRECISION (TREE_TYPE (op0))
3130 < TYPE_PRECISION (TREE_TYPE (off)))
3131 {
3132 off = op0;
3133 offtype = TREE_TYPE (off);
3134 STRIP_NOPS (off);
3135 continue;
3136 }
3137 break;
3138 default:
3139 break;
3140 }
3141 break;
3142 }
3143
3144 /* If at the end OFF still isn't a SSA_NAME or isn't
3145 defined in the loop, punt. */
3146 if (TREE_CODE (off) != SSA_NAME
3147 || expr_invariant_in_loop_p (loop, off))
3148 return NULL_TREE;
3149
3150 if (offtype == NULL_TREE)
3151 offtype = TREE_TYPE (off);
3152
3153 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3154 offtype, scale);
3155 if (decl == NULL_TREE)
3156 return NULL_TREE;
3157
3158 if (basep)
3159 *basep = base;
3160 if (offp)
3161 *offp = off;
3162 if (scalep)
3163 *scalep = scale;
3164 return decl;
3165}
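
/* Editorial sketch (not part of the original source): a typical access
   that reaches vect_check_gather looks like

     void
     f (float *restrict out, float *restrict data, int *restrict idx, int n)
     {
       for (int i = 0; i < n; i++)
         out[i] = data[idx[i]];
     }

   The read data[idx[i]] has no affine DR_STEP, so it is re-analyzed here:
   assuming the target's builtin_gather hook accepts the combination, the
   function would return BASE == data, OFF == the SSA_NAME holding idx[i]
   (possibly sign-extended) and SCALE == sizeof (float).  */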
3166
ebfd146a
IR
3167/* Function vect_analyze_data_refs.
3168
a70d6342 3169 Find all the data references in the loop or basic block.
ebfd146a
IR
3170
3171 The general structure of the analysis of data refs in the vectorizer is as
3172 follows:
b8698a0f 3173 1- vect_analyze_data_refs(loop/bb): call
a70d6342
IR
3174 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3175 in the loop/bb and their dependences.
ebfd146a
IR
3176 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3177 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3178 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3179
3180*/
3181
3182bool
777e1f09
RG
3183vect_analyze_data_refs (loop_vec_info loop_vinfo,
3184 bb_vec_info bb_vinfo,
1428105c 3185 int *min_vf, unsigned *n_stmts)
ebfd146a 3186{
a70d6342
IR
3187 struct loop *loop = NULL;
3188 basic_block bb = NULL;
ebfd146a 3189 unsigned int i;
9771b263 3190 vec<data_reference_p> datarefs;
ebfd146a
IR
3191 struct data_reference *dr;
3192 tree scalar_type;
3193
73fbfcad 3194 if (dump_enabled_p ())
78c60e3d
SS
3195 dump_printf_loc (MSG_NOTE, vect_location,
3196 "=== vect_analyze_data_refs ===\n");
b8698a0f 3197
a70d6342
IR
3198 if (loop_vinfo)
3199 {
0136f8f0
AH
3200 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3201
a70d6342 3202 loop = LOOP_VINFO_LOOP (loop_vinfo);
0136f8f0
AH
3203 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3204 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
22a8be9e 3205 {
73fbfcad 3206 if (dump_enabled_p ())
e645e942
TJ
3207 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3208 "not vectorized: loop contains function calls"
3209 " or data references that cannot be analyzed\n");
22a8be9e
SP
3210 return false;
3211 }
3212
0136f8f0
AH
3213 for (i = 0; i < loop->num_nodes; i++)
3214 {
3215 gimple_stmt_iterator gsi;
3216
3217 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3218 {
3219 gimple stmt = gsi_stmt (gsi);
1428105c
RB
3220 if (is_gimple_debug (stmt))
3221 continue;
3222 ++*n_stmts;
0136f8f0
AH
3223 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3224 {
3225 if (is_gimple_call (stmt) && loop->safelen)
3226 {
3227 tree fndecl = gimple_call_fndecl (stmt), op;
3228 if (fndecl != NULL_TREE)
3229 {
d52f5295 3230 struct cgraph_node *node = cgraph_node::get (fndecl);
0136f8f0
AH
3231 if (node != NULL && node->simd_clones != NULL)
3232 {
3233 unsigned int j, n = gimple_call_num_args (stmt);
3234 for (j = 0; j < n; j++)
3235 {
3236 op = gimple_call_arg (stmt, j);
3237 if (DECL_P (op)
3238 || (REFERENCE_CLASS_P (op)
3239 && get_base_address (op)))
3240 break;
3241 }
3242 op = gimple_call_lhs (stmt);
3243 /* Ignore #pragma omp declare simd functions
3244 if they don't have data references in the
3245 call stmt itself. */
3246 if (j == n
3247 && !(op
3248 && (DECL_P (op)
3249 || (REFERENCE_CLASS_P (op)
3250 && get_base_address (op)))))
3251 continue;
3252 }
3253 }
3254 }
3255 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3256 if (dump_enabled_p ())
3257 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3258 "not vectorized: loop contains function "
3259 "calls or data references that cannot "
3260 "be analyzed\n");
3261 return false;
3262 }
3263 }
3264 }
3265
3266 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
a70d6342
IR
3267 }
3268 else
3269 {
1aedeafe
RG
3270 gimple_stmt_iterator gsi;
3271
a70d6342 3272 bb = BB_VINFO_BB (bb_vinfo);
1aedeafe
RG
3273 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3274 {
3275 gimple stmt = gsi_stmt (gsi);
1428105c
RB
3276 if (is_gimple_debug (stmt))
3277 continue;
3278 ++*n_stmts;
1aedeafe
RG
3279 if (!find_data_references_in_stmt (NULL, stmt,
3280 &BB_VINFO_DATAREFS (bb_vinfo)))
3281 {
3282 /* Mark the rest of the basic-block as unvectorizable. */
3283 for (; !gsi_end_p (gsi); gsi_next (&gsi))
d4d5e146
RG
3284 {
3285 stmt = gsi_stmt (gsi);
3286 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3287 }
1aedeafe
RG
3288 break;
3289 }
3290 }
22a8be9e 3291
a70d6342
IR
3292 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3293 }
ebfd146a 3294
ff802fa1
IR
3295 /* Go through the data-refs, check that the analysis succeeded. Update
3296 pointer from stmt_vec_info struct to DR and vectype. */
ebfd146a 3297
9771b263 3298 FOR_EACH_VEC_ELT (datarefs, i, dr)
ebfd146a
IR
3299 {
3300 gimple stmt;
3301 stmt_vec_info stmt_info;
b8698a0f 3302 tree base, offset, init;
aec7ae7d 3303 bool gather = false;
74bf76ed 3304 bool simd_lane_access = false;
777e1f09 3305 int vf;
b8698a0f 3306
fbd7e877 3307again:
ebfd146a
IR
3308 if (!dr || !DR_REF (dr))
3309 {
73fbfcad 3310 if (dump_enabled_p ())
78c60e3d 3311 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 3312 "not vectorized: unhandled data-ref\n");
ebfd146a
IR
3313 return false;
3314 }
3315
3316 stmt = DR_STMT (dr);
3317 stmt_info = vinfo_for_stmt (stmt);
3318
fbd7e877
RB
3319 /* Discard clobbers from the dataref vector. We will remove
3320 clobber stmts during vectorization. */
3321 if (gimple_clobber_p (stmt))
3322 {
d3ef8c53 3323 free_data_ref (dr);
fbd7e877
RB
3324 if (i == datarefs.length () - 1)
3325 {
3326 datarefs.pop ();
3327 break;
3328 }
41475e96
JJ
3329 datarefs.ordered_remove (i);
3330 dr = datarefs[i];
fbd7e877
RB
3331 goto again;
3332 }
3333
ebfd146a
IR
3334 /* Check that analysis of the data-ref succeeded. */
3335 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
aec7ae7d 3336 || !DR_STEP (dr))
ebfd146a 3337 {
74bf76ed
JJ
3338 bool maybe_gather
3339 = DR_IS_READ (dr)
aec7ae7d 3340 && !TREE_THIS_VOLATILE (DR_REF (dr))
74bf76ed
JJ
3341 && targetm.vectorize.builtin_gather != NULL;
3342 bool maybe_simd_lane_access
3343 = loop_vinfo && loop->simduid;
3344
3345 /* If target supports vector gather loads, or if this might be
 3346 a SIMD lane access, see if they can be used. */
3347 if (loop_vinfo
3348 && (maybe_gather || maybe_simd_lane_access)
aec7ae7d
JJ
3349 && !nested_in_vect_loop_p (loop, stmt))
3350 {
3351 struct data_reference *newdr
3352 = create_data_ref (NULL, loop_containing_stmt (stmt),
3353 DR_REF (dr), stmt, true);
3354 gcc_assert (newdr != NULL && DR_REF (newdr));
3355 if (DR_BASE_ADDRESS (newdr)
3356 && DR_OFFSET (newdr)
3357 && DR_INIT (newdr)
3358 && DR_STEP (newdr)
3359 && integer_zerop (DR_STEP (newdr)))
3360 {
74bf76ed
JJ
3361 if (maybe_simd_lane_access)
3362 {
3363 tree off = DR_OFFSET (newdr);
3364 STRIP_NOPS (off);
3365 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3366 && TREE_CODE (off) == MULT_EXPR
cc269bb6 3367 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
74bf76ed
JJ
3368 {
3369 tree step = TREE_OPERAND (off, 1);
3370 off = TREE_OPERAND (off, 0);
3371 STRIP_NOPS (off);
3372 if (CONVERT_EXPR_P (off)
3373 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3374 0)))
3375 < TYPE_PRECISION (TREE_TYPE (off)))
3376 off = TREE_OPERAND (off, 0);
3377 if (TREE_CODE (off) == SSA_NAME)
3378 {
3379 gimple def = SSA_NAME_DEF_STMT (off);
3380 tree reft = TREE_TYPE (DR_REF (newdr));
cd4447e2
JJ
3381 if (is_gimple_call (def)
3382 && gimple_call_internal_p (def)
3383 && (gimple_call_internal_fn (def)
3384 == IFN_GOMP_SIMD_LANE))
74bf76ed
JJ
3385 {
3386 tree arg = gimple_call_arg (def, 0);
3387 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3388 arg = SSA_NAME_VAR (arg);
3389 if (arg == loop->simduid
3390 /* For now. */
3391 && tree_int_cst_equal
3392 (TYPE_SIZE_UNIT (reft),
3393 step))
3394 {
3395 DR_OFFSET (newdr) = ssize_int (0);
3396 DR_STEP (newdr) = step;
995a1b4a
JJ
3397 DR_ALIGNED_TO (newdr)
3398 = size_int (BIGGEST_ALIGNMENT);
74bf76ed
JJ
3399 dr = newdr;
3400 simd_lane_access = true;
3401 }
3402 }
3403 }
3404 }
3405 }
3406 if (!simd_lane_access && maybe_gather)
3407 {
3408 dr = newdr;
3409 gather = true;
3410 }
aec7ae7d 3411 }
74bf76ed 3412 if (!gather && !simd_lane_access)
aec7ae7d
JJ
3413 free_data_ref (newdr);
3414 }
4b5caab7 3415
74bf76ed 3416 if (!gather && !simd_lane_access)
aec7ae7d 3417 {
73fbfcad 3418 if (dump_enabled_p ())
aec7ae7d 3419 {
e645e942 3420 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
78c60e3d
SS
3421 "not vectorized: data ref analysis "
3422 "failed ");
3423 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3424 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
aec7ae7d 3425 }
ba65ae42 3426
aec7ae7d 3427 if (bb_vinfo)
fcac74a1 3428 break;
aec7ae7d
JJ
3429
3430 return false;
3431 }
ebfd146a
IR
3432 }
3433
3434 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3435 {
73fbfcad 3436 if (dump_enabled_p ())
78c60e3d
SS
3437 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3438 "not vectorized: base addr of dr is a "
e645e942 3439 "constant\n");
ba65ae42
IR
3440
3441 if (bb_vinfo)
fcac74a1 3442 break;
ba65ae42 3443
74bf76ed 3444 if (gather || simd_lane_access)
aec7ae7d
JJ
3445 free_data_ref (dr);
3446 return false;
ebfd146a
IR
3447 }
3448
8f7de592
IR
3449 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3450 {
73fbfcad 3451 if (dump_enabled_p ())
8f7de592 3452 {
78c60e3d
SS
3453 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3454 "not vectorized: volatile type ");
3455 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3456 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
8f7de592 3457 }
ba65ae42
IR
3458
3459 if (bb_vinfo)
fcac74a1 3460 break;
ba65ae42 3461
8f7de592
IR
3462 return false;
3463 }
3464
822ba6d7 3465 if (stmt_can_throw_internal (stmt))
5a2c1986 3466 {
73fbfcad 3467 if (dump_enabled_p ())
5a2c1986 3468 {
78c60e3d
SS
3469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3470 "not vectorized: statement can throw an "
3471 "exception ");
3472 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3473 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5a2c1986 3474 }
ba65ae42
IR
3475
3476 if (bb_vinfo)
fcac74a1 3477 break;
ba65ae42 3478
74bf76ed 3479 if (gather || simd_lane_access)
aec7ae7d 3480 free_data_ref (dr);
5a2c1986
IR
3481 return false;
3482 }
3483
508ef0c6
RG
3484 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3485 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3486 {
73fbfcad 3487 if (dump_enabled_p ())
508ef0c6 3488 {
78c60e3d
SS
3489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3490 "not vectorized: statement is bitfield "
3491 "access ");
3492 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3493 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
508ef0c6
RG
3494 }
3495
3496 if (bb_vinfo)
fcac74a1 3497 break;
508ef0c6 3498
74bf76ed 3499 if (gather || simd_lane_access)
508ef0c6
RG
3500 free_data_ref (dr);
3501 return false;
3502 }
3503
3504 base = unshare_expr (DR_BASE_ADDRESS (dr));
3505 offset = unshare_expr (DR_OFFSET (dr));
3506 init = unshare_expr (DR_INIT (dr));
3507
5ce9450f
JJ
3508 if (is_gimple_call (stmt)
3509 && (!gimple_call_internal_p (stmt)
3510 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3511 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
9c239085 3512 {
73fbfcad 3513 if (dump_enabled_p ())
9c239085 3514 {
78c60e3d 3515 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e645e942 3516 "not vectorized: dr in a call ");
78c60e3d 3517 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3518 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
9c239085
JJ
3519 }
3520
3521 if (bb_vinfo)
fcac74a1 3522 break;
9c239085 3523
74bf76ed 3524 if (gather || simd_lane_access)
9c239085
JJ
3525 free_data_ref (dr);
3526 return false;
3527 }
3528
ebfd146a 3529 /* Update DR field in stmt_vec_info struct. */
ebfd146a
IR
3530
 3531 /* If the dataref is in an inner-loop of the loop that is considered
 3532 for vectorization, we also want to analyze the access relative to
b8698a0f 3533 the outer-loop (DR contains information only relative to the
ebfd146a
IR
3534 inner-most enclosing loop). We do that by building a reference to the
 3535 first location accessed by the inner-loop, and analyzing it relative to
b8698a0f
L
3536 the outer-loop. */
3537 if (loop && nested_in_vect_loop_p (loop, stmt))
ebfd146a
IR
3538 {
3539 tree outer_step, outer_base, outer_init;
3540 HOST_WIDE_INT pbitsize, pbitpos;
3541 tree poffset;
ef4bddc2 3542 machine_mode pmode;
ebfd146a
IR
3543 int punsignedp, pvolatilep;
3544 affine_iv base_iv, offset_iv;
3545 tree dinit;
3546
b8698a0f 3547 /* Build a reference to the first location accessed by the
ff802fa1 3548 inner-loop: *(BASE+INIT). (The first location is actually
ebfd146a
IR
3549 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3550 tree inner_base = build_fold_indirect_ref
5d49b6a7 3551 (fold_build_pointer_plus (base, init));
ebfd146a 3552
73fbfcad 3553 if (dump_enabled_p ())
ebfd146a 3554 {
78c60e3d
SS
3555 dump_printf_loc (MSG_NOTE, vect_location,
3556 "analyze in outer-loop: ");
3557 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
e645e942 3558 dump_printf (MSG_NOTE, "\n");
ebfd146a
IR
3559 }
3560
b8698a0f 3561 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
b3ecff82 3562 &poffset, &pmode, &punsignedp, &pvolatilep, false);
ebfd146a
IR
3563 gcc_assert (outer_base != NULL_TREE);
3564
3565 if (pbitpos % BITS_PER_UNIT != 0)
3566 {
73fbfcad 3567 if (dump_enabled_p ())
78c60e3d
SS
3568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3569 "failed: bit offset alignment.\n");
ebfd146a
IR
3570 return false;
3571 }
3572
3573 outer_base = build_fold_addr_expr (outer_base);
b8698a0f 3574 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
ebfd146a
IR
3575 &base_iv, false))
3576 {
73fbfcad 3577 if (dump_enabled_p ())
e645e942 3578 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
78c60e3d 3579 "failed: evolution of base is not affine.\n");
ebfd146a
IR
3580 return false;
3581 }
3582
3583 if (offset)
3584 {
3585 if (poffset)
b8698a0f 3586 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
ebfd146a
IR
3587 poffset);
3588 else
3589 poffset = offset;
3590 }
3591
3592 if (!poffset)
3593 {
3594 offset_iv.base = ssize_int (0);
3595 offset_iv.step = ssize_int (0);
3596 }
b8698a0f 3597 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
ebfd146a
IR
3598 &offset_iv, false))
3599 {
73fbfcad 3600 if (dump_enabled_p ())
e645e942 3601 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
78c60e3d 3602 "evolution of offset is not affine.\n");
ebfd146a
IR
3603 return false;
3604 }
3605
3606 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3607 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3608 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3609 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3610 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3611
3612 outer_step = size_binop (PLUS_EXPR,
3613 fold_convert (ssizetype, base_iv.step),
3614 fold_convert (ssizetype, offset_iv.step));
3615
3616 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3617 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
b8698a0f 3618 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
ebfd146a 3619 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
b8698a0f 3620 STMT_VINFO_DR_OFFSET (stmt_info) =
ebfd146a 3621 fold_convert (ssizetype, offset_iv.base);
b8698a0f 3622 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
ebfd146a
IR
3623 size_int (highest_pow2_factor (offset_iv.base));
3624
73fbfcad 3625 if (dump_enabled_p ())
ebfd146a 3626 {
78c60e3d
SS
3627 dump_printf_loc (MSG_NOTE, vect_location,
3628 "\touter base_address: ");
3629 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3630 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3631 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3632 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3633 STMT_VINFO_DR_OFFSET (stmt_info));
3634 dump_printf (MSG_NOTE,
3635 "\n\touter constant offset from base address: ");
3636 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3637 STMT_VINFO_DR_INIT (stmt_info));
3638 dump_printf (MSG_NOTE, "\n\touter step: ");
3639 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3640 STMT_VINFO_DR_STEP (stmt_info));
3641 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3642 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3643 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
e645e942 3644 dump_printf (MSG_NOTE, "\n");
ebfd146a
IR
3645 }
3646 }
3647
3648 if (STMT_VINFO_DATA_REF (stmt_info))
3649 {
73fbfcad 3650 if (dump_enabled_p ())
ebfd146a 3651 {
78c60e3d
SS
3652 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3653 "not vectorized: more than one data ref "
3654 "in stmt: ");
3655 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3656 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
ebfd146a 3657 }
ba65ae42
IR
3658
3659 if (bb_vinfo)
fcac74a1 3660 break;
ba65ae42 3661
74bf76ed 3662 if (gather || simd_lane_access)
aec7ae7d 3663 free_data_ref (dr);
ebfd146a
IR
3664 return false;
3665 }
8644a673 3666
ebfd146a 3667 STMT_VINFO_DATA_REF (stmt_info) = dr;
74bf76ed
JJ
3668 if (simd_lane_access)
3669 {
3670 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
d3ef8c53 3671 free_data_ref (datarefs[i]);
74bf76ed
JJ
3672 datarefs[i] = dr;
3673 }
b8698a0f 3674
ebfd146a
IR
3675 /* Set vectype for STMT. */
3676 scalar_type = TREE_TYPE (DR_REF (dr));
d3ef8c53
JJ
3677 STMT_VINFO_VECTYPE (stmt_info)
3678 = get_vectype_for_scalar_type (scalar_type);
b8698a0f 3679 if (!STMT_VINFO_VECTYPE (stmt_info))
ebfd146a 3680 {
73fbfcad 3681 if (dump_enabled_p ())
ebfd146a 3682 {
e645e942 3683 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
78c60e3d
SS
3684 "not vectorized: no vectype for stmt: ");
3685 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3686 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3687 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3688 scalar_type);
e645e942 3689 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
ebfd146a 3690 }
4b5caab7
IR
3691
3692 if (bb_vinfo)
fcac74a1 3693 break;
aec7ae7d 3694
74bf76ed 3695 if (gather || simd_lane_access)
aec7ae7d
JJ
3696 {
3697 STMT_VINFO_DATA_REF (stmt_info) = NULL;
d3ef8c53
JJ
3698 if (gather)
3699 free_data_ref (dr);
aec7ae7d
JJ
3700 }
3701 return false;
ebfd146a 3702 }
451dabda
RB
3703 else
3704 {
3705 if (dump_enabled_p ())
3706 {
3707 dump_printf_loc (MSG_NOTE, vect_location,
3708 "got vectype for stmt: ");
3709 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3710 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3711 STMT_VINFO_VECTYPE (stmt_info));
e645e942 3712 dump_printf (MSG_NOTE, "\n");
451dabda
RB
3713 }
3714 }
777e1f09
RG
3715
3716 /* Adjust the minimal vectorization factor according to the
3717 vector type. */
3718 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3719 if (vf > *min_vf)
3720 *min_vf = vf;
aec7ae7d
JJ
3721
3722 if (gather)
3723 {
aec7ae7d 3724 tree off;
aec7ae7d 3725
7d75abc8
MM
3726 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3727 if (gather
3728 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3729 gather = false;
319e6439 3730 if (!gather)
aec7ae7d 3731 {
6f723d33
JJ
3732 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3733 free_data_ref (dr);
73fbfcad 3734 if (dump_enabled_p ())
aec7ae7d 3735 {
78c60e3d
SS
3736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3737 "not vectorized: not suitable for gather "
3738 "load ");
3739 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3740 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
aec7ae7d
JJ
3741 }
3742 return false;
3743 }
3744
9771b263 3745 datarefs[i] = dr;
319e6439
RG
3746 STMT_VINFO_GATHER_P (stmt_info) = true;
3747 }
3748 else if (loop_vinfo
3749 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3750 {
f2e2a985 3751 if (nested_in_vect_loop_p (loop, stmt))
319e6439 3752 {
73fbfcad 3753 if (dump_enabled_p ())
319e6439 3754 {
78c60e3d
SS
3755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3756 "not vectorized: not suitable for strided "
3757 "load ");
3758 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
e645e942 3759 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
319e6439
RG
3760 }
3761 return false;
3762 }
f2e2a985 3763 STMT_VINFO_STRIDED_P (stmt_info) = true;
aec7ae7d 3764 }
ebfd146a 3765 }
b8698a0f 3766
fcac74a1
RB
3767 /* If we stopped analysis at the first dataref we could not analyze
3768 when trying to vectorize a basic-block mark the rest of the datarefs
3769 as not vectorizable and truncate the vector of datarefs. That
3770 avoids spending useless time in analyzing their dependence. */
3771 if (i != datarefs.length ())
3772 {
3773 gcc_assert (bb_vinfo != NULL);
3774 for (unsigned j = i; j < datarefs.length (); ++j)
3775 {
3776 data_reference_p dr = datarefs[j];
3777 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3778 free_data_ref (dr);
3779 }
3780 datarefs.truncate (i);
3781 }
3782
ebfd146a
IR
3783 return true;
3784}
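
/* Editorial sketch (not part of the original source): examples of how the
   loop above classifies data-refs whose step analysis is not the usual
   constant-step case, assuming a target with a gather builtin:

     a[b[i]]        DR analysis fails; the ref is re-created and accepted
                    as a gather, marked STMT_VINFO_GATHER_P.
     a[i * stride]  DR_STEP is loop-invariant but not an INTEGER_CST,
                    marked STMT_VINFO_STRIDED_P.
     tmp[lane]      inside an "#pragma omp simd" loop where lane comes from
                    IFN_GOMP_SIMD_LANE, marked STMT_VINFO_SIMD_LANE_ACCESS_P.  */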
3785
3786
3787/* Function vect_get_new_vect_var.
3788
ff802fa1 3789 Returns a name for a new variable. The current naming scheme appends the
b8698a0f
L
3790 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3791 the name of vectorizer generated variables, and appends that to NAME if
ebfd146a
IR
3792 provided. */
3793
3794tree
3795vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3796{
3797 const char *prefix;
3798 tree new_vect_var;
3799
3800 switch (var_kind)
3801 {
3802 case vect_simple_var:
451dabda 3803 prefix = "vect";
ebfd146a
IR
3804 break;
3805 case vect_scalar_var:
451dabda 3806 prefix = "stmp";
ebfd146a
IR
3807 break;
3808 case vect_pointer_var:
451dabda 3809 prefix = "vectp";
ebfd146a
IR
3810 break;
3811 default:
3812 gcc_unreachable ();
3813 }
3814
3815 if (name)
3816 {
451dabda 3817 char* tmp = concat (prefix, "_", name, NULL);
65876d24 3818 new_vect_var = create_tmp_reg (type, tmp);
ebfd146a
IR
3819 free (tmp);
3820 }
3821 else
65876d24 3822 new_vect_var = create_tmp_reg (type, prefix);
ebfd146a
IR
3823
3824 return new_vect_var;
3825}
3826
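
/* Editorial usage sketch (not in the original source): given some pointer
   type ptr_type already built by the caller,

     tree p = vect_get_new_vect_var (ptr_type, vect_pointer_var, "x");

   creates a temporary register named "vectp_x"; with NAME == NULL the
   variable is simply called "vectp".  */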
faf4220c
JJ
3827/* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
3828
3829static void
3830vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3831 stmt_vec_info stmt_info)
3832{
3833 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3834 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3835 int misalign = DR_MISALIGNMENT (dr);
3836 if (misalign == -1)
3837 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3838 else
3839 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3840}
ebfd146a
IR
3841
3842/* Function vect_create_addr_base_for_vector_ref.
3843
3844 Create an expression that computes the address of the first memory location
3845 that will be accessed for a data reference.
3846
3847 Input:
3848 STMT: The statement containing the data reference.
3849 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
 3850 OFFSET: Optional. If supplied, it is added to the initial address.
 3851 LOOP: Specify relative to which loop-nest the address should be computed.
3852 For example, when the dataref is in an inner-loop nested in an
3853 outer-loop that is now being vectorized, LOOP can be either the
ff802fa1 3854 outer-loop, or the inner-loop. The first memory location accessed
ebfd146a
IR
3855 by the following dataref ('in' points to short):
3856
3857 for (i=0; i<N; i++)
3858 for (j=0; j<M; j++)
3859 s += in[i+j]
3860
3861 is as follows:
3862 if LOOP=i_loop: &in (relative to i_loop)
3863 if LOOP=j_loop: &in+i*2B (relative to j_loop)
356bbc4c
JJ
3864 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
 3865 initial address. Unlike OFFSET, which is the number of elements to
3866 be added, BYTE_OFFSET is measured in bytes.
ebfd146a
IR
3867
3868 Output:
b8698a0f 3869 1. Return an SSA_NAME whose value is the address of the memory location of
ebfd146a
IR
3870 the first vector of the data reference.
3871 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3872 these statement(s) which define the returned SSA_NAME.
3873
3874 FORNOW: We are only handling array accesses with step 1. */
3875
3876tree
3877vect_create_addr_base_for_vector_ref (gimple stmt,
3878 gimple_seq *new_stmt_list,
3879 tree offset,
356bbc4c
JJ
3880 struct loop *loop,
3881 tree byte_offset)
ebfd146a
IR
3882{
3883 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3884 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4bdd44c4 3885 tree data_ref_base;
595c2679 3886 const char *base_name;
4bdd44c4 3887 tree addr_base;
ebfd146a
IR
3888 tree dest;
3889 gimple_seq seq = NULL;
4bdd44c4
RB
3890 tree base_offset;
3891 tree init;
8644a673 3892 tree vect_ptr_type;
ebfd146a 3893 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
a70d6342 3894 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
ebfd146a 3895
a70d6342 3896 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
ebfd146a 3897 {
a70d6342 3898 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
ebfd146a 3899
a70d6342 3900 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
ebfd146a
IR
3901
3902 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3903 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3904 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3905 }
4bdd44c4
RB
3906 else
3907 {
3908 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3909 base_offset = unshare_expr (DR_OFFSET (dr));
3910 init = unshare_expr (DR_INIT (dr));
3911 }
ebfd146a 3912
a70d6342 3913 if (loop_vinfo)
595c2679 3914 base_name = get_name (data_ref_base);
a70d6342
IR
3915 else
3916 {
3917 base_offset = ssize_int (0);
3918 init = ssize_int (0);
595c2679 3919 base_name = get_name (DR_REF (dr));
b8698a0f 3920 }
a70d6342 3921
ebfd146a
IR
3922 /* Create base_offset */
3923 base_offset = size_binop (PLUS_EXPR,
3924 fold_convert (sizetype, base_offset),
3925 fold_convert (sizetype, init));
ebfd146a
IR
3926
3927 if (offset)
3928 {
ebfd146a
IR
3929 offset = fold_build2 (MULT_EXPR, sizetype,
3930 fold_convert (sizetype, offset), step);
3931 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3932 base_offset, offset);
ebfd146a 3933 }
356bbc4c
JJ
3934 if (byte_offset)
3935 {
3936 byte_offset = fold_convert (sizetype, byte_offset);
3937 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3938 base_offset, byte_offset);
3939 }
ebfd146a
IR
3940
3941 /* base + base_offset */
a70d6342 3942 if (loop_vinfo)
5d49b6a7 3943 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
a70d6342
IR
3944 else
3945 {
70f34814
RG
3946 addr_base = build1 (ADDR_EXPR,
3947 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3948 unshare_expr (DR_REF (dr)));
a70d6342 3949 }
b8698a0f 3950
ebfd146a 3951 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
4bdd44c4 3952 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
aed93b23 3953 addr_base = force_gimple_operand (addr_base, &seq, true, dest);
ebfd146a
IR
3954 gimple_seq_add_seq (new_stmt_list, seq);
3955
17fc049f 3956 if (DR_PTR_INFO (dr)
aed93b23
RB
3957 && TREE_CODE (addr_base) == SSA_NAME
3958 && !SSA_NAME_PTR_INFO (addr_base))
128aaeed 3959 {
faf4220c
JJ
3960 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3961 if (offset || byte_offset)
4bdd44c4 3962 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
128aaeed 3963 }
17fc049f 3964
73fbfcad 3965 if (dump_enabled_p ())
ebfd146a 3966 {
78c60e3d 3967 dump_printf_loc (MSG_NOTE, vect_location, "created ");
4bdd44c4 3968 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
e645e942 3969 dump_printf (MSG_NOTE, "\n");
ebfd146a 3970 }
8644a673 3971
4bdd44c4 3972 return addr_base;
ebfd146a
IR
3973}
3974
3975
3976/* Function vect_create_data_ref_ptr.
3977
920e8172
RS
3978 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3979 location accessed in the loop by STMT, along with the def-use update
3980 chain to appropriately advance the pointer through the loop iterations.
3981 Also set aliasing information for the pointer. This pointer is used by
3982 the callers to this function to create a memory reference expression for
3983 vector load/store access.
ebfd146a
IR
3984
3985 Input:
3986 1. STMT: a stmt that references memory. Expected to be of the form
3987 GIMPLE_ASSIGN <name, data-ref> or
3988 GIMPLE_ASSIGN <data-ref, name>.
920e8172
RS
3989 2. AGGR_TYPE: the type of the reference, which should be either a vector
3990 or an array.
3991 3. AT_LOOP: the loop where the vector memref is to be created.
3992 4. OFFSET (optional): an offset to be added to the initial address accessed
ebfd146a 3993 by the data-ref in STMT.
920e8172
RS
3994 5. BSI: location where the new stmts are to be placed if there is no loop
3995 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
ebfd146a 3996 pointing to the initial address.
356bbc4c
JJ
3997 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
3998 to the initial address accessed by the data-ref in STMT. This is
3999 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4000 in bytes.
ebfd146a
IR
4001
4002 Output:
4003 1. Declare a new ptr to vector_type, and have it point to the base of the
 4004 data reference (initial address accessed by the data reference).
4005 For example, for vector of type V8HI, the following code is generated:
4006
920e8172
RS
4007 v8hi *ap;
4008 ap = (v8hi *)initial_address;
ebfd146a
IR
4009
4010 if OFFSET is not supplied:
4011 initial_address = &a[init];
4012 if OFFSET is supplied:
4013 initial_address = &a[init + OFFSET];
356bbc4c
JJ
4014 if BYTE_OFFSET is supplied:
4015 initial_address = &a[init] + BYTE_OFFSET;
ebfd146a
IR
4016
4017 Return the initial_address in INITIAL_ADDRESS.
4018
4019 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
b8698a0f 4020 update the pointer in each iteration of the loop.
ebfd146a
IR
4021
4022 Return the increment stmt that updates the pointer in PTR_INCR.
4023
b8698a0f 4024 3. Set INV_P to true if the access pattern of the data reference in the
ff802fa1 4025 vectorized loop is invariant. Set it to false otherwise.
ebfd146a
IR
4026
4027 4. Return the pointer. */
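
/* Editorial sketch (not part of the original source): for a V8HI access
   with a unit-stride scalar step, the pointer created below evolves as

     preheader:  ap_0 = (v8hi *) &a[init];
     loop:       ap_1 = PHI <ap_0, ap_2>;
                 ...
                 ap_2 = ap_1 + 16;        <-- TYPE_SIZE_UNIT of the aggregate

   The IV step is negated when DR_STEP is negative and is zero when the
   access is loop invariant (see INV_P below).  */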
4028
4029tree
920e8172
RS
4030vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4031 tree offset, tree *initial_address,
4032 gimple_stmt_iterator *gsi, gimple *ptr_incr,
356bbc4c 4033 bool only_init, bool *inv_p, tree byte_offset)
ebfd146a 4034{
595c2679 4035 const char *base_name;
ebfd146a
IR
4036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4037 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
a70d6342
IR
4038 struct loop *loop = NULL;
4039 bool nested_in_vect_loop = false;
4040 struct loop *containing_loop = NULL;
920e8172
RS
4041 tree aggr_ptr_type;
4042 tree aggr_ptr;
ebfd146a 4043 tree new_temp;
ebfd146a 4044 gimple_seq new_stmt_list = NULL;
a70d6342 4045 edge pe = NULL;
ebfd146a 4046 basic_block new_bb;
920e8172 4047 tree aggr_ptr_init;
ebfd146a 4048 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
920e8172 4049 tree aptr;
ebfd146a
IR
4050 gimple_stmt_iterator incr_gsi;
4051 bool insert_after;
4052 tree indx_before_incr, indx_after_incr;
4053 gimple incr;
4054 tree step;
a70d6342 4055 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
b8698a0f 4056
920e8172
RS
4057 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4058 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4059
a70d6342
IR
4060 if (loop_vinfo)
4061 {
4062 loop = LOOP_VINFO_LOOP (loop_vinfo);
4063 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4064 containing_loop = (gimple_bb (stmt))->loop_father;
4065 pe = loop_preheader_edge (loop);
4066 }
4067 else
4068 {
4069 gcc_assert (bb_vinfo);
4070 only_init = true;
4071 *ptr_incr = NULL;
4072 }
b8698a0f 4073
ebfd146a
IR
4074 /* Check the step (evolution) of the load in LOOP, and record
4075 whether it's invariant. */
4076 if (nested_in_vect_loop)
4077 step = STMT_VINFO_DR_STEP (stmt_info);
4078 else
4079 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
b8698a0f 4080
08940f33 4081 if (integer_zerop (step))
ebfd146a
IR
4082 *inv_p = true;
4083 else
4084 *inv_p = false;
4085
4086 /* Create an expression for the first address accessed by this load
b8698a0f 4087 in LOOP. */
595c2679 4088 base_name = get_name (DR_BASE_ADDRESS (dr));
ebfd146a 4089
73fbfcad 4090 if (dump_enabled_p ())
ebfd146a 4091 {
595c2679 4092 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
78c60e3d
SS
4093 dump_printf_loc (MSG_NOTE, vect_location,
4094 "create %s-pointer variable to type: ",
5806f481 4095 get_tree_code_name (TREE_CODE (aggr_type)));
78c60e3d 4096 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
595c2679 4097 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
78c60e3d 4098 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
38000232
MG
4099 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4100 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
595c2679 4101 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
78c60e3d 4102 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
595c2679 4103 else
78c60e3d 4104 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
595c2679 4105 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
e645e942 4106 dump_printf (MSG_NOTE, "\n");
ebfd146a
IR
4107 }
4108
4bdd44c4
RB
4109 /* (1) Create the new aggregate-pointer variable.
4110 Vector and array types inherit the alias set of their component
920e8172
RS
4111 type by default so we need to use a ref-all pointer if the data
4112 reference does not conflict with the created aggregated data
4113 reference because it is not addressable. */
4bdd44c4
RB
4114 bool need_ref_all = false;
4115 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
3f49ba3f 4116 get_alias_set (DR_REF (dr))))
4bdd44c4 4117 need_ref_all = true;
3f49ba3f 4118 /* Likewise for any of the data references in the stmt group. */
e14c1050 4119 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
ebfd146a 4120 {
e14c1050 4121 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
5006671f
RG
4122 do
4123 {
4bdd44c4
RB
4124 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4125 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4126 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4127 get_alias_set (DR_REF (sdr))))
5006671f 4128 {
4bdd44c4 4129 need_ref_all = true;
5006671f
RG
4130 break;
4131 }
4bdd44c4 4132 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
5006671f
RG
4133 }
4134 while (orig_stmt);
ebfd146a 4135 }
4bdd44c4
RB
4136 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4137 need_ref_all);
4138 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4139
ebfd146a 4140
ff802fa1
IR
4141 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4142 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4143 def-use update cycles for the pointer: one relative to the outer-loop
4144 (LOOP), which is what steps (3) and (4) below do. The other is relative
4145 to the inner-loop (which is the inner-most loop containing the dataref),
 4146 and this is done by step (5) below.
ebfd146a 4147
ff802fa1
IR
4148 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4149 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4150 redundant. Steps (3),(4) create the following:
ebfd146a
IR
4151
4152 vp0 = &base_addr;
4153 LOOP: vp1 = phi(vp0,vp2)
b8698a0f 4154 ...
ebfd146a
IR
4155 ...
4156 vp2 = vp1 + step
4157 goto LOOP
b8698a0f 4158
ff802fa1
IR
4159 If there is an inner-loop nested in loop, then step (5) will also be
4160 applied, and an additional update in the inner-loop will be created:
ebfd146a
IR
4161
4162 vp0 = &base_addr;
4163 LOOP: vp1 = phi(vp0,vp2)
4164 ...
4165 inner: vp3 = phi(vp1,vp4)
4166 vp4 = vp3 + inner_step
4167 if () goto inner
4168 ...
4169 vp2 = vp1 + step
4170 if () goto LOOP */
4171
920e8172
RS
4172 /* (2) Calculate the initial address of the aggregate-pointer, and set
4173 the aggregate-pointer to point to it before the loop. */
ebfd146a 4174
356bbc4c 4175 /* Create: &(base[init_val+offset]) + byte_offset in the loop preheader. */
ebfd146a
IR
4176
4177 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
356bbc4c 4178 offset, loop, byte_offset);
ebfd146a
IR
4179 if (new_stmt_list)
4180 {
a70d6342
IR
4181 if (pe)
4182 {
4183 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4184 gcc_assert (!new_bb);
4185 }
4186 else
1b29f05e 4187 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
ebfd146a
IR
4188 }
4189
4190 *initial_address = new_temp;
aed93b23 4191 aggr_ptr_init = new_temp;
ebfd146a 4192
920e8172 4193 /* (3) Handle the updating of the aggregate-pointer inside the loop.
ff802fa1
IR
4194 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4195 inner-loop nested in LOOP (during outer-loop vectorization). */
ebfd146a 4196
a70d6342 4197 /* No update in loop is required. */
b8698a0f 4198 if (only_init && (!loop_vinfo || at_loop == loop))
920e8172 4199 aptr = aggr_ptr_init;
ebfd146a
IR
4200 else
4201 {
920e8172 4202 /* The step of the aggregate pointer is the type size. */
08940f33 4203 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
b8698a0f 4204 /* One exception to the above is when the scalar step of the load in
ebfd146a
IR
4205 LOOP is zero. In this case the step here is also zero. */
4206 if (*inv_p)
08940f33
RB
4207 iv_step = size_zero_node;
4208 else if (tree_int_cst_sgn (step) == -1)
4209 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
ebfd146a
IR
4210
4211 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4212
920e8172 4213 create_iv (aggr_ptr_init,
08940f33 4214 fold_convert (aggr_ptr_type, iv_step),
920e8172 4215 aggr_ptr, loop, &incr_gsi, insert_after,
ebfd146a
IR
4216 &indx_before_incr, &indx_after_incr);
4217 incr = gsi_stmt (incr_gsi);
a70d6342 4218 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
ebfd146a
IR
4219
4220 /* Copy the points-to information if it exists. */
4221 if (DR_PTR_INFO (dr))
4222 {
faf4220c
JJ
4223 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4224 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
ebfd146a 4225 }
ebfd146a
IR
4226 if (ptr_incr)
4227 *ptr_incr = incr;
4228
920e8172 4229 aptr = indx_before_incr;
ebfd146a
IR
4230 }
4231
4232 if (!nested_in_vect_loop || only_init)
920e8172 4233 return aptr;
ebfd146a
IR
4234
4235
920e8172 4236 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
ff802fa1 4237 nested in LOOP, if it exists. */
ebfd146a
IR
4238
4239 gcc_assert (nested_in_vect_loop);
4240 if (!only_init)
4241 {
4242 standard_iv_increment_position (containing_loop, &incr_gsi,
4243 &insert_after);
920e8172 4244 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
ebfd146a
IR
4245 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4246 &indx_after_incr);
4247 incr = gsi_stmt (incr_gsi);
a70d6342 4248 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
ebfd146a
IR
4249
4250 /* Copy the points-to information if it exists. */
4251 if (DR_PTR_INFO (dr))
4252 {
faf4220c
JJ
4253 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4254 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
ebfd146a 4255 }
ebfd146a
IR
4256 if (ptr_incr)
4257 *ptr_incr = incr;
4258
b8698a0f 4259 return indx_before_incr;
ebfd146a
IR
4260 }
4261 else
4262 gcc_unreachable ();
4263}
4264
4265
4266/* Function bump_vector_ptr
4267
4268 Increment a pointer (to a vector type) by vector-size. If requested,
b8698a0f 4269 i.e. if PTR-INCR is given, then also connect the new increment stmt
ebfd146a
IR
4270 to the existing def-use update-chain of the pointer, by modifying
4271 the PTR_INCR as illustrated below:
4272
4273 The pointer def-use update-chain before this function:
4274 DATAREF_PTR = phi (p_0, p_2)
4275 ....
b8698a0f 4276 PTR_INCR: p_2 = DATAREF_PTR + step
ebfd146a
IR
4277
4278 The pointer def-use update-chain after this function:
4279 DATAREF_PTR = phi (p_0, p_2)
4280 ....
4281 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4282 ....
4283 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4284
4285 Input:
b8698a0f 4286 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
ebfd146a 4287 in the loop.
b8698a0f 4288 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
ebfd146a 4289 the loop. The increment amount across iterations is expected
b8698a0f 4290 to be vector_size.
ebfd146a
IR
4291 BSI - location where the new update stmt is to be placed.
4292 STMT - the original scalar memory-access stmt that is being vectorized.
4293 BUMP - optional. The offset by which to bump the pointer. If not given,
4294 the offset is assumed to be vector_size.
4295
4296 Output: Return NEW_DATAREF_PTR as illustrated above.
b8698a0f 4297
ebfd146a
IR
4298*/
4299
4300tree
4301bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4302 gimple stmt, tree bump)
4303{
4304 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4305 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4306 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
ebfd146a 4307 tree update = TYPE_SIZE_UNIT (vectype);
538dd0b7 4308 gassign *incr_stmt;
ebfd146a
IR
4309 ssa_op_iter iter;
4310 use_operand_p use_p;
4311 tree new_dataref_ptr;
4312
4313 if (bump)
4314 update = bump;
b8698a0f 4315
aed93b23
RB
4316 if (TREE_CODE (dataref_ptr) == SSA_NAME)
4317 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4318 else
4319 new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
0d0e4a03
JJ
4320 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4321 dataref_ptr, update);
ebfd146a
IR
4322 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4323
4324 /* Copy the points-to information if it exists. */
4325 if (DR_PTR_INFO (dr))
128aaeed
RB
4326 {
4327 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
644ffefd 4328 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
128aaeed 4329 }
ebfd146a
IR
4330
4331 if (!ptr_incr)
4332 return new_dataref_ptr;
4333
4334 /* Update the vector-pointer's cross-iteration increment. */
4335 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4336 {
4337 tree use = USE_FROM_PTR (use_p);
4338
4339 if (use == dataref_ptr)
4340 SET_USE (use_p, new_dataref_ptr);
4341 else
4342 gcc_assert (tree_int_cst_compare (use, update) == 0);
4343 }
4344
4345 return new_dataref_ptr;
4346}
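
/* Editorial usage sketch (not in the original source): a caller that has
   obtained DATAREF_PTR from vect_create_data_ref_ptr and needs the next
   vector of the group typically does

     dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                    NULL_TREE);

   where the NULL_TREE BUMP lets the offset default to the vector size.  */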
4347
4348
4349/* Function vect_create_destination_var.
4350
4351 Create a new temporary of type VECTYPE. */
4352
4353tree
4354vect_create_destination_var (tree scalar_dest, tree vectype)
4355{
4356 tree vec_dest;
451dabda
RB
4357 const char *name;
4358 char *new_name;
ebfd146a
IR
4359 tree type;
4360 enum vect_var_kind kind;
4361
4362 kind = vectype ? vect_simple_var : vect_scalar_var;
4363 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4364
4365 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4366
451dabda
RB
4367 name = get_name (scalar_dest);
4368 if (name)
378b2932 4369 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
451dabda 4370 else
378b2932 4371 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
ebfd146a 4372 vec_dest = vect_get_new_vect_var (type, kind, new_name);
451dabda 4373 free (new_name);
ebfd146a
IR
4374
4375 return vec_dest;
4376}
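
/* Editorial sketch (not in the original source): for a scalar destination
   named x with SSA version 5 and a vector VECTYPE,

     tree vd = vect_create_destination_var (scalar_dest, vectype);

   yields a temporary called "vect_x_5"; with VECTYPE == NULL_TREE the
   scalar kind is used and the name becomes "stmp_x_5".  */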
4377
0d0293ac 4378/* Function vect_grouped_store_supported.
ebfd146a 4379
e2c83630
RH
4380 Returns TRUE if interleave high and interleave low permutations
4381 are supported, and FALSE otherwise. */
ebfd146a
IR
4382
4383bool
0d0293ac 4384vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
ebfd146a 4385{
ef4bddc2 4386 machine_mode mode = TYPE_MODE (vectype);
b8698a0f 4387
e1377713
ES
4388 /* vect_permute_store_chain requires the group size to be equal to 3 or
4389 be a power of two. */
4390 if (count != 3 && exact_log2 (count) == -1)
b602d918 4391 {
73fbfcad 4392 if (dump_enabled_p ())
78c60e3d 4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
e1377713
ES
4394 "the size of the group of accesses"
 4395 " is not a power of 2 or not equal to 3\n");
b602d918
RS
4396 return false;
4397 }
4398
e2c83630 4399 /* Check that the permutation is supported. */
3fcc1b55
JJ
4400 if (VECTOR_MODE_P (mode))
4401 {
4402 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4403 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
e1377713
ES
4404
4405 if (count == 3)
3fcc1b55 4406 {
e1377713
ES
4407 unsigned int j0 = 0, j1 = 0, j2 = 0;
4408 unsigned int i, j;
4409
4410 for (j = 0; j < 3; j++)
4411 {
4412 int nelt0 = ((3 - j) * nelt) % 3;
4413 int nelt1 = ((3 - j) * nelt + 1) % 3;
4414 int nelt2 = ((3 - j) * nelt + 2) % 3;
4415 for (i = 0; i < nelt; i++)
4416 {
4417 if (3 * i + nelt0 < nelt)
4418 sel[3 * i + nelt0] = j0++;
4419 if (3 * i + nelt1 < nelt)
4420 sel[3 * i + nelt1] = nelt + j1++;
4421 if (3 * i + nelt2 < nelt)
4422 sel[3 * i + nelt2] = 0;
4423 }
4424 if (!can_vec_perm_p (mode, false, sel))
4425 {
4426 if (dump_enabled_p ())
4427 dump_printf (MSG_MISSED_OPTIMIZATION,
 4428 "permutation op not supported by target.\n");
4429 return false;
4430 }
4431
4432 for (i = 0; i < nelt; i++)
4433 {
4434 if (3 * i + nelt0 < nelt)
4435 sel[3 * i + nelt0] = 3 * i + nelt0;
4436 if (3 * i + nelt1 < nelt)
4437 sel[3 * i + nelt1] = 3 * i + nelt1;
4438 if (3 * i + nelt2 < nelt)
4439 sel[3 * i + nelt2] = nelt + j2++;
4440 }
4441 if (!can_vec_perm_p (mode, false, sel))
4442 {
4443 if (dump_enabled_p ())
4444 dump_printf (MSG_MISSED_OPTIMIZATION,
 4445 "permutation op not supported by target.\n");
4446 return false;
4447 }
4448 }
4449 return true;
3fcc1b55 4450 }
e1377713 4451 else
3fcc1b55 4452 {
e1377713
ES
4453 /* If length is not equal to 3 then only power of 2 is supported. */
4454 gcc_assert (exact_log2 (count) != -1);
4455
4456 for (i = 0; i < nelt / 2; i++)
4457 {
4458 sel[i * 2] = i;
4459 sel[i * 2 + 1] = i + nelt;
4460 }
4461 if (can_vec_perm_p (mode, false, sel))
4462 {
4463 for (i = 0; i < nelt; i++)
4464 sel[i] += nelt / 2;
4465 if (can_vec_perm_p (mode, false, sel))
4466 return true;
4467 }
3fcc1b55
JJ
4468 }
4469 }
ebfd146a 4470
73fbfcad 4471 if (dump_enabled_p ())
78c60e3d 4472 dump_printf (MSG_MISSED_OPTIMIZATION,
e1377713 4473 "permutation op not supported by target.\n");
a6b3dfde 4474 return false;
ebfd146a
IR
4475}
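
/* Editorial sketch (not in the original source): for V4SI (nelt == 4) the
   two power-of-two masks checked above are

     interleave_high:  {0, 4, 1, 5}
     interleave_low:   {2, 6, 3, 7}

   i.e. VEC_PERM_EXPR selectors that pair up elements from the two input
   vectors, matching the 0 4 1 5 / 2 6 3 7 example in the
   vect_permute_store_chain comment below.  */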
4476
4477
272c6793
RS
4478/* Return TRUE if vec_store_lanes is available for COUNT vectors of
4479 type VECTYPE. */
4480
4481bool
4482vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4483{
4484 return vect_lanes_optab_supported_p ("vec_store_lanes",
4485 vec_store_lanes_optab,
4486 vectype, count);
4487}
4488
4489
ebfd146a
IR
4490/* Function vect_permute_store_chain.
4491
4492 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
e1377713
ES
4493 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4494 the data correctly for the stores. Return the final references for stores
4495 in RESULT_CHAIN.
ebfd146a
IR
4496
4497 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
ff802fa1
IR
4498 The input is 4 vectors each containing 8 elements. We assign a number to
 4499 each element; the input sequence is:
ebfd146a
IR
4500
4501 1st vec: 0 1 2 3 4 5 6 7
4502 2nd vec: 8 9 10 11 12 13 14 15
b8698a0f 4503 3rd vec: 16 17 18 19 20 21 22 23
ebfd146a
IR
4504 4th vec: 24 25 26 27 28 29 30 31
4505
4506 The output sequence should be:
4507
4508 1st vec: 0 8 16 24 1 9 17 25
4509 2nd vec: 2 10 18 26 3 11 19 27
 4510 3rd vec: 4 12 20 28 5 13 21 29
4511 4th vec: 6 14 22 30 7 15 23 31
4512
4513 i.e., we interleave the contents of the four vectors in their order.
4514
ff802fa1 4515 We use interleave_high/low instructions to create such output. The input of
ebfd146a 4516 each interleave_high/low operation is two vectors:
b8698a0f
L
4517 1st vec 2nd vec
4518 0 1 2 3 4 5 6 7
4519 the even elements of the result vector are obtained left-to-right from the
ff802fa1 4520 high/low elements of the first vector. The odd elements of the result are
ebfd146a
IR
4521 obtained left-to-right from the high/low elements of the second vector.
4522 The output of interleave_high will be: 0 4 1 5
4523 and of interleave_low: 2 6 3 7
4524
b8698a0f 4525
ff802fa1 4526 The permutation is done in log LENGTH stages. In each stage interleave_high
b8698a0f
L
4527 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4528 where the first argument is taken from the first half of DR_CHAIN and the
 4529 second argument from its second half.
4530 In our example,
ebfd146a
IR
4531
4532 I1: interleave_high (1st vec, 3rd vec)
4533 I2: interleave_low (1st vec, 3rd vec)
4534 I3: interleave_high (2nd vec, 4th vec)
4535 I4: interleave_low (2nd vec, 4th vec)
4536
4537 The output for the first stage is:
4538
4539 I1: 0 16 1 17 2 18 3 19
4540 I2: 4 20 5 21 6 22 7 23
4541 I3: 8 24 9 25 10 26 11 27
4542 I4: 12 28 13 29 14 30 15 31
4543
4544 The output of the second stage, i.e. the final result is:
4545
4546 I1: 0 8 16 24 1 9 17 25
4547 I2: 2 10 18 26 3 11 19 27
 4548 I3: 4 12 20 28 5 13 21 29
4549 I4: 6 14 22 30 7 15 23 31. */
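
/* Editorial worked example (not part of the original comment) for the
   LENGTH == 3 case handled below: with V4SI vectors (nelt == 4) and
   inputs A = {a0 a1 a2 a3}, B = {b0 b1 b2 b3}, C = {c0 c1 c2 c3}, the
   j == 0 iteration builds

     perm3_mask_low  = {0, 4, *, 1}   T = VEC_PERM <A, B, mask> = {a0 b0 _ a1}
     perm3_mask_high = {0, 1, 4, 3}   VEC_PERM <T, C, mask>     = {a0 b0 c0 a1}

   where '*' marks a don't-care lane that the second permutation
   overwrites.  Iterations j == 1 and j == 2 produce {b1 c1 a2 b2} and
   {c2 a3 b3 c3}, i.e. the full 3-way interleaved store order.  */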
b8698a0f 4550
b602d918 4551void
9771b263 4552vect_permute_store_chain (vec<tree> dr_chain,
b8698a0f 4553 unsigned int length,
ebfd146a
IR
4554 gimple stmt,
4555 gimple_stmt_iterator *gsi,
9771b263 4556 vec<tree> *result_chain)
ebfd146a 4557{
83d5977e 4558 tree vect1, vect2, high, low;
ebfd146a
IR
4559 gimple perm_stmt;
4560 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3fcc1b55 4561 tree perm_mask_low, perm_mask_high;
e1377713
ES
4562 tree data_ref;
4563 tree perm3_mask_low, perm3_mask_high;
4564 unsigned int i, n, log_length = exact_log2 (length);
e2c83630 4565 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
3fcc1b55 4566 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
b8698a0f 4567
b6b9227d
JJ
4568 result_chain->quick_grow (length);
4569 memcpy (result_chain->address (), dr_chain.address (),
4570 length * sizeof (tree));
ebfd146a 4571
e1377713 4572 if (length == 3)
3fcc1b55 4573 {
e1377713 4574 unsigned int j0 = 0, j1 = 0, j2 = 0;
e2c83630 4575
e1377713
ES
4576 for (j = 0; j < 3; j++)
4577 {
4578 int nelt0 = ((3 - j) * nelt) % 3;
4579 int nelt1 = ((3 - j) * nelt + 1) % 3;
4580 int nelt2 = ((3 - j) * nelt + 2) % 3;
3fcc1b55 4581
e1377713
ES
4582 for (i = 0; i < nelt; i++)
4583 {
4584 if (3 * i + nelt0 < nelt)
4585 sel[3 * i + nelt0] = j0++;
4586 if (3 * i + nelt1 < nelt)
4587 sel[3 * i + nelt1] = nelt + j1++;
4588 if (3 * i + nelt2 < nelt)
4589 sel[3 * i + nelt2] = 0;
4590 }
557be5a8 4591 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
e1377713
ES
4592
4593 for (i = 0; i < nelt; i++)
4594 {
4595 if (3 * i + nelt0 < nelt)
4596 sel[3 * i + nelt0] = 3 * i + nelt0;
4597 if (3 * i + nelt1 < nelt)
4598 sel[3 * i + nelt1] = 3 * i + nelt1;
4599 if (3 * i + nelt2 < nelt)
4600 sel[3 * i + nelt2] = nelt + j2++;
4601 }
557be5a8 4602 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
e1377713
ES
4603
4604 vect1 = dr_chain[0];
4605 vect2 = dr_chain[1];
ebfd146a
IR
4606
4607 /* Create interleaving stmt:
e1377713
ES
4608 low = VEC_PERM_EXPR <vect1, vect2,
4609 {j, nelt, *, j + 1, nelt + j + 1, *,
4610 j + 2, nelt + j + 2, *, ...}> */
4611 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
0d0e4a03
JJ
4612 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4613 vect2, perm3_mask_low);
ebfd146a 4614 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
ebfd146a 4615
e1377713
ES
4616 vect1 = data_ref;
4617 vect2 = dr_chain[2];
ebfd146a 4618 /* Create interleaving stmt:
e1377713
ES
4619 low = VEC_PERM_EXPR <vect1, vect2,
4620 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4621 6, 7, nelt + j + 2, ...}> */
4622 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
0d0e4a03
JJ
4623 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4624 vect2, perm3_mask_high);
ebfd146a 4625 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
e1377713 4626 (*result_chain)[j] = data_ref;
ebfd146a 4627 }
e1377713
ES
4628 }
4629 else
4630 {
4631 /* If length is not equal to 3 then only power of 2 is supported. */
4632 gcc_assert (exact_log2 (length) != -1);
4633
4634 for (i = 0, n = nelt / 2; i < n; i++)
4635 {
4636 sel[i * 2] = i;
4637 sel[i * 2 + 1] = i + nelt;
4638 }
557be5a8 4639 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
e1377713
ES
4640
4641 for (i = 0; i < nelt; i++)
4642 sel[i] += nelt / 2;
557be5a8 4643 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
e1377713
ES
4644
4645 for (i = 0, n = log_length; i < n; i++)
4646 {
4647 for (j = 0; j < length/2; j++)
4648 {
4649 vect1 = dr_chain[j];
4650 vect2 = dr_chain[j+length/2];
4651
4652 /* Create interleaving stmt:
4653 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4654 ...}> */
4655 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
0d0e4a03
JJ
4656 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4657 vect2, perm_mask_high);
e1377713
ES
4658 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4659 (*result_chain)[2*j] = high;
4660
4661 /* Create interleaving stmt:
4662 low = VEC_PERM_EXPR <vect1, vect2,
4663 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4664 ...}> */
4665 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
0d0e4a03
JJ
4666 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4667 vect2, perm_mask_low);
e1377713
ES
4668 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4669 (*result_chain)[2*j+1] = low;
4670 }
4671 memcpy (dr_chain.address (), result_chain->address (),
4672 length * sizeof (tree));
4673 }
ebfd146a 4674 }
ebfd146a
IR
4675}
4676
4677/* Function vect_setup_realignment
b8698a0f 4678
ebfd146a
IR
4679 This function is called when vectorizing an unaligned load using
4680 the dr_explicit_realign[_optimized] scheme.
4681 This function generates the following code at the loop prolog:
4682
4683 p = initial_addr;
4684 x msq_init = *(floor(p)); # prolog load
b8698a0f 4685 realignment_token = call target_builtin;
ebfd146a
IR
4686 loop:
4687 x msq = phi (msq_init, ---)
4688
b8698a0f 4689 The stmts marked with x are generated only for the case of
ebfd146a
IR
4690 dr_explicit_realign_optimized.
4691
b8698a0f 4692 The code above sets up a new (vector) pointer, pointing to the first
ebfd146a
IR
4693 location accessed by STMT, and a "floor-aligned" load using that pointer.
4694 It also generates code to compute the "realignment-token" (if the relevant
4695 target hook was defined), and creates a phi-node at the loop-header bb
4696 whose arguments are the result of the prolog-load (created by this
4697 function) and the result of a load that takes place in the loop (to be
4698 created by the caller to this function).
4699
4700 For the case of dr_explicit_realign_optimized:
b8698a0f 4701 The caller to this function uses the phi-result (msq) to create the
ebfd146a
IR
4702 realignment code inside the loop, and sets up the missing phi argument,
4703 as follows:
b8698a0f 4704 loop:
ebfd146a
IR
4705 msq = phi (msq_init, lsq)
4706 lsq = *(floor(p')); # load in loop
4707 result = realign_load (msq, lsq, realignment_token);
4708
4709 For the case of dr_explicit_realign:
4710 loop:
4711 msq = *(floor(p)); # load in loop
4712 p' = p + (VS-1);
4713 lsq = *(floor(p')); # load in loop
4714 result = realign_load (msq, lsq, realignment_token);
4715
4716 Input:
4717 STMT - (scalar) load stmt to be vectorized. This load accesses
4718 a memory location that may be unaligned.
 4719 GSI - place where new code is to be inserted.
4720 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
b8698a0f
L
4721 is used.
4722
ebfd146a
IR
4723 Output:
4724 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4725 target hook, if defined.
4726 Return value - the result of the loop-header phi node. */
4727
4728tree
4729vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4730 tree *realignment_token,
4731 enum dr_alignment_support alignment_support_scheme,
4732 tree init_addr,
4733 struct loop **at_loop)
4734{
4735 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4736 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4737 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
20ede5c6 4738 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
69f11a13
IR
4739 struct loop *loop = NULL;
4740 edge pe = NULL;
ebfd146a
IR
4741 tree scalar_dest = gimple_assign_lhs (stmt);
4742 tree vec_dest;
4743 gimple inc;
4744 tree ptr;
4745 tree data_ref;
ebfd146a
IR
4746 basic_block new_bb;
4747 tree msq_init = NULL_TREE;
4748 tree new_temp;
538dd0b7 4749 gphi *phi_stmt;
ebfd146a
IR
4750 tree msq = NULL_TREE;
4751 gimple_seq stmts = NULL;
4752 bool inv_p;
4753 bool compute_in_loop = false;
69f11a13 4754 bool nested_in_vect_loop = false;
ebfd146a 4755 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
69f11a13
IR
4756 struct loop *loop_for_initial_load = NULL;
4757
4758 if (loop_vinfo)
4759 {
4760 loop = LOOP_VINFO_LOOP (loop_vinfo);
4761 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4762 }
ebfd146a
IR
4763
4764 gcc_assert (alignment_support_scheme == dr_explicit_realign
4765 || alignment_support_scheme == dr_explicit_realign_optimized);
4766
4767 /* We need to generate three things:
4768 1. the misalignment computation
4769 2. the extra vector load (for the optimized realignment scheme).
4770 3. the phi node for the two vectors from which the realignment is
ff802fa1 4771 done (for the optimized realignment scheme). */
ebfd146a
IR
4772
4773 /* 1. Determine where to generate the misalignment computation.
4774
4775 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4776 calculation will be generated by this function, outside the loop (in the
4777 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4778 caller, inside the loop.
4779
4780 Background: If the misalignment remains fixed throughout the iterations of
4781 the loop, then both realignment schemes are applicable, and also the
4782 misalignment computation can be done outside LOOP. This is because we are
4783 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4784 are a multiple of VS (the Vector Size), and therefore the misalignment in
4785 different vectorized LOOP iterations is always the same.
4786 The problem arises only if the memory access is in an inner-loop nested
4787 inside LOOP, which is now being vectorized using outer-loop vectorization.
4788 This is the only case when the misalignment of the memory access may not
4789 remain fixed throughout the iterations of the inner-loop (as explained in
4790 detail in vect_supportable_dr_alignment). In this case, not only is the
4791 optimized realignment scheme not applicable, but also the misalignment
4792 computation (and generation of the realignment token that is passed to
4793 REALIGN_LOAD) have to be done inside the loop.
4794
4795 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4796 or not, which in turn determines if the misalignment is computed inside
4797 the inner-loop, or outside LOOP. */
4798
69f11a13 4799 if (init_addr != NULL_TREE || !loop_vinfo)
ebfd146a
IR
4800 {
4801 compute_in_loop = true;
4802 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4803 }
4804
4805
4806 /* 2. Determine where to generate the extra vector load.
4807
4808 For the optimized realignment scheme, instead of generating two vector
4809 loads in each iteration, we generate a single extra vector load in the
4810 preheader of the loop, and in each iteration reuse the result of the
4811 vector load from the previous iteration. In case the memory access is in
4812 an inner-loop nested inside LOOP, which is now being vectorized using
4813 outer-loop vectorization, we need to determine whether this initial vector
4814 load should be generated at the preheader of the inner-loop, or can be
4815 generated at the preheader of LOOP. If the memory access has no evolution
4816 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4817 to be generated inside LOOP (in the preheader of the inner-loop). */
4818
4819 if (nested_in_vect_loop)
4820 {
4821 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4822 bool invariant_in_outerloop =
4823 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4824 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4825 }
4826 else
4827 loop_for_initial_load = loop;
4828 if (at_loop)
4829 *at_loop = loop_for_initial_load;
4830
69f11a13
IR
4831 if (loop_for_initial_load)
4832 pe = loop_preheader_edge (loop_for_initial_load);
4833
ebfd146a
IR
4834 /* 3. For the case of the optimized realignment, create the first vector
4835 load at the loop preheader. */
4836
4837 if (alignment_support_scheme == dr_explicit_realign_optimized)
4838 {
4839 /* Create msq_init = *(floor(p1)) in the loop preheader */
538dd0b7 4840 gassign *new_stmt;
ebfd146a
IR
4841
4842 gcc_assert (!compute_in_loop);
ebfd146a 4843 vec_dest = vect_create_destination_var (scalar_dest, vectype);
920e8172
RS
4844 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4845 NULL_TREE, &init_addr, NULL, &inc,
4846 true, &inv_p);
b89dfa17
RB
4847 if (TREE_CODE (ptr) == SSA_NAME)
4848 new_temp = copy_ssa_name (ptr);
4849 else
4850 new_temp = make_ssa_name (TREE_TYPE (ptr));
0d0e4a03
JJ
4851 new_stmt = gimple_build_assign
4852 (new_temp, BIT_AND_EXPR, ptr,
75421dcd
RG
4853 build_int_cst (TREE_TYPE (ptr),
4854 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
75421dcd
RG
4855 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4856 gcc_assert (!new_bb);
20ede5c6
RG
4857 data_ref
4858 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4859 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
ebfd146a
IR
4860 new_stmt = gimple_build_assign (vec_dest, data_ref);
4861 new_temp = make_ssa_name (vec_dest, new_stmt);
4862 gimple_assign_set_lhs (new_stmt, new_temp);
69f11a13
IR
4863 if (pe)
4864 {
4865 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4866 gcc_assert (!new_bb);
4867 }
4868 else
4869 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4870
ebfd146a
IR
4871 msq_init = gimple_assign_lhs (new_stmt);
4872 }
4873
4874 /* 4. Create realignment token using a target builtin, if available.
4875 It is done either inside the containing loop, or before LOOP (as
4876 determined above). */
4877
4878 if (targetm.vectorize.builtin_mask_for_load)
4879 {
538dd0b7 4880 gcall *new_stmt;
ebfd146a
IR
4881 tree builtin_decl;
4882
 4883 /* Compute INIT_ADDR - the initial address accessed by this memref. */
69f11a13 4884 if (!init_addr)
ebfd146a
IR
4885 {
4886 /* Generate the INIT_ADDR computation outside LOOP. */
4887 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4888 NULL_TREE, loop);
69f11a13
IR
4889 if (loop)
4890 {
4891 pe = loop_preheader_edge (loop);
4892 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4893 gcc_assert (!new_bb);
4894 }
4895 else
4896 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
ebfd146a
IR
4897 }
4898
4899 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4900 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4901 vec_dest =
4902 vect_create_destination_var (scalar_dest,
4903 gimple_call_return_type (new_stmt));
4904 new_temp = make_ssa_name (vec_dest, new_stmt);
4905 gimple_call_set_lhs (new_stmt, new_temp);
4906
4907 if (compute_in_loop)
4908 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4909 else
4910 {
4911 /* Generate the misalignment computation outside LOOP. */
4912 pe = loop_preheader_edge (loop);
4913 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4914 gcc_assert (!new_bb);
4915 }
4916
4917 *realignment_token = gimple_call_lhs (new_stmt);
4918
4919 /* The result of the CALL_EXPR to this builtin is determined from
4920 the value of the parameter and no global variables are touched
4921 which makes the builtin a "const" function. Requiring the
4922 builtin to have the "const" attribute makes it unnecessary
4923 to call mark_call_clobbered. */
4924 gcc_assert (TREE_READONLY (builtin_decl));
4925 }
4926
4927 if (alignment_support_scheme == dr_explicit_realign)
4928 return msq;
4929
4930 gcc_assert (!compute_in_loop);
4931 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4932
4933
4934 /* 5. Create msq = phi <msq_init, lsq> in loop */
4935
4936 pe = loop_preheader_edge (containing_loop);
4937 vec_dest = vect_create_destination_var (scalar_dest, vectype);
b731b390 4938 msq = make_ssa_name (vec_dest);
ebfd146a 4939 phi_stmt = create_phi_node (msq, containing_loop->header);
9e227d60 4940 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
ebfd146a
IR
4941
4942 return msq;
4943}
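/* Illustrative sketch, not part of the original file: a scalar model of the
   code that vect_setup_realignment arranges for.  The floor-aligned loads
   and REALIGN_LOAD are modelled on plain byte arrays; a real target encodes
   the byte selection in the realignment token produced by its
   builtin_mask_for_load hook, and in the optimized variant the MSQ of the
   next iteration is simply the LSQ of the previous one.  The names toy_*
   and VS are made up, and a 16-byte vector size is assumed.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#include <stdint.h>
#include <string.h>

#define VS 16  /* vector size in bytes */

/* Model of msq = *(floor (p)): load the aligned vector that contains P.  */
static void
toy_floor_load (const uint8_t *p, uint8_t vec[VS])
{
  const uint8_t *aligned
    = (const uint8_t *) ((uintptr_t) p & -(uintptr_t) VS);
  memcpy (vec, aligned, VS);
}

/* Model of result = REALIGN_LOAD <msq, lsq, token>: select the VS bytes
   starting MISALIGN bytes into the concatenation MSQ || LSQ.  */
static void
toy_realign_load (const uint8_t msq[VS], const uint8_t lsq[VS],
                  unsigned misalign, uint8_t result[VS])
{
  for (unsigned i = 0; i < VS; i++)
    result[i] = i + misalign < VS ? msq[i + misalign]
                                  : lsq[i + misalign - VS];
}

/* One unaligned vector load at P, done the way the scheme above does it:
   msq = *(floor (p)); lsq = *(floor (p + VS - 1)); realign the pair.  */
static void
toy_unaligned_load (const uint8_t *p, uint8_t result[VS])
{
  uint8_t msq[VS], lsq[VS];
  toy_floor_load (p, msq);
  toy_floor_load (p + VS - 1, lsq);
  toy_realign_load (msq, lsq, (unsigned) ((uintptr_t) p % VS), result);
}
#endif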
4944
4945
0d0293ac 4946/* Function vect_grouped_load_supported.
ebfd146a 4947
e2c83630 4948 Returns TRUE if the required load permutations (extract even/odd, or the 3-way shuffles for COUNT == 3) are supported,
ebfd146a
IR
4949 and FALSE otherwise. */
4950
4951bool
0d0293ac 4952vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
ebfd146a 4953{
ef4bddc2 4954 machine_mode mode = TYPE_MODE (vectype);
ebfd146a 4955
2c23db6d
ES
4956 /* vect_permute_load_chain requires the group size to be equal to 3 or
4957 be a power of two. */
4958 if (count != 3 && exact_log2 (count) == -1)
b602d918 4959 {
73fbfcad 4960 if (dump_enabled_p ())
78c60e3d 4961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2c23db6d
ES
4962 "the size of the group of accesses"
4963 " is not a power of 2 or not equal to 3\n");
b602d918
RS
4964 return false;
4965 }
4966
e2c83630
RH
4967 /* Check that the permutation is supported. */
4968 if (VECTOR_MODE_P (mode))
4969 {
2c23db6d 4970 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
e2c83630 4971 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
ebfd146a 4972
2c23db6d 4973 if (count == 3)
e2c83630 4974 {
2c23db6d
ES
4975 unsigned int k;
4976 for (k = 0; k < 3; k++)
4977 {
4978 for (i = 0; i < nelt; i++)
4979 if (3 * i + k < 2 * nelt)
4980 sel[i] = 3 * i + k;
4981 else
4982 sel[i] = 0;
4983 if (!can_vec_perm_p (mode, false, sel))
4984 {
4985 if (dump_enabled_p ())
4986 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4987 "shuffle of 3 loads is not supported by"
4988 " target\n");
21c0a521 4989 return false;
2c23db6d
ES
4990 }
4991 for (i = 0, j = 0; i < nelt; i++)
4992 if (3 * i + k < 2 * nelt)
4993 sel[i] = i;
4994 else
4995 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
4996 if (!can_vec_perm_p (mode, false, sel))
4997 {
4998 if (dump_enabled_p ())
4999 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5000 "shuffle of 3 loads is not supported by"
5001 " target\n");
5002 return false;
5003 }
5004 }
5005 return true;
5006 }
5007 else
5008 {
 5009 /* If count is not equal to 3, then only a power of 2 is supported. */
5010 gcc_assert (exact_log2 (count) != -1);
e2c83630 5011 for (i = 0; i < nelt; i++)
2c23db6d 5012 sel[i] = i * 2;
e2c83630 5013 if (can_vec_perm_p (mode, false, sel))
2c23db6d
ES
5014 {
5015 for (i = 0; i < nelt; i++)
5016 sel[i] = i * 2 + 1;
5017 if (can_vec_perm_p (mode, false, sel))
5018 return true;
5019 }
5020 }
e2c83630 5021 }
ebfd146a 5022
73fbfcad 5023 if (dump_enabled_p ())
78c60e3d 5024 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2c23db6d 5025 "extract even/odd not supported by target\n");
a6b3dfde 5026 return false;
ebfd146a
IR
5027}
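/* Illustrative sketch, not part of the original file: the two permutation
   masks that the COUNT == 3 case above asks the target about, computed for
   NELT == 8.  The first mask gathers every third element (plus an offset K)
   from two concatenated vectors; the second merges that partial result with
   the remaining elements of the third vector.  The real check is done
   through can_vec_perm_p; NELT and this driver are made up.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#include <stdio.h>

#define NELT 8

int
main (void)
{
  unsigned char sel[NELT];

  for (unsigned k = 0; k < 3; k++)
    {
      unsigned i, j;

      /* First shuffle: take elements K, K+3, K+6, ... of v0 || v1.  */
      for (i = 0; i < NELT; i++)
        sel[i] = 3 * i + k < 2 * NELT ? 3 * i + k : 0;
      printf ("k=%u mask1:", k);
      for (i = 0; i < NELT; i++)
        printf (" %d", sel[i]);

      /* Second shuffle: keep the gathered prefix, fill the tail from v2.  */
      for (i = 0, j = 0; i < NELT; i++)
        sel[i] = 3 * i + k < 2 * NELT ? i : NELT + ((NELT + k) % 3) + 3 * j++;
      printf ("  mask2:");
      for (i = 0; i < NELT; i++)
        printf (" %d", sel[i]);
      printf ("\n");
    }
  /* For k == 0 this prints mask1 = {0 3 6 9 12 15 0 0} and
     mask2 = {0 1 2 3 4 5 10 13}, i.e. the masks that extract
     elements 0, 3, 6, ... of a 3-way interleaved group.  */
  return 0;
}
#endif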
5028
272c6793
RS
5029/* Return TRUE if vec_load_lanes is available for COUNT vectors of
5030 type VECTYPE. */
5031
5032bool
5033vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5034{
5035 return vect_lanes_optab_supported_p ("vec_load_lanes",
5036 vec_load_lanes_optab,
5037 vectype, count);
5038}
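/* Illustrative sketch, not part of the original file: what a load-lanes
   operation does when the target provides it, modelled on scalar arrays.
   A single vec_load_lanes of COUNT vectors reads COUNT * NELT consecutive
   elements and de-interleaves them by the group stride, so no separate
   permutation statements are needed.  The names toy_load_lanes, COUNT and
   NELT are made up for the illustration.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#define NELT 4
#define COUNT 3

static void
toy_load_lanes (const int *p, int lanes[COUNT][NELT])
{
  /* Element I of lane K is the (I * COUNT + K)-th element in memory.  */
  for (int k = 0; k < COUNT; k++)
    for (int i = 0; i < NELT; i++)
      lanes[k][i] = p[i * COUNT + k];
}
#endif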
ebfd146a
IR
5039
5040/* Function vect_permute_load_chain.
5041
5042 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
2c23db6d
ES
5043 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5044 the input data correctly. Return the final references for loads in
5045 RESULT_CHAIN.
ebfd146a
IR
5046
5047 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5048 The input is 4 vectors each containing 8 elements. We assign a number to each
 5049 element; the input sequence is:
5050
5051 1st vec: 0 1 2 3 4 5 6 7
5052 2nd vec: 8 9 10 11 12 13 14 15
b8698a0f 5053 3rd vec: 16 17 18 19 20 21 22 23
ebfd146a
IR
5054 4th vec: 24 25 26 27 28 29 30 31
5055
5056 The output sequence should be:
5057
5058 1st vec: 0 4 8 12 16 20 24 28
5059 2nd vec: 1 5 9 13 17 21 25 29
b8698a0f 5060 3rd vec: 2 6 10 14 18 22 26 30
ebfd146a
IR
5061 4th vec: 3 7 11 15 19 23 27 31
5062
5063 i.e., the first output vector should contain the first elements of each
5064 interleaving group, etc.
5065
ff802fa1
IR
5066 We use extract_even/odd instructions to create such output. The input of
5067 each extract_even/odd operation is two vectors
b8698a0f
L
5068 1st vec 2nd vec
5069 0 1 2 3 4 5 6 7
ebfd146a 5070
ff802fa1 5071 and the output is the vector of extracted even/odd elements. The output of
ebfd146a
IR
5072 extract_even will be: 0 2 4 6
5073 and of extract_odd: 1 3 5 7
5074
b8698a0f 5075
ff802fa1
IR
5076 The permutation is done in log LENGTH stages. In each stage extract_even
5077 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5078 their order. In our example,
ebfd146a
IR
5079
5080 E1: extract_even (1st vec, 2nd vec)
5081 E2: extract_odd (1st vec, 2nd vec)
5082 E3: extract_even (3rd vec, 4th vec)
5083 E4: extract_odd (3rd vec, 4th vec)
5084
5085 The output for the first stage will be:
5086
5087 E1: 0 2 4 6 8 10 12 14
5088 E2: 1 3 5 7 9 11 13 15
b8698a0f 5089 E3: 16 18 20 22 24 26 28 30
ebfd146a
IR
5090 E4: 17 19 21 23 25 27 29 31
5091
5092 In order to proceed and create the correct sequence for the next stage (or
b8698a0f
L
5093 for the correct output, if the second stage is the last one, as in our
 5094 example), we first put the output of the extract_even operation and then the
ebfd146a
IR
5095 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5096 The input for the second stage is:
5097
5098 1st vec (E1): 0 2 4 6 8 10 12 14
b8698a0f
L
5099 2nd vec (E3): 16 18 20 22 24 26 28 30
5100 3rd vec (E2): 1 3 5 7 9 11 13 15
ebfd146a
IR
5101 4th vec (E4): 17 19 21 23 25 27 29 31
5102
5103 The output of the second stage:
5104
5105 E1: 0 4 8 12 16 20 24 28
5106 E2: 2 6 10 14 18 22 26 30
5107 E3: 1 5 9 13 17 21 25 29
5108 E4: 3 7 11 15 19 23 27 31
5109
5110 And RESULT_CHAIN after reordering:
5111
5112 1st vec (E1): 0 4 8 12 16 20 24 28
5113 2nd vec (E3): 1 5 9 13 17 21 25 29
b8698a0f 5114 3rd vec (E2): 2 6 10 14 18 22 26 30
ebfd146a
IR
5115 4th vec (E4): 3 7 11 15 19 23 27 31. */
5116
b602d918 5117static void
9771b263 5118vect_permute_load_chain (vec<tree> dr_chain,
b8698a0f 5119 unsigned int length,
ebfd146a
IR
5120 gimple stmt,
5121 gimple_stmt_iterator *gsi,
9771b263 5122 vec<tree> *result_chain)
ebfd146a 5123{
83d5977e 5124 tree data_ref, first_vect, second_vect;
e2c83630 5125 tree perm_mask_even, perm_mask_odd;
2c23db6d 5126 tree perm3_mask_low, perm3_mask_high;
ebfd146a
IR
5127 gimple perm_stmt;
5128 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
e2c83630
RH
5129 unsigned int i, j, log_length = exact_log2 (length);
5130 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5131 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
ebfd146a 5132
3f292312
JJ
5133 result_chain->quick_grow (length);
5134 memcpy (result_chain->address (), dr_chain.address (),
5135 length * sizeof (tree));
e2c83630 5136
2c23db6d 5137 if (length == 3)
ebfd146a 5138 {
2c23db6d 5139 unsigned int k;
ebfd146a 5140
2c23db6d
ES
5141 for (k = 0; k < 3; k++)
5142 {
5143 for (i = 0; i < nelt; i++)
5144 if (3 * i + k < 2 * nelt)
5145 sel[i] = 3 * i + k;
5146 else
5147 sel[i] = 0;
557be5a8 5148 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
2c23db6d
ES
5149
5150 for (i = 0, j = 0; i < nelt; i++)
5151 if (3 * i + k < 2 * nelt)
5152 sel[i] = i;
5153 else
5154 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5155
557be5a8 5156 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
2c23db6d
ES
5157
5158 first_vect = dr_chain[0];
5159 second_vect = dr_chain[1];
5160
5161 /* Create interleaving stmt (low part of):
 5162 low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5163 ...}> */
f598c55c 5164 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
0d0e4a03
JJ
5165 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5166 second_vect, perm3_mask_low);
ebfd146a 5167 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
b8698a0f 5168
2c23db6d
ES
5169 /* Create interleaving stmt (high part of):
 5170 high = VEC_PERM_EXPR <first_vect, second_vect,
 5171 perm3_mask_high> */
5172 first_vect = data_ref;
5173 second_vect = dr_chain[2];
f598c55c 5174 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
0d0e4a03
JJ
5175 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5176 second_vect, perm3_mask_high);
ebfd146a 5177 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2c23db6d 5178 (*result_chain)[k] = data_ref;
ebfd146a 5179 }
ebfd146a 5180 }
2c23db6d
ES
5181 else
5182 {
 5183 /* If length is not equal to 3, then only a power of 2 is supported. */
5184 gcc_assert (exact_log2 (length) != -1);
5185
5186 for (i = 0; i < nelt; ++i)
5187 sel[i] = i * 2;
557be5a8 5188 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
2c23db6d
ES
5189
5190 for (i = 0; i < nelt; ++i)
5191 sel[i] = i * 2 + 1;
557be5a8 5192 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
ebfd146a 5193
2c23db6d
ES
5194 for (i = 0; i < log_length; i++)
5195 {
5196 for (j = 0; j < length; j += 2)
5197 {
5198 first_vect = dr_chain[j];
5199 second_vect = dr_chain[j+1];
5200
5201 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5202 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
0d0e4a03
JJ
5203 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5204 first_vect, second_vect,
5205 perm_mask_even);
2c23db6d
ES
5206 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5207 (*result_chain)[j/2] = data_ref;
5208
5209 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5210 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
0d0e4a03
JJ
5211 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5212 first_vect, second_vect,
5213 perm_mask_odd);
2c23db6d
ES
5214 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5215 (*result_chain)[j/2+length/2] = data_ref;
5216 }
5217 memcpy (dr_chain.address (), result_chain->address (),
5218 length * sizeof (tree));
5219 }
5220 }
5221}
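/* Illustrative sketch, not part of the original file: a scalar re-run of
   the power-of-2 case of vect_permute_load_chain, reproducing the worked
   example in the function comment (input vectors 0..31, output 0 4 8 ... /
   1 5 9 ... / 2 6 ... / 3 7 ...).  The names toy_extract, LENGTH and NELT
   are made up for the illustration.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#include <stdio.h>
#include <string.h>

#define NELT 8
#define LENGTH 4   /* must be a power of two for this model */

/* Model of VEC_PERM_EXPR <v1, v2, {odd, odd+2, odd+4, ...}>, i.e.
   extract_even (ODD == 0) or extract_odd (ODD == 1) of V1 || V2.  */
static void
toy_extract (const int v1[NELT], const int v2[NELT], int odd, int out[NELT])
{
  for (int i = 0; i < NELT; i++)
    {
      int idx = 2 * i + odd;               /* index into v1 || v2 */
      out[i] = idx < NELT ? v1[idx] : v2[idx - NELT];
    }
}

int
main (void)
{
  int chain[LENGTH][NELT], result[LENGTH][NELT];

  /* The input of the comment above: vector J holds J*NELT .. J*NELT+NELT-1. */
  for (int j = 0; j < LENGTH; j++)
    for (int i = 0; i < NELT; i++)
      chain[j][i] = j * NELT + i;

  /* log2 (LENGTH) stages of extract_even/extract_odd, exactly as in
     vect_permute_load_chain: evens go to the first half of RESULT,
     odds to the second half, then RESULT is copied back to CHAIN.  */
  for (int s = LENGTH; s > 1; s /= 2)
    {
      for (int j = 0; j < LENGTH; j += 2)
        {
          toy_extract (chain[j], chain[j + 1], 0, result[j / 2]);
          toy_extract (chain[j], chain[j + 1], 1, result[j / 2 + LENGTH / 2]);
        }
      memcpy (chain, result, sizeof chain);
    }

  /* Prints 0 4 8 ... 28 / 1 5 ... 29 / 2 6 ... 30 / 3 7 ... 31.  */
  for (int j = 0; j < LENGTH; j++)
    {
      for (int i = 0; i < NELT; i++)
        printf (" %2d", chain[j][i]);
      printf ("\n");
    }
  return 0;
}
#endif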
ebfd146a 5222
f7917029
ES
5223/* Function vect_shift_permute_load_chain.
5224
 5225 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate a
 5226 sequence of stmts to reorder the input data accordingly.
 5227 Return the final references for loads in RESULT_CHAIN.
 5228 Return true if successful, false otherwise.
5229
5230 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5231 The input is 3 vectors each containing 8 elements. We assign a
 5232 number to each element; the input sequence is:
5233
5234 1st vec: 0 1 2 3 4 5 6 7
5235 2nd vec: 8 9 10 11 12 13 14 15
5236 3rd vec: 16 17 18 19 20 21 22 23
5237
5238 The output sequence should be:
5239
5240 1st vec: 0 3 6 9 12 15 18 21
5241 2nd vec: 1 4 7 10 13 16 19 22
5242 3rd vec: 2 5 8 11 14 17 20 23
5243
5244 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5245
 5246 First we shuffle all 3 vectors to get the correct element order:
5247
5248 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5249 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5250 3rd vec: (16 19 22) (17 20 23) (18 21)
5251
 5252 Next we unite and shift the vectors 3 times:
5253
5254 1st step:
5255 shift right by 6 the concatenation of:
5256 "1st vec" and "2nd vec"
5257 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5258 "2nd vec" and "3rd vec"
5259 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5260 "3rd vec" and "1st vec"
5261 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5262 | New vectors |
5263
 5264 So that now the new vectors are:
5265
5266 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5267 2nd vec: (10 13) (16 19 22) (17 20 23)
5268 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5269
5270 2nd step:
5271 shift right by 5 the concatenation of:
5272 "1st vec" and "3rd vec"
5273 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5274 "2nd vec" and "1st vec"
5275 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5276 "3rd vec" and "2nd vec"
5277 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5278 | New vectors |
5279
 5280 So that now the new vectors are:
5281
5282 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5283 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5284 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5285
5286 3rd step:
5287 shift right by 5 the concatenation of:
5288 "1st vec" and "1st vec"
5289 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5290 shift right by 3 the concatenation of:
5291 "2nd vec" and "2nd vec"
5292 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5293 | New vectors |
5294
5295 So that now all vectors are READY:
5296 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5297 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5298 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5299
 5300 This algorithm is faster than the one in vect_permute_load_chain if:
 5301 1. "shift of a concatenation" is faster than a general permutation.
 5302 This is usually so.
 5303 2. The TARGET machine can't execute vector instructions in parallel.
 5304 This is because each step of the algorithm depends on the previous one.
 5305 The algorithm in vect_permute_load_chain is much more parallel.
5306
5307 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5308*/
5309
5310static bool
5311vect_shift_permute_load_chain (vec<tree> dr_chain,
5312 unsigned int length,
5313 gimple stmt,
5314 gimple_stmt_iterator *gsi,
5315 vec<tree> *result_chain)
5316{
5317 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5318 tree perm2_mask1, perm2_mask2, perm3_mask;
5319 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5320 gimple perm_stmt;
5321
5322 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5323 unsigned int i;
5324 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5325 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5326 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5327 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5328
5329 result_chain->quick_grow (length);
5330 memcpy (result_chain->address (), dr_chain.address (),
5331 length * sizeof (tree));
5332
af4c011e 5333 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
f7917029 5334 {
af4c011e 5335 unsigned int j, log_length = exact_log2 (length);
f7917029
ES
5336 for (i = 0; i < nelt / 2; ++i)
5337 sel[i] = i * 2;
5338 for (i = 0; i < nelt / 2; ++i)
5339 sel[nelt / 2 + i] = i * 2 + 1;
5340 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5341 {
5342 if (dump_enabled_p ())
5343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5344 "shuffle of 2 fields structure is not \
5345 supported by target\n");
5346 return false;
5347 }
557be5a8 5348 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5349
5350 for (i = 0; i < nelt / 2; ++i)
5351 sel[i] = i * 2 + 1;
5352 for (i = 0; i < nelt / 2; ++i)
5353 sel[nelt / 2 + i] = i * 2;
5354 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5355 {
5356 if (dump_enabled_p ())
5357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5358 "shuffle of 2 fields structure is not \
5359 supported by target\n");
5360 return false;
5361 }
557be5a8 5362 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5363
5364 /* Generating permutation constant to shift all elements.
5365 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5366 for (i = 0; i < nelt; i++)
5367 sel[i] = nelt / 2 + i;
5368 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5369 {
5370 if (dump_enabled_p ())
5371 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5372 "shift permutation is not supported by target\n");
5373 return false;
5374 }
557be5a8 5375 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5376
 5377 /* Generating permutation constant to select a vector from two.
5378 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5379 for (i = 0; i < nelt / 2; i++)
5380 sel[i] = i;
5381 for (i = nelt / 2; i < nelt; i++)
5382 sel[i] = nelt + i;
5383 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5384 {
5385 if (dump_enabled_p ())
5386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5387 "select is not supported by target\n");
5388 return false;
5389 }
557be5a8 5390 select_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029 5391
af4c011e
ES
5392 for (i = 0; i < log_length; i++)
5393 {
5394 for (j = 0; j < length; j += 2)
5395 {
5396 first_vect = dr_chain[j];
5397 second_vect = dr_chain[j + 1];
5398
5399 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
0d0e4a03
JJ
5400 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5401 first_vect, first_vect,
5402 perm2_mask1);
af4c011e
ES
5403 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5404 vect[0] = data_ref;
5405
5406 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
0d0e4a03
JJ
5407 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5408 second_vect, second_vect,
5409 perm2_mask2);
af4c011e
ES
5410 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5411 vect[1] = data_ref;
f7917029 5412
af4c011e 5413 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
0d0e4a03
JJ
5414 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5415 vect[0], vect[1], shift1_mask);
af4c011e
ES
5416 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5417 (*result_chain)[j/2 + length/2] = data_ref;
5418
5419 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
0d0e4a03
JJ
5420 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5421 vect[0], vect[1], select_mask);
af4c011e
ES
5422 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5423 (*result_chain)[j/2] = data_ref;
5424 }
5425 memcpy (dr_chain.address (), result_chain->address (),
5426 length * sizeof (tree));
5427 }
f7917029
ES
5428 return true;
5429 }
5430 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5431 {
5432 unsigned int k = 0, l = 0;
5433
 5434 /* Generating permutation constant to get all elements in the right order.
5435 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5436 for (i = 0; i < nelt; i++)
5437 {
5438 if (3 * k + (l % 3) >= nelt)
5439 {
5440 k = 0;
5441 l += (3 - (nelt % 3));
5442 }
5443 sel[i] = 3 * k + (l % 3);
5444 k++;
5445 }
5446 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5447 {
5448 if (dump_enabled_p ())
5449 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5450 "shuffle of 3 fields structure is not \
5451 supported by target\n");
5452 return false;
5453 }
557be5a8 5454 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5455
5456 /* Generating permutation constant to shift all elements.
5457 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5458 for (i = 0; i < nelt; i++)
5459 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5460 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5461 {
5462 if (dump_enabled_p ())
5463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5464 "shift permutation is not supported by target\n");
5465 return false;
5466 }
557be5a8 5467 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5468
5469 /* Generating permutation constant to shift all elements.
5470 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5471 for (i = 0; i < nelt; i++)
5472 sel[i] = 2 * (nelt / 3) + 1 + i;
5473 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5474 {
5475 if (dump_enabled_p ())
5476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5477 "shift permutation is not supported by target\n");
5478 return false;
5479 }
557be5a8 5480 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5481
5482 /* Generating permutation constant to shift all elements.
5483 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5484 for (i = 0; i < nelt; i++)
5485 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5486 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5487 {
5488 if (dump_enabled_p ())
5489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5490 "shift permutation is not supported by target\n");
5491 return false;
5492 }
557be5a8 5493 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5494
5495 /* Generating permutation constant to shift all elements.
5496 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5497 for (i = 0; i < nelt; i++)
5498 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5499 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5500 {
5501 if (dump_enabled_p ())
5502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5503 "shift permutation is not supported by target\n");
5504 return false;
5505 }
557be5a8 5506 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
f7917029
ES
5507
5508 for (k = 0; k < 3; k++)
5509 {
f598c55c 5510 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
0d0e4a03
JJ
5511 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5512 dr_chain[k], dr_chain[k],
5513 perm3_mask);
f7917029
ES
5514 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5515 vect[k] = data_ref;
5516 }
5517
5518 for (k = 0; k < 3; k++)
5519 {
5520 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
0d0e4a03
JJ
5521 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5522 vect[k % 3], vect[(k + 1) % 3],
5523 shift1_mask);
f7917029
ES
5524 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5525 vect_shift[k] = data_ref;
5526 }
5527
5528 for (k = 0; k < 3; k++)
5529 {
5530 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
0d0e4a03
JJ
5531 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5532 vect_shift[(4 - k) % 3],
5533 vect_shift[(3 - k) % 3],
5534 shift2_mask);
f7917029
ES
5535 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5536 vect[k] = data_ref;
5537 }
5538
5539 (*result_chain)[3 - (nelt % 3)] = vect[2];
5540
5541 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
0d0e4a03
JJ
5542 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5543 vect[0], shift3_mask);
f7917029
ES
5544 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5545 (*result_chain)[nelt % 3] = data_ref;
5546
5547 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
0d0e4a03
JJ
5548 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5549 vect[1], shift4_mask);
f7917029
ES
5550 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5551 (*result_chain)[0] = data_ref;
5552 return true;
5553 }
5554 return false;
5555}
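/* Illustrative sketch, not part of the original file: the "shift of a
   concatenation" primitive that the three-vector algorithm above is built
   from.  A mask of NELT consecutive indices {S, S+1, ...} turns
   VEC_PERM_EXPR <a, b, mask> into "A || B shifted right by S elements";
   e.g. with NELT == 8 and S == 6 this is the first step of the worked
   example in the function comment.  The name toy_shift_concat and NELT
   are made up for the illustration.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#define NELT 8

/* Model of VEC_PERM_EXPR <a, b, {s, s + 1, ..., s + NELT - 1}>.  */
static void
toy_shift_concat (const int a[NELT], const int b[NELT], unsigned s,
                  int out[NELT])
{
  for (unsigned i = 0; i < NELT; i++)
    out[i] = s + i < NELT ? a[s + i] : b[s + i - NELT];
}
#endif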
5556
0d0293ac 5557/* Function vect_transform_grouped_load.
ebfd146a
IR
5558
5559 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
 5560 to perform their permutation and ascribe the resulting vectorized statements to
5561 the scalar statements.
5562*/
5563
b602d918 5564void
9771b263 5565vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
ebfd146a
IR
5566 gimple_stmt_iterator *gsi)
5567{
ef4bddc2 5568 machine_mode mode;
6e1aa848 5569 vec<tree> result_chain = vNULL;
ebfd146a 5570
b8698a0f
L
5571 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5572 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
ebfd146a 5573 vectors, that are ready for vector computation. */
9771b263 5574 result_chain.create (size);
f7917029
ES
5575
 5576 /* If the reassociation width for the vector type is 2 or greater, the target
 5577 machine can execute 2 or more vector instructions in parallel. Otherwise try
 5578 to get the chain for the load group using vect_shift_permute_load_chain. */
5579 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5580 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
f101d09c 5581 || exact_log2 (size) != -1
f7917029
ES
5582 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5583 gsi, &result_chain))
5584 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
0d0293ac 5585 vect_record_grouped_load_vectors (stmt, result_chain);
9771b263 5586 result_chain.release ();
272c6793
RS
5587}
5588
0d0293ac 5589/* RESULT_CHAIN contains the output of a group of grouped loads that were
272c6793
RS
5590 generated as part of the vectorization of STMT. Assign the statement
5591 for each vector to the associated scalar statement. */
5592
5593void
9771b263 5594vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
272c6793 5595{
e14c1050 5596 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
272c6793
RS
5597 gimple next_stmt, new_stmt;
5598 unsigned int i, gap_count;
5599 tree tmp_data_ref;
ebfd146a 5600
b8698a0f
L
5601 /* Put a permuted data-ref in the VECTORIZED_STMT field.
 5602 Since we scan the chain starting from its first node, their order
ebfd146a
IR
 5603 corresponds to the order of data-refs in RESULT_CHAIN. */
5604 next_stmt = first_stmt;
5605 gap_count = 1;
9771b263 5606 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
ebfd146a
IR
5607 {
5608 if (!next_stmt)
5609 break;
5610
ff802fa1
IR
 5611 /* Skip the gaps. Loads created for the gaps will be removed by the dead
5612 code elimination pass later. No need to check for the first stmt in
ebfd146a 5613 the group, since it always exists.
e14c1050
IR
5614 GROUP_GAP is the number of steps in elements from the previous
5615 access (if there is no gap GROUP_GAP is 1). We skip loads that
ff802fa1 5616 correspond to the gaps. */
b8698a0f 5617 if (next_stmt != first_stmt
e14c1050 5618 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
ebfd146a
IR
5619 {
5620 gap_count++;
5621 continue;
5622 }
5623
5624 while (next_stmt)
5625 {
5626 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5627 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5628 copies, and we put the new vector statement in the first available
5629 RELATED_STMT. */
5630 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5631 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5632 else
5633 {
e14c1050 5634 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
ebfd146a
IR
5635 {
5636 gimple prev_stmt =
5637 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5638 gimple rel_stmt =
5639 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5640 while (rel_stmt)
5641 {
5642 prev_stmt = rel_stmt;
b8698a0f 5643 rel_stmt =
ebfd146a
IR
5644 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5645 }
5646
b8698a0f 5647 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
ebfd146a
IR
5648 new_stmt;
5649 }
5650 }
5651
e14c1050 5652 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
ebfd146a
IR
5653 gap_count = 1;
5654 /* If NEXT_STMT accesses the same DR as the previous statement,
5655 put the same TMP_DATA_REF as its vectorized statement; otherwise
5656 get the next data-ref from RESULT_CHAIN. */
e14c1050 5657 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
ebfd146a
IR
5658 break;
5659 }
5660 }
ebfd146a
IR
5661}
5662
5663/* Function vect_force_dr_alignment_p.
5664
 5665 Returns whether DECL can be forced to be aligned on an ALIGNMENT-bit
 5666 boundary. */
5667
b8698a0f 5668bool
ebfd146a
IR
5669vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5670{
5671 if (TREE_CODE (decl) != VAR_DECL)
5672 return false;
5673
428f0c67
JH
5674 if (decl_in_symtab_p (decl)
5675 && !symtab_node::get (decl)->can_increase_alignment_p ())
6192fa79
JH
5676 return false;
5677
ebfd146a
IR
5678 if (TREE_STATIC (decl))
5679 return (alignment <= MAX_OFILE_ALIGNMENT);
5680 else
5681 return (alignment <= MAX_STACK_ALIGNMENT);
5682}
5683
ebfd146a 5684
720f5239
IR
5685/* Return whether the data reference DR is supported with respect to its
5686 alignment.
5687 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
 5688 if it is aligned, i.e., check if it is possible to vectorize it with different
ebfd146a
IR
5689 alignment. */
5690
5691enum dr_alignment_support
720f5239
IR
5692vect_supportable_dr_alignment (struct data_reference *dr,
5693 bool check_aligned_accesses)
ebfd146a
IR
5694{
5695 gimple stmt = DR_STMT (dr);
5696 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5697 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
ef4bddc2 5698 machine_mode mode = TYPE_MODE (vectype);
a70d6342
IR
5699 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5700 struct loop *vect_loop = NULL;
5701 bool nested_in_vect_loop = false;
ebfd146a 5702
720f5239 5703 if (aligned_access_p (dr) && !check_aligned_accesses)
ebfd146a
IR
5704 return dr_aligned;
5705
5ce9450f
JJ
5706 /* For now assume all conditional loads/stores support unaligned
5707 access without any special code. */
5708 if (is_gimple_call (stmt)
5709 && gimple_call_internal_p (stmt)
5710 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5711 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5712 return dr_unaligned_supported;
5713
69f11a13
IR
5714 if (loop_vinfo)
5715 {
5716 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5717 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5718 }
a70d6342 5719
ebfd146a
IR
5720 /* Possibly unaligned access. */
5721
5722 /* We can choose between using the implicit realignment scheme (generating
5723 a misaligned_move stmt) and the explicit realignment scheme (generating
ff802fa1
IR
5724 aligned loads with a REALIGN_LOAD). There are two variants to the
5725 explicit realignment scheme: optimized, and unoptimized.
ebfd146a
IR
5726 We can optimize the realignment only if the step between consecutive
5727 vector loads is equal to the vector size. Since the vector memory
5728 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5729 is guaranteed that the misalignment amount remains the same throughout the
5730 execution of the vectorized loop. Therefore, we can create the
5731 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5732 at the loop preheader.
5733
5734 However, in the case of outer-loop vectorization, when vectorizing a
5735 memory access in the inner-loop nested within the LOOP that is now being
5736 vectorized, while it is guaranteed that the misalignment of the
5737 vectorized memory access will remain the same in different outer-loop
5738 iterations, it is *not* guaranteed that is will remain the same throughout
5739 the execution of the inner-loop. This is because the inner-loop advances
5740 with the original scalar step (and not in steps of VS). If the inner-loop
5741 step happens to be a multiple of VS, then the misalignment remains fixed
5742 and we can use the optimized realignment scheme. For example:
5743
5744 for (i=0; i<N; i++)
5745 for (j=0; j<M; j++)
5746 s += a[i+j];
5747
5748 When vectorizing the i-loop in the above example, the step between
5749 consecutive vector loads is 1, and so the misalignment does not remain
5750 fixed across the execution of the inner-loop, and the realignment cannot
5751 be optimized (as illustrated in the following pseudo vectorized loop):
5752
5753 for (i=0; i<N; i+=4)
5754 for (j=0; j<M; j++){
5755 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5756 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5757 // (assuming that we start from an aligned address).
5758 }
5759
5760 We therefore have to use the unoptimized realignment scheme:
5761
5762 for (i=0; i<N; i+=4)
5763 for (j=k; j<M; j+=4)
5764 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5765 // that the misalignment of the initial address is
5766 // 0).
5767
5768 The loop can then be vectorized as follows:
5769
5770 for (k=0; k<4; k++){
5771 rt = get_realignment_token (&vp[k]);
5772 for (i=0; i<N; i+=4){
5773 v1 = vp[i+k];
5774 for (j=k; j<M; j+=4){
5775 v2 = vp[i+j+VS-1];
5776 va = REALIGN_LOAD <v1,v2,rt>;
5777 vs += va;
5778 v1 = v2;
5779 }
5780 }
5781 } */
5782
5783 if (DR_IS_READ (dr))
5784 {
0601d0cf
RE
5785 bool is_packed = false;
5786 tree type = (TREE_TYPE (DR_REF (dr)));
5787
947131ba 5788 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
ebfd146a
IR
5789 && (!targetm.vectorize.builtin_mask_for_load
5790 || targetm.vectorize.builtin_mask_for_load ()))
5791 {
5792 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
69f11a13
IR
5793 if ((nested_in_vect_loop
5794 && (TREE_INT_CST_LOW (DR_STEP (dr))
5795 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5796 || !loop_vinfo)
ebfd146a
IR
5797 return dr_explicit_realign;
5798 else
5799 return dr_explicit_realign_optimized;
5800 }
0601d0cf 5801 if (!known_alignment_for_access_p (dr))
4c9bcf89 5802 is_packed = not_size_aligned (DR_REF (dr));
b8698a0f 5803
afb119be
RB
5804 if ((TYPE_USER_ALIGN (type) && !is_packed)
5805 || targetm.vectorize.
5806 support_vector_misalignment (mode, type,
5807 DR_MISALIGNMENT (dr), is_packed))
ebfd146a
IR
5808 /* Can't software pipeline the loads, but can at least do them. */
5809 return dr_unaligned_supported;
5810 }
0601d0cf
RE
5811 else
5812 {
5813 bool is_packed = false;
5814 tree type = (TREE_TYPE (DR_REF (dr)));
ebfd146a 5815
0601d0cf 5816 if (!known_alignment_for_access_p (dr))
4c9bcf89 5817 is_packed = not_size_aligned (DR_REF (dr));
b8698a0f 5818
afb119be
RB
5819 if ((TYPE_USER_ALIGN (type) && !is_packed)
5820 || targetm.vectorize.
5821 support_vector_misalignment (mode, type,
5822 DR_MISALIGNMENT (dr), is_packed))
0601d0cf
RE
5823 return dr_unaligned_supported;
5824 }
b8698a0f 5825
ebfd146a
IR
5826 /* Unsupported. */
5827 return dr_unaligned_unsupported;
5828}
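/* Illustrative sketch, not part of the original file: why the big comment
   in vect_supportable_dr_alignment rejects the optimized realignment scheme
   for the nested-loop example.  With a 16-byte vector of four ints, the
   misalignment of &a[i + j] depends on J, so it is not invariant across the
   inner loop.  The array, the loop bounds and VS are made up for the
   illustration.  */

#if 0  /* Example only; never compiled as part of tree-vect-data-refs.c.  */
#include <stdio.h>
#include <stdint.h>

#define VS 16  /* vector size in bytes */

int
main (void)
{
  static int a[64] __attribute__ ((aligned (VS)));

  /* The scalar loop nest from the comment: for (i) for (j) s += a[i+j];
     The inner loop advances the access by one int, not by VS bytes, so
     the misalignment of &a[i + j] cycles 0, 4, 8, 12, 0, ... with J.  */
  for (int i = 0; i < 8; i += 4)
    for (int j = 0; j < 8; j++)
      printf ("i=%d j=%d misalign=%d\n", i, j,
              (int) ((uintptr_t) &a[i + j] % VS));
  return 0;
}
#endif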