gcc/tree-vect-data-refs.c
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "target.h"
31 #include "basic-block.h"
32 #include "gimple-pretty-print.h"
33 #include "gimple.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "ssa-iterators.h"
37 #include "tree-ssanames.h"
38 #include "tree-ssa-loop.h"
39 #include "dumpfile.h"
40 #include "cfgloop.h"
41 #include "tree-chrec.h"
42 #include "tree-scalar-evolution.h"
43 #include "tree-vectorizer.h"
44 #include "diagnostic-core.h"
45 /* Need to include rtl.h, expr.h, etc. for optabs. */
46 #include "expr.h"
47 #include "optabs.h"
48
49 /* Return true if load- or store-lanes optab OPTAB is implemented for
50 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
51
52 static bool
53 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
54 tree vectype, unsigned HOST_WIDE_INT count)
55 {
56 enum machine_mode mode, array_mode;
57 bool limit_p;
58
59 mode = TYPE_MODE (vectype);
60 limit_p = !targetm.array_mode_supported_p (mode, count);
61 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
62 MODE_INT, limit_p);
63
64 if (array_mode == BLKmode)
65 {
66 if (dump_enabled_p ())
67 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
68 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
69 GET_MODE_NAME (mode), count);
70 return false;
71 }
72
73 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
74 {
75 if (dump_enabled_p ())
76 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
77 "cannot use %s<%s><%s>\n", name,
78 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
79 return false;
80 }
81
82 if (dump_enabled_p ())
83 dump_printf_loc (MSG_NOTE, vect_location,
84 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
85 GET_MODE_NAME (mode));
86
87 return true;
88 }
89
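/* Illustrative note, not part of the original source: as a hedged example,
   consider COUNT == 3 with a 128-bit vector mode such as V4SI. The array
   mode requested above is then a 384-bit MODE_INT mode. On a target that
   implements TARGET_ARRAY_MODE_SUPPORTED_P for its load/store-lanes
   instructions (e.g. an AArch64-style ld3/st3) such a mode exists and the
   vec_load_lanes/vec_store_lanes convert optab is queried for
   <array mode, V4SImode>. On targets without array modes, mode_for_size is
   limited to MAX_FIXED_MODE_SIZE and typically returns BLKmode, so the
   lanes optab is reported as unsupported. */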
90
91 /* Return the smallest scalar type used in STMT.
92 This is used to determine the vectype of the stmt. We generally set the
93 vectype according to the type of the result (lhs). For stmts whose
94 result-type is different from the type of the arguments (e.g., demotion,
95 promotion), vectype will be reset appropriately (later). Note that we have
96 to visit the smallest datatype in this function, because that determines the
97 VF. If the smallest datatype in the loop is present only as the rhs of a
98 promotion operation, we would miss it.
99 Such a case, where a variable of this datatype does not appear in the lhs
100 anywhere in the loop, can only occur if it's an invariant: e.g.:
101 'int_x = (int) short_inv', which we'd expect to have been optimized away by
102 invariant motion. However, we cannot rely on invariant motion to always
103 take invariants out of the loop, and so in the case of promotion we also
104 have to check the rhs.
105 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
106 types. */
107
108 tree
109 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
110 HOST_WIDE_INT *rhs_size_unit)
111 {
112 tree scalar_type = gimple_expr_type (stmt);
113 HOST_WIDE_INT lhs, rhs;
114
115 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
116
117 if (is_gimple_assign (stmt)
118 && (gimple_assign_cast_p (stmt)
119 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
120 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
121 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
122 {
123 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
124
125 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
126 if (rhs < lhs)
127 scalar_type = rhs_type;
128 }
129
130 *lhs_size_unit = lhs;
131 *rhs_size_unit = rhs;
132 return scalar_type;
133 }
134
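/* Illustrative example, not part of the original source: for a loop body
   containing the promotion

     short_s = in[i];
     int_x = (int) short_s;

   vect_get_smallest_scalar_type on the conversion statement sets
   *LHS_SIZE_UNIT to 4 and *RHS_SIZE_UNIT to 2 and returns the rhs type
   'short int', so the vectorization factor is derived from the 2-byte
   element rather than from the 4-byte result. */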
135
136 /* Check if the data references pointed to by DR_I and DR_J are the same or
137 belong to the same interleaving group.  Return FALSE if the drs are
138 different, otherwise return TRUE. */
139
140 static bool
141 vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
142 {
143 gimple stmt_i = DR_STMT (dr_i);
144 gimple stmt_j = DR_STMT (dr_j);
145
146 if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
147 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
148 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
149 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
150 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
151 return true;
152 else
153 return false;
154 }
155
156 /* If address ranges represented by DDR_I and DDR_J are equal,
157 return TRUE, otherwise return FALSE. */
158
159 static bool
160 vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
161 {
162 if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
163 && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
164 || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
165 && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
166 return true;
167 else
168 return false;
169 }
170
171 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
172 tested at run-time. Return TRUE if DDR was successfully inserted.
173 Return false if versioning is not supported. */
174
175 static bool
176 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
177 {
178 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
179
180 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
181 return false;
182
183 if (dump_enabled_p ())
184 {
185 dump_printf_loc (MSG_NOTE, vect_location,
186 "mark for run-time aliasing test between ");
187 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
188 dump_printf (MSG_NOTE, " and ");
189 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
190 dump_printf (MSG_NOTE, "\n");
191 }
192
193 if (optimize_loop_nest_for_size_p (loop))
194 {
195 if (dump_enabled_p ())
196 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
197 "versioning not supported when optimizing"
198 " for size.\n");
199 return false;
200 }
201
202 /* FORNOW: We don't support versioning with outer-loop vectorization. */
203 if (loop->inner)
204 {
205 if (dump_enabled_p ())
206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
207 "versioning not yet supported for outer-loops.\n");
208 return false;
209 }
210
211 /* FORNOW: We don't support creating runtime alias tests for non-constant
212 step. */
213 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
214 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
215 {
216 if (dump_enabled_p ())
217 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
218 "versioning not yet supported for non-constant "
219 "step\n");
220 return false;
221 }
222
223 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
224 return true;
225 }
226
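/* Illustrative example, not part of the original source: for a loop such as

     void
     f (int *a, int *b, int n)
     {
       int i;
       for (i = 0; i < n; i++)
         a[i] = b[i] + 1;
     }

   the compiler usually cannot prove that a and b do not overlap, so the
   ddr for the pair is pushed onto LOOP_VINFO_MAY_ALIAS_DDRS above and the
   vectorized loop is guarded by a run-time check that the two address
   ranges are disjoint, subject to the
   PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS limit. */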
227
228 /* Function vect_analyze_data_ref_dependence.
229
230 Return TRUE if there (might) exist a dependence between a memory-reference
231 DRA and a memory-reference DRB. When the dependence can instead be checked
232 at run time by versioning for alias, return FALSE. Adjust *MAX_VF according
233 to the data dependence. */
234
235 static bool
236 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
237 loop_vec_info loop_vinfo, int *max_vf)
238 {
239 unsigned int i;
240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
241 struct data_reference *dra = DDR_A (ddr);
242 struct data_reference *drb = DDR_B (ddr);
243 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
244 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
245 lambda_vector dist_v;
246 unsigned int loop_depth;
247
248 /* In loop analysis all data references should be vectorizable. */
249 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
250 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
251 gcc_unreachable ();
252
253 /* Independent data accesses. */
254 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
255 return false;
256
257 if (dra == drb
258 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
259 return false;
260
261 /* Unknown data dependence. */
262 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
263 {
264 /* If user asserted safelen consecutive iterations can be
265 executed concurrently, assume independence. */
266 if (loop->safelen >= 2)
267 {
268 if (loop->safelen < *max_vf)
269 *max_vf = loop->safelen;
270 return false;
271 }
272
273 if (STMT_VINFO_GATHER_P (stmtinfo_a)
274 || STMT_VINFO_GATHER_P (stmtinfo_b))
275 {
276 if (dump_enabled_p ())
277 {
278 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
279 "versioning for alias not supported for: "
280 "can't determine dependence between ");
281 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
282 DR_REF (dra));
283 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
284 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
285 DR_REF (drb));
286 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
287 }
288 return true;
289 }
290
291 if (dump_enabled_p ())
292 {
293 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
294 "versioning for alias required: "
295 "can't determine dependence between ");
296 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
297 DR_REF (dra));
298 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
299 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
300 DR_REF (drb));
301 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
302 }
303
304 /* Add to list of ddrs that need to be tested at run-time. */
305 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
306 }
307
308 /* Known data dependence. */
309 if (DDR_NUM_DIST_VECTS (ddr) == 0)
310 {
311 /* If user asserted safelen consecutive iterations can be
312 executed concurrently, assume independence. */
313 if (loop->safelen >= 2)
314 {
315 if (loop->safelen < *max_vf)
316 *max_vf = loop->safelen;
317 return false;
318 }
319
320 if (STMT_VINFO_GATHER_P (stmtinfo_a)
321 || STMT_VINFO_GATHER_P (stmtinfo_b))
322 {
323 if (dump_enabled_p ())
324 {
325 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
326 "versioning for alias not supported for: "
327 "bad dist vector for ");
328 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
329 DR_REF (dra));
330 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
331 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
332 DR_REF (drb));
333 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
334 }
335 return true;
336 }
337
338 if (dump_enabled_p ())
339 {
340 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
341 "versioning for alias required: "
342 "bad dist vector for ");
343 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
344 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
345 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
346 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
347 }
348 /* Add to list of ddrs that need to be tested at run-time. */
349 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
350 }
351
352 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
353 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
354 {
355 int dist = dist_v[loop_depth];
356
357 if (dump_enabled_p ())
358 dump_printf_loc (MSG_NOTE, vect_location,
359 "dependence distance = %d.\n", dist);
360
361 if (dist == 0)
362 {
363 if (dump_enabled_p ())
364 {
365 dump_printf_loc (MSG_NOTE, vect_location,
366 "dependence distance == 0 between ");
367 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
368 dump_printf (MSG_NOTE, " and ");
369 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
370 dump_printf (MSG_NOTE, "\n");
371 }
372
373 /* When we perform grouped accesses and perform implicit CSE
374 by detecting equal accesses and doing disambiguation with
375 runtime alias tests like for
376 .. = a[i];
377 .. = a[i+1];
378 a[i] = ..;
379 a[i+1] = ..;
380 *p = ..;
381 .. = a[i];
382 .. = a[i+1];
383 where we will end up loading { a[i], a[i+1] } once, make
384 sure that inserting group loads before the first load and
385 stores after the last store will do the right thing. */
386 if ((STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
387 && GROUP_SAME_DR_STMT (stmtinfo_a))
388 || (STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)
389 && GROUP_SAME_DR_STMT (stmtinfo_b)))
390 {
391 gimple earlier_stmt;
392 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
393 if (DR_IS_WRITE
394 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
395 {
396 if (dump_enabled_p ())
397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
398 "READ_WRITE dependence in interleaving."
399 "\n");
400 return true;
401 }
402 }
403
404 continue;
405 }
406
407 if (dist > 0 && DDR_REVERSED_P (ddr))
408 {
409 /* If DDR_REVERSED_P the order of the data-refs in DDR was
410 reversed (to make distance vector positive), and the actual
411 distance is negative. */
412 if (dump_enabled_p ())
413 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
414 "dependence distance negative.\n");
415 continue;
416 }
417
418 if (abs (dist) >= 2
419 && abs (dist) < *max_vf)
420 {
421 /* The dependence distance requires reduction of the maximal
422 vectorization factor. */
423 *max_vf = abs (dist);
424 if (dump_enabled_p ())
425 dump_printf_loc (MSG_NOTE, vect_location,
426 "adjusting maximal vectorization factor to %i\n",
427 *max_vf);
428 }
429
430 if (abs (dist) >= *max_vf)
431 {
432 /* Dependence distance does not create dependence, as far as
433 vectorization is concerned, in this case. */
434 if (dump_enabled_p ())
435 dump_printf_loc (MSG_NOTE, vect_location,
436 "dependence distance >= VF.\n");
437 continue;
438 }
439
440 if (dump_enabled_p ())
441 {
442 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
443 "not vectorized, possible dependence "
444 "between data-refs ");
445 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
446 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
447 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
448 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
449 }
450
451 return true;
452 }
453
454 return false;
455 }
456
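/* Worked example, added for illustration only: in

     for (i = 0; i < n; i++)
       a[i + 4] = a[i] + 1;

   the read a[i] uses a value stored four iterations earlier, so the
   distance vector entry for the loop is 4. The code above then lowers
   *MAX_VF to 4, and since abs (dist) >= *max_vf afterwards, the dependence
   no longer constrains vectorization: with at most four elements per
   vector iteration, each vector of loads only reads elements written in
   earlier vector iterations. */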
457 /* Function vect_analyze_data_ref_dependences.
458
459 Examine all the data references in the loop, and make sure there do not
460 exist any data dependences between them. Set *MAX_VF according to
461 the maximum vectorization factor the data dependences allow. */
462
463 bool
464 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
465 {
466 unsigned int i;
467 struct data_dependence_relation *ddr;
468
469 if (dump_enabled_p ())
470 dump_printf_loc (MSG_NOTE, vect_location,
471 "=== vect_analyze_data_ref_dependences ===\n");
472
473 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
474 &LOOP_VINFO_DDRS (loop_vinfo),
475 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
476 return false;
477
478 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
479 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
480 return false;
481
482 return true;
483 }
484
485
486 /* Function vect_slp_analyze_data_ref_dependence.
487
488 Return TRUE if there (might) exist a dependence between a memory-reference
489 DRA and a memory-reference DRB that would prevent basic-block (SLP)
490 vectorization, otherwise return FALSE. Unlike the loop variant above,
491 there is no run-time alias versioning here and no *MAX_VF to adjust. */
492
493 static bool
494 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
495 {
496 struct data_reference *dra = DDR_A (ddr);
497 struct data_reference *drb = DDR_B (ddr);
498
499 /* We need to check dependences of statements marked as unvectorizable
500 as well; they can still prohibit vectorization. */
501
502 /* Independent data accesses. */
503 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
504 return false;
505
506 if (dra == drb)
507 return false;
508
509 /* Read-read is OK. */
510 if (DR_IS_READ (dra) && DR_IS_READ (drb))
511 return false;
512
513 /* If dra and drb are part of the same interleaving chain consider
514 them independent. */
515 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
516 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
517 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
518 return false;
519
520 /* Unknown data dependence. */
521 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
522 {
523 gimple earlier_stmt;
524
525 if (dump_enabled_p ())
526 {
527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
528 "can't determine dependence between ");
529 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
530 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
531 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
532 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
533 }
534
535 /* We do not vectorize basic blocks with write-write dependencies. */
536 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
537 return true;
538
539 /* Check that it's not a load-after-store dependence. */
540 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
541 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
542 return true;
543
544 return false;
545 }
546
547 if (dump_enabled_p ())
548 {
549 dump_printf_loc (MSG_NOTE, vect_location,
550 "determined dependence between ");
551 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
552 dump_printf (MSG_NOTE, " and ");
553 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
554 dump_printf (MSG_NOTE, "\n");
555 }
556
557 /* Do not vectorize basic blocks with write-write dependences. */
558 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
559 return true;
560
561 /* Check dependence between DRA and DRB for basic block vectorization.
562 If the accesses share same bases and offsets, we can compare their initial
563 constant offsets to decide whether they differ or not. In case of a read-
564 write dependence we check that the load is before the store to ensure that
565 vectorization will not change the order of the accesses. */
566
567 HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
568 gimple earlier_stmt;
569
570 /* Check that the data-refs have same bases and offsets. If not, we can't
571 determine if they are dependent. */
572 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
573 || !dr_equal_offsets_p (dra, drb))
574 return true;
575
576 /* Check the types. */
577 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
578 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
579
580 if (type_size_a != type_size_b
581 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
582 TREE_TYPE (DR_REF (drb))))
583 return true;
584
585 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
586 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
587
588 /* Two different locations - no dependence. */
589 if (init_a != init_b)
590 return false;
591
592 /* We have a read-write dependence. Check that the load is before the store.
593 When we vectorize basic blocks, vector load can be only before
594 corresponding scalar load, and vector store can be only after its
595 corresponding scalar store. So the order of the accesses is preserved in
596 case the load is before the store. */
597 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
598 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
599 return false;
600
601 return true;
602 }
603
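/* Illustrative example, not part of the original source: in straight-line
   (basic-block) code such as

     tmp = a[0];
     a[0] = tmp + x;

   both references share the same base, offset, type and DR_INIT, so the
   code above accepts the pair only because the earlier statement is the
   load; a store followed by a load from the same location, or two stores
   to it, makes the function return true and the block is not SLP
   vectorized. */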
604
605 /* Function vect_slp_analyze_data_ref_dependences.
606
607 Examine all the data references in the basic-block, and make sure there
608 do not exist any data dependences between them that would prevent
609 vectorization of the basic-block. */
610
611 bool
612 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
613 {
614 struct data_dependence_relation *ddr;
615 unsigned int i;
616
617 if (dump_enabled_p ())
618 dump_printf_loc (MSG_NOTE, vect_location,
619 "=== vect_slp_analyze_data_ref_dependences ===\n");
620
621 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
622 &BB_VINFO_DDRS (bb_vinfo),
623 vNULL, true))
624 return false;
625
626 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
627 if (vect_slp_analyze_data_ref_dependence (ddr))
628 return false;
629
630 return true;
631 }
632
633
634 /* Function vect_compute_data_ref_alignment
635
636 Compute the misalignment of the data reference DR.
637
638 Output:
639 1. If during the misalignment computation it is found that the data reference
640 cannot be vectorized then false is returned.
641 2. DR_MISALIGNMENT (DR) is defined.
642
643 FOR NOW: No analysis is actually performed. Misalignment is calculated
644 only for trivial cases. TODO. */
645
646 static bool
647 vect_compute_data_ref_alignment (struct data_reference *dr)
648 {
649 gimple stmt = DR_STMT (dr);
650 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
651 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
652 struct loop *loop = NULL;
653 tree ref = DR_REF (dr);
654 tree vectype;
655 tree base, base_addr;
656 bool base_aligned;
657 tree misalign;
658 tree aligned_to, alignment;
659
660 if (dump_enabled_p ())
661 dump_printf_loc (MSG_NOTE, vect_location,
662 "vect_compute_data_ref_alignment:\n");
663
664 if (loop_vinfo)
665 loop = LOOP_VINFO_LOOP (loop_vinfo);
666
667 /* Initialize misalignment to unknown. */
668 SET_DR_MISALIGNMENT (dr, -1);
669
670 /* Strided loads perform only component accesses, misalignment information
671 is irrelevant for them. */
672 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
673 return true;
674
675 misalign = DR_INIT (dr);
676 aligned_to = DR_ALIGNED_TO (dr);
677 base_addr = DR_BASE_ADDRESS (dr);
678 vectype = STMT_VINFO_VECTYPE (stmt_info);
679
680 /* In case the dataref is in an inner-loop of the loop that is being
681 vectorized (LOOP), we use the base and misalignment information
682 relative to the outer-loop (LOOP). This is ok only if the misalignment
683 stays the same throughout the execution of the inner-loop, which is why
684 we have to check that the stride of the dataref in the inner-loop is a
685 multiple of the vector size. */
686 if (loop && nested_in_vect_loop_p (loop, stmt))
687 {
688 tree step = DR_STEP (dr);
689 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
690
691 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
692 {
693 if (dump_enabled_p ())
694 dump_printf_loc (MSG_NOTE, vect_location,
695 "inner step divides the vector-size.\n");
696 misalign = STMT_VINFO_DR_INIT (stmt_info);
697 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
698 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
699 }
700 else
701 {
702 if (dump_enabled_p ())
703 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
704 "inner step doesn't divide the vector-size.\n");
705 misalign = NULL_TREE;
706 }
707 }
708
709 /* Similarly, if we're doing basic-block vectorization, we can only use
710 base and misalignment information relative to an innermost loop if the
711 misalignment stays the same throughout the execution of the loop.
712 As above, this is the case if the stride of the dataref is a multiple
713 of the vector size. */
714 if (!loop)
715 {
716 tree step = DR_STEP (dr);
717 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
718
719 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
720 {
721 if (dump_enabled_p ())
722 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
723 "SLP: step doesn't divide the vector-size.\n");
724 misalign = NULL_TREE;
725 }
726 }
727
728 base = build_fold_indirect_ref (base_addr);
729 alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);
730
731 if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
732 || !misalign)
733 {
734 if (dump_enabled_p ())
735 {
736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
737 "Unknown alignment for access: ");
738 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base);
739 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
740 }
741 return true;
742 }
743
744 if ((DECL_P (base)
745 && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
746 alignment) >= 0)
747 || (TREE_CODE (base_addr) == SSA_NAME
748 && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
749 TREE_TYPE (base_addr)))),
750 alignment) >= 0)
751 || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
752 base_aligned = true;
753 else
754 base_aligned = false;
755
756 if (!base_aligned)
757 {
758 /* Do not change the alignment of global variables here if
759 flag_section_anchors is enabled as we already generated
760 RTL for other functions. Most global variables should
761 have been aligned during the IPA increase_alignment pass. */
762 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
763 || (TREE_STATIC (base) && flag_section_anchors))
764 {
765 if (dump_enabled_p ())
766 {
767 dump_printf_loc (MSG_NOTE, vect_location,
768 "can't force alignment of ref: ");
769 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
770 dump_printf (MSG_NOTE, "\n");
771 }
772 return true;
773 }
774
775 /* Force the alignment of the decl.
776 NOTE: This is the only change to the code we make during
777 the analysis phase, before deciding to vectorize the loop. */
778 if (dump_enabled_p ())
779 {
780 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
781 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
782 dump_printf (MSG_NOTE, "\n");
783 }
784
785 ((dataref_aux *)dr->aux)->base_decl = base;
786 ((dataref_aux *)dr->aux)->base_misaligned = true;
787 }
788
789 /* If this is a backward running DR then first access in the larger
790 vectype actually is N-1 elements before the address in the DR.
791 Adjust misalign accordingly. */
792 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
793 {
794 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
795 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
796 otherwise we wouldn't be here. */
797 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
798 /* PLUS because DR_STEP was negative. */
799 misalign = size_binop (PLUS_EXPR, misalign, offset);
800 }
801
802 /* Modulo alignment. */
803 misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
804
805 if (!host_integerp (misalign, 1))
806 {
807 /* Negative or overflowed misalignment value. */
808 if (dump_enabled_p ())
809 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
810 "unexpected misalign value\n");
811 return false;
812 }
813
814 SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
815
816 if (dump_enabled_p ())
817 {
818 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
819 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
820 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
821 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
822 }
823
824 return true;
825 }
826
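/* Worked example, added for illustration only, assuming a V4SI vectype
   whose required alignment is 16 bytes:

     int a[256] __attribute__ ((aligned (16)));
     ...
     for (i = 0; i < n; i++)
       sum += a[i + 1];

   For the read a[i + 1], DR_BASE_ADDRESS is &a, DR_INIT is 4 and the decl
   is known to be sufficiently aligned, so the computation above yields
   DR_MISALIGNMENT (dr) = 4 % 16 = 4 bytes. For a backward-running access
   (negative DR_STEP) the distance to the other end of the vector is added
   to the misalignment first, as the code above describes. */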
827
828 /* Function vect_compute_data_refs_alignment
829
830 Compute the misalignment of data references in the loop.
831 Return FALSE if a data reference is found that cannot be vectorized. */
832
833 static bool
834 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
835 bb_vec_info bb_vinfo)
836 {
837 vec<data_reference_p> datarefs;
838 struct data_reference *dr;
839 unsigned int i;
840
841 if (loop_vinfo)
842 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
843 else
844 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
845
846 FOR_EACH_VEC_ELT (datarefs, i, dr)
847 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
848 && !vect_compute_data_ref_alignment (dr))
849 {
850 if (bb_vinfo)
851 {
852 /* Mark unsupported statement as unvectorizable. */
853 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
854 continue;
855 }
856 else
857 return false;
858 }
859
860 return true;
861 }
862
863
864 /* Function vect_update_misalignment_for_peel
865
866 DR - the data reference whose misalignment is to be adjusted.
867 DR_PEEL - the data reference whose misalignment is being made
868 zero in the vector loop by the peel.
869 NPEEL - the number of iterations in the peel loop if the misalignment
870 of DR_PEEL is known at compile time. */
871
872 static void
873 vect_update_misalignment_for_peel (struct data_reference *dr,
874 struct data_reference *dr_peel, int npeel)
875 {
876 unsigned int i;
877 vec<dr_p> same_align_drs;
878 struct data_reference *current_dr;
879 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
880 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
881 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
882 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
883
884 /* For interleaved data accesses the step in the loop must be multiplied by
885 the size of the interleaving group. */
886 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
887 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
888 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
889 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
890
891 /* It can be assumed that the data refs with the same alignment as dr_peel
892 are aligned in the vector loop. */
893 same_align_drs
894 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
895 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
896 {
897 if (current_dr != dr)
898 continue;
899 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
900 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
901 SET_DR_MISALIGNMENT (dr, 0);
902 return;
903 }
904
905 if (known_alignment_for_access_p (dr)
906 && known_alignment_for_access_p (dr_peel))
907 {
908 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
909 int misal = DR_MISALIGNMENT (dr);
910 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
911 misal += negative ? -npeel * dr_size : npeel * dr_size;
912 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
913 SET_DR_MISALIGNMENT (dr, misal);
914 return;
915 }
916
917 if (dump_enabled_p ())
918 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
919 SET_DR_MISALIGNMENT (dr, -1);
920 }
921
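/* Worked example, added for illustration only: with a V4SI vectype
   (16-byte alignment, 4-byte elements) and positive steps, suppose
   DR_PEEL has misalignment 8 and DR has misalignment 12. Peeling
   npeel = (16 - 8) / 4 = 2 scalar iterations makes DR_PEEL aligned, and
   the update above gives

     misal = (12 + 2 * 4) & (16 - 1) = 4

   so DR is still misaligned by 4 bytes in the vector loop. */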
922
923 /* Function vect_verify_datarefs_alignment
924
925 Return TRUE if all data references in the loop can be
926 handled with respect to alignment. */
927
928 bool
929 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
930 {
931 vec<data_reference_p> datarefs;
932 struct data_reference *dr;
933 enum dr_alignment_support supportable_dr_alignment;
934 unsigned int i;
935
936 if (loop_vinfo)
937 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
938 else
939 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
940
941 FOR_EACH_VEC_ELT (datarefs, i, dr)
942 {
943 gimple stmt = DR_STMT (dr);
944 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
945
946 if (!STMT_VINFO_RELEVANT_P (stmt_info))
947 continue;
948
949 /* For interleaving, only the alignment of the first access matters.
950 Skip statements marked as not vectorizable. */
951 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
952 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
953 || !STMT_VINFO_VECTORIZABLE (stmt_info))
954 continue;
955
956 /* Strided loads perform only component accesses, alignment is
957 irrelevant for them. */
958 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
959 continue;
960
961 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
962 if (!supportable_dr_alignment)
963 {
964 if (dump_enabled_p ())
965 {
966 if (DR_IS_READ (dr))
967 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
968 "not vectorized: unsupported unaligned load.");
969 else
970 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
971 "not vectorized: unsupported unaligned "
972 "store.");
973
974 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
975 DR_REF (dr));
976 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
977 }
978 return false;
979 }
980 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
981 dump_printf_loc (MSG_NOTE, vect_location,
982 "Vectorizing an unaligned access.\n");
983 }
984 return true;
985 }
986
987 /* Given a memory reference EXP, return whether its alignment is less
988 than its size. */
989
990 static bool
991 not_size_aligned (tree exp)
992 {
993 if (!host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1))
994 return true;
995
996 return (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp)))
997 > get_object_alignment (exp));
998 }
999
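/* Illustrative example, not part of the original source: for

     struct __attribute__ ((packed)) s { char c; int i; } *p;
     ... = p->i;

   the int member is only guaranteed 1-byte alignment, so its known object
   alignment (8 bits) is smaller than its 32-bit size and not_size_aligned
   returns true; vector_alignment_reachable_p below then treats the access
   as packed when asking the target hook. */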
1000 /* Function vector_alignment_reachable_p
1001
1002 Return true if vector alignment for DR is reachable by peeling
1003 a few loop iterations. Return false otherwise. */
1004
1005 static bool
1006 vector_alignment_reachable_p (struct data_reference *dr)
1007 {
1008 gimple stmt = DR_STMT (dr);
1009 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1010 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1011
1012 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1013 {
1014 /* For interleaved access we peel only if the number of iterations in
1015 the prolog loop (VF - misalignment) is a multiple of the
1016 number of interleaved accesses. */
1017 int elem_size, mis_in_elements;
1018 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1019
1020 /* FORNOW: handle only known alignment. */
1021 if (!known_alignment_for_access_p (dr))
1022 return false;
1023
1024 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1025 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1026
1027 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
1028 return false;
1029 }
1030
1031 /* If misalignment is known at the compile time then allow peeling
1032 only if natural alignment is reachable through peeling. */
1033 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1034 {
1035 HOST_WIDE_INT elmsize =
1036 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1037 if (dump_enabled_p ())
1038 {
1039 dump_printf_loc (MSG_NOTE, vect_location,
1040 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1041 dump_printf (MSG_NOTE,
1042 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1043 }
1044 if (DR_MISALIGNMENT (dr) % elmsize)
1045 {
1046 if (dump_enabled_p ())
1047 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1048 "data size does not divide the misalignment.\n");
1049 return false;
1050 }
1051 }
1052
1053 if (!known_alignment_for_access_p (dr))
1054 {
1055 tree type = TREE_TYPE (DR_REF (dr));
1056 bool is_packed = not_size_aligned (DR_REF (dr));
1057 if (dump_enabled_p ())
1058 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1059 "Unknown misalignment, is_packed = %d\n",is_packed);
1060 if ((TYPE_USER_ALIGN (type) && !is_packed)
1061 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1062 return true;
1063 else
1064 return false;
1065 }
1066
1067 return true;
1068 }
1069
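/* Worked example, added for illustration only: with 4-byte elements and a
   known misalignment of 6 bytes, peeling whole scalar iterations can only
   change the misalignment in multiples of 4 bytes, so the target alignment
   is never reached and the "data size does not divide the misalignment"
   check above fails. A known misalignment of 8 bytes, by contrast, is
   removed by peeling two iterations. */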
1070
1071 /* Calculate the cost of the memory access represented by DR. */
1072
1073 static void
1074 vect_get_data_access_cost (struct data_reference *dr,
1075 unsigned int *inside_cost,
1076 unsigned int *outside_cost,
1077 stmt_vector_for_cost *body_cost_vec)
1078 {
1079 gimple stmt = DR_STMT (dr);
1080 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1081 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1083 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1084 int ncopies = vf / nunits;
1085
1086 if (DR_IS_READ (dr))
1087 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1088 NULL, body_cost_vec, false);
1089 else
1090 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1091
1092 if (dump_enabled_p ())
1093 dump_printf_loc (MSG_NOTE, vect_location,
1094 "vect_get_data_access_cost: inside_cost = %d, "
1095 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1096 }
1097
1098
1099 /* Insert DR into peeling hash table with NPEEL as key. */
1100
1101 static void
1102 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1103 int npeel)
1104 {
1105 struct _vect_peel_info elem, *slot;
1106 _vect_peel_info **new_slot;
1107 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1108
1109 elem.npeel = npeel;
1110 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo).find (&elem);
1111 if (slot)
1112 slot->count++;
1113 else
1114 {
1115 slot = XNEW (struct _vect_peel_info);
1116 slot->npeel = npeel;
1117 slot->dr = dr;
1118 slot->count = 1;
1119 new_slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo).find_slot (slot, INSERT);
1120 *new_slot = slot;
1121 }
1122
1123 if (!supportable_dr_alignment && unlimited_cost_model ())
1124 slot->count += VECT_MAX_COST;
1125 }
1126
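/* Illustrative note, not part of the original source: if two data
   references both become aligned by peeling 2 scalar iterations, the slot
   for npeel == 2 ends up with count == 2, while a separate slot is created
   for any other candidate peeling amount. When the misalignment of DR is
   not supportable and the unlimited cost model is in use, VECT_MAX_COST is
   added to the count so that the frequency-based choice in
   vect_peeling_hash_get_most_frequent strongly prefers peeling amounts
   that align such an access. */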
1127
1128 /* Traverse the peeling hash table to find the peeling option that aligns
1129 the maximum number of data accesses. */
1130
1131 int
1132 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1133 _vect_peel_extended_info *max)
1134 {
1135 vect_peel_info elem = *slot;
1136
1137 if (elem->count > max->peel_info.count
1138 || (elem->count == max->peel_info.count
1139 && max->peel_info.npeel > elem->npeel))
1140 {
1141 max->peel_info.npeel = elem->npeel;
1142 max->peel_info.count = elem->count;
1143 max->peel_info.dr = elem->dr;
1144 }
1145
1146 return 1;
1147 }
1148
1149
1150 /* Traverse peeling hash table and calculate cost for each peeling option.
1151 Find the one with the lowest cost. */
1152
1153 int
1154 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1155 _vect_peel_extended_info *min)
1156 {
1157 vect_peel_info elem = *slot;
1158 int save_misalignment, dummy;
1159 unsigned int inside_cost = 0, outside_cost = 0, i;
1160 gimple stmt = DR_STMT (elem->dr);
1161 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1162 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1163 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1164 struct data_reference *dr;
1165 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1166 int single_iter_cost;
1167
1168 prologue_cost_vec.create (2);
1169 body_cost_vec.create (2);
1170 epilogue_cost_vec.create (2);
1171
1172 FOR_EACH_VEC_ELT (datarefs, i, dr)
1173 {
1174 stmt = DR_STMT (dr);
1175 stmt_info = vinfo_for_stmt (stmt);
1176 /* For interleaving, only the alignment of the first access
1177 matters. */
1178 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1179 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1180 continue;
1181
1182 save_misalignment = DR_MISALIGNMENT (dr);
1183 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1184 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1185 &body_cost_vec);
1186 SET_DR_MISALIGNMENT (dr, save_misalignment);
1187 }
1188
1189 single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
1190 outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel,
1191 &dummy, single_iter_cost,
1192 &prologue_cost_vec,
1193 &epilogue_cost_vec);
1194
1195 /* Prologue and epilogue costs are added to the target model later.
1196 These costs depend only on the scalar iteration cost, the
1197 number of peeling iterations finally chosen, and the number of
1198 misaligned statements. So discard the information found here. */
1199 prologue_cost_vec.release ();
1200 epilogue_cost_vec.release ();
1201
1202 if (inside_cost < min->inside_cost
1203 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1204 {
1205 min->inside_cost = inside_cost;
1206 min->outside_cost = outside_cost;
1207 min->body_cost_vec.release ();
1208 min->body_cost_vec = body_cost_vec;
1209 min->peel_info.dr = elem->dr;
1210 min->peel_info.npeel = elem->npeel;
1211 }
1212 else
1213 body_cost_vec.release ();
1214
1215 return 1;
1216 }
1217
1218
1219 /* Choose best peeling option by traversing peeling hash table and either
1220 choosing an option with the lowest cost (if cost model is enabled) or the
1221 option that aligns as many accesses as possible. */
1222
1223 static struct data_reference *
1224 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1225 unsigned int *npeel,
1226 stmt_vector_for_cost *body_cost_vec)
1227 {
1228 struct _vect_peel_extended_info res;
1229
1230 res.peel_info.dr = NULL;
1231 res.body_cost_vec = stmt_vector_for_cost ();
1232
1233 if (!unlimited_cost_model ())
1234 {
1235 res.inside_cost = INT_MAX;
1236 res.outside_cost = INT_MAX;
1237 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1238 .traverse <_vect_peel_extended_info *,
1239 vect_peeling_hash_get_lowest_cost> (&res);
1240 }
1241 else
1242 {
1243 res.peel_info.count = 0;
1244 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1245 .traverse <_vect_peel_extended_info *,
1246 vect_peeling_hash_get_most_frequent> (&res);
1247 }
1248
1249 *npeel = res.peel_info.npeel;
1250 *body_cost_vec = res.body_cost_vec;
1251 return res.peel_info.dr;
1252 }
1253
1254
1255 /* Function vect_enhance_data_refs_alignment
1256
1257 This pass will use loop versioning and loop peeling in order to enhance
1258 the alignment of data references in the loop.
1259
1260 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1261 original loop is to be vectorized. Any other loops that are created by
1262 the transformations performed in this pass are not supposed to be
1263 vectorized. This restriction will be relaxed.
1264
1265 This pass will require a cost model to guide it whether to apply peeling
1266 or versioning or a combination of the two. For example, the scheme that
1267 Intel uses when given a loop with several memory accesses, is as follows:
1268 choose one memory access ('p') whose alignment you want to force by doing
1269 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1270 other accesses are not necessarily aligned, or (2) use loop versioning to
1271 generate one loop in which all accesses are aligned, and another loop in
1272 which only 'p' is necessarily aligned.
1273
1274 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1275 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1276 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1277
1278 Devising a cost model is the most critical aspect of this work. It will
1279 guide us on which access to peel for, whether to use loop versioning, how
1280 many versions to create, etc. The cost model will probably consist of
1281 generic considerations as well as target specific considerations (on
1282 powerpc for example, misaligned stores are more painful than misaligned
1283 loads).
1284
1285 Here are the general steps involved in alignment enhancements:
1286
1287 -- original loop, before alignment analysis:
1288 for (i=0; i<N; i++){
1289 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1290 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1291 }
1292
1293 -- After vect_compute_data_refs_alignment:
1294 for (i=0; i<N; i++){
1295 x = q[i]; # DR_MISALIGNMENT(q) = 3
1296 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1297 }
1298
1299 -- Possibility 1: we do loop versioning:
1300 if (p is aligned) {
1301 for (i=0; i<N; i++){ # loop 1A
1302 x = q[i]; # DR_MISALIGNMENT(q) = 3
1303 p[i] = y; # DR_MISALIGNMENT(p) = 0
1304 }
1305 }
1306 else {
1307 for (i=0; i<N; i++){ # loop 1B
1308 x = q[i]; # DR_MISALIGNMENT(q) = 3
1309 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1310 }
1311 }
1312
1313 -- Possibility 2: we do loop peeling:
1314 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1315 x = q[i];
1316 p[i] = y;
1317 }
1318 for (i = 3; i < N; i++){ # loop 2A
1319 x = q[i]; # DR_MISALIGNMENT(q) = 0
1320 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1321 }
1322
1323 -- Possibility 3: combination of loop peeling and versioning:
1324 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1325 x = q[i];
1326 p[i] = y;
1327 }
1328 if (p is aligned) {
1329 for (i = 3; i<N; i++){ # loop 3A
1330 x = q[i]; # DR_MISALIGNMENT(q) = 0
1331 p[i] = y; # DR_MISALIGNMENT(p) = 0
1332 }
1333 }
1334 else {
1335 for (i = 3; i<N; i++){ # loop 3B
1336 x = q[i]; # DR_MISALIGNMENT(q) = 0
1337 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1338 }
1339 }
1340
1341 These loops are later passed to loop_transform to be vectorized. The
1342 vectorizer will use the alignment information to guide the transformation
1343 (whether to generate regular loads/stores, or with special handling for
1344 misalignment). */
1345
1346 bool
1347 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1348 {
1349 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1350 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1351 enum dr_alignment_support supportable_dr_alignment;
1352 struct data_reference *dr0 = NULL, *first_store = NULL;
1353 struct data_reference *dr;
1354 unsigned int i, j;
1355 bool do_peeling = false;
1356 bool do_versioning = false;
1357 bool stat;
1358 gimple stmt;
1359 stmt_vec_info stmt_info;
1360 unsigned int npeel = 0;
1361 bool all_misalignments_unknown = true;
1362 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1363 unsigned possible_npeel_number = 1;
1364 tree vectype;
1365 unsigned int nelements, mis, same_align_drs_max = 0;
1366 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1367
1368 if (dump_enabled_p ())
1369 dump_printf_loc (MSG_NOTE, vect_location,
1370 "=== vect_enhance_data_refs_alignment ===\n");
1371
1372 /* While cost model enhancements are expected in the future, the high level
1373 view of the code at this time is as follows:
1374
1375 A) If there is a misaligned access then see if peeling to align
1376 this access can make all data references satisfy
1377 vect_supportable_dr_alignment. If so, update data structures
1378 as needed and return true.
1379
1380 B) If peeling wasn't possible and there is a data reference with an
1381 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1382 then see if loop versioning checks can be used to make all data
1383 references satisfy vect_supportable_dr_alignment. If so, update
1384 data structures as needed and return true.
1385
1386 C) If neither peeling nor versioning were successful then return false if
1387 any data reference does not satisfy vect_supportable_dr_alignment.
1388
1389 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1390
1391 Note, Possibility 3 above (which is peeling and versioning together) is not
1392 being done at this time. */
1393
1394 /* (1) Peeling to force alignment. */
1395
1396 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1397 Considerations:
1398 + How many accesses will become aligned due to the peeling
1399 - How many accesses will become unaligned due to the peeling,
1400 and the cost of misaligned accesses.
1401 - The cost of peeling (the extra runtime checks, the increase
1402 in code size). */
1403
1404 FOR_EACH_VEC_ELT (datarefs, i, dr)
1405 {
1406 stmt = DR_STMT (dr);
1407 stmt_info = vinfo_for_stmt (stmt);
1408
1409 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1410 continue;
1411
1412 /* For interleaving, only the alignment of the first access
1413 matters. */
1414 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1415 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1416 continue;
1417
1418 /* For invariant accesses there is nothing to enhance. */
1419 if (integer_zerop (DR_STEP (dr)))
1420 continue;
1421
1422 /* Strided loads perform only component accesses, alignment is
1423 irrelevant for them. */
1424 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1425 continue;
1426
1427 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1428 do_peeling = vector_alignment_reachable_p (dr);
1429 if (do_peeling)
1430 {
1431 if (known_alignment_for_access_p (dr))
1432 {
1433 unsigned int npeel_tmp;
1434 bool negative = tree_int_cst_compare (DR_STEP (dr),
1435 size_zero_node) < 0;
1436
1437 /* Save info about DR in the hash table. */
1438 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
1439 LOOP_VINFO_PEELING_HTAB (loop_vinfo).create (1);
1440
1441 vectype = STMT_VINFO_VECTYPE (stmt_info);
1442 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1443 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1444 TREE_TYPE (DR_REF (dr))));
1445 npeel_tmp = (negative
1446 ? (mis - nelements) : (nelements - mis))
1447 & (nelements - 1);
1448
1449 /* For multiple types, it is possible that the bigger type access
1450 will have more than one peeling option. E.g., a loop with two
1451 types: one of size (vector size / 4), and the other one of
1452 size (vector size / 8). The vectorization factor will be 8.
1453 If both accesses are misaligned by 3, the first one needs one
1454 scalar iteration to be aligned, and the second one needs 5.
1455 But the first one will also be aligned by peeling 5 scalar
1456 iterations, and in that case both accesses will be aligned.
1457 Hence, except for the immediate peeling amount, we also want
1458 to try adding a full vector size, as long as we don't exceed
1459 the vectorization factor.
1460 We do this automatically for the cost model, since we calculate
1461 the cost for every peeling option. */
1462 if (unlimited_cost_model ())
1463 possible_npeel_number = vf /nelements;
1464
1465 /* Handle the aligned case. We may decide to align some other
1466 access, making DR unaligned. */
1467 if (DR_MISALIGNMENT (dr) == 0)
1468 {
1469 npeel_tmp = 0;
1470 if (unlimited_cost_model ())
1471 possible_npeel_number++;
1472 }
1473
1474 for (j = 0; j < possible_npeel_number; j++)
1475 {
1476 gcc_assert (npeel_tmp <= vf);
1477 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1478 npeel_tmp += nelements;
1479 }
1480
1481 all_misalignments_unknown = false;
1482 /* Data-ref that was chosen for the case that all the
1483 misalignments are unknown is not relevant anymore, since we
1484 have a data-ref with known alignment. */
1485 dr0 = NULL;
1486 }
1487 else
1488 {
1489 /* If we don't know any misalignment values, we prefer
1490 peeling for the data-ref that has the maximum number of data-refs
1491 with the same alignment, unless the target prefers to align
1492 stores over loads. */
1493 if (all_misalignments_unknown)
1494 {
1495 unsigned same_align_drs
1496 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1497 if (!dr0
1498 || same_align_drs_max < same_align_drs)
1499 {
1500 same_align_drs_max = same_align_drs;
1501 dr0 = dr;
1502 }
1503 /* For data-refs with the same number of related
1504 accesses prefer the one where the misalign
1505 computation will be invariant in the outermost loop. */
1506 else if (same_align_drs_max == same_align_drs)
1507 {
1508 struct loop *ivloop0, *ivloop;
1509 ivloop0 = outermost_invariant_loop_for_expr
1510 (loop, DR_BASE_ADDRESS (dr0));
1511 ivloop = outermost_invariant_loop_for_expr
1512 (loop, DR_BASE_ADDRESS (dr));
1513 if ((ivloop && !ivloop0)
1514 || (ivloop && ivloop0
1515 && flow_loop_nested_p (ivloop, ivloop0)))
1516 dr0 = dr;
1517 }
1518
1519 if (!first_store && DR_IS_WRITE (dr))
1520 first_store = dr;
1521 }
1522
1523 /* If there are both known and unknown misaligned accesses in the
1524 loop, we choose peeling amount according to the known
1525 accesses. */
1526 if (!supportable_dr_alignment)
1527 {
1528 dr0 = dr;
1529 if (!first_store && DR_IS_WRITE (dr))
1530 first_store = dr;
1531 }
1532 }
1533 }
1534 else
1535 {
1536 if (!aligned_access_p (dr))
1537 {
1538 if (dump_enabled_p ())
1539 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1540 "vector alignment may not be reachable\n");
1541 break;
1542 }
1543 }
1544 }
1545
1546 /* Check if we can possibly peel the loop. */
1547 if (!vect_can_advance_ivs_p (loop_vinfo)
1548 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1549 do_peeling = false;
1550
1551 if (do_peeling && all_misalignments_unknown
1552 && vect_supportable_dr_alignment (dr0, false))
1553 {
1554
1555 /* Check whether the target prefers aligning stores over loads, i.e., whether
1556 misaligned stores are more expensive than misaligned loads (taking
1557 drs with the same alignment into account). */
1558 if (first_store && DR_IS_READ (dr0))
1559 {
1560 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1561 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1562 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1563 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1564 stmt_vector_for_cost dummy;
1565 dummy.create (2);
1566
1567 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1568 &dummy);
1569 vect_get_data_access_cost (first_store, &store_inside_cost,
1570 &store_outside_cost, &dummy);
1571
1572 dummy.release ();
1573
1574 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1575 aligning the load DR0). */
1576 load_inside_penalty = store_inside_cost;
1577 load_outside_penalty = store_outside_cost;
1578 for (i = 0;
1579 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1580 DR_STMT (first_store))).iterate (i, &dr);
1581 i++)
1582 if (DR_IS_READ (dr))
1583 {
1584 load_inside_penalty += load_inside_cost;
1585 load_outside_penalty += load_outside_cost;
1586 }
1587 else
1588 {
1589 load_inside_penalty += store_inside_cost;
1590 load_outside_penalty += store_outside_cost;
1591 }
1592
1593 /* Calculate the penalty for leaving DR0 unaligned (by
1594 aligning the FIRST_STORE). */
1595 store_inside_penalty = load_inside_cost;
1596 store_outside_penalty = load_outside_cost;
1597 for (i = 0;
1598 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1599 DR_STMT (dr0))).iterate (i, &dr);
1600 i++)
1601 if (DR_IS_READ (dr))
1602 {
1603 store_inside_penalty += load_inside_cost;
1604 store_outside_penalty += load_outside_cost;
1605 }
1606 else
1607 {
1608 store_inside_penalty += store_inside_cost;
1609 store_outside_penalty += store_outside_cost;
1610 }
1611
1612 if (load_inside_penalty > store_inside_penalty
1613 || (load_inside_penalty == store_inside_penalty
1614 && load_outside_penalty > store_outside_penalty))
1615 dr0 = first_store;
1616 }
1617
1618 /* In case there are only loads with different unknown misalignments, use
1619 peeling only if it may help to align other accesses in the loop. */
1620 if (!first_store
1621 && !STMT_VINFO_SAME_ALIGN_REFS (
1622 vinfo_for_stmt (DR_STMT (dr0))).length ()
1623 && vect_supportable_dr_alignment (dr0, false)
1624 != dr_unaligned_supported)
1625 do_peeling = false;
1626 }
1627
1628 if (do_peeling && !dr0)
1629 {
1630 /* Peeling is possible, but there is no data access that is not supported
1631 unless aligned. So we try to choose the best possible peeling. */
1632
1633 /* We should get here only if there are drs with known misalignment. */
1634 gcc_assert (!all_misalignments_unknown);
1635
1636 /* Choose the best peeling from the hash table. */
1637 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
1638 &body_cost_vec);
1639 if (!dr0 || !npeel)
1640 do_peeling = false;
1641 }
1642
1643 if (do_peeling)
1644 {
1645 stmt = DR_STMT (dr0);
1646 stmt_info = vinfo_for_stmt (stmt);
1647 vectype = STMT_VINFO_VECTYPE (stmt_info);
1648 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1649
1650 if (known_alignment_for_access_p (dr0))
1651 {
1652 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1653 size_zero_node) < 0;
1654 if (!npeel)
1655 {
1656 /* Since it's known at compile time, compute the number of
1657 iterations in the peeled loop (the peeling factor) for use in
1658 updating DR_MISALIGNMENT values. The peeling factor is the
1659 vectorization factor minus the misalignment as an element
1660 count. */
1661 mis = DR_MISALIGNMENT (dr0);
1662 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1663 npeel = ((negative ? mis - nelements : nelements - mis)
1664 & (nelements - 1));
1665 }
1666
1667 /* For interleaved data access every iteration accesses all the
1668 members of the group, therefore we divide the number of iterations
1669 by the group size. */
1670 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1671 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1672 npeel /= GROUP_SIZE (stmt_info);
1673
1674 if (dump_enabled_p ())
1675 dump_printf_loc (MSG_NOTE, vect_location,
1676 "Try peeling by %d\n", npeel);
1677 }
1678
1679 /* Ensure that all data refs can be vectorized after the peel. */
1680 FOR_EACH_VEC_ELT (datarefs, i, dr)
1681 {
1682 int save_misalignment;
1683
1684 if (dr == dr0)
1685 continue;
1686
1687 stmt = DR_STMT (dr);
1688 stmt_info = vinfo_for_stmt (stmt);
1689 /* For interleaving, only the alignment of the first access
1690 matters. */
1691 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1692 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1693 continue;
1694
1695 /* Strided loads perform only component accesses, alignment is
1696 irrelevant for them. */
1697 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1698 continue;
1699
1700 save_misalignment = DR_MISALIGNMENT (dr);
1701 vect_update_misalignment_for_peel (dr, dr0, npeel);
1702 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1703 SET_DR_MISALIGNMENT (dr, save_misalignment);
1704
1705 if (!supportable_dr_alignment)
1706 {
1707 do_peeling = false;
1708 break;
1709 }
1710 }
1711
1712 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1713 {
1714 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1715 if (!stat)
1716 do_peeling = false;
1717 else
1718 {
1719 body_cost_vec.release ();
1720 return stat;
1721 }
1722 }
1723
1724 if (do_peeling)
1725 {
1726 unsigned max_allowed_peel
1727 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1728 if (max_allowed_peel != (unsigned)-1)
1729 {
1730 unsigned max_peel = npeel;
1731 if (max_peel == 0)
1732 {
1733 gimple dr_stmt = DR_STMT (dr0);
1734 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1735 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1736 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1737 }
1738 if (max_peel > max_allowed_peel)
1739 {
1740 do_peeling = false;
1741 if (dump_enabled_p ())
1742 dump_printf_loc (MSG_NOTE, vect_location,
1743 "Disable peeling, max peels reached: %d\n", max_peel);
1744 }
1745 }
1746 }
1747
1748 if (do_peeling)
1749 {
1750 stmt_info_for_cost *si;
1751 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
1752
1753 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1754 If the misalignment of DR_i is identical to that of dr0 then set
1755 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1756 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1757 by the peeling factor times the element size of DR_i (MOD the
1758 vectorization factor times the size). Otherwise, the
1759 misalignment of DR_i must be set to unknown. */
1760 FOR_EACH_VEC_ELT (datarefs, i, dr)
1761 if (dr != dr0)
1762 vect_update_misalignment_for_peel (dr, dr0, npeel);
1763
1764 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1765 if (npeel)
1766 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1767 else
1768 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
1769 SET_DR_MISALIGNMENT (dr0, 0);
1770 if (dump_enabled_p ())
1771 {
1772 dump_printf_loc (MSG_NOTE, vect_location,
1773 "Alignment of access forced using peeling.\n");
1774 dump_printf_loc (MSG_NOTE, vect_location,
1775 "Peeling for alignment will be applied.\n");
1776 }
1777 /* We've delayed passing the inside-loop peeling costs to the
1778 target cost model until we were sure peeling would happen.
1779 Do so now. */
1780 if (body_cost_vec.exists ())
1781 {
1782 FOR_EACH_VEC_ELT (body_cost_vec, i, si)
1783 {
1784 struct _stmt_vec_info *stmt_info
1785 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
1786 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
1787 si->misalign, vect_body);
1788 }
1789 body_cost_vec.release ();
1790 }
1791
1792 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1793 gcc_assert (stat);
1794 return stat;
1795 }
1796 }
1797
1798 body_cost_vec.release ();
1799
1800 /* (2) Versioning to force alignment. */
1801
1802 /* Try versioning if:
1803 1) optimize loop for speed
1804 2) there is at least one unsupported misaligned data ref with an unknown
1805 misalignment, and
1806 3) all misaligned data refs with a known misalignment are supported, and
1807 4) the number of runtime alignment checks is within reason. */
1808
1809 do_versioning =
1810 optimize_loop_nest_for_speed_p (loop)
1811 && (!loop->inner); /* FORNOW */
1812
1813 if (do_versioning)
1814 {
1815 FOR_EACH_VEC_ELT (datarefs, i, dr)
1816 {
1817 stmt = DR_STMT (dr);
1818 stmt_info = vinfo_for_stmt (stmt);
1819
1820 /* For interleaving, only the alignment of the first access
1821 matters. */
1822 if (aligned_access_p (dr)
1823 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1824 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1825 continue;
1826
1827 /* Strided loads perform only component accesses, alignment is
1828 irrelevant for them. */
1829 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1830 continue;
1831
1832 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1833
1834 if (!supportable_dr_alignment)
1835 {
1836 gimple stmt;
1837 int mask;
1838 tree vectype;
1839
1840 if (known_alignment_for_access_p (dr)
1841 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1842 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1843 {
1844 do_versioning = false;
1845 break;
1846 }
1847
1848 stmt = DR_STMT (dr);
1849 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1850 gcc_assert (vectype);
1851
1852 /* The rightmost bits of an aligned address must be zeros.
1853 Construct the mask needed for this test. For example,
1854 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1855 mask must be 15 = 0xf. */
1856 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1857
1858 /* FORNOW: use the same mask to test all potentially unaligned
1859 references in the loop. The vectorizer currently supports
1860 a single vector size, see the reference to
1861 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1862 vectorization factor is computed. */
1863 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1864 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1865 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1866 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1867 DR_STMT (dr));
1868 }
1869 }
1870
1871 /* Versioning requires at least one misaligned data reference. */
1872 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1873 do_versioning = false;
1874 else if (!do_versioning)
1875 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1876 }
1877
1878 if (do_versioning)
1879 {
1880 vec<gimple> may_misalign_stmts
1881 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1882 gimple stmt;
1883
1884 /* It can now be assumed that the data references in the statements
1885 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1886 of the loop being vectorized. */
1887 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1888 {
1889 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1890 dr = STMT_VINFO_DATA_REF (stmt_info);
1891 SET_DR_MISALIGNMENT (dr, 0);
1892 if (dump_enabled_p ())
1893 dump_printf_loc (MSG_NOTE, vect_location,
1894 "Alignment of access forced using versioning.\n");
1895 }
1896
1897 if (dump_enabled_p ())
1898 dump_printf_loc (MSG_NOTE, vect_location,
1899 "Versioning for alignment will be applied.\n");
1900
1901 /* Peeling and versioning can't be done together at this time. */
1902 gcc_assert (! (do_peeling && do_versioning));
1903
1904 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1905 gcc_assert (stat);
1906 return stat;
1907 }
1908
1909 /* This point is reached if neither peeling nor versioning is being done. */
1910 gcc_assert (! (do_peeling || do_versioning));
1911
1912 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1913 return stat;
1914 }
1915
1916
1917 /* Function vect_find_same_alignment_drs.
1918
1919 Update group and alignment relations according to the chosen
1920 vectorization factor. */
1921
1922 static void
1923 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1924 loop_vec_info loop_vinfo)
1925 {
1926 unsigned int i;
1927 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1928 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1929 struct data_reference *dra = DDR_A (ddr);
1930 struct data_reference *drb = DDR_B (ddr);
1931 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1932 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1933 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1934 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1935 lambda_vector dist_v;
1936 unsigned int loop_depth;
1937
1938 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1939 return;
1940
1941 if (dra == drb)
1942 return;
1943
1944 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1945 return;
1946
1947 /* Loop-based vectorization and known data dependence. */
1948 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1949 return;
1950
1951 /* Data-dependence analysis reports a distance vector of zero
1952 for data-references that overlap only in the first iteration
1953 but have different sign step (see PR45764).
1954 So as a sanity check require equal DR_STEP. */
1955 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1956 return;
1957
1958 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1959 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1960 {
1961 int dist = dist_v[loop_depth];
1962
1963 if (dump_enabled_p ())
1964 dump_printf_loc (MSG_NOTE, vect_location,
1965 "dependence distance = %d.\n", dist);
1966
1967 /* Same loop iteration. */
1968 if (dist == 0
1969 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1970 {
1971 /* Two references with distance zero have the same alignment. */
1972 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1973 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1974 if (dump_enabled_p ())
1975 {
1976 dump_printf_loc (MSG_NOTE, vect_location,
1977 "accesses have the same alignment.\n");
1978 dump_printf (MSG_NOTE,
1979 "dependence distance modulo vf == 0 between ");
1980 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1981 dump_printf (MSG_NOTE, " and ");
1982 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1983 dump_printf (MSG_NOTE, "\n");
1984 }
1985 }
1986 }
1987 }
1988
1989
1990 /* Function vect_analyze_data_refs_alignment
1991
1992 Analyze the alignment of the data-references in the loop.
1993 Return FALSE if a data reference is found that cannot be vectorized. */
1994
1995 bool
1996 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1997 bb_vec_info bb_vinfo)
1998 {
1999 if (dump_enabled_p ())
2000 dump_printf_loc (MSG_NOTE, vect_location,
2001 "=== vect_analyze_data_refs_alignment ===\n");
2002
2003 /* Mark groups of data references with same alignment using
2004 data dependence information. */
2005 if (loop_vinfo)
2006 {
2007 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2008 struct data_dependence_relation *ddr;
2009 unsigned int i;
2010
2011 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2012 vect_find_same_alignment_drs (ddr, loop_vinfo);
2013 }
2014
2015 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2016 {
2017 if (dump_enabled_p ())
2018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2019 "not vectorized: can't calculate alignment "
2020 "for data ref.\n");
2021 return false;
2022 }
2023
2024 return true;
2025 }
2026
2027
2028 /* Analyze groups of accesses: check that DR belongs to a group of
2029 accesses of legal size, step, etc. Detect gaps, single element
2030 interleaving, and other special cases. Set grouped access info.
2031 Collect groups of strided stores for further use in SLP analysis. */
2032
2033 static bool
2034 vect_analyze_group_access (struct data_reference *dr)
2035 {
2036 tree step = DR_STEP (dr);
2037 tree scalar_type = TREE_TYPE (DR_REF (dr));
2038 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2039 gimple stmt = DR_STMT (dr);
2040 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2041 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2042 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2043 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2044 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2045 bool slp_impossible = false;
2046 struct loop *loop = NULL;
2047
2048 if (loop_vinfo)
2049 loop = LOOP_VINFO_LOOP (loop_vinfo);
2050
2051 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2052 size of the interleaving group (including gaps). */
2053 groupsize = absu_hwi (dr_step) / type_size;
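/* Worked example (editorial sketch, hypothetical loop): for a read of
   int a[] of the form a[4*i], DR_STEP is 16 bytes and the type size is
   4, so GROUPSIZE == 16 / 4 == 4 -- a single-element interleaving group
   of size 4, which the code below accepts for loads because the step is
   a multiple of the type size and 4 is a power of 2.  */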
2054
2055 /* A non-consecutive access is possible only if it is part of interleaving. */
2056 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2057 {
2058 /* Check if this DR is a part of interleaving, and is a single
2059 element of the group that is accessed in the loop. */
2060
2061 /* Gaps are supported only for loads. STEP must be a multiple of the type
2062 size. The size of the group must be a power of 2. */
2063 if (DR_IS_READ (dr)
2064 && (dr_step % type_size) == 0
2065 && groupsize > 0
2066 && exact_log2 (groupsize) != -1)
2067 {
2068 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2069 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2070 if (dump_enabled_p ())
2071 {
2072 dump_printf_loc (MSG_NOTE, vect_location,
2073 "Detected single element interleaving ");
2074 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2075 dump_printf (MSG_NOTE, " step ");
2076 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2077 dump_printf (MSG_NOTE, "\n");
2078 }
2079
2080 if (loop_vinfo)
2081 {
2082 if (dump_enabled_p ())
2083 dump_printf_loc (MSG_NOTE, vect_location,
2084 "Data access with gaps requires scalar "
2085 "epilogue loop\n");
2086 if (loop->inner)
2087 {
2088 if (dump_enabled_p ())
2089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2090 "Peeling for outer loop is not"
2091 " supported\n");
2092 return false;
2093 }
2094
2095 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2096 }
2097
2098 return true;
2099 }
2100
2101 if (dump_enabled_p ())
2102 {
2103 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2104 "not consecutive access ");
2105 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2106 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2107 }
2108
2109 if (bb_vinfo)
2110 {
2111 /* Mark the statement as unvectorizable. */
2112 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2113 return true;
2114 }
2115
2116 return false;
2117 }
2118
2119 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2120 {
2121 /* First stmt in the interleaving chain. Check the chain. */
2122 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2123 struct data_reference *data_ref = dr;
2124 unsigned int count = 1;
2125 tree prev_init = DR_INIT (data_ref);
2126 gimple prev = stmt;
2127 HOST_WIDE_INT diff, gaps = 0;
2128 unsigned HOST_WIDE_INT count_in_bytes;
2129
2130 while (next)
2131 {
2132 /* Skip same data-refs. If two or more stmts share a
2133 data-ref (supported only for loads), we vectorize only the first
2134 stmt, and the rest get their vectorized loads from the first
2135 one. */
2136 if (!tree_int_cst_compare (DR_INIT (data_ref),
2137 DR_INIT (STMT_VINFO_DATA_REF (
2138 vinfo_for_stmt (next)))))
2139 {
2140 if (DR_IS_WRITE (data_ref))
2141 {
2142 if (dump_enabled_p ())
2143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2144 "Two store stmts share the same dr.\n");
2145 return false;
2146 }
2147
2148 /* For load use the same data-ref load. */
2149 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2150
2151 prev = next;
2152 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2153 continue;
2154 }
2155
2156 prev = next;
2157 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2158
2159 /* All group members have the same STEP by construction. */
2160 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2161
2162 /* Check that the distance between two accesses is equal to the type
2163 size. Otherwise, we have gaps. */
2164 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2165 - TREE_INT_CST_LOW (prev_init)) / type_size;
2166 if (diff != 1)
2167 {
2168 /* FORNOW: SLP of accesses with gaps is not supported. */
2169 slp_impossible = true;
2170 if (DR_IS_WRITE (data_ref))
2171 {
2172 if (dump_enabled_p ())
2173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2174 "interleaved store with gaps\n");
2175 return false;
2176 }
2177
2178 gaps += diff - 1;
2179 }
2180
2181 last_accessed_element += diff;
2182
2183 /* Store the gap from the previous member of the group. If there is no
2184 gap in the access, GROUP_GAP is always 1. */
2185 GROUP_GAP (vinfo_for_stmt (next)) = diff;
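/* Editorial sketch (hypothetical group): for int loads a[3*i] and
   a[3*i+2], the inits are 0 and 8 with type_size 4, so diff == 2.
   GROUP_GAP of the second member is therefore 2, gaps grows by 1 and
   SLP of the group is ruled out; a store group with such a gap would
   have been rejected above.  */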
2186
2187 prev_init = DR_INIT (data_ref);
2188 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2189 /* Count the number of data-refs in the chain. */
2190 count++;
2191 }
2192
2193 /* COUNT is the number of accesses found; we multiply it by the size of
2194 the type to get COUNT_IN_BYTES. */
2195 count_in_bytes = type_size * count;
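/* Continuing the hypothetical a[3*i] / a[3*i+2] sketch above:
   count == 2 gives count_in_bytes == 8, and with gaps == 1 the
   footprint is 8 + 1 * 4 == 12 bytes, which does not exceed the
   12-byte step, so the check below passes.  */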
2196
2197 /* Check that the size of the interleaving (including gaps) is not
2198 greater than STEP. */
2199 if (dr_step != 0
2200 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
2201 {
2202 if (dump_enabled_p ())
2203 {
2204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2205 "interleaving size is greater than step for ");
2206 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2207 DR_REF (dr));
2208 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2209 }
2210 return false;
2211 }
2212
2213 /* Check that the size of the interleaving is equal to STEP for stores,
2214 i.e., that there are no gaps. */
2215 if (dr_step != 0
2216 && absu_hwi (dr_step) != count_in_bytes)
2217 {
2218 if (DR_IS_READ (dr))
2219 {
2220 slp_impossible = true;
2221 /* There is a gap after the last load in the group. This gap is a
2222 difference between the groupsize and the number of elements.
2223 When there is no gap, this difference should be 0. */
2224 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
2225 }
2226 else
2227 {
2228 if (dump_enabled_p ())
2229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2230 "interleaved store with gaps\n");
2231 return false;
2232 }
2233 }
2234
2235 /* Check that STEP is a multiple of type size. */
2236 if (dr_step != 0
2237 && (dr_step % type_size) != 0)
2238 {
2239 if (dump_enabled_p ())
2240 {
2241 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2242 "step is not a multiple of type size: step ");
2243 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
2244 dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
2245 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2246 TYPE_SIZE_UNIT (scalar_type));
2247 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2248 }
2249 return false;
2250 }
2251
2252 if (groupsize == 0)
2253 groupsize = count;
2254
2255 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2256 if (dump_enabled_p ())
2257 dump_printf_loc (MSG_NOTE, vect_location,
2258 "Detected interleaving of size %d\n", (int)groupsize);
2259
2260 /* SLP: create an SLP data structure for every interleaving group of
2261 stores for further analysis in vect_analyse_slp. */
2262 if (DR_IS_WRITE (dr) && !slp_impossible)
2263 {
2264 if (loop_vinfo)
2265 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2266 if (bb_vinfo)
2267 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2268 }
2269
2270 /* There is a gap in the end of the group. */
2271 if (groupsize - last_accessed_element > 0 && loop_vinfo)
2272 {
2273 if (dump_enabled_p ())
2274 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2275 "Data access with gaps requires scalar "
2276 "epilogue loop\n");
2277 if (loop->inner)
2278 {
2279 if (dump_enabled_p ())
2280 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2281 "Peeling for outer loop is not supported\n");
2282 return false;
2283 }
2284
2285 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2286 }
2287 }
2288
2289 return true;
2290 }
2291
2292
2293 /* Analyze the access pattern of the data-reference DR.
2294 In case of non-consecutive accesses call vect_analyze_group_access() to
2295 analyze groups of accesses. */
2296
2297 static bool
2298 vect_analyze_data_ref_access (struct data_reference *dr)
2299 {
2300 tree step = DR_STEP (dr);
2301 tree scalar_type = TREE_TYPE (DR_REF (dr));
2302 gimple stmt = DR_STMT (dr);
2303 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2304 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2305 struct loop *loop = NULL;
2306
2307 if (loop_vinfo)
2308 loop = LOOP_VINFO_LOOP (loop_vinfo);
2309
2310 if (loop_vinfo && !step)
2311 {
2312 if (dump_enabled_p ())
2313 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2314 "bad data-ref access in loop\n");
2315 return false;
2316 }
2317
2318 /* Allow invariant loads in loops that are not nested. */
2319 if (loop_vinfo && integer_zerop (step))
2320 {
2321 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2322 if (nested_in_vect_loop_p (loop, stmt))
2323 {
2324 if (dump_enabled_p ())
2325 dump_printf_loc (MSG_NOTE, vect_location,
2326 "zero step in inner loop of nest\n");
2327 return false;
2328 }
2329 return DR_IS_READ (dr);
2330 }
2331
2332 if (loop && nested_in_vect_loop_p (loop, stmt))
2333 {
2334 /* Interleaved accesses are not yet supported within outer-loop
2335 vectorization for references in the inner-loop. */
2336 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2337
2338 /* For the rest of the analysis we use the outer-loop step. */
2339 step = STMT_VINFO_DR_STEP (stmt_info);
2340 if (integer_zerop (step))
2341 {
2342 if (dump_enabled_p ())
2343 dump_printf_loc (MSG_NOTE, vect_location,
2344 "zero step in outer loop.\n");
2345 if (DR_IS_READ (dr))
2346 return true;
2347 else
2348 return false;
2349 }
2350 }
2351
2352 /* Consecutive? */
2353 if (TREE_CODE (step) == INTEGER_CST)
2354 {
2355 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2356 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2357 || (dr_step < 0
2358 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2359 {
2360 /* Mark that it is not interleaving. */
2361 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2362 return true;
2363 }
2364 }
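/* Editorial example (hypothetical accesses): for int a[], a read of
   a[i] has DR_STEP == 4 == TYPE_SIZE_UNIT and is consecutive; a read
   of a[n - i] has DR_STEP == -4, which matches the second test and is
   treated as a (reverse) consecutive access as well.  */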
2365
2366 if (loop && nested_in_vect_loop_p (loop, stmt))
2367 {
2368 if (dump_enabled_p ())
2369 dump_printf_loc (MSG_NOTE, vect_location,
2370 "grouped access in outer loop.\n");
2371 return false;
2372 }
2373
2374 /* Assume this is a DR handled by the non-constant strided load case. */
2375 if (TREE_CODE (step) != INTEGER_CST)
2376 return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
2377
2378 /* Not consecutive access - check if it's a part of interleaving group. */
2379 return vect_analyze_group_access (dr);
2380 }
2381
2382
2383
2384 /* A helper function used in the comparator function to sort data
2385 references. T1 and T2 are two data references to be compared.
2386 The function returns -1, 0, or 1. */
2387
2388 static int
2389 compare_tree (tree t1, tree t2)
2390 {
2391 int i, cmp;
2392 enum tree_code code;
2393 char tclass;
2394
2395 if (t1 == t2)
2396 return 0;
2397 if (t1 == NULL)
2398 return -1;
2399 if (t2 == NULL)
2400 return 1;
2401
2402
2403 if (TREE_CODE (t1) != TREE_CODE (t2))
2404 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2405
2406 code = TREE_CODE (t1);
2407 switch (code)
2408 {
2409 /* For const values, we can just use hash values for comparisons. */
2410 case INTEGER_CST:
2411 case REAL_CST:
2412 case FIXED_CST:
2413 case STRING_CST:
2414 case COMPLEX_CST:
2415 case VECTOR_CST:
2416 {
2417 hashval_t h1 = iterative_hash_expr (t1, 0);
2418 hashval_t h2 = iterative_hash_expr (t2, 0);
2419 if (h1 != h2)
2420 return h1 < h2 ? -1 : 1;
2421 break;
2422 }
2423
2424 case SSA_NAME:
2425 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2426 if (cmp != 0)
2427 return cmp;
2428
2429 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2430 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2431 break;
2432
2433 default:
2434 tclass = TREE_CODE_CLASS (code);
2435
2436 /* For var-decl, we could compare their UIDs. */
2437 if (tclass == tcc_declaration)
2438 {
2439 if (DECL_UID (t1) != DECL_UID (t2))
2440 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2441 break;
2442 }
2443
2444 /* For expressions with operands, compare their operands recursively. */
2445 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2446 {
2447 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2448 if (cmp != 0)
2449 return cmp;
2450 }
2451 }
2452
2453 return 0;
2454 }
2455
2456
2457 /* Compare two data-references DRA and DRB to sort them into chunks
2458 suitable for grouping. */
2459
2460 static int
2461 dr_group_sort_cmp (const void *dra_, const void *drb_)
2462 {
2463 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2464 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2465 int cmp;
2466
2467 /* Stabilize sort. */
2468 if (dra == drb)
2469 return 0;
2470
2471 /* Ordering of DRs according to base. */
2472 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2473 {
2474 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2475 if (cmp != 0)
2476 return cmp;
2477 }
2478
2479 /* And according to DR_OFFSET. */
2480 if (!dr_equal_offsets_p (dra, drb))
2481 {
2482 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2483 if (cmp != 0)
2484 return cmp;
2485 }
2486
2487 /* Put reads before writes. */
2488 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2489 return DR_IS_READ (dra) ? -1 : 1;
2490
2491 /* Then sort after access size. */
2492 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2493 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2494 {
2495 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2496 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2497 if (cmp != 0)
2498 return cmp;
2499 }
2500
2501 /* And after step. */
2502 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2503 {
2504 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2505 if (cmp != 0)
2506 return cmp;
2507 }
2508
2509 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2510 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2511 if (cmp == 0)
2512 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2513 return cmp;
2514 }
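/* Editorial note: qsort is not a stable sort, so the final tie-break on
   gimple UIDs above is what makes the resulting order deterministic for
   data-refs that compare equal on base, offset, size, step and init.  */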
2515
2516 /* Function vect_analyze_data_ref_accesses.
2517
2518 Analyze the access pattern of all the data references in the loop.
2519
2520 FORNOW: the only access pattern that is considered vectorizable is a
2521 simple step 1 (consecutive) access.
2522
2523 FORNOW: handle only arrays and pointer accesses. */
2524
2525 bool
2526 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2527 {
2528 unsigned int i;
2529 vec<data_reference_p> datarefs;
2530 struct data_reference *dr;
2531
2532 if (dump_enabled_p ())
2533 dump_printf_loc (MSG_NOTE, vect_location,
2534 "=== vect_analyze_data_ref_accesses ===\n");
2535
2536 if (loop_vinfo)
2537 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2538 else
2539 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2540
2541 if (datarefs.is_empty ())
2542 return true;
2543
2544 /* Sort the array of datarefs to make building the interleaving chains
2545 linear. */
2546 qsort (datarefs.address (), datarefs.length (),
2547 sizeof (data_reference_p), dr_group_sort_cmp);
2548
2549 /* Build the interleaving chains. */
2550 for (i = 0; i < datarefs.length () - 1;)
2551 {
2552 data_reference_p dra = datarefs[i];
2553 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2554 stmt_vec_info lastinfo = NULL;
2555 for (i = i + 1; i < datarefs.length (); ++i)
2556 {
2557 data_reference_p drb = datarefs[i];
2558 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2559
2560 /* ??? Imperfect sorting (non-compatible types, non-modulo
2561 accesses, same accesses) can lead to a group being artificially
2562 split here as we don't just skip over those. If it really
2563 matters we can push those to a worklist and re-iterate
2564 over them. Then we can just skip ahead to the next DR here. */
2565
2566 /* Check that the data-refs have same first location (except init)
2567 and they are both either store or load (not load and store). */
2568 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2569 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2570 DR_BASE_ADDRESS (drb), 0)
2571 || !dr_equal_offsets_p (dra, drb))
2572 break;
2573
2574 /* Check that the data-refs have the same constant size and step. */
2575 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2576 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2577 if (!host_integerp (sza, 1)
2578 || !host_integerp (szb, 1)
2579 || !tree_int_cst_equal (sza, szb)
2580 || !host_integerp (DR_STEP (dra), 0)
2581 || !host_integerp (DR_STEP (drb), 0)
2582 || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
2583 break;
2584
2585 /* Do not place the same access in the interleaving chain twice. */
2586 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2587 break;
2588
2589 /* Check the types are compatible.
2590 ??? We don't distinguish this during sorting. */
2591 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2592 TREE_TYPE (DR_REF (drb))))
2593 break;
2594
2595 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2596 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2597 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2598 gcc_assert (init_a < init_b);
2599
2600 /* If init_b == init_a + the size of the type * k, we have an
2601 interleaving, and DRA is accessed before DRB. */
2602 HOST_WIDE_INT type_size_a = TREE_INT_CST_LOW (sza);
2603 if ((init_b - init_a) % type_size_a != 0)
2604 break;
2605
2606 /* The step (if not zero) is greater than the difference between
2607 data-refs' inits. This splits groups into suitable sizes. */
2608 HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (dra));
2609 if (step != 0 && step <= (init_b - init_a))
2610 break;
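/* Editorial sketch (hypothetical refs): int accesses a[2*i] and
   a[2*i+1] have step 8 and inits 0 and 4, so 8 > 4 and they stay in
   one chain; int accesses a[i] and a[i+4] have step 4 and inits 0 and
   16, so 4 <= 16 and the chain is split into separate consecutive
   accesses here.  */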
2611
2612 if (dump_enabled_p ())
2613 {
2614 dump_printf_loc (MSG_NOTE, vect_location,
2615 "Detected interleaving ");
2616 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2617 dump_printf (MSG_NOTE, " and ");
2618 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2619 dump_printf (MSG_NOTE, "\n");
2620 }
2621
2622 /* Link the found element into the group list. */
2623 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2624 {
2625 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2626 lastinfo = stmtinfo_a;
2627 }
2628 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2629 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2630 lastinfo = stmtinfo_b;
2631 }
2632 }
2633
2634 FOR_EACH_VEC_ELT (datarefs, i, dr)
2635 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2636 && !vect_analyze_data_ref_access (dr))
2637 {
2638 if (dump_enabled_p ())
2639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2640 "not vectorized: complicated access pattern.\n");
2641
2642 if (bb_vinfo)
2643 {
2644 /* Mark the statement as not vectorizable. */
2645 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2646 continue;
2647 }
2648 else
2649 return false;
2650 }
2651
2652 return true;
2653 }
2654
2655 /* Function vect_prune_runtime_alias_test_list.
2656
2657 Prune a list of ddrs to be tested at run-time by versioning for alias.
2658 Return FALSE if resulting list of ddrs is longer then allowed by
2659 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2660
2661 bool
2662 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2663 {
2664 vec<ddr_p> ddrs =
2665 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2666 unsigned i, j;
2667
2668 if (dump_enabled_p ())
2669 dump_printf_loc (MSG_NOTE, vect_location,
2670 "=== vect_prune_runtime_alias_test_list ===\n");
2671
2672 for (i = 0; i < ddrs.length (); )
2673 {
2674 bool found;
2675 ddr_p ddr_i;
2676
2677 ddr_i = ddrs[i];
2678 found = false;
2679
2680 for (j = 0; j < i; j++)
2681 {
2682 ddr_p ddr_j = ddrs[j];
2683
2684 if (vect_vfa_range_equal (ddr_i, ddr_j))
2685 {
2686 if (dump_enabled_p ())
2687 {
2688 dump_printf_loc (MSG_NOTE, vect_location,
2689 "found equal ranges ");
2690 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2691 DR_REF (DDR_A (ddr_i)));
2692 dump_printf (MSG_NOTE, ", ");
2693 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2694 DR_REF (DDR_B (ddr_i)));
2695 dump_printf (MSG_NOTE, " and ");
2696 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2697 DR_REF (DDR_A (ddr_j)));
2698 dump_printf (MSG_NOTE, ", ");
2699 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2700 DR_REF (DDR_B (ddr_j)));
2701 dump_printf (MSG_NOTE, "\n");
2702 }
2703 found = true;
2704 break;
2705 }
2706 }
2707
2708 if (found)
2709 {
2710 ddrs.ordered_remove (i);
2711 continue;
2712 }
2713 i++;
2714 }
2715
2716 if (ddrs.length () >
2717 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2718 {
2719 if (dump_enabled_p ())
2720 {
2721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2722 "disable versioning for alias - max number of "
2723 "generated checks exceeded.\n");
2724 }
2725
2726 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).truncate (0);
2727
2728 return false;
2729 }
2730
2731 return true;
2732 }
2733
2734 /* Check whether a non-affine read in stmt is suitable for gather load
2735 and if so, return a builtin decl for that operation. */
2736
2737 tree
2738 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2739 tree *offp, int *scalep)
2740 {
2741 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2742 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2743 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2744 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2745 tree offtype = NULL_TREE;
2746 tree decl, base, off;
2747 enum machine_mode pmode;
2748 int punsignedp, pvolatilep;
2749
2750 /* The gather builtins need an address of the form
2751 loop_invariant + vector * {1, 2, 4, 8}
2752 or
2753 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
2754 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
2755 of loop invariants/SSA_NAMEs defined in the loop, with casts,
2756 multiplications and additions in it. To get a vector, we need
2757 a single SSA_NAME that will be defined in the loop and will
2758 contain everything that is not loop invariant and that can be
2759 vectorized. The following code attempts to find such a preexisting
2760 SSA_NAME OFF and put the loop invariants into a tree BASE
2761 that can be gimplified before the loop. */
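/* Editorial example (hypothetical source loop):
     for (i = 0; i < n; i++)
       sum += base[idx[i]];
   the loop-invariant pointer "base" ends up in BASE, the SSA_NAME
   carrying idx[i] (possibly after a widening conversion) becomes OFF,
   and SCALE is the element size, matching the address form described
   above.  */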
2762 base = get_inner_reference (DR_REF (dr), &pbitsize, &pbitpos, &off,
2763 &pmode, &punsignedp, &pvolatilep, false);
2764 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
2765
2766 if (TREE_CODE (base) == MEM_REF)
2767 {
2768 if (!integer_zerop (TREE_OPERAND (base, 1)))
2769 {
2770 if (off == NULL_TREE)
2771 {
2772 double_int moff = mem_ref_offset (base);
2773 off = double_int_to_tree (sizetype, moff);
2774 }
2775 else
2776 off = size_binop (PLUS_EXPR, off,
2777 fold_convert (sizetype, TREE_OPERAND (base, 1)));
2778 }
2779 base = TREE_OPERAND (base, 0);
2780 }
2781 else
2782 base = build_fold_addr_expr (base);
2783
2784 if (off == NULL_TREE)
2785 off = size_zero_node;
2786
2787 /* If base is not loop invariant, then if off is 0 we start with just
2788 the constant offset in the loop invariant BASE and continue with base
2789 as OFF; otherwise give up.
2790 We could handle that case by gimplifying the addition of base + off
2791 into some SSA_NAME and use that as off, but for now punt. */
2792 if (!expr_invariant_in_loop_p (loop, base))
2793 {
2794 if (!integer_zerop (off))
2795 return NULL_TREE;
2796 off = base;
2797 base = size_int (pbitpos / BITS_PER_UNIT);
2798 }
2799 /* Otherwise put base + constant offset into the loop invariant BASE
2800 and continue with OFF. */
2801 else
2802 {
2803 base = fold_convert (sizetype, base);
2804 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
2805 }
2806
2807 /* OFF at this point may be either a SSA_NAME or some tree expression
2808 from get_inner_reference. Try to peel off loop invariants from it
2809 into BASE as long as possible. */
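/* Editorial sketch, assuming OFF initially has the (hypothetical) form
   ((sizetype) idx) * 4 + 16 with idx defined in the loop: the
   PLUS_EXPR peels 16 into BASE while SCALE is still 1, the MULT_EXPR
   then sets SCALE to 4, and the widening conversion is stripped,
   leaving OFF == idx with OFFTYPE its narrower type.  */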
2810 STRIP_NOPS (off);
2811 while (offtype == NULL_TREE)
2812 {
2813 enum tree_code code;
2814 tree op0, op1, add = NULL_TREE;
2815
2816 if (TREE_CODE (off) == SSA_NAME)
2817 {
2818 gimple def_stmt = SSA_NAME_DEF_STMT (off);
2819
2820 if (expr_invariant_in_loop_p (loop, off))
2821 return NULL_TREE;
2822
2823 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2824 break;
2825
2826 op0 = gimple_assign_rhs1 (def_stmt);
2827 code = gimple_assign_rhs_code (def_stmt);
2828 op1 = gimple_assign_rhs2 (def_stmt);
2829 }
2830 else
2831 {
2832 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
2833 return NULL_TREE;
2834 code = TREE_CODE (off);
2835 extract_ops_from_tree (off, &code, &op0, &op1);
2836 }
2837 switch (code)
2838 {
2839 case POINTER_PLUS_EXPR:
2840 case PLUS_EXPR:
2841 if (expr_invariant_in_loop_p (loop, op0))
2842 {
2843 add = op0;
2844 off = op1;
2845 do_add:
2846 add = fold_convert (sizetype, add);
2847 if (scale != 1)
2848 add = size_binop (MULT_EXPR, add, size_int (scale));
2849 base = size_binop (PLUS_EXPR, base, add);
2850 continue;
2851 }
2852 if (expr_invariant_in_loop_p (loop, op1))
2853 {
2854 add = op1;
2855 off = op0;
2856 goto do_add;
2857 }
2858 break;
2859 case MINUS_EXPR:
2860 if (expr_invariant_in_loop_p (loop, op1))
2861 {
2862 add = fold_convert (sizetype, op1);
2863 add = size_binop (MINUS_EXPR, size_zero_node, add);
2864 off = op0;
2865 goto do_add;
2866 }
2867 break;
2868 case MULT_EXPR:
2869 if (scale == 1 && host_integerp (op1, 0))
2870 {
2871 scale = tree_low_cst (op1, 0);
2872 off = op0;
2873 continue;
2874 }
2875 break;
2876 case SSA_NAME:
2877 off = op0;
2878 continue;
2879 CASE_CONVERT:
2880 if (!POINTER_TYPE_P (TREE_TYPE (op0))
2881 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2882 break;
2883 if (TYPE_PRECISION (TREE_TYPE (op0))
2884 == TYPE_PRECISION (TREE_TYPE (off)))
2885 {
2886 off = op0;
2887 continue;
2888 }
2889 if (TYPE_PRECISION (TREE_TYPE (op0))
2890 < TYPE_PRECISION (TREE_TYPE (off)))
2891 {
2892 off = op0;
2893 offtype = TREE_TYPE (off);
2894 STRIP_NOPS (off);
2895 continue;
2896 }
2897 break;
2898 default:
2899 break;
2900 }
2901 break;
2902 }
2903
2904 /* If at the end OFF still isn't a SSA_NAME or isn't
2905 defined in the loop, punt. */
2906 if (TREE_CODE (off) != SSA_NAME
2907 || expr_invariant_in_loop_p (loop, off))
2908 return NULL_TREE;
2909
2910 if (offtype == NULL_TREE)
2911 offtype = TREE_TYPE (off);
2912
2913 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
2914 offtype, scale);
2915 if (decl == NULL_TREE)
2916 return NULL_TREE;
2917
2918 if (basep)
2919 *basep = base;
2920 if (offp)
2921 *offp = off;
2922 if (scalep)
2923 *scalep = scale;
2924 return decl;
2925 }
2926
2927 /* Function vect_analyze_data_refs.
2928
2929 Find all the data references in the loop or basic block.
2930
2931 The general structure of the analysis of data refs in the vectorizer is as
2932 follows:
2933 1- vect_analyze_data_refs(loop/bb): call
2934 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2935 in the loop/bb and their dependences.
2936 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2937 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2938 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2939
2940 */
2941
2942 bool
2943 vect_analyze_data_refs (loop_vec_info loop_vinfo,
2944 bb_vec_info bb_vinfo,
2945 int *min_vf)
2946 {
2947 struct loop *loop = NULL;
2948 basic_block bb = NULL;
2949 unsigned int i;
2950 vec<data_reference_p> datarefs;
2951 struct data_reference *dr;
2952 tree scalar_type;
2953
2954 if (dump_enabled_p ())
2955 dump_printf_loc (MSG_NOTE, vect_location,
2956 "=== vect_analyze_data_refs ===\n");
2957
2958 if (loop_vinfo)
2959 {
2960 loop = LOOP_VINFO_LOOP (loop_vinfo);
2961 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))
2962 || find_data_references_in_loop
2963 (loop, &LOOP_VINFO_DATAREFS (loop_vinfo)))
2964 {
2965 if (dump_enabled_p ())
2966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2967 "not vectorized: loop contains function calls"
2968 " or data references that cannot be analyzed\n");
2969 return false;
2970 }
2971
2972 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2973 }
2974 else
2975 {
2976 gimple_stmt_iterator gsi;
2977
2978 bb = BB_VINFO_BB (bb_vinfo);
2979 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2980 {
2981 gimple stmt = gsi_stmt (gsi);
2982 if (!find_data_references_in_stmt (NULL, stmt,
2983 &BB_VINFO_DATAREFS (bb_vinfo)))
2984 {
2985 /* Mark the rest of the basic-block as unvectorizable. */
2986 for (; !gsi_end_p (gsi); gsi_next (&gsi))
2987 {
2988 stmt = gsi_stmt (gsi);
2989 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
2990 }
2991 break;
2992 }
2993 }
2994
2995 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2996 }
2997
2998 /* Go through the data-refs, check that the analysis succeeded. Update
2999 pointer from stmt_vec_info struct to DR and vectype. */
3000
3001 FOR_EACH_VEC_ELT (datarefs, i, dr)
3002 {
3003 gimple stmt;
3004 stmt_vec_info stmt_info;
3005 tree base, offset, init;
3006 bool gather = false;
3007 bool simd_lane_access = false;
3008 int vf;
3009
3010 again:
3011 if (!dr || !DR_REF (dr))
3012 {
3013 if (dump_enabled_p ())
3014 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3015 "not vectorized: unhandled data-ref\n");
3016 return false;
3017 }
3018
3019 stmt = DR_STMT (dr);
3020 stmt_info = vinfo_for_stmt (stmt);
3021
3022 /* Discard clobbers from the dataref vector. We will remove
3023 clobber stmts during vectorization. */
3024 if (gimple_clobber_p (stmt))
3025 {
3026 if (i == datarefs.length () - 1)
3027 {
3028 datarefs.pop ();
3029 break;
3030 }
3031 datarefs[i] = datarefs.pop ();
3032 goto again;
3033 }
3034
3035 /* Check that analysis of the data-ref succeeded. */
3036 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3037 || !DR_STEP (dr))
3038 {
3039 bool maybe_gather
3040 = DR_IS_READ (dr)
3041 && !TREE_THIS_VOLATILE (DR_REF (dr))
3042 && targetm.vectorize.builtin_gather != NULL;
3043 bool maybe_simd_lane_access
3044 = loop_vinfo && loop->simduid;
3045
3046 /* If the target supports vector gather loads, or if this might be
3047 a SIMD lane access, see whether they can be used. */
3048 if (loop_vinfo
3049 && (maybe_gather || maybe_simd_lane_access)
3050 && !nested_in_vect_loop_p (loop, stmt))
3051 {
3052 struct data_reference *newdr
3053 = create_data_ref (NULL, loop_containing_stmt (stmt),
3054 DR_REF (dr), stmt, true);
3055 gcc_assert (newdr != NULL && DR_REF (newdr));
3056 if (DR_BASE_ADDRESS (newdr)
3057 && DR_OFFSET (newdr)
3058 && DR_INIT (newdr)
3059 && DR_STEP (newdr)
3060 && integer_zerop (DR_STEP (newdr)))
3061 {
3062 if (maybe_simd_lane_access)
3063 {
3064 tree off = DR_OFFSET (newdr);
3065 STRIP_NOPS (off);
3066 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3067 && TREE_CODE (off) == MULT_EXPR
3068 && host_integerp (TREE_OPERAND (off, 1), 1))
3069 {
3070 tree step = TREE_OPERAND (off, 1);
3071 off = TREE_OPERAND (off, 0);
3072 STRIP_NOPS (off);
3073 if (CONVERT_EXPR_P (off)
3074 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3075 0)))
3076 < TYPE_PRECISION (TREE_TYPE (off)))
3077 off = TREE_OPERAND (off, 0);
3078 if (TREE_CODE (off) == SSA_NAME)
3079 {
3080 gimple def = SSA_NAME_DEF_STMT (off);
3081 tree reft = TREE_TYPE (DR_REF (newdr));
3082 if (gimple_call_internal_p (def)
3083 && gimple_call_internal_fn (def)
3084 == IFN_GOMP_SIMD_LANE)
3085 {
3086 tree arg = gimple_call_arg (def, 0);
3087 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3088 arg = SSA_NAME_VAR (arg);
3089 if (arg == loop->simduid
3090 /* For now. */
3091 && tree_int_cst_equal
3092 (TYPE_SIZE_UNIT (reft),
3093 step))
3094 {
3095 DR_OFFSET (newdr) = ssize_int (0);
3096 DR_STEP (newdr) = step;
3097 DR_ALIGNED_TO (newdr)
3098 = size_int (BIGGEST_ALIGNMENT);
3099 dr = newdr;
3100 simd_lane_access = true;
3101 }
3102 }
3103 }
3104 }
3105 }
3106 if (!simd_lane_access && maybe_gather)
3107 {
3108 dr = newdr;
3109 gather = true;
3110 }
3111 }
3112 if (!gather && !simd_lane_access)
3113 free_data_ref (newdr);
3114 }
3115
3116 if (!gather && !simd_lane_access)
3117 {
3118 if (dump_enabled_p ())
3119 {
3120 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3121 "not vectorized: data ref analysis "
3122 "failed ");
3123 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3124 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3125 }
3126
3127 if (bb_vinfo)
3128 break;
3129
3130 return false;
3131 }
3132 }
3133
3134 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3135 {
3136 if (dump_enabled_p ())
3137 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3138 "not vectorized: base addr of dr is a "
3139 "constant\n");
3140
3141 if (bb_vinfo)
3142 break;
3143
3144 if (gather || simd_lane_access)
3145 free_data_ref (dr);
3146 return false;
3147 }
3148
3149 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3150 {
3151 if (dump_enabled_p ())
3152 {
3153 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3154 "not vectorized: volatile type ");
3155 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3156 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3157 }
3158
3159 if (bb_vinfo)
3160 break;
3161
3162 return false;
3163 }
3164
3165 if (stmt_can_throw_internal (stmt))
3166 {
3167 if (dump_enabled_p ())
3168 {
3169 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3170 "not vectorized: statement can throw an "
3171 "exception ");
3172 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3173 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3174 }
3175
3176 if (bb_vinfo)
3177 break;
3178
3179 if (gather || simd_lane_access)
3180 free_data_ref (dr);
3181 return false;
3182 }
3183
3184 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3185 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3186 {
3187 if (dump_enabled_p ())
3188 {
3189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3190 "not vectorized: statement is bitfield "
3191 "access ");
3192 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3193 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3194 }
3195
3196 if (bb_vinfo)
3197 break;
3198
3199 if (gather || simd_lane_access)
3200 free_data_ref (dr);
3201 return false;
3202 }
3203
3204 base = unshare_expr (DR_BASE_ADDRESS (dr));
3205 offset = unshare_expr (DR_OFFSET (dr));
3206 init = unshare_expr (DR_INIT (dr));
3207
3208 if (is_gimple_call (stmt))
3209 {
3210 if (dump_enabled_p ())
3211 {
3212 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3213 "not vectorized: dr in a call ");
3214 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3215 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3216 }
3217
3218 if (bb_vinfo)
3219 break;
3220
3221 if (gather || simd_lane_access)
3222 free_data_ref (dr);
3223 return false;
3224 }
3225
3226 /* Update DR field in stmt_vec_info struct. */
3227
3228 /* If the dataref is in an inner-loop of the loop that is considered
3229 for vectorization, we also want to analyze the access relative to
3230 the outer-loop (DR contains information only relative to the
3231 inner-most enclosing loop). We do that by building a reference to the
3232 first location accessed by the inner-loop, and analyze it relative to
3233 the outer-loop. */
3234 if (loop && nested_in_vect_loop_p (loop, stmt))
3235 {
3236 tree outer_step, outer_base, outer_init;
3237 HOST_WIDE_INT pbitsize, pbitpos;
3238 tree poffset;
3239 enum machine_mode pmode;
3240 int punsignedp, pvolatilep;
3241 affine_iv base_iv, offset_iv;
3242 tree dinit;
3243
3244 /* Build a reference to the first location accessed by the
3245 inner-loop: *(BASE+INIT). (The first location is actually
3246 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3247 tree inner_base = build_fold_indirect_ref
3248 (fold_build_pointer_plus (base, init));
3249
3250 if (dump_enabled_p ())
3251 {
3252 dump_printf_loc (MSG_NOTE, vect_location,
3253 "analyze in outer-loop: ");
3254 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3255 dump_printf (MSG_NOTE, "\n");
3256 }
3257
3258 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3259 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3260 gcc_assert (outer_base != NULL_TREE);
3261
3262 if (pbitpos % BITS_PER_UNIT != 0)
3263 {
3264 if (dump_enabled_p ())
3265 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3266 "failed: bit offset alignment.\n");
3267 return false;
3268 }
3269
3270 outer_base = build_fold_addr_expr (outer_base);
3271 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3272 &base_iv, false))
3273 {
3274 if (dump_enabled_p ())
3275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3276 "failed: evolution of base is not affine.\n");
3277 return false;
3278 }
3279
3280 if (offset)
3281 {
3282 if (poffset)
3283 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3284 poffset);
3285 else
3286 poffset = offset;
3287 }
3288
3289 if (!poffset)
3290 {
3291 offset_iv.base = ssize_int (0);
3292 offset_iv.step = ssize_int (0);
3293 }
3294 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3295 &offset_iv, false))
3296 {
3297 if (dump_enabled_p ())
3298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3299 "evolution of offset is not affine.\n");
3300 return false;
3301 }
3302
3303 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3304 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3305 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3306 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3307 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3308
3309 outer_step = size_binop (PLUS_EXPR,
3310 fold_convert (ssizetype, base_iv.step),
3311 fold_convert (ssizetype, offset_iv.step));
3312
3313 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3314 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3315 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3316 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3317 STMT_VINFO_DR_OFFSET (stmt_info) =
3318 fold_convert (ssizetype, offset_iv.base);
3319 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3320 size_int (highest_pow2_factor (offset_iv.base));
3321
3322 if (dump_enabled_p ())
3323 {
3324 dump_printf_loc (MSG_NOTE, vect_location,
3325 "\touter base_address: ");
3326 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3327 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3328 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3329 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3330 STMT_VINFO_DR_OFFSET (stmt_info));
3331 dump_printf (MSG_NOTE,
3332 "\n\touter constant offset from base address: ");
3333 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3334 STMT_VINFO_DR_INIT (stmt_info));
3335 dump_printf (MSG_NOTE, "\n\touter step: ");
3336 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3337 STMT_VINFO_DR_STEP (stmt_info));
3338 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3339 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3340 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3341 dump_printf (MSG_NOTE, "\n");
3342 }
3343 }
3344
3345 if (STMT_VINFO_DATA_REF (stmt_info))
3346 {
3347 if (dump_enabled_p ())
3348 {
3349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3350 "not vectorized: more than one data ref "
3351 "in stmt: ");
3352 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3353 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3354 }
3355
3356 if (bb_vinfo)
3357 break;
3358
3359 if (gather || simd_lane_access)
3360 free_data_ref (dr);
3361 return false;
3362 }
3363
3364 STMT_VINFO_DATA_REF (stmt_info) = dr;
3365 if (simd_lane_access)
3366 {
3367 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3368 datarefs[i] = dr;
3369 }
3370
3371 /* Set vectype for STMT. */
3372 scalar_type = TREE_TYPE (DR_REF (dr));
3373 STMT_VINFO_VECTYPE (stmt_info) =
3374 get_vectype_for_scalar_type (scalar_type);
3375 if (!STMT_VINFO_VECTYPE (stmt_info))
3376 {
3377 if (dump_enabled_p ())
3378 {
3379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3380 "not vectorized: no vectype for stmt: ");
3381 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3382 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3383 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3384 scalar_type);
3385 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3386 }
3387
3388 if (bb_vinfo)
3389 break;
3390
3391 if (gather || simd_lane_access)
3392 {
3393 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3394 free_data_ref (dr);
3395 }
3396 return false;
3397 }
3398 else
3399 {
3400 if (dump_enabled_p ())
3401 {
3402 dump_printf_loc (MSG_NOTE, vect_location,
3403 "got vectype for stmt: ");
3404 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3405 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3406 STMT_VINFO_VECTYPE (stmt_info));
3407 dump_printf (MSG_NOTE, "\n");
3408 }
3409 }
3410
3411 /* Adjust the minimal vectorization factor according to the
3412 vector type. */
3413 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3414 if (vf > *min_vf)
3415 *min_vf = vf;
3416
3417 if (gather)
3418 {
3419 tree off;
3420
3421 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3422 if (gather
3423 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3424 gather = false;
3425 if (!gather)
3426 {
3427 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3428 free_data_ref (dr);
3429 if (dump_enabled_p ())
3430 {
3431 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3432 "not vectorized: not suitable for gather "
3433 "load ");
3434 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3435 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3436 }
3437 return false;
3438 }
3439
3440 datarefs[i] = dr;
3441 STMT_VINFO_GATHER_P (stmt_info) = true;
3442 }
3443 else if (loop_vinfo
3444 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3445 {
3446 if (nested_in_vect_loop_p (loop, stmt)
3447 || !DR_IS_READ (dr))
3448 {
3449 if (dump_enabled_p ())
3450 {
3451 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3452 "not vectorized: not suitable for strided "
3453 "load ");
3454 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3455 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3456 }
3457 return false;
3458 }
3459 STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
3460 }
3461 }
3462
3463 /* If, when trying to vectorize a basic-block, we stopped the analysis
3464 at the first dataref we could not analyze, mark the rest of the
3465 datarefs as not vectorizable and truncate the vector of datarefs.
3466 That avoids spending useless time analyzing their dependences. */
3467 if (i != datarefs.length ())
3468 {
3469 gcc_assert (bb_vinfo != NULL);
3470 for (unsigned j = i; j < datarefs.length (); ++j)
3471 {
3472 data_reference_p dr = datarefs[j];
3473 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3474 free_data_ref (dr);
3475 }
3476 datarefs.truncate (i);
3477 }
3478
3479 return true;
3480 }
3481
3482
3483 /* Function vect_get_new_vect_var.
3484
3485 Returns a name for a new variable. The current naming scheme prepends
3486 the prefix "vect_", "vectp_", or "stmp_" (depending on the value of
3487 VAR_KIND) to NAME if provided; otherwise the prefix alone is used
3488 as the new variable's name. */
3489
3490 tree
3491 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3492 {
3493 const char *prefix;
3494 tree new_vect_var;
3495
3496 switch (var_kind)
3497 {
3498 case vect_simple_var:
3499 prefix = "vect";
3500 break;
3501 case vect_scalar_var:
3502 prefix = "stmp";
3503 break;
3504 case vect_pointer_var:
3505 prefix = "vectp";
3506 break;
3507 default:
3508 gcc_unreachable ();
3509 }
3510
3511 if (name)
3512 {
3513 char* tmp = concat (prefix, "_", name, NULL);
3514 new_vect_var = create_tmp_reg (type, tmp);
3515 free (tmp);
3516 }
3517 else
3518 new_vect_var = create_tmp_reg (type, prefix);
3519
3520 return new_vect_var;
3521 }
3522
3523
3524 /* Function vect_create_addr_base_for_vector_ref.
3525
3526 Create an expression that computes the address of the first memory location
3527 that will be accessed for a data reference.
3528
3529 Input:
3530 STMT: The statement containing the data reference.
3531 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3532 OFFSET: Optional. If supplied, it is added to the initial address.
3533 LOOP: The loop-nest relative to which the address should be computed.
3534 For example, when the dataref is in an inner-loop nested in an
3535 outer-loop that is now being vectorized, LOOP can be either the
3536 outer-loop, or the inner-loop. The first memory location accessed
3537 by the following dataref ('in' points to short):
3538
3539 for (i=0; i<N; i++)
3540 for (j=0; j<M; j++)
3541 s += in[i+j]
3542
3543 is as follows:
3544 if LOOP=i_loop: &in (relative to i_loop)
3545 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3546
3547 Output:
3548 1. Return an SSA_NAME whose value is the address of the memory location of
3549 the first vector of the data reference.
3550 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3551 these statement(s) which define the returned SSA_NAME.
3552
3553 FORNOW: We are only handling array accesses with step 1. */
3554
3555 tree
3556 vect_create_addr_base_for_vector_ref (gimple stmt,
3557 gimple_seq *new_stmt_list,
3558 tree offset,
3559 struct loop *loop)
3560 {
3561 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3562 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3563 tree data_ref_base;
3564 const char *base_name;
3565 tree addr_base;
3566 tree dest;
3567 gimple_seq seq = NULL;
3568 tree base_offset;
3569 tree init;
3570 tree vect_ptr_type;
3571 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3572 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3573
3574 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3575 {
3576 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3577
3578 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3579
3580 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3581 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3582 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3583 }
3584 else
3585 {
3586 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3587 base_offset = unshare_expr (DR_OFFSET (dr));
3588 init = unshare_expr (DR_INIT (dr));
3589 }
3590
3591 if (loop_vinfo)
3592 base_name = get_name (data_ref_base);
3593 else
3594 {
3595 base_offset = ssize_int (0);
3596 init = ssize_int (0);
3597 base_name = get_name (DR_REF (dr));
3598 }
3599
3600 /* Create base_offset */
3601 base_offset = size_binop (PLUS_EXPR,
3602 fold_convert (sizetype, base_offset),
3603 fold_convert (sizetype, init));
3604
3605 if (offset)
3606 {
3607 offset = fold_build2 (MULT_EXPR, sizetype,
3608 fold_convert (sizetype, offset), step);
3609 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3610 base_offset, offset);
3611 }
3612
3613 /* base + base_offset */
3614 if (loop_vinfo)
3615 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3616 else
3617 {
3618 addr_base = build1 (ADDR_EXPR,
3619 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3620 unshare_expr (DR_REF (dr)));
3621 }
3622
3623 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3624 addr_base = fold_convert (vect_ptr_type, addr_base);
3625 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3626 addr_base = force_gimple_operand (addr_base, &seq, false, dest);
3627 gimple_seq_add_seq (new_stmt_list, seq);
3628
3629 if (DR_PTR_INFO (dr)
3630 && TREE_CODE (addr_base) == SSA_NAME)
3631 {
3632 duplicate_ssa_name_ptr_info (addr_base, DR_PTR_INFO (dr));
3633 if (offset)
3634 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3635 }
3636
3637 if (dump_enabled_p ())
3638 {
3639 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3640 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3641 dump_printf (MSG_NOTE, "\n");
3642 }
3643
3644 return addr_base;
3645 }
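
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the address arithmetic the function above emits as gimple,
   written directly in C for a dataref into a 'short' array.  The
   function and parameter names are hypothetical; OFFSET is in elements
   and is scaled by the element size, mirroring the MULT_EXPR on 'step'
   above.  */
#if 0
#include <stddef.h>

static short *
example_addr_base (short *base, size_t base_offset, size_t init,
                   size_t offset)
{
  size_t step = sizeof (short);          /* TYPE_SIZE_UNIT (element type) */
  size_t byte_off = base_offset + init;  /* base_offset + init */
  byte_off += offset * step;             /* optional OFFSET, scaled by step */
  return (short *) ((char *) base + byte_off);   /* base + base_offset */
}
#endif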
3646
3647
3648 /* Function vect_create_data_ref_ptr.
3649
3650 Create a new pointer-to-AGGR_TYPE variable (ap) that points to the first
3651 location accessed in the loop by STMT, along with the def-use update
3652 chain to appropriately advance the pointer through the loop iterations.
3653 Also set aliasing information for the pointer. This pointer is used by
3654 the callers to this function to create a memory reference expression for
3655 vector load/store access.
3656
3657 Input:
3658 1. STMT: a stmt that references memory. Expected to be of the form
3659 GIMPLE_ASSIGN <name, data-ref> or
3660 GIMPLE_ASSIGN <data-ref, name>.
3661 2. AGGR_TYPE: the type of the reference, which should be either a vector
3662 or an array.
3663 3. AT_LOOP: the loop where the vector memref is to be created.
3664 4. OFFSET (optional): an offset to be added to the initial address accessed
3665 by the data-ref in STMT.
3666 5. BSI: location where the new stmts are to be placed if there is no loop
3667 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
3668 pointing to the initial address.
3669
3670 Output:
3671 1. Declare a new ptr to vector_type, and have it point to the base of the
3672 data reference (the initial address accessed by the data reference).
3673 For example, for vector of type V8HI, the following code is generated:
3674
3675 v8hi *ap;
3676 ap = (v8hi *)initial_address;
3677
3678 if OFFSET is not supplied:
3679 initial_address = &a[init];
3680 if OFFSET is supplied:
3681 initial_address = &a[init + OFFSET];
3682
3683 Return the initial_address in INITIAL_ADDRESS.
3684
3685 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3686 update the pointer in each iteration of the loop.
3687
3688 Return the increment stmt that updates the pointer in PTR_INCR.
3689
3690 3. Set INV_P to true if the access pattern of the data reference in the
3691 vectorized loop is invariant. Set it to false otherwise.
3692
3693 4. Return the pointer. */
3694
3695 tree
3696 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
3697 tree offset, tree *initial_address,
3698 gimple_stmt_iterator *gsi, gimple *ptr_incr,
3699 bool only_init, bool *inv_p)
3700 {
3701 const char *base_name;
3702 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3703 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3704 struct loop *loop = NULL;
3705 bool nested_in_vect_loop = false;
3706 struct loop *containing_loop = NULL;
3707 tree aggr_ptr_type;
3708 tree aggr_ptr;
3709 tree new_temp;
3710 gimple vec_stmt;
3711 gimple_seq new_stmt_list = NULL;
3712 edge pe = NULL;
3713 basic_block new_bb;
3714 tree aggr_ptr_init;
3715 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3716 tree aptr;
3717 gimple_stmt_iterator incr_gsi;
3718 bool insert_after;
3719 tree indx_before_incr, indx_after_incr;
3720 gimple incr;
3721 tree step;
3722 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3723
3724 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
3725 || TREE_CODE (aggr_type) == VECTOR_TYPE);
3726
3727 if (loop_vinfo)
3728 {
3729 loop = LOOP_VINFO_LOOP (loop_vinfo);
3730 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3731 containing_loop = (gimple_bb (stmt))->loop_father;
3732 pe = loop_preheader_edge (loop);
3733 }
3734 else
3735 {
3736 gcc_assert (bb_vinfo);
3737 only_init = true;
3738 *ptr_incr = NULL;
3739 }
3740
3741 /* Check the step (evolution) of the load in LOOP, and record
3742 whether it's invariant. */
3743 if (nested_in_vect_loop)
3744 step = STMT_VINFO_DR_STEP (stmt_info);
3745 else
3746 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
3747
3748 if (integer_zerop (step))
3749 *inv_p = true;
3750 else
3751 *inv_p = false;
3752
3753 /* Create an expression for the first address accessed by this load
3754 in LOOP. */
3755 base_name = get_name (DR_BASE_ADDRESS (dr));
3756
3757 if (dump_enabled_p ())
3758 {
3759 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
3760 dump_printf_loc (MSG_NOTE, vect_location,
3761 "create %s-pointer variable to type: ",
3762 get_tree_code_name (TREE_CODE (aggr_type)));
3763 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
3764 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
3765 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
3766 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
3767 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
3768 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
3769 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
3770 else
3771 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
3772 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
3773 dump_printf (MSG_NOTE, "\n");
3774 }
3775
3776 /* (1) Create the new aggregate-pointer variable.
3777 Vector and array types inherit the alias set of their component
3778 type by default so we need to use a ref-all pointer if the data
3779 reference does not conflict with the created aggregated data
3780 reference because it is not addressable. */
3781 bool need_ref_all = false;
3782 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
3783 get_alias_set (DR_REF (dr))))
3784 need_ref_all = true;
3785 /* Likewise for any of the data references in the stmt group. */
3786 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
3787 {
3788 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
3789 do
3790 {
3791 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
3792 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
3793 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
3794 get_alias_set (DR_REF (sdr))))
3795 {
3796 need_ref_all = true;
3797 break;
3798 }
3799 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
3800 }
3801 while (orig_stmt);
3802 }
3803 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
3804 need_ref_all);
3805 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
3806
3807
3808 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
3809 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
3810 def-use update cycles for the pointer: one relative to the outer-loop
3811 (LOOP), which is what steps (2) and (3) below do. The other is relative
3812 to the inner-loop (which is the inner-most loop containing the dataref),
3813 and this is done by step (4) below.
3814
3815 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3816 inner-most loop, and so steps (2),(3) work the same, and step (4) is
3817 redundant. Steps (2),(3) create the following:
3818
3819 vp0 = &base_addr;
3820 LOOP: vp1 = phi(vp0,vp2)
3821 ...
3822 ...
3823 vp2 = vp1 + step
3824 goto LOOP
3825
3826 If there is an inner-loop nested in LOOP, then step (4) will also be
3827 applied, and an additional update in the inner-loop will be created:
3828
3829 vp0 = &base_addr;
3830 LOOP: vp1 = phi(vp0,vp2)
3831 ...
3832 inner: vp3 = phi(vp1,vp4)
3833 vp4 = vp3 + inner_step
3834 if () goto inner
3835 ...
3836 vp2 = vp1 + step
3837 if () goto LOOP */
3838
3839 /* (2) Calculate the initial address of the aggregate-pointer, and set
3840 the aggregate-pointer to point to it before the loop. */
3841
3842 /* Create: &(base[init_val+offset]) in the loop preheader. */
3843
3844 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
3845 offset, loop);
3846 if (new_stmt_list)
3847 {
3848 if (pe)
3849 {
3850 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
3851 gcc_assert (!new_bb);
3852 }
3853 else
3854 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
3855 }
3856
3857 *initial_address = new_temp;
3858
3859 /* Create: p = (aggr_type *) initial_base */
3860 if (TREE_CODE (new_temp) != SSA_NAME
3861 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
3862 {
3863 vec_stmt = gimple_build_assign (aggr_ptr,
3864 fold_convert (aggr_ptr_type, new_temp));
3865 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
3866 /* Copy the points-to information if it exists. */
3867 if (DR_PTR_INFO (dr))
3868 duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
3869 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
3870 if (pe)
3871 {
3872 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
3873 gcc_assert (!new_bb);
3874 }
3875 else
3876 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
3877 }
3878 else
3879 aggr_ptr_init = new_temp;
3880
3881 /* (3) Handle the updating of the aggregate-pointer inside the loop.
3882 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
3883 inner-loop nested in LOOP (during outer-loop vectorization). */
3884
3885 /* No update in loop is required. */
3886 if (only_init && (!loop_vinfo || at_loop == loop))
3887 aptr = aggr_ptr_init;
3888 else
3889 {
3890 /* The step of the aggregate pointer is the type size. */
3891 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
3892 /* One exception to the above is when the scalar step of the load in
3893 LOOP is zero. In this case the step here is also zero. */
3894 if (*inv_p)
3895 iv_step = size_zero_node;
3896 else if (tree_int_cst_sgn (step) == -1)
3897 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
3898
3899 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
3900
3901 create_iv (aggr_ptr_init,
3902 fold_convert (aggr_ptr_type, iv_step),
3903 aggr_ptr, loop, &incr_gsi, insert_after,
3904 &indx_before_incr, &indx_after_incr);
3905 incr = gsi_stmt (incr_gsi);
3906 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3907
3908 /* Copy the points-to information if it exists. */
3909 if (DR_PTR_INFO (dr))
3910 {
3911 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3912 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3913 }
3914 if (ptr_incr)
3915 *ptr_incr = incr;
3916
3917 aptr = indx_before_incr;
3918 }
3919
3920 if (!nested_in_vect_loop || only_init)
3921 return aptr;
3922
3923
3924 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
3925 nested in LOOP, if it exists. */
3926
3927 gcc_assert (nested_in_vect_loop);
3928 if (!only_init)
3929 {
3930 standard_iv_increment_position (containing_loop, &incr_gsi,
3931 &insert_after);
3932 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
3933 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
3934 &indx_after_incr);
3935 incr = gsi_stmt (incr_gsi);
3936 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3937
3938 /* Copy the points-to information if it exists. */
3939 if (DR_PTR_INFO (dr))
3940 {
3941 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3942 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3943 }
3944 if (ptr_incr)
3945 *ptr_incr = incr;
3946
3947 return indx_before_incr;
3948 }
3949 else
3950 gcc_unreachable ();
3951 }
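
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the pointer induction variables that steps (2)-(4) above
   create, written as plain C for an outer-loop-vectorized nest.  The
   function and parameter names are hypothetical; STEP and INNER_STEP
   stand for the byte increments chosen above (TYPE_SIZE_UNIT of
   AGGR_TYPE and DR_STEP respectively).  */
#if 0
static void
example_ptr_ivs (char *base_addr, long n, long m,
                 long step, long inner_step)
{
  char *vp1 = base_addr;            /* vp0 = &base_addr; vp1 = phi (vp0, vp2) */
  for (long i = 0; i < n; i++)
    {
      char *vp3 = vp1;              /* inner: vp3 = phi (vp1, vp4) */
      for (long j = 0; j < m; j++)
        vp3 += inner_step;          /* vp4 = vp3 + inner_step */
      vp1 += step;                  /* vp2 = vp1 + step */
    }
}
#endif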
3952
3953
3954 /* Function bump_vector_ptr
3955
3956 Increment a pointer (to a vector type) by vector-size. If requested,
3957 i.e., if PTR_INCR is given, then also connect the new increment stmt
3958 to the existing def-use update-chain of the pointer, by modifying
3959 the PTR_INCR as illustrated below:
3960
3961 The pointer def-use update-chain before this function:
3962 DATAREF_PTR = phi (p_0, p_2)
3963 ....
3964 PTR_INCR: p_2 = DATAREF_PTR + step
3965
3966 The pointer def-use update-chain after this function:
3967 DATAREF_PTR = phi (p_0, p_2)
3968 ....
3969 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
3970 ....
3971 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
3972
3973 Input:
3974 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
3975 in the loop.
3976 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
3977 the loop. The increment amount across iterations is expected
3978 to be vector_size.
3979 BSI - location where the new update stmt is to be placed.
3980 STMT - the original scalar memory-access stmt that is being vectorized.
3981 BUMP - optional. The offset by which to bump the pointer. If not given,
3982 the offset is assumed to be vector_size.
3983
3984 Output: Return NEW_DATAREF_PTR as illustrated above.
3985
3986 */
3987
3988 tree
3989 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
3990 gimple stmt, tree bump)
3991 {
3992 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3993 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3994 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3995 tree update = TYPE_SIZE_UNIT (vectype);
3996 gimple incr_stmt;
3997 ssa_op_iter iter;
3998 use_operand_p use_p;
3999 tree new_dataref_ptr;
4000
4001 if (bump)
4002 update = bump;
4003
4004 new_dataref_ptr = copy_ssa_name (dataref_ptr, NULL);
4005 incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, new_dataref_ptr,
4006 dataref_ptr, update);
4007 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4008
4009 /* Copy the points-to information if it exists. */
4010 if (DR_PTR_INFO (dr))
4011 {
4012 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4013 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4014 }
4015
4016 if (!ptr_incr)
4017 return new_dataref_ptr;
4018
4019 /* Update the vector-pointer's cross-iteration increment. */
4020 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4021 {
4022 tree use = USE_FROM_PTR (use_p);
4023
4024 if (use == dataref_ptr)
4025 SET_USE (use_p, new_dataref_ptr);
4026 else
4027 gcc_assert (tree_int_cst_compare (use, update) == 0);
4028 }
4029
4030 return new_dataref_ptr;
4031 }
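
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the shape of the update chain above in plain C, for a
   vectorized loop body that needs a second vector access per
   iteration.  The names are hypothetical; VS stands for the vector
   size in bytes (the default BUMP) and STEP for the cross-iteration
   increment of PTR_INCR.  */
#if 0
static long
example_bumped_loop (const char *p_0, long n, long vs, long step)
{
  long sum = 0;
  const char *dataref_ptr = p_0;              /* DATAREF_PTR = phi (p_0, p_2) */
  for (long i = 0; i < n; i++)
    {
      sum += dataref_ptr[0];                  /* first vector access */
      const char *new_dataref_ptr = dataref_ptr + vs;   /* bump_vector_ptr */
      sum += new_dataref_ptr[0];              /* second vector access */
      dataref_ptr = new_dataref_ptr + step;   /* PTR_INCR: p_2 = NEW_DATAREF_PTR + step */
    }
  return sum;
}
#endif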
4032
4033
4034 /* Function vect_create_destination_var.
4035
4036 Create a new temporary of type VECTYPE. */
4037
4038 tree
4039 vect_create_destination_var (tree scalar_dest, tree vectype)
4040 {
4041 tree vec_dest;
4042 const char *name;
4043 char *new_name;
4044 tree type;
4045 enum vect_var_kind kind;
4046
4047 kind = vectype ? vect_simple_var : vect_scalar_var;
4048 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4049
4050 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4051
4052 name = get_name (scalar_dest);
4053 if (name)
4054 asprintf (&new_name, "%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4055 else
4056 asprintf (&new_name, "_%u", SSA_NAME_VERSION (scalar_dest));
4057 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4058 free (new_name);
4059
4060 return vec_dest;
4061 }
4062
4063 /* Function vect_grouped_store_supported.
4064
4065 Returns TRUE if interleave high and interleave low permutations
4066 are supported, and FALSE otherwise. */
4067
4068 bool
4069 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4070 {
4071 enum machine_mode mode = TYPE_MODE (vectype);
4072
4073 /* vect_permute_store_chain requires the group size to be a power of two. */
4074 if (exact_log2 (count) == -1)
4075 {
4076 if (dump_enabled_p ())
4077 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4078 "the size of the group of accesses"
4079 " is not a power of 2\n");
4080 return false;
4081 }
4082
4083 /* Check that the permutation is supported. */
4084 if (VECTOR_MODE_P (mode))
4085 {
4086 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4087 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4088 for (i = 0; i < nelt / 2; i++)
4089 {
4090 sel[i * 2] = i;
4091 sel[i * 2 + 1] = i + nelt;
4092 }
4093 if (can_vec_perm_p (mode, false, sel))
4094 {
4095 for (i = 0; i < nelt; i++)
4096 sel[i] += nelt / 2;
4097 if (can_vec_perm_p (mode, false, sel))
4098 return true;
4099 }
4100 }
4101
4102 if (dump_enabled_p ())
4103 dump_printf (MSG_MISSED_OPTIMIZATION,
4104 "interleave op not supported by target.\n");
4105 return false;
4106 }
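
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the two permutation selectors queried above, printed for
   an 8-element vector mode.  The first is the interleave-high mask
   {0, nelt, 1, nelt+1, ...}; adding nelt/2 to every entry gives the
   interleave-low mask.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int nelt = 8;
  unsigned char sel[8];
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      sel[i * 2] = i;
      sel[i * 2 + 1] = i + nelt;
    }
  for (unsigned int i = 0; i < nelt; i++)
    printf ("%u ", (unsigned int) sel[i]);     /* 0 8 1 9 2 10 3 11 */
  printf ("\n");
  for (unsigned int i = 0; i < nelt; i++)
    printf ("%u ", sel[i] + nelt / 2);         /* 4 12 5 13 6 14 7 15 */
  printf ("\n");
  return 0;
}
#endif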
4107
4108
4109 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4110 type VECTYPE. */
4111
4112 bool
4113 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4114 {
4115 return vect_lanes_optab_supported_p ("vec_store_lanes",
4116 vec_store_lanes_optab,
4117 vectype, count);
4118 }
4119
4120
4121 /* Function vect_permute_store_chain.
4122
4123 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4124 a power of 2, generate interleave_high/low stmts to reorder the data
4125 correctly for the stores. Return the final references for stores in
4126 RESULT_CHAIN.
4127
4128 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4129 The input is 4 vectors each containing 8 elements. We assign a number to
4130 each element; the input sequence is:
4131
4132 1st vec: 0 1 2 3 4 5 6 7
4133 2nd vec: 8 9 10 11 12 13 14 15
4134 3rd vec: 16 17 18 19 20 21 22 23
4135 4th vec: 24 25 26 27 28 29 30 31
4136
4137 The output sequence should be:
4138
4139 1st vec: 0 8 16 24 1 9 17 25
4140 2nd vec: 2 10 18 26 3 11 19 27
4141 3rd vec: 4 12 20 28 5 13 21 29
4142 4th vec: 6 14 22 30 7 15 23 31
4143
4144 i.e., we interleave the contents of the four vectors in their order.
4145
4146 We use interleave_high/low instructions to create such output. The input of
4147 each interleave_high/low operation is two vectors:
4148 1st vec 2nd vec
4149 0 1 2 3 4 5 6 7
4150 the even elements of the result vector are obtained left-to-right from the
4151 high/low elements of the first vector. The odd elements of the result are
4152 obtained left-to-right from the high/low elements of the second vector.
4153 The output of interleave_high will be: 0 4 1 5
4154 and of interleave_low: 2 6 3 7
4155
4156
4157 The permutation is done in log2(LENGTH) stages. In each stage interleave_high
4158 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4159 where the first argument is taken from the first half of DR_CHAIN and the
4160 second argument from its second half.
4161 In our example,
4162
4163 I1: interleave_high (1st vec, 3rd vec)
4164 I2: interleave_low (1st vec, 3rd vec)
4165 I3: interleave_high (2nd vec, 4th vec)
4166 I4: interleave_low (2nd vec, 4th vec)
4167
4168 The output for the first stage is:
4169
4170 I1: 0 16 1 17 2 18 3 19
4171 I2: 4 20 5 21 6 22 7 23
4172 I3: 8 24 9 25 10 26 11 27
4173 I4: 12 28 13 29 14 30 15 31
4174
4175 The output of the second stage, i.e. the final result is:
4176
4177 I1: 0 8 16 24 1 9 17 25
4178 I2: 2 10 18 26 3 11 19 27
4179 I3: 4 12 20 28 5 13 21 29
4180 I4: 6 14 22 30 7 15 23 31. */
4181
4182 void
4183 vect_permute_store_chain (vec<tree> dr_chain,
4184 unsigned int length,
4185 gimple stmt,
4186 gimple_stmt_iterator *gsi,
4187 vec<tree> *result_chain)
4188 {
4189 tree vect1, vect2, high, low;
4190 gimple perm_stmt;
4191 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4192 tree perm_mask_low, perm_mask_high;
4193 unsigned int i, n;
4194 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4195 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4196
4197 result_chain->quick_grow (length);
4198 memcpy (result_chain->address (), dr_chain.address (),
4199 length * sizeof (tree));
4200
4201 for (i = 0, n = nelt / 2; i < n; i++)
4202 {
4203 sel[i * 2] = i;
4204 sel[i * 2 + 1] = i + nelt;
4205 }
4206 perm_mask_high = vect_gen_perm_mask (vectype, sel);
4207 gcc_assert (perm_mask_high != NULL);
4208
4209 for (i = 0; i < nelt; i++)
4210 sel[i] += nelt / 2;
4211 perm_mask_low = vect_gen_perm_mask (vectype, sel);
4212 gcc_assert (perm_mask_low != NULL);
4213
4214 for (i = 0, n = exact_log2 (length); i < n; i++)
4215 {
4216 for (j = 0; j < length/2; j++)
4217 {
4218 vect1 = dr_chain[j];
4219 vect2 = dr_chain[j+length/2];
4220
4221 /* Create interleaving stmt:
4222 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, ...}> */
4223 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4224 perm_stmt
4225 = gimple_build_assign_with_ops (VEC_PERM_EXPR, high,
4226 vect1, vect2, perm_mask_high);
4227 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4228 (*result_chain)[2*j] = high;
4229
4230 /* Create interleaving stmt:
4231 low = VEC_PERM_EXPR <vect1, vect2, {nelt/2, nelt*3/2, nelt/2+1,
4232 nelt*3/2+1, ...}> */
4233 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4234 perm_stmt
4235 = gimple_build_assign_with_ops (VEC_PERM_EXPR, low,
4236 vect1, vect2, perm_mask_low);
4237 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4238 (*result_chain)[2*j+1] = low;
4239 }
4240 memcpy (dr_chain.address (), result_chain->address (),
4241 length * sizeof (tree));
4242 }
4243 }
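
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the same log2(LENGTH)-stage interleave permutation the
   function above performs with VEC_PERM_EXPRs, done here on plain
   arrays of ints for the documented example (LENGTH = 4, NELT = 8),
   so that the element order given in the comment can be reproduced
   and checked directly.  */
#if 0
#include <stdio.h>
#include <string.h>

#define NELT 8
#define LENGTH 4

/* High/low halves of the interleave of V1 and V2, following the
   {0, nelt, 1, nelt+1, ...} and {nelt/2, nelt*3/2, ...} masks.  */
static void
interleave (const int *v1, const int *v2, int *hi, int *lo)
{
  for (int i = 0; i < NELT / 2; i++)
    {
      hi[2 * i] = v1[i];
      hi[2 * i + 1] = v2[i];
      lo[2 * i] = v1[NELT / 2 + i];
      lo[2 * i + 1] = v2[NELT / 2 + i];
    }
}

int
main (void)
{
  int chain[LENGTH][NELT], result[LENGTH][NELT];
  for (int i = 0; i < LENGTH; i++)
    for (int j = 0; j < NELT; j++)
      chain[i][j] = i * NELT + j;

  for (int stage = 0; stage < 2; stage++)      /* log2 (LENGTH) stages */
    {
      for (int j = 0; j < LENGTH / 2; j++)
        interleave (chain[j], chain[j + LENGTH / 2],
                    result[2 * j], result[2 * j + 1]);
      memcpy (chain, result, sizeof (chain));
    }

  /* Prints 0 8 16 24 1 9 17 25, then 2 10 18 26 ..., as documented.  */
  for (int i = 0; i < LENGTH; i++)
    {
      for (int j = 0; j < NELT; j++)
        printf ("%d ", chain[i][j]);
      printf ("\n");
    }
  return 0;
}
#endif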
4244
4245 /* Function vect_setup_realignment
4246
4247 This function is called when vectorizing an unaligned load using
4248 the dr_explicit_realign[_optimized] scheme.
4249 This function generates the following code at the loop prolog:
4250
4251 p = initial_addr;
4252 x msq_init = *(floor(p)); # prolog load
4253 realignment_token = call target_builtin;
4254 loop:
4255 x msq = phi (msq_init, ---)
4256
4257 The stmts marked with x are generated only for the case of
4258 dr_explicit_realign_optimized.
4259
4260 The code above sets up a new (vector) pointer, pointing to the first
4261 location accessed by STMT, and a "floor-aligned" load using that pointer.
4262 It also generates code to compute the "realignment-token" (if the relevant
4263 target hook was defined), and creates a phi-node at the loop-header bb
4264 whose arguments are the result of the prolog-load (created by this
4265 function) and the result of a load that takes place in the loop (to be
4266 created by the caller to this function).
4267
4268 For the case of dr_explicit_realign_optimized:
4269 The caller to this function uses the phi-result (msq) to create the
4270 realignment code inside the loop, and sets up the missing phi argument,
4271 as follows:
4272 loop:
4273 msq = phi (msq_init, lsq)
4274 lsq = *(floor(p')); # load in loop
4275 result = realign_load (msq, lsq, realignment_token);
4276
4277 For the case of dr_explicit_realign:
4278 loop:
4279 msq = *(floor(p)); # load in loop
4280 p' = p + (VS-1);
4281 lsq = *(floor(p')); # load in loop
4282 result = realign_load (msq, lsq, realignment_token);
4283
4284 Input:
4285 STMT - (scalar) load stmt to be vectorized. This load accesses
4286 a memory location that may be unaligned.
4287 BSI - place where new code is to be inserted.
4288 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4289 is used.
4290
4291 Output:
4292 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4293 target hook, if defined.
4294 Return value - the result of the loop-header phi node. */
4295
4296 tree
4297 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4298 tree *realignment_token,
4299 enum dr_alignment_support alignment_support_scheme,
4300 tree init_addr,
4301 struct loop **at_loop)
4302 {
4303 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4304 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4305 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4306 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4307 struct loop *loop = NULL;
4308 edge pe = NULL;
4309 tree scalar_dest = gimple_assign_lhs (stmt);
4310 tree vec_dest;
4311 gimple inc;
4312 tree ptr;
4313 tree data_ref;
4314 gimple new_stmt;
4315 basic_block new_bb;
4316 tree msq_init = NULL_TREE;
4317 tree new_temp;
4318 gimple phi_stmt;
4319 tree msq = NULL_TREE;
4320 gimple_seq stmts = NULL;
4321 bool inv_p;
4322 bool compute_in_loop = false;
4323 bool nested_in_vect_loop = false;
4324 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4325 struct loop *loop_for_initial_load = NULL;
4326
4327 if (loop_vinfo)
4328 {
4329 loop = LOOP_VINFO_LOOP (loop_vinfo);
4330 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4331 }
4332
4333 gcc_assert (alignment_support_scheme == dr_explicit_realign
4334 || alignment_support_scheme == dr_explicit_realign_optimized);
4335
4336 /* We need to generate three things:
4337 1. the misalignment computation
4338 2. the extra vector load (for the optimized realignment scheme).
4339 3. the phi node for the two vectors from which the realignment is
4340 done (for the optimized realignment scheme). */
4341
4342 /* 1. Determine where to generate the misalignment computation.
4343
4344 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4345 calculation will be generated by this function, outside the loop (in the
4346 preheader). Otherwise, INIT_ADDR has already been computed for us by the
4347 caller, inside the loop.
4348
4349 Background: If the misalignment remains fixed throughout the iterations of
4350 the loop, then both realignment schemes are applicable, and also the
4351 misalignment computation can be done outside LOOP. This is because we are
4352 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4353 are a multiple of VS (the Vector Size), and therefore the misalignment in
4354 different vectorized LOOP iterations is always the same.
4355 The problem arises only if the memory access is in an inner-loop nested
4356 inside LOOP, which is now being vectorized using outer-loop vectorization.
4357 This is the only case when the misalignment of the memory access may not
4358 remain fixed throughout the iterations of the inner-loop (as explained in
4359 detail in vect_supportable_dr_alignment). In this case, not only is the
4360 optimized realignment scheme not applicable, but also the misalignment
4361 computation (and generation of the realignment token that is passed to
4362 REALIGN_LOAD) have to be done inside the loop.
4363
4364 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4365 or not, which in turn determines if the misalignment is computed inside
4366 the inner-loop, or outside LOOP. */
4367
4368 if (init_addr != NULL_TREE || !loop_vinfo)
4369 {
4370 compute_in_loop = true;
4371 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4372 }
4373
4374
4375 /* 2. Determine where to generate the extra vector load.
4376
4377 For the optimized realignment scheme, instead of generating two vector
4378 loads in each iteration, we generate a single extra vector load in the
4379 preheader of the loop, and in each iteration reuse the result of the
4380 vector load from the previous iteration. In case the memory access is in
4381 an inner-loop nested inside LOOP, which is now being vectorized using
4382 outer-loop vectorization, we need to determine whether this initial vector
4383 load should be generated at the preheader of the inner-loop, or can be
4384 generated at the preheader of LOOP. If the memory access has no evolution
4385 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4386 to be generated inside LOOP (in the preheader of the inner-loop). */
4387
4388 if (nested_in_vect_loop)
4389 {
4390 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4391 bool invariant_in_outerloop =
4392 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4393 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4394 }
4395 else
4396 loop_for_initial_load = loop;
4397 if (at_loop)
4398 *at_loop = loop_for_initial_load;
4399
4400 if (loop_for_initial_load)
4401 pe = loop_preheader_edge (loop_for_initial_load);
4402
4403 /* 3. For the case of the optimized realignment, create the first vector
4404 load at the loop preheader. */
4405
4406 if (alignment_support_scheme == dr_explicit_realign_optimized)
4407 {
4408 /* Create msq_init = *(floor(p1)) in the loop preheader */
4409
4410 gcc_assert (!compute_in_loop);
4411 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4412 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4413 NULL_TREE, &init_addr, NULL, &inc,
4414 true, &inv_p);
4415 new_temp = copy_ssa_name (ptr, NULL);
4416 new_stmt = gimple_build_assign_with_ops
4417 (BIT_AND_EXPR, new_temp, ptr,
4418 build_int_cst (TREE_TYPE (ptr),
4419 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4420 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4421 gcc_assert (!new_bb);
4422 data_ref
4423 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4424 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4425 new_stmt = gimple_build_assign (vec_dest, data_ref);
4426 new_temp = make_ssa_name (vec_dest, new_stmt);
4427 gimple_assign_set_lhs (new_stmt, new_temp);
4428 if (pe)
4429 {
4430 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4431 gcc_assert (!new_bb);
4432 }
4433 else
4434 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4435
4436 msq_init = gimple_assign_lhs (new_stmt);
4437 }
4438
4439 /* 4. Create realignment token using a target builtin, if available.
4440 It is done either inside the containing loop, or before LOOP (as
4441 determined above). */
4442
4443 if (targetm.vectorize.builtin_mask_for_load)
4444 {
4445 tree builtin_decl;
4446
4447 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4448 if (!init_addr)
4449 {
4450 /* Generate the INIT_ADDR computation outside LOOP. */
4451 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4452 NULL_TREE, loop);
4453 if (loop)
4454 {
4455 pe = loop_preheader_edge (loop);
4456 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4457 gcc_assert (!new_bb);
4458 }
4459 else
4460 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4461 }
4462
4463 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4464 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4465 vec_dest =
4466 vect_create_destination_var (scalar_dest,
4467 gimple_call_return_type (new_stmt));
4468 new_temp = make_ssa_name (vec_dest, new_stmt);
4469 gimple_call_set_lhs (new_stmt, new_temp);
4470
4471 if (compute_in_loop)
4472 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4473 else
4474 {
4475 /* Generate the misalignment computation outside LOOP. */
4476 pe = loop_preheader_edge (loop);
4477 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4478 gcc_assert (!new_bb);
4479 }
4480
4481 *realignment_token = gimple_call_lhs (new_stmt);
4482
4483 /* The result of the CALL_EXPR to this builtin is determined from
4484 the value of the parameter and no global variables are touched
4485 which makes the builtin a "const" function. Requiring the
4486 builtin to have the "const" attribute makes it unnecessary
4487 to call mark_call_clobbered. */
4488 gcc_assert (TREE_READONLY (builtin_decl));
4489 }
4490
4491 if (alignment_support_scheme == dr_explicit_realign)
4492 return msq;
4493
4494 gcc_assert (!compute_in_loop);
4495 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4496
4497
4498 /* 5. Create msq = phi <msq_init, lsq> in loop */
4499
4500 pe = loop_preheader_edge (containing_loop);
4501 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4502 msq = make_ssa_name (vec_dest, NULL);
4503 phi_stmt = create_phi_node (msq, containing_loop->header);
4504 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4505
4506 return msq;
4507 }
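
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): what REALIGN_LOAD conceptually computes, expressed on
   plain 16-byte "vectors" of bytes.  MSQ and LSQ are the two aligned
   loads that straddle the misaligned address P; the realignment token
   here is simply the byte misalignment of P (real targets encode it
   differently, e.g. as a permutation mask).  The names and the VS
   value are assumptions of this sketch.  */
#if 0
#include <stdint.h>

#define VS 16

static void
example_realign_load (const unsigned char *p, unsigned char out[VS])
{
  uintptr_t misalign = (uintptr_t) p % VS;      /* realignment token */
  const unsigned char *msq = p - misalign;      /* msq = *(floor (p)) */
  const unsigned char *lsq = msq + VS;          /* lsq = *(floor (p + VS - 1)) when misaligned */
  for (int i = 0; i < VS; i++)                  /* realign_load (msq, lsq, rt) */
    out[i] = (misalign + i < VS
              ? msq[misalign + i]
              : lsq[misalign + i - VS]);
}
#endif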
4508
4509
4510 /* Function vect_grouped_load_supported.
4511
4512 Returns TRUE if even and odd permutations are supported,
4513 and FALSE otherwise. */
4514
4515 bool
4516 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4517 {
4518 enum machine_mode mode = TYPE_MODE (vectype);
4519
4520 /* vect_permute_load_chain requires the group size to be a power of two. */
4521 if (exact_log2 (count) == -1)
4522 {
4523 if (dump_enabled_p ())
4524 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4525 "the size of the group of accesses"
4526 " is not a power of 2\n");
4527 return false;
4528 }
4529
4530 /* Check that the permutation is supported. */
4531 if (VECTOR_MODE_P (mode))
4532 {
4533 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4534 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4535
4536 for (i = 0; i < nelt; i++)
4537 sel[i] = i * 2;
4538 if (can_vec_perm_p (mode, false, sel))
4539 {
4540 for (i = 0; i < nelt; i++)
4541 sel[i] = i * 2 + 1;
4542 if (can_vec_perm_p (mode, false, sel))
4543 return true;
4544 }
4545 }
4546
4547 if (dump_enabled_p ())
4548 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4549 "extract even/odd not supported by target\n");
4550 return false;
4551 }
4552
4553 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
4554 type VECTYPE. */
4555
4556 bool
4557 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4558 {
4559 return vect_lanes_optab_supported_p ("vec_load_lanes",
4560 vec_load_lanes_optab,
4561 vectype, count);
4562 }
4563
4564 /* Function vect_permute_load_chain.
4565
4566 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
4567 a power of 2, generate extract_even/odd stmts to reorder the input data
4568 correctly. Return the final references for loads in RESULT_CHAIN.
4569
4570 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4571 The input is 4 vectors each containing 8 elements. We assign a number to each
4572 element; the input sequence is:
4573
4574 1st vec: 0 1 2 3 4 5 6 7
4575 2nd vec: 8 9 10 11 12 13 14 15
4576 3rd vec: 16 17 18 19 20 21 22 23
4577 4th vec: 24 25 26 27 28 29 30 31
4578
4579 The output sequence should be:
4580
4581 1st vec: 0 4 8 12 16 20 24 28
4582 2nd vec: 1 5 9 13 17 21 25 29
4583 3rd vec: 2 6 10 14 18 22 26 30
4584 4th vec: 3 7 11 15 19 23 27 31
4585
4586 i.e., the first output vector should contain the first elements of each
4587 interleaving group, etc.
4588
4589 We use extract_even/odd instructions to create such output. The input of
4590 each extract_even/odd operation is two vectors
4591 1st vec 2nd vec
4592 0 1 2 3 4 5 6 7
4593
4594 and the output is the vector of extracted even/odd elements. The output of
4595 extract_even will be: 0 2 4 6
4596 and of extract_odd: 1 3 5 7
4597
4598
4599 The permutation is done in log2(LENGTH) stages. In each stage extract_even
4600 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
4601 their order. In our example,
4602
4603 E1: extract_even (1st vec, 2nd vec)
4604 E2: extract_odd (1st vec, 2nd vec)
4605 E3: extract_even (3rd vec, 4th vec)
4606 E4: extract_odd (3rd vec, 4th vec)
4607
4608 The output for the first stage will be:
4609
4610 E1: 0 2 4 6 8 10 12 14
4611 E2: 1 3 5 7 9 11 13 15
4612 E3: 16 18 20 22 24 26 28 30
4613 E4: 17 19 21 23 25 27 29 31
4614
4615 In order to proceed and create the correct sequence for the next stage (or
4616 for the correct output, if the second stage is the last one, as in our
4617 example), we first put the output of the extract_even operation and then the
4618 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
4619 The input for the second stage is:
4620
4621 1st vec (E1): 0 2 4 6 8 10 12 14
4622 2nd vec (E3): 16 18 20 22 24 26 28 30
4623 3rd vec (E2): 1 3 5 7 9 11 13 15
4624 4th vec (E4): 17 19 21 23 25 27 29 31
4625
4626 The output of the second stage:
4627
4628 E1: 0 4 8 12 16 20 24 28
4629 E2: 2 6 10 14 18 22 26 30
4630 E3: 1 5 9 13 17 21 25 29
4631 E4: 3 7 11 15 19 23 27 31
4632
4633 And RESULT_CHAIN after reordering:
4634
4635 1st vec (E1): 0 4 8 12 16 20 24 28
4636 2nd vec (E3): 1 5 9 13 17 21 25 29
4637 3rd vec (E2): 2 6 10 14 18 22 26 30
4638 4th vec (E4): 3 7 11 15 19 23 27 31. */
4639
4640 static void
4641 vect_permute_load_chain (vec<tree> dr_chain,
4642 unsigned int length,
4643 gimple stmt,
4644 gimple_stmt_iterator *gsi,
4645 vec<tree> *result_chain)
4646 {
4647 tree data_ref, first_vect, second_vect;
4648 tree perm_mask_even, perm_mask_odd;
4649 gimple perm_stmt;
4650 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4651 unsigned int i, j, log_length = exact_log2 (length);
4652 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
4653 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4654
4655 result_chain->quick_grow (length);
4656 memcpy (result_chain->address (), dr_chain.address (),
4657 length * sizeof (tree));
4658
4659 for (i = 0; i < nelt; ++i)
4660 sel[i] = i * 2;
4661 perm_mask_even = vect_gen_perm_mask (vectype, sel);
4662 gcc_assert (perm_mask_even != NULL);
4663
4664 for (i = 0; i < nelt; ++i)
4665 sel[i] = i * 2 + 1;
4666 perm_mask_odd = vect_gen_perm_mask (vectype, sel);
4667 gcc_assert (perm_mask_odd != NULL);
4668
4669 for (i = 0; i < log_length; i++)
4670 {
4671 for (j = 0; j < length; j += 2)
4672 {
4673 first_vect = dr_chain[j];
4674 second_vect = dr_chain[j+1];
4675
4676 /* data_ref = permute_even (first_data_ref, second_data_ref); */
4677 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
4678 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
4679 first_vect, second_vect,
4680 perm_mask_even);
4681 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4682 (*result_chain)[j/2] = data_ref;
4683
4684 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
4685 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
4686 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
4687 first_vect, second_vect,
4688 perm_mask_odd);
4689 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4690 (*result_chain)[j/2+length/2] = data_ref;
4691 }
4692 memcpy (dr_chain.address (), result_chain->address (),
4693 length * sizeof (tree));
4694 }
4695 }
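
/* Illustrative sketch (not part of GCC, kept out of the build with
   "#if 0"): the same log2(LENGTH)-stage extract-even/odd permutation
   the function above performs with VEC_PERM_EXPRs, done here on plain
   arrays of ints for the documented example (LENGTH = 4, NELT = 8),
   so that the RESULT_CHAIN order given in the comment can be
   reproduced and checked directly.  */
#if 0
#include <stdio.h>
#include <string.h>

#define NELT 8
#define LENGTH 4

int
main (void)
{
  int chain[LENGTH][NELT], result[LENGTH][NELT];
  for (int i = 0; i < LENGTH; i++)
    for (int j = 0; j < NELT; j++)
      chain[i][j] = i * NELT + j;

  for (int stage = 0; stage < 2; stage++)      /* log2 (LENGTH) stages */
    {
      for (int j = 0; j < LENGTH; j += 2)
        for (int k = 0; k < NELT; k++)
          {
            /* Even elements of the concatenation of chain[j], chain[j+1],
               i.e. the {0, 2, 4, ...} selector above.  */
            result[j / 2][k] = (2 * k < NELT
                                ? chain[j][2 * k]
                                : chain[j + 1][2 * k - NELT]);
            /* Odd elements, i.e. the {1, 3, 5, ...} selector.  */
            result[j / 2 + LENGTH / 2][k] = (2 * k + 1 < NELT
                                             ? chain[j][2 * k + 1]
                                             : chain[j + 1][2 * k + 1 - NELT]);
          }
      memcpy (chain, result, sizeof (chain));
    }

  /* Prints 0 4 8 12 16 20 24 28, then 1 5 9 13 ..., as documented.  */
  for (int i = 0; i < LENGTH; i++)
    {
      for (int j = 0; j < NELT; j++)
        printf ("%d ", chain[i][j]);
      printf ("\n");
    }
  return 0;
}
#endif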
4696
4697
4698 /* Function vect_transform_grouped_load.
4699
4700 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
4701 to perform their permutation and assign the resulting vectorized statements to
4702 the scalar statements.
4703 */
4704
4705 void
4706 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
4707 gimple_stmt_iterator *gsi)
4708 {
4709 vec<tree> result_chain = vNULL;
4710
4711 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
4712 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
4713 vectors, that are ready for vector computation. */
4714 result_chain.create (size);
4715 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
4716 vect_record_grouped_load_vectors (stmt, result_chain);
4717 result_chain.release ();
4718 }
4719
4720 /* RESULT_CHAIN contains the output of a group of grouped loads that were
4721 generated as part of the vectorization of STMT. Assign the statement
4722 for each vector to the associated scalar statement. */
4723
4724 void
4725 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
4726 {
4727 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
4728 gimple next_stmt, new_stmt;
4729 unsigned int i, gap_count;
4730 tree tmp_data_ref;
4731
4732 /* Put a permuted data-ref in the VECTORIZED_STMT field.
4733 Since we scan the chain starting from its first node, their order
4734 corresponds to the order of data-refs in RESULT_CHAIN. */
4735 next_stmt = first_stmt;
4736 gap_count = 1;
4737 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
4738 {
4739 if (!next_stmt)
4740 break;
4741
4742 /* Skip the gaps. Loads created for the gaps will be removed by the dead
4743 code elimination pass later. No need to check for the first stmt in
4744 the group, since it always exists.
4745 GROUP_GAP is the number of steps in elements from the previous
4746 access (if there is no gap GROUP_GAP is 1). We skip loads that
4747 correspond to the gaps. */
4748 if (next_stmt != first_stmt
4749 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
4750 {
4751 gap_count++;
4752 continue;
4753 }
4754
4755 while (next_stmt)
4756 {
4757 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
4758 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
4759 copies, and we put the new vector statement in the first available
4760 RELATED_STMT. */
4761 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
4762 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
4763 else
4764 {
4765 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4766 {
4767 gimple prev_stmt =
4768 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
4769 gimple rel_stmt =
4770 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
4771 while (rel_stmt)
4772 {
4773 prev_stmt = rel_stmt;
4774 rel_stmt =
4775 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
4776 }
4777
4778 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
4779 new_stmt;
4780 }
4781 }
4782
4783 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
4784 gap_count = 1;
4785 /* If NEXT_STMT accesses the same DR as the previous statement,
4786 put the same TMP_DATA_REF as its vectorized statement; otherwise
4787 get the next data-ref from RESULT_CHAIN. */
4788 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4789 break;
4790 }
4791 }
4792 }
4793
4794 /* Function vect_can_force_dr_alignment_p.
4795
4796 Returns whether the alignment of a DECL can be forced to be aligned
4797 on ALIGNMENT bit boundary. */
4798
4799 bool
4800 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
4801 {
4802 if (TREE_CODE (decl) != VAR_DECL)
4803 return false;
4804
4805 /* We cannot change alignment of common or external symbols as another
4806 translation unit may contain a definition with lower alignment.
4807 The rules of common symbol linking mean that the definition
4808 will override the common symbol. The same is true for constant
4809 pool entries which may be shared and are not properly merged
4810 by LTO. */
4811 if (DECL_EXTERNAL (decl)
4812 || DECL_COMMON (decl)
4813 || DECL_IN_CONSTANT_POOL (decl))
4814 return false;
4815
4816 if (TREE_ASM_WRITTEN (decl))
4817 return false;
4818
4819 /* Do not override the alignment as specified by the ABI when the used
4820 attribute is set. */
4821 if (DECL_PRESERVE_P (decl))
4822 return false;
4823
4824 /* Do not override explicit alignment set by the user when an explicit
4825 section name is also used. This is a common idiom used by many
4826 software projects. */
4827 if (DECL_SECTION_NAME (decl) != NULL_TREE
4828 && !DECL_HAS_IMPLICIT_SECTION_NAME_P (decl))
4829 return false;
4830
4831 if (TREE_STATIC (decl))
4832 return (alignment <= MAX_OFILE_ALIGNMENT);
4833 else
4834 return (alignment <= MAX_STACK_ALIGNMENT);
4835 }
4836
4837
4838 /* Return whether the data reference DR is supported with respect to its
4839 alignment.
4840 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
4841 if it is aligned, i.e., check if it is possible to vectorize it with different
4842 alignment. */
4843
4844 enum dr_alignment_support
4845 vect_supportable_dr_alignment (struct data_reference *dr,
4846 bool check_aligned_accesses)
4847 {
4848 gimple stmt = DR_STMT (dr);
4849 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4850 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4851 enum machine_mode mode = TYPE_MODE (vectype);
4852 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4853 struct loop *vect_loop = NULL;
4854 bool nested_in_vect_loop = false;
4855
4856 if (aligned_access_p (dr) && !check_aligned_accesses)
4857 return dr_aligned;
4858
4859 if (loop_vinfo)
4860 {
4861 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
4862 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
4863 }
4864
4865 /* Possibly unaligned access. */
4866
4867 /* We can choose between using the implicit realignment scheme (generating
4868 a misaligned_move stmt) and the explicit realignment scheme (generating
4869 aligned loads with a REALIGN_LOAD). There are two variants to the
4870 explicit realignment scheme: optimized, and unoptimized.
4871 We can optimize the realignment only if the step between consecutive
4872 vector loads is equal to the vector size. Since the vector memory
4873 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
4874 is guaranteed that the misalignment amount remains the same throughout the
4875 execution of the vectorized loop. Therefore, we can create the
4876 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
4877 at the loop preheader.
4878
4879 However, in the case of outer-loop vectorization, when vectorizing a
4880 memory access in the inner-loop nested within the LOOP that is now being
4881 vectorized, while it is guaranteed that the misalignment of the
4882 vectorized memory access will remain the same in different outer-loop
4883 iterations, it is *not* guaranteed that it will remain the same throughout
4884 the execution of the inner-loop. This is because the inner-loop advances
4885 with the original scalar step (and not in steps of VS). If the inner-loop
4886 step happens to be a multiple of VS, then the misalignment remains fixed
4887 and we can use the optimized realignment scheme. For example:
4888
4889 for (i=0; i<N; i++)
4890 for (j=0; j<M; j++)
4891 s += a[i+j];
4892
4893 When vectorizing the i-loop in the above example, the step between
4894 consecutive vector loads is 1, and so the misalignment does not remain
4895 fixed across the execution of the inner-loop, and the realignment cannot
4896 be optimized (as illustrated in the following pseudo vectorized loop):
4897
4898 for (i=0; i<N; i+=4)
4899 for (j=0; j<M; j++){
4900 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
4901 // when j is {0,1,2,3,4,5,6,7,...} respectively.
4902 // (assuming that we start from an aligned address).
4903 }
4904
4905 We therefore have to use the unoptimized realignment scheme:
4906
4907 for (i=0; i<N; i+=4)
4908 for (j=k; j<M; j+=4)
4909 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
4910 // that the misalignment of the initial address is
4911 // 0).
4912
4913 The loop can then be vectorized as follows:
4914
4915 for (k=0; k<4; k++){
4916 rt = get_realignment_token (&vp[k]);
4917 for (i=0; i<N; i+=4){
4918 v1 = vp[i+k];
4919 for (j=k; j<M; j+=4){
4920 v2 = vp[i+j+VS-1];
4921 va = REALIGN_LOAD <v1,v2,rt>;
4922 vs += va;
4923 v1 = v2;
4924 }
4925 }
4926 } */
4927
4928 if (DR_IS_READ (dr))
4929 {
4930 bool is_packed = false;
4931 tree type = (TREE_TYPE (DR_REF (dr)));
4932
4933 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
4934 && (!targetm.vectorize.builtin_mask_for_load
4935 || targetm.vectorize.builtin_mask_for_load ()))
4936 {
4937 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4938 if ((nested_in_vect_loop
4939 && (TREE_INT_CST_LOW (DR_STEP (dr))
4940 != GET_MODE_SIZE (TYPE_MODE (vectype))))
4941 || !loop_vinfo)
4942 return dr_explicit_realign;
4943 else
4944 return dr_explicit_realign_optimized;
4945 }
4946 if (!known_alignment_for_access_p (dr))
4947 is_packed = not_size_aligned (DR_REF (dr));
4948
4949 if ((TYPE_USER_ALIGN (type) && !is_packed)
4950 || targetm.vectorize.
4951 support_vector_misalignment (mode, type,
4952 DR_MISALIGNMENT (dr), is_packed))
4953 /* Can't software pipeline the loads, but can at least do them. */
4954 return dr_unaligned_supported;
4955 }
4956 else
4957 {
4958 bool is_packed = false;
4959 tree type = (TREE_TYPE (DR_REF (dr)));
4960
4961 if (!known_alignment_for_access_p (dr))
4962 is_packed = not_size_aligned (DR_REF (dr));
4963
4964 if ((TYPE_USER_ALIGN (type) && !is_packed)
4965 || targetm.vectorize.
4966 support_vector_misalignment (mode, type,
4967 DR_MISALIGNMENT (dr), is_packed))
4968 return dr_unaligned_supported;
4969 }
4970
4971 /* Unsupported. */
4972 return dr_unaligned_unsupported;
4973 }