/* Loop autoparallelization.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
   Zdenek Dvorak <dvorakz@suse.cz>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "ggc.h"
#include "tree-data-ref.h"
#include "diagnostic.h"
#include "tree-pass.h"
#include "tree-scalar-evolution.h"
#include "hashtab.h"
#include "langhooks.h"
#include "tree-vectorizer.h"

/* This pass tries to distribute iterations of loops into several threads.
   The implementation is straightforward -- for each loop we test whether its
   iterations are independent, and if that is the case (and some additional
   conditions regarding profitability and correctness are satisfied), we
   add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let the omp expansion
   machinery do its job.

   Most of the complexity is in bringing the code into the shape expected
   by the omp expanders:
   -- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
      variable and that the exit test is at the start of the loop body
   -- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
      variables by accesses through pointers, and breaking up ssa chains
      by storing the values incoming to the parallelized loop to a structure
      passed to the new function as an argument (something similar is done
      in omp gimplification, unfortunately only a small part of the code
      can be shared).

   TODO:
   -- if there are several parallelizable loops in a function, it may be
      possible to generate the threads just once (using synchronization to
      ensure that cross-loop dependences are obeyed).
   -- handling of common scalar dependence patterns (accumulation, ...)
   -- handling of non-innermost loops  */

/*
  Reduction handling:
  currently we use vect_is_simple_reduction() to detect reduction patterns.
  The code transformation will be introduced by an example.


parloop
{
  int sum=1;

  for (i = 0; i < N; i++)
   {
    x[i] = i + 3;
    sum+=x[i];
   }
}

gimple-like code:
header_bb:

  # sum_29 = PHI <sum_11(5), 1(3)>
  # i_28 = PHI <i_12(5), 0(3)>
  D.1795_8 = i_28 + 3;
  x[i_28] = D.1795_8;
  sum_11 = D.1795_8 + sum_29;
  i_12 = i_28 + 1;
  if (N_6(D) > i_12)
    goto header_bb;


exit_bb:

  # sum_21 = PHI <sum_11(4)>
  printf (&"%d"[0], sum_21);


after reduction transformation (only relevant parts):

parloop
{

....


  # Storing the initial value given by the user.  #

  .paral_data_store.32.sum.27 = 1;

  #pragma omp parallel num_threads(4)

  #pragma omp for schedule(static)

  # The neutral element corresponding to the particular
    reduction's operation, e.g. 0 for PLUS_EXPR,
    1 for MULT_EXPR, etc. replaces the user's initial value.  #

  # sum.27_29 = PHI <sum.27_11, 0>

  sum.27_11 = D.1827_8 + sum.27_29;

  GIMPLE_OMP_CONTINUE

  # Adding this reduction phi is done at create_phi_for_local_result() #
  # sum.27_56 = PHI <sum.27_11, 0>
  GIMPLE_OMP_RETURN

  # Creating the atomic operation is done at
    create_call_for_reduction_1() #

  #pragma omp atomic_load
  D.1839_59 = *&.paral_data_load.33_51->reduction.23;
  D.1840_60 = sum.27_56 + D.1839_59;
  #pragma omp atomic_store (D.1840_60);

  GIMPLE_OMP_RETURN

  # collecting the result after the join of the threads is done at
    create_loads_for_reductions().
    The value computed by the threads is loaded from the
    shared struct.  #


  .paral_data_load.33_52 = &.paral_data_store.32;
  sum_37 = .paral_data_load.33_52->sum.27;
  sum_43 = D.1795_41 + sum_37;

  exit bb:
  # sum_21 = PHI <sum_43, sum_26>
  printf (&"%d"[0], sum_21);

...

}

*/

/* Minimal number of iterations of a loop that should be executed in each
   thread.  */
#define MIN_PER_THREAD 100
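
/* For example, with the default MIN_PER_THREAD of 100 and four threads,
   gen_parallel_loop only parallelizes loops known to iterate at least
   MIN_PER_THREAD * N_THREADS = 400 times; shorter loops keep their
   original, sequential form.  */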

/* Element of the hashtable, representing a
   reduction in the current loop.  */
struct reduction_info
{
  gimple reduc_stmt;		/* reduction statement.  */
  gimple reduc_phi;		/* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  gimple keep_res;		/* The PHI_RESULT of this phi is the resulting value
				   of the reduction variable when exiting the loop.  */
  tree initial_value;		/* The initial value of the reduction var before entering the loop.  */
  tree field;			/* The name of the field in the parloop data structure intended for reduction.  */
  tree init;			/* reduction initialization value.  */
  gimple new_phi;		/* (helper field) Newly created phi node whose result
				   will be passed to the atomic operation.  Represents
				   the local result each thread computed for the reduction
				   operation.  */
};

/* Equality and hash functions for hashtab code.  */

static int
reduction_info_eq (const void *aa, const void *bb)
{
  const struct reduction_info *a = (const struct reduction_info *) aa;
  const struct reduction_info *b = (const struct reduction_info *) bb;

  return (a->reduc_phi == b->reduc_phi);
}

static hashval_t
reduction_info_hash (const void *aa)
{
  const struct reduction_info *a = (const struct reduction_info *) aa;

  return htab_hash_pointer (a->reduc_phi);
}

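/* Returns the reduction_info entry from REDUCTION_LIST whose reduc_phi
   field is PHI, or NULL if the list is empty or no such reduction is
   recorded.  */
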
static struct reduction_info *
reduction_phi (htab_t reduction_list, gimple phi)
{
  struct reduction_info tmpred, *red;

  if (htab_elements (reduction_list) == 0)
    return NULL;

  tmpred.reduc_phi = phi;
  red = (struct reduction_info *) htab_find (reduction_list, &tmpred);

  return red;
}

/* Element of hashtable of names to copy.  */

struct name_to_copy_elt
{
  unsigned version;	/* The version of the name to copy.  */
  tree new_name;	/* The new name used in the copy.  */
  tree field;		/* The field of the structure used to pass the
			   value.  */
};

/* Equality and hash functions for hashtab code.  */

static int
name_to_copy_elt_eq (const void *aa, const void *bb)
{
  const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;
  const struct name_to_copy_elt *b = (const struct name_to_copy_elt *) bb;

  return a->version == b->version;
}

static hashval_t
name_to_copy_elt_hash (const void *aa)
{
  const struct name_to_copy_elt *a = (const struct name_to_copy_elt *) aa;

  return (hashval_t) a->version;
}


/* Data dependency analysis.  Returns true if the iterations of LOOP
   are independent of each other (that is, if we can execute them
   in parallel).  */

static bool
loop_parallel_p (struct loop *loop)
{
  VEC (ddr_p, heap) *dependence_relations;
  VEC (data_reference_p, heap) *datarefs;
  lambda_trans_matrix trans;
  bool ret = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Considering loop %d\n", loop->num);
      if (!loop->inner)
        fprintf (dump_file, "loop is innermost\n");
      else
        fprintf (dump_file, "loop NOT innermost\n");
    }

  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  */
  datarefs = VEC_alloc (data_reference_p, heap, 10);
  dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
  compute_data_dependences_for_loop (loop, true, &datarefs,
				     &dependence_relations);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);

  trans = lambda_trans_matrix_new (1, 1);
  LTM_MATRIX (trans)[0][0] = -1;

  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "  SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     "  FAILED: data dependencies exist across iterations\n");

  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);

  return ret;
}

/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.  */

static inline bool
loop_has_blocks_with_irreducible_flag (struct loop *loop)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  bool res = true;

  for (i = 0; i < loop->num_nodes; i++)
    if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP)
      goto end;

  res = false;
 end:
  free (bbs);
  return res;
}

/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
   The assignment statement is placed on edge ENTRY.  DECL_ADDRESS maps decls
   to their addresses that can be reused.  The address of OBJ is known to
   be invariant in the whole function.  */

static tree
take_address_of (tree obj, tree type, edge entry, htab_t decl_address)
{
  int uid;
  void **dslot;
  struct int_tree_map ielt, *nielt;
  tree *var_p, name, bvar, addr;
  gimple stmt;
  gimple_seq stmts;

  /* Since the address of OBJ is invariant, the trees may be shared.
     Avoid rewriting unrelated parts of the code.  */
  obj = unshare_expr (obj);
  for (var_p = &obj;
       handled_component_p (*var_p);
       var_p = &TREE_OPERAND (*var_p, 0))
    continue;
  uid = DECL_UID (*var_p);

  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
  if (!*dslot)
    {
      addr = build_addr (*var_p, current_function_decl);
      bvar = create_tmp_var (TREE_TYPE (addr), get_name (*var_p));
      add_referenced_var (bvar);
      stmt = gimple_build_assign (bvar, addr);
      name = make_ssa_name (bvar, stmt);
      gimple_assign_set_lhs (stmt, name);
      gsi_insert_on_edge_immediate (entry, stmt);

      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = name;
      *dslot = nielt;
    }
  else
    name = ((struct int_tree_map *) *dslot)->to;

  if (var_p != &obj)
    {
      *var_p = build1 (INDIRECT_REF, TREE_TYPE (*var_p), name);
      name = force_gimple_operand (build_addr (obj, current_function_decl),
				   &stmts, true, NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
        gsi_insert_seq_on_edge_immediate (entry, stmts);
    }

  if (TREE_TYPE (name) != type)
    {
      name = force_gimple_operand (fold_convert (type, name), &stmts, true,
				   NULL_TREE);
      if (!gimple_seq_empty_p (stmts))
        gsi_insert_seq_on_edge_immediate (entry, stmts);
    }

  return name;
}

/* Callback for htab_traverse.  Create the initialization statement
   for the reduction described in SLOT, and place it at the preheader of
   the loop described in DATA.  */

static int
initialize_reductions (void **slot, void *data)
{
  tree init, c;
  tree bvar, type, arg;
  edge e;

  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct loop *loop = (struct loop *) data;

  /* Create initialization in preheader:
     reduction_variable = initialization value of reduction.  */

  /* In the phi node at the header, replace the argument coming
     from the preheader with the reduction initialization value.  */

  /* Create a new variable to initialize the reduction.  */
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  bvar = create_tmp_var (type, "reduction");
  add_referenced_var (bvar);

  c = build_omp_clause (gimple_location (reduc->reduc_stmt),
			OMP_CLAUSE_REDUCTION);
  OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
  OMP_CLAUSE_DECL (c) = SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt));

  init = omp_reduction_init (c, TREE_TYPE (bvar));
  reduc->init = init;

  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).
     Keep the old value in a new variable "reduction_initial",
     that will be taken into consideration after the parallel
     computing is done.  */

  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */

  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;
  return 1;
}

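/* Data carried by the eliminate_local_variables_1 callback: ENTRY is the
   edge on which computed addresses are inserted, DECL_ADDRESS caches the
   addresses already taken (see take_address_of), and CHANGED records
   whether the walked statement was modified.  */
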
struct elv_data
{
  struct walk_stmt_info info;
  edge entry;
  htab_t decl_address;
  bool changed;
};

/* Eliminates references to local variables in *TP out of the single
   entry single exit region starting at DTA->ENTRY.
   DECL_ADDRESS contains addresses of the references that had their
   address taken already.  If the expression is changed, CHANGED is
   set to true.  Callback for walk_tree.  */

static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *const dta = (struct elv_data *) data;
  tree t = *tp, var, addr, addr_type, type, obj;

  if (DECL_P (t))
    {
      *walk_subtrees = 0;

      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
        return NULL_TREE;

      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->entry, dta->decl_address);
      *tp = build1 (INDIRECT_REF, TREE_TYPE (*tp), addr);

      dta->changed = true;
      return NULL_TREE;
    }

  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
	 -- as a gimple operand, when the address taken is a function invariant
	 -- as gimple rhs, when the resulting address is not a function
	    invariant
	 We do not need to do anything special in the latter case (the base of
	 the memory reference whose address is taken may be replaced in the
	 DECL_P case).  The former case is more complicated, as we need to
	 ensure that the new address is still a gimple operand.  Thus, it
	 is not sufficient to replace just the base of the memory reference --
	 we need to move the whole computation of the address out of the
	 loop.  */
      if (!is_gimple_val (t))
        return NULL_TREE;

      *walk_subtrees = 0;
      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
        return NULL_TREE;

      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address);
      *tp = addr;

      dta->changed = true;
      return NULL_TREE;
    }

  if (!EXPR_P (t))
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Moves the references to local variables in STMT out of the single
   entry single exit region starting at ENTRY.  DECL_ADDRESS contains
   addresses of the references that had their address taken
   already.  */

static void
eliminate_local_variables_stmt (edge entry, gimple stmt,
				htab_t decl_address)
{
  struct elv_data dta;

  memset (&dta.info, '\0', sizeof (dta.info));
  dta.entry = entry;
  dta.decl_address = decl_address;
  dta.changed = false;

  if (gimple_debug_bind_p (stmt))
    walk_tree (gimple_debug_bind_get_value_ptr (stmt),
	       eliminate_local_variables_1, &dta.info, NULL);
  else
    walk_gimple_op (stmt, eliminate_local_variables_1, &dta.info);

  if (dta.changed)
    update_stmt (stmt);
}

/* Eliminates the references to local variables from the single entry
   single exit region between the ENTRY and EXIT edges.

   This includes:
   1) Taking address of a local variable -- these are moved out of the
      region (and a temporary variable is created to hold the address if
      necessary).

   2) Dereferencing a local variable -- these are replaced with indirect
      references.  */

static void
eliminate_local_variables (edge entry, edge exit)
{
  basic_block bb;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  unsigned i;
  gimple_stmt_iterator gsi;
  htab_t decl_address = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				     free);
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;

  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
    if (bb != entry_bb && bb != exit_bb)
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        eliminate_local_variables_stmt (entry, gsi_stmt (gsi),
					decl_address);

  htab_delete (decl_address);
  VEC_free (basic_block, heap, body);
}

/* Returns true if expression EXPR is not defined between ENTRY and
   EXIT, i.e. if all its operands are defined outside of the region.  */

static bool
expr_invariant_in_region_p (edge entry, edge exit, tree expr)
{
  basic_block entry_bb = entry->src;
  basic_block exit_bb = exit->dest;
  basic_block def_bb;

  if (is_gimple_min_invariant (expr))
    return true;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
      if (def_bb
	  && dominated_by_p (CDI_DOMINATORS, def_bb, entry_bb)
	  && !dominated_by_p (CDI_DOMINATORS, def_bb, exit_bb))
        return false;

      return true;
    }

  return false;
}

/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
   The copies are stored to NAME_COPIES, if NAME was already duplicated,
   its duplicate stored in NAME_COPIES is returned.

   Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
   duplicated, storing the copies in DECL_COPIES.  */

static tree
separate_decls_in_region_name (tree name,
			       htab_t name_copies, htab_t decl_copies,
			       bool copy_name_p)
{
  tree copy, var, var_copy;
  unsigned idx, uid, nuid;
  struct int_tree_map ielt, *nielt;
  struct name_to_copy_elt elt, *nelt;
  void **slot, **dslot;

  if (TREE_CODE (name) != SSA_NAME)
    return name;

  idx = SSA_NAME_VERSION (name);
  elt.version = idx;
  slot = htab_find_slot_with_hash (name_copies, &elt, idx,
				   copy_name_p ? INSERT : NO_INSERT);
  if (slot && *slot)
    return ((struct name_to_copy_elt *) *slot)->new_name;

  var = SSA_NAME_VAR (name);
  uid = DECL_UID (var);
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, uid, INSERT);
  if (!*dslot)
    {
      var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
      DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
      add_referenced_var (var_copy);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = var_copy;
      *dslot = nielt;

      /* Ensure that when we meet this decl next time, we won't duplicate
	 it again.  */
      nuid = DECL_UID (var_copy);
      ielt.uid = nuid;
      dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT);
      gcc_assert (!*dslot);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = nuid;
      nielt->to = var_copy;
      *dslot = nielt;
    }
  else
    var_copy = ((struct int_tree_map *) *dslot)->to;

  if (copy_name_p)
    {
      copy = duplicate_ssa_name (name, NULL);
      nelt = XNEW (struct name_to_copy_elt);
      nelt->version = idx;
      nelt->new_name = copy;
      nelt->field = NULL_TREE;
      *slot = nelt;
    }
  else
    {
      gcc_assert (!slot);
      copy = name;
    }

  SSA_NAME_VAR (copy) = var_copy;
  return copy;
}

/* Finds the ssa names used in STMT that are defined outside the
   region between ENTRY and EXIT and replaces such ssa names with
   their duplicates.  The duplicates are stored to NAME_COPIES.  Base
   decls of all ssa names used in STMT (including those defined in
   LOOP) are replaced with the new temporary variables; the
   replacement decls are stored in DECL_COPIES.  */

static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt,
			       htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  def_operand_p def;
  ssa_op_iter oi;
  tree name, copy;
  bool copy_name_p;

  mark_virtual_ops_for_renaming (stmt);

  FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
    {
      name = DEF_FROM_PTR (def);
      gcc_assert (TREE_CODE (name) == SSA_NAME);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    false);
      gcc_assert (copy == name);
    }

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      copy_name_p = expr_invariant_in_region_p (entry, exit, name);
      copy = separate_decls_in_region_name (name, name_copies, decl_copies,
					    copy_name_p);
      SET_USE (use, copy);
    }
}

/* Finds the ssa names used in the debug bind STMT that are defined
   outside the region and replaces such ssa names and their base decls
   with the duplicates recorded in NAME_COPIES and DECL_COPIES.
   Returns true if STMT cannot be adjusted and should be removed.  */

static bool
separate_decls_in_region_debug_bind (gimple stmt,
				     htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  ssa_op_iter oi;
  tree var, name;
  struct int_tree_map ielt;
  struct name_to_copy_elt elt;
  void **slot, **dslot;

  var = gimple_debug_bind_get_var (stmt);
  if (TREE_CODE (var) == DEBUG_EXPR_DECL)
    return true;
  gcc_assert (DECL_P (var) && SSA_VAR_P (var));
  ielt.uid = DECL_UID (var);
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, ielt.uid, NO_INSERT);
  if (!dslot)
    return true;
  gimple_debug_bind_set_var (stmt, ((struct int_tree_map *) *dslot)->to);

  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      elt.version = SSA_NAME_VERSION (name);
      slot = htab_find_slot_with_hash (name_copies, &elt, elt.version, NO_INSERT);
      if (!slot)
	{
	  gimple_debug_bind_reset_value (stmt);
	  update_stmt (stmt);
	  break;
	}

      SET_USE (use, ((struct name_to_copy_elt *) *slot)->new_name);
    }

  return false;
}

/* Callback for htab_traverse.  Adds a field corresponding to the reduction
   specified in SLOT.  The type is passed in DATA.  */

static int
add_field_for_reduction (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  tree const type = (tree) data;
  tree var = SSA_NAME_VAR (gimple_assign_lhs (red->reduc_stmt));
  tree field = build_decl (gimple_location (red->reduc_stmt),
			   FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));

  insert_field_into_struct (type, field);

  red->field = field;

  return 1;
}

/* Callback for htab_traverse.  Adds a field corresponding to a ssa name
   described in SLOT.  The type is passed in DATA.  */

static int
add_field_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
  tree type = (tree) data;
  tree name = ssa_name (elt->version);
  tree var = SSA_NAME_VAR (name);
  tree field = build_decl (DECL_SOURCE_LOCATION (var),
			   FIELD_DECL, DECL_NAME (var), TREE_TYPE (var));

  insert_field_into_struct (type, field);
  elt->field = field;

  return 1;
}

/* Callback for htab_traverse.  A local result is the intermediate result
   computed by a single thread, or the initial value in case no iteration
   was executed.  This function creates a phi node reflecting these values.
   The phi's result will be stored in the NEW_PHI field of the reduction's
   data structure.  */

static int
create_phi_for_local_result (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  const struct loop *const loop = (const struct loop *) data;
  edge e;
  gimple new_phi;
  basic_block store_bb;
  tree local_res;
  source_location locus;

  /* STORE_BB is the block where the phi
     should be stored.  It is the destination of the loop exit.
     (Find the fallthru edge from GIMPLE_OMP_CONTINUE).  */
  store_bb = FALLTHRU_EDGE (loop->latch)->dest;

  /* STORE_BB has two predecessors.  One coming from the loop
     (the reduction's result is computed at the loop),
     and another coming from a block preceding the loop,
     when no iterations
     are executed (the initial value should be taken).  */
  if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
    e = EDGE_PRED (store_bb, 1);
  else
    e = EDGE_PRED (store_bb, 0);
  local_res
    = make_ssa_name (SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt)),
		     NULL);
  locus = gimple_location (reduc->reduc_stmt);
  new_phi = create_phi_node (local_res, store_bb);
  SSA_NAME_DEF_STMT (local_res) = new_phi;
  add_phi_arg (new_phi, reduc->init, e, locus);
  add_phi_arg (new_phi, gimple_assign_lhs (reduc->reduc_stmt),
	       FALLTHRU_EDGE (loop->latch), locus);
  reduc->new_phi = new_phi;

  return 1;
}

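/* Describes the data passed between the threads and the surrounding code:
   STORE is the structure variable the values are stored to before the
   parallel region, LOAD is the pointer through which they are loaded back,
   and STORE_BB/LOAD_BB are the blocks in which the stores and loads are
   emitted.  */
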
struct clsn_data
{
  tree store;
  tree load;

  basic_block store_bb;
  basic_block load_bb;
};

/* Callback for htab_traverse.  Create an atomic instruction for the
   reduction described in SLOT.
   DATA annotates the place in memory the atomic operation relates to,
   and the basic block it needs to be generated in.  */

static int
create_call_for_reduction_1 (void **slot, void *data)
{
  struct reduction_info *const reduc = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  basic_block bb;
  basic_block new_bb;
  edge e;
  tree t, addr, addr_type, ref, x;
  tree tmp_load, name;
  gimple load;

  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);
  addr_type = build_pointer_type (type);

  addr = build_addr (t, current_function_decl);

  /* Create phi node.  */
  bb = clsn_data->load_bb;

  e = split_block (bb, t);
  new_bb = e->dest;

  tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL);
  add_referenced_var (tmp_load);
  tmp_load = make_ssa_name (tmp_load, NULL);
  load = gimple_build_omp_atomic_load (tmp_load, addr);
  SSA_NAME_DEF_STMT (tmp_load) = load;
  gsi = gsi_start_bb (new_bb);
  gsi_insert_after (&gsi, load, GSI_NEW_STMT);

  e = split_block (new_bb, load);
  new_bb = e->dest;
  gsi = gsi_start_bb (new_bb);
  ref = tmp_load;
  x = fold_build2 (reduc->reduction_code,
		   TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
		   PHI_RESULT (reduc->new_phi));

  name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true,
				   GSI_CONTINUE_LINKING);

  gsi_insert_after (&gsi, gimple_build_omp_atomic_store (name), GSI_NEW_STMT);
  return 1;
}

/* Create the atomic operation at the join point of the threads.
   REDUCTION_LIST describes the reductions in the LOOP.
   LD_ST_DATA describes the shared data structure where
   shared data is stored in and loaded from.  */
static void
create_call_for_reduction (struct loop *loop, htab_t reduction_list,
			   struct clsn_data *ld_st_data)
{
  htab_traverse (reduction_list, create_phi_for_local_result, loop);
  /* Find the fallthru edge from GIMPLE_OMP_CONTINUE.  */
  ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
  htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
}

/* Callback for htab_traverse.  Loads the final reduction value at the
   join point of all threads, and inserts it in the right place.  */

static int
create_loads_for_reductions (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  tree name;
  tree x;

  gsi = gsi_after_labels (clsn_data->load_bb);
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
			NULL_TREE);

  x = load_struct;
  name = PHI_RESULT (red->keep_res);
  stmt = gimple_build_assign (name, x);
  SSA_NAME_DEF_STMT (name) = stmt;

  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  for (gsi = gsi_start_phis (gimple_bb (red->keep_res));
       !gsi_end_p (gsi); gsi_next (&gsi))
    if (gsi_stmt (gsi) == red->keep_res)
      {
	remove_phi_node (&gsi, false);
	return 1;
      }
  gcc_unreachable ();
}

/* Load the reduction result that was stored in LD_ST_DATA.
   REDUCTION_LIST describes the list of reductions that the
   loads should be generated for.  */
static void
create_final_loads_for_reduction (htab_t reduction_list,
				  struct clsn_data *ld_st_data)
{
  gimple_stmt_iterator gsi;
  tree t;
  gimple stmt;

  gsi = gsi_after_labels (ld_st_data->load_bb);
  t = build_fold_addr_expr (ld_st_data->store);
  stmt = gimple_build_assign (ld_st_data->load, t);

  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (ld_st_data->load) = stmt;

  htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data);
}

/* Callback for htab_traverse.  Store the neutral value for the
   particular reduction's operation, e.g. 0 for PLUS_EXPR,
   1 for MULT_EXPR, etc. into the reduction field.
   The reduction is specified in SLOT.  The store information is
   passed in DATA.  */

static int
create_stores_for_reduction (void **slot, void *data)
{
  struct reduction_info *const red = (struct reduction_info *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE);
  stmt = gimple_build_assign (t, red->initial_value);
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}

/* Callback for htab_traverse.  Creates loads to a field of LOAD in LOAD_BB and
   a store to a field of STORE in STORE_BB for the ssa name and its duplicate
   specified in SLOT.  */

static int
create_loads_and_stores_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *const elt = (struct name_to_copy_elt *) *slot;
  struct clsn_data *const clsn_data = (struct clsn_data *) data;
  tree t;
  gimple stmt;
  gimple_stmt_iterator gsi;
  tree type = TREE_TYPE (elt->new_name);
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;

  gsi = gsi_last_bb (clsn_data->store_bb);
  t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE);
  stmt = gimple_build_assign (t, ssa_name (elt->version));
  mark_virtual_ops_for_renaming (stmt);
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  gsi = gsi_last_bb (clsn_data->load_bb);
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE);
  stmt = gimple_build_assign (elt->new_name, t);
  SSA_NAME_DEF_STMT (elt->new_name) = stmt;
  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);

  return 1;
}

/* Moves all the variables used in LOOP and defined outside of it (including
   the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
   name) to a structure created for this purpose.  The code

   while (1)
     {
       use (a);
       use (b);
     }

   is transformed this way:

   bb0:
   old.a = a;
   old.b = b;

   bb1:
   a' = new->a;
   b' = new->b;
   while (1)
     {
       use (a');
       use (b');
     }

   `old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT.  The
   pointer `new' is intentionally not initialized (the loop will be split to a
   separate function later, and `new' will be initialized from its arguments).
   LD_ST_DATA holds information about the shared data structure used to pass
   information among the threads.  It is initialized here, and
   gen_parallel_loop will pass it to create_call_for_reduction that
   needs this information.  REDUCTION_LIST describes the reductions
   in LOOP.  */

static void
separate_decls_in_region (edge entry, edge exit, htab_t reduction_list,
			  tree *arg_struct, tree *new_arg_struct,
			  struct clsn_data *ld_st_data)
{
  basic_block bb1 = split_edge (entry);
  basic_block bb0 = single_pred (bb1);
  htab_t name_copies = htab_create (10, name_to_copy_elt_hash,
				    name_to_copy_elt_eq, free);
  htab_t decl_copies = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				    free);
  unsigned i;
  tree type, type_name, nvar;
  gimple_stmt_iterator gsi;
  struct clsn_data clsn_data;
  VEC (basic_block, heap) *body = VEC_alloc (basic_block, heap, 3);
  basic_block bb;
  basic_block entry_bb = bb1;
  basic_block exit_bb = exit->dest;
  bool has_debug_stmt = false;

  entry = single_succ_edge (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &body);

  for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
    {
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
					   name_copies, decl_copies);

	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple stmt = gsi_stmt (gsi);

	      if (is_gimple_debug (stmt))
		has_debug_stmt = true;
	      else
		separate_decls_in_region_stmt (entry, exit, stmt,
					       name_copies, decl_copies);
	    }
	}
    }

  /* Now process debug bind stmts.  We must not create decls while
     processing debug stmts, so we defer their processing so as to
     make sure we will have debug info for as many variables as
     possible (all of those that were dealt with in the loop above),
     and discard those for which we know there's nothing we can
     do.  */
  if (has_debug_stmt)
    for (i = 0; VEC_iterate (basic_block, body, i, bb); i++)
      if (bb != entry_bb && bb != exit_bb)
	{
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	    {
	      gimple stmt = gsi_stmt (gsi);

	      if (gimple_debug_bind_p (stmt))
		{
		  if (separate_decls_in_region_debug_bind (stmt,
							   name_copies,
							   decl_copies))
		    {
		      gsi_remove (&gsi, true);
		      continue;
		    }
		}

	      gsi_next (&gsi);
	    }
	}

  VEC_free (basic_block, heap, body);

  if (htab_elements (name_copies) == 0 && htab_elements (reduction_list) == 0)
    {
      /* It may happen that there is nothing to copy (if there are only
	 loop carried and external variables in the loop).  */
      *arg_struct = NULL;
      *new_arg_struct = NULL;
    }
  else
    {
      /* Create the type for the structure to store the ssa names to.  */
      type = lang_hooks.types.make_type (RECORD_TYPE);
      type_name = build_decl (BUILTINS_LOCATION,
			      TYPE_DECL, create_tmp_var_name (".paral_data"),
			      type);
      TYPE_NAME (type) = type_name;

      htab_traverse (name_copies, add_field_for_name, type);
      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  /* Create the fields for reductions.  */
	  htab_traverse (reduction_list, add_field_for_reduction,
			 type);
	}
      layout_type (type);

      /* Create the loads and stores.  */
      *arg_struct = create_tmp_var (type, ".paral_data_store");
      add_referenced_var (*arg_struct);
      nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
      add_referenced_var (nvar);
      *new_arg_struct = make_ssa_name (nvar, NULL);

      ld_st_data->store = *arg_struct;
      ld_st_data->load = *new_arg_struct;
      ld_st_data->store_bb = bb0;
      ld_st_data->load_bb = bb1;

      htab_traverse (name_copies, create_loads_and_stores_for_name,
		     ld_st_data);

      /* Load the calculation from memory (after the join of the threads).  */

      if (reduction_list && htab_elements (reduction_list) > 0)
	{
	  htab_traverse (reduction_list, create_stores_for_reduction,
			 ld_st_data);
	  clsn_data.load = make_ssa_name (nvar, NULL);
	  clsn_data.load_bb = exit->dest;
	  clsn_data.store = ld_st_data->store;
	  create_final_loads_for_reduction (reduction_list, &clsn_data);
	}
    }

  htab_delete (decl_copies);
  htab_delete (name_copies);
}

/* Bitmap containing uids of functions created by parallelization.  We cannot
   allocate it from the default obstack, as it must live across compilation
   of several functions; we make it gc allocated instead.  */

static GTY(()) bitmap parallelized_functions;

/* Returns true if FN was created by create_loop_fn.  */

static bool
parallelized_function_p (tree fn)
{
  if (!parallelized_functions || !DECL_ARTIFICIAL (fn))
    return false;

  return bitmap_bit_p (parallelized_functions, DECL_UID (fn));
}

/* Creates and returns an empty function that will receive the body of
   a parallelized loop.  */

static tree
create_loop_fn (void)
{
  char buf[100];
  char *tname;
  tree decl, type, name, t;
  struct function *act_cfun = cfun;
  static unsigned loopfn_num;

  snprintf (buf, 100, "%s.$loopfn", current_function_name ());
  ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
  clean_symbol_name (tname);
  name = get_identifier (tname);
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (BUILTINS_LOCATION,
		     FUNCTION_DECL, name, type);
  if (!parallelized_functions)
    parallelized_functions = BITMAP_GGC_ALLOC ();
  bitmap_set_bit (parallelized_functions, DECL_UID (decl));

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (BUILTINS_LOCATION,
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (BUILTINS_LOCATION,
		  PARM_DECL, get_identifier (".paral_data_param"),
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;

  allocate_struct_function (decl, false);

  /* The call to allocate_struct_function clobbers CFUN, so we need to restore
     it.  */
  set_cfun (act_cfun);

  return decl;
}

/* Moves the exit condition of LOOP to the beginning of its header, and
   duplicates the part of the last iteration that gets disabled to the
   exit of the loop.  NIT is the number of iterations of the loop
   (used to initialize the variables in the duplicated part).

   TODO: the common case is that the latch of the loop is empty and immediately
   follows the loop exit.  In this case, it would be better not to copy the
   body of the loop, but only move the entry of the loop directly before the
   exit check and increase the number of iterations of the loop by one.
   This may need some additional preconditioning in case NIT = ~0.
   REDUCTION_LIST describes the reductions in LOOP.  */

static void
transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit)
{
  basic_block *bbs, *nbbs, ex_bb, orig_header;
  unsigned n;
  bool ok;
  edge exit = single_dom_exit (loop), hpred;
  tree control, control_name, res, t;
  gimple phi, nphi, cond_stmt, stmt, cond_nit;
  gimple_stmt_iterator gsi;
  tree nit_1;

  split_block_after_labels (loop->header);
  orig_header = single_succ (loop->header);
  hpred = single_succ_edge (loop->header);

  cond_stmt = last_stmt (exit->src);
  control = gimple_cond_lhs (cond_stmt);
  gcc_assert (gimple_cond_rhs (cond_stmt) == nit);

  /* Make sure that we have phi nodes on exit for all loop header phis
     (create_parallel_loop requires that).  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);
      t = make_ssa_name (SSA_NAME_VAR (res), phi);
      SET_PHI_RESULT (phi, t);
      nphi = create_phi_node (res, orig_header);
      SSA_NAME_DEF_STMT (res) = nphi;
      add_phi_arg (nphi, t, hpred, UNKNOWN_LOCATION);

      if (res == control)
	{
	  gimple_cond_set_lhs (cond_stmt, t);
	  update_stmt (cond_stmt);
	  control = t;
	}
    }
  bbs = get_loop_body_in_dom_order (loop);

  for (n = 0; bbs[n] != loop->latch; n++)
    continue;
  n--;
  nbbs = XNEWVEC (basic_block, n);
  ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit,
				   bbs + 1, n, nbbs);
  gcc_assert (ok);
  free (bbs);
  ex_bb = nbbs[0];
  free (nbbs);

  /* Other than reductions, the only gimple reg that should be copied
     out of the loop is the control variable.  */

  control_name = NULL_TREE;
  for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); )
    {
      phi = gsi_stmt (gsi);
      res = PHI_RESULT (phi);
      if (!is_gimple_reg (res))
	{
	  gsi_next (&gsi);
	  continue;
	}

      /* Check if it is a part of reduction.  If it is,
	 keep the phi at the reduction's keep_res field.  The
	 PHI_RESULT of this phi is the resulting value of the reduction
	 variable when exiting the loop.  */

      exit = single_dom_exit (loop);

      if (htab_elements (reduction_list) > 0)
	{
	  struct reduction_info *red;

	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
	  red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
	  if (red)
	    {
	      red->keep_res = phi;
	      gsi_next (&gsi);
	      continue;
	    }
	}
      gcc_assert (control_name == NULL_TREE
		  && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
      control_name = res;
      remove_phi_node (&gsi, false);
    }
  gcc_assert (control_name != NULL_TREE);

  /* Initialize the control variable to the number of iterations
     according to the rhs of the exit condition.  */
  gsi = gsi_after_labels (ex_bb);
  cond_nit = last_stmt (exit->src);
  nit_1 = gimple_cond_rhs (cond_nit);
  nit_1 = force_gimple_operand_gsi (&gsi,
				    fold_convert (TREE_TYPE (control_name), nit_1),
				    false, NULL_TREE, false, GSI_SAME_STMT);
  stmt = gimple_build_assign (control_name, nit_1);
  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
  SSA_NAME_DEF_STMT (control_name) = stmt;
}

1396/* Create the parallel constructs for LOOP as described in gen_parallel_loop.
726a989a 1397 LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL.
5f40b3cb
ZD
1398 NEW_DATA is the variable that should be initialized from the argument
1399 of LOOP_FN. N_THREADS is the requested number of threads. Returns the
726a989a 1400 basic block containing GIMPLE_OMP_PARALLEL tree. */
5f40b3cb
ZD
1401
1402static basic_block
1403create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
1404 tree new_data, unsigned n_threads)
1405{
726a989a 1406 gimple_stmt_iterator gsi;
5f40b3cb 1407 basic_block bb, paral_bb, for_bb, ex_bb;
726a989a
RB
1408 tree t, param, res;
1409 gimple stmt, for_stmt, phi, cond_stmt;
1410 tree cvar, cvar_init, initvar, cvar_next, cvar_base, type;
5f40b3cb
ZD
1411 edge exit, nexit, guard, end, e;
1412
726a989a 1413 /* Prepare the GIMPLE_OMP_PARALLEL statement. */
5f40b3cb
ZD
1414 bb = loop_preheader_edge (loop)->src;
1415 paral_bb = single_pred (bb);
726a989a 1416 gsi = gsi_last_bb (paral_bb);
5f40b3cb 1417
c2255bc4 1418 t = build_omp_clause (BUILTINS_LOCATION, OMP_CLAUSE_NUM_THREADS);
5f40b3cb 1419 OMP_CLAUSE_NUM_THREADS_EXPR (t)
a509ebb5 1420 = build_int_cst (integer_type_node, n_threads);
726a989a 1421 stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data);
5f40b3cb 1422
726a989a 1423 gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
5f40b3cb
ZD
1424
1425 /* Initialize NEW_DATA. */
1426 if (data)
1427 {
726a989a
RB
1428 gsi = gsi_after_labels (bb);
1429
1430 param = make_ssa_name (DECL_ARGUMENTS (loop_fn), NULL);
1431 stmt = gimple_build_assign (param, build_fold_addr_expr (data));
1432 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1433 SSA_NAME_DEF_STMT (param) = stmt;
1434
1435 stmt = gimple_build_assign (new_data,
1436 fold_convert (TREE_TYPE (new_data), param));
1437 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1438 SSA_NAME_DEF_STMT (new_data) = stmt;
5f40b3cb
ZD
1439 }
1440
726a989a 1441 /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL. */
5f40b3cb 1442 bb = split_loop_exit_edge (single_dom_exit (loop));
726a989a
RB
1443 gsi = gsi_last_bb (bb);
1444 gsi_insert_after (&gsi, gimple_build_omp_return (false), GSI_NEW_STMT);
5f40b3cb 1445
726a989a 1446 /* Extract data for GIMPLE_OMP_FOR. */
5f40b3cb 1447 gcc_assert (loop->header == single_dom_exit (loop)->src);
726a989a 1448 cond_stmt = last_stmt (loop->header);
5f40b3cb 1449
726a989a 1450 cvar = gimple_cond_lhs (cond_stmt);
5f40b3cb
ZD
1451 cvar_base = SSA_NAME_VAR (cvar);
1452 phi = SSA_NAME_DEF_STMT (cvar);
1453 cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
726a989a 1454 initvar = make_ssa_name (cvar_base, NULL);
5f40b3cb
ZD
1455 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
1456 initvar);
1457 cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1458
726a989a
RB
1459 gsi = gsi_last_bb (loop->latch);
1460 gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next));
1461 gsi_remove (&gsi, true);
5f40b3cb
ZD
1462
1463 /* Prepare cfg. */
1464 for_bb = split_edge (loop_preheader_edge (loop));
1465 ex_bb = split_loop_exit_edge (single_dom_exit (loop));
1466 extract_true_false_edges_from_block (loop->header, &nexit, &exit);
1467 gcc_assert (exit == single_dom_exit (loop));
1468
1469 guard = make_edge (for_bb, ex_bb, 0);
1470 single_succ_edge (loop->latch)->flags = 0;
1471 end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
726a989a 1472 for (gsi = gsi_start_phis (ex_bb); !gsi_end_p (gsi); gsi_next (&gsi))
5f40b3cb 1473 {
f5045c96
AM
1474 source_location locus;
1475 tree def;
726a989a 1476 phi = gsi_stmt (gsi);
5f40b3cb 1477 res = PHI_RESULT (phi);
726a989a 1478 stmt = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));
f5045c96
AM
1479
1480 def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
b8698a0f 1481 locus = gimple_phi_arg_location_from_edge (stmt,
f5045c96
AM
1482 loop_preheader_edge (loop));
1483 add_phi_arg (phi, def, guard, locus);
1484
1485 def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
1486 locus = gimple_phi_arg_location_from_edge (stmt, loop_latch_edge (loop));
1487 add_phi_arg (phi, def, end, locus);
5f40b3cb
ZD
1488 }
1489 e = redirect_edge_and_branch (exit, nexit->dest);
1490 PENDING_STMT (e) = NULL;
1491
726a989a
RB
1492 /* Emit GIMPLE_OMP_FOR. */
1493 gimple_cond_set_lhs (cond_stmt, cvar_base);
5f40b3cb 1494 type = TREE_TYPE (cvar);
c2255bc4 1495 t = build_omp_clause (BUILTINS_LOCATION, OMP_CLAUSE_SCHEDULE);
5f40b3cb
ZD
1496 OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
1497
726a989a
RB
1498 for_stmt = gimple_build_omp_for (NULL, t, 1, NULL);
1499 gimple_omp_for_set_index (for_stmt, 0, initvar);
1500 gimple_omp_for_set_initial (for_stmt, 0, cvar_init);
1501 gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt));
1502 gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt));
1503 gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type,
1504 cvar_base,
1505 build_int_cst (type, 1)));
1506
1507 gsi = gsi_last_bb (for_bb);
1508 gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT);
5f40b3cb
ZD
1509 SSA_NAME_DEF_STMT (initvar) = for_stmt;
1510
726a989a
RB
1511 /* Emit GIMPLE_OMP_CONTINUE. */
1512 gsi = gsi_last_bb (loop->latch);
1513 stmt = gimple_build_omp_continue (cvar_next, cvar);
1514 gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
1515 SSA_NAME_DEF_STMT (cvar_next) = stmt;
5f40b3cb 1516
726a989a
RB
1517 /* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR. */
1518 gsi = gsi_last_bb (ex_bb);
1519 gsi_insert_after (&gsi, gimple_build_omp_return (true), GSI_NEW_STMT);
5f40b3cb
ZD
1520
1521 return paral_bb;
1522}

/* Generates code to execute the iterations of LOOP in N_THREADS
   threads in parallel.

   NITER describes the number of iterations of LOOP.
   REDUCTION_LIST describes the reductions present in the LOOP.  */

static void
gen_parallel_loop (struct loop *loop, htab_t reduction_list,
                   unsigned n_threads, struct tree_niter_desc *niter)
{
  struct loop *nloop;
  loop_iterator li;
  tree many_iterations_cond, type, nit;
  tree arg_struct, new_arg_struct;
  gimple_seq stmts;
  basic_block parallel_head;
  edge entry, exit;
  struct clsn_data clsn_data;
  unsigned prob;

  /* From

     ---------------------------------------------------------------------
     loop
       {
         IV = phi (INIT, IV + STEP)
         BODY1;
         if (COND)
           break;
         BODY2;
       }
     ---------------------------------------------------------------------

     with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
     we generate the following code:

     ---------------------------------------------------------------------

     if (MAY_BE_ZERO
         || NITER < MIN_PER_THREAD * N_THREADS)
       goto original;

     BODY1;
     store all local loop-invariant variables used in the body of the loop
     to DATA.
     GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
     load the variables from DATA.
     GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
     BODY2;
     BODY1;
     GIMPLE_OMP_CONTINUE;
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_FOR
     GIMPLE_OMP_RETURN         -- GIMPLE_OMP_PARALLEL
     goto end;

     original:
     loop
       {
         IV = phi (INIT, IV + STEP)
         BODY1;
         if (COND)
           break;
         BODY2;
       }

     end:

  */

  /* Create two versions of the loop -- in the old one, we know that the
     number of iterations is large enough, and we will transform it into
     the loop that will be split to loop_fn; the new one will be used for
     the remaining iterations.  */

  type = TREE_TYPE (niter->niter);
  nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
                              NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  many_iterations_cond =
    fold_build2 (GE_EXPR, boolean_type_node,
                 nit, build_int_cst (type, MIN_PER_THREAD * n_threads));
  many_iterations_cond
    = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                   invert_truthvalue (unshare_expr (niter->may_be_zero)),
                   many_iterations_cond);
  many_iterations_cond
    = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  if (!is_gimple_condexpr (many_iterations_cond))
    {
      many_iterations_cond
        = force_gimple_operand (many_iterations_cond, &stmts,
                                true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  initialize_original_copy_tables ();

  /* We assume that the loop usually iterates a lot.  */
  prob = 4 * REG_BR_PROB_BASE / 5;
  nloop = loop_version (loop, many_iterations_cond, NULL,
                        prob, prob, REG_BR_PROB_BASE - prob, true);
  update_ssa (TODO_update_ssa);
  free_original_copy_tables ();

  /* Base all the induction variables in LOOP on a single control one.  */
  canonicalize_loop_ivs (loop, &nit);

  /* Ensure that the exit condition is the first statement in the loop.  */
  transform_to_exit_first_loop (loop, reduction_list, nit);

  /* Generate initializations for reductions.  */
  if (htab_elements (reduction_list) > 0)
    htab_traverse (reduction_list, initialize_reductions, loop);

  /* Eliminate the references to local variables from the loop.  */
  gcc_assert (single_exit (loop));
  entry = loop_preheader_edge (loop);
  exit = single_dom_exit (loop);

  eliminate_local_variables (entry, exit);
  /* In the old loop, move all variables that are non-local to the loop into
     a structure and back, and create separate decls for the variables used
     in the loop.  */
  separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
                            &new_arg_struct, &clsn_data);

  /* Create the parallel constructs.  */
  parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct,
                                        new_arg_struct, n_threads);
  if (htab_elements (reduction_list) > 0)
    create_call_for_reduction (loop, reduction_list, &clsn_data);

  scev_reset ();

  /* Cancel the loop (it is simpler to do it here rather than to teach the
     expander to do it).  */
  cancel_loop_tree (loop);

  /* Free loop bound estimations that could contain references to
     removed statements.  */
  FOR_EACH_LOOP (li, loop, 0)
    free_numbers_of_iterations_estimates_loop (loop);

  /* Expand the parallel constructs.  We do it directly here instead of
     running a separate expand_omp pass, since it is more efficient, and
     less likely to cause trouble with further analyses not being able to
     deal with the OMP trees.  */

  omp_expand_local (parallel_head);
}
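
/* As a rough sketch (simplified, with invented field and value names), the
   omp_expand_local call above turns the constructs built by
   create_parallel_loop into something like

     DATA.field1 = value1;
     __builtin_GOMP_parallel_start (LOOPFN, &DATA, N_THREADS);
     LOOPFN (&DATA);
     __builtin_GOMP_parallel_end ();

   where LOOPFN is the function outlined by create_loop_fn, and the
   static-schedule GIMPLE_OMP_FOR inside it is expanded into code that
   derives each thread's slice of the iteration space from
   omp_get_num_threads () and omp_get_thread_num ().  The exact shape is
   decided by the expanders in omp-low.c; nothing in this file emits the
   above literally.  */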

/* Returns true when LOOP contains vector phi nodes.  */

static bool
loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
{
  unsigned i;
  basic_block *bbs = get_loop_body_in_dom_order (loop);
  gimple_stmt_iterator gsi;
  bool res = true;

  for (i = 0; i < loop->num_nodes; i++)
    for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi_stmt (gsi)))) == VECTOR_TYPE)
        goto end;

  res = false;
 end:
  free (bbs);
  return res;
}

/* Create a reduction_info struct, initialize it with REDUC_STMT and PHI,
   and insert it into REDUCTION_LIST.  */

static void
build_new_reduction (htab_t reduction_list, gimple reduc_stmt, gimple phi)
{
  PTR *slot;
  struct reduction_info *new_reduction;

  gcc_assert (reduc_stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Detected reduction. reduction stmt is: \n");
      print_gimple_stmt (dump_file, reduc_stmt, 0, 0);
      fprintf (dump_file, "\n");
    }

  new_reduction = XCNEW (struct reduction_info);

  new_reduction->reduc_stmt = reduc_stmt;
  new_reduction->reduc_phi = phi;
  new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
  slot = htab_find_slot (reduction_list, new_reduction, INSERT);
  *slot = new_reduction;
}

/* Detect all reductions in the LOOP, insert them into REDUCTION_LIST.  */

static void
gather_scalar_reductions (loop_p loop, htab_t reduction_list)
{
  gimple_stmt_iterator gsi;
  loop_vec_info simple_loop_info;

  vect_dump = NULL;
  simple_loop_info = vect_analyze_loop_form (loop);

  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      affine_iv iv;
      tree res = PHI_RESULT (phi);
      bool double_reduc;

      if (!is_gimple_reg (res))
        continue;

      if (!simple_iv (loop, loop, res, &iv, true)
          && simple_loop_info)
        {
          gimple reduc_stmt
            = vect_is_simple_reduction (simple_loop_info, phi, true,
                                        &double_reduc);
          if (reduc_stmt && !double_reduc)
            build_new_reduction (reduction_list, reduc_stmt, phi);
        }
    }
  destroy_loop_vec_info (simple_loop_info, true);
}
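
/* For instance, in a loop that accumulates a product (a made-up example,
   with invented SSA names):

     prod_1 = PHI <prod_init(preheader), prod_2(latch)>
     ...
     prod_2 = prod_1 * a_val;

   the phi result prod_1 is not a simple induction variable, so
   vect_is_simple_reduction is consulted and returns the multiply statement;
   build_new_reduction then records the pair with reduction code MULT_EXPR.
   Reductions flagged as double reductions (a reduction whose result is
   reduced again by an enclosing loop) are deliberately skipped here.  */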

/* Try to initialize NITER for the code generation part.  */

static bool
try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter)
{
  edge exit = single_dom_exit (loop);

  gcc_assert (exit);

  /* We need to know the number of iterations, and there should be no uses
     of values defined inside the loop outside of it, unless the values are
     invariants of the loop.  */
  if (!number_of_iterations_exit (loop, exit, niter, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " FAILED: number of iterations not known\n");
      return false;
    }

  return true;
}

/* Try to initialize REDUCTION_LIST for the code generation part.
   REDUCTION_LIST describes the reductions.  */

static bool
try_create_reduction_list (loop_p loop, htab_t reduction_list)
{
  edge exit = single_dom_exit (loop);
  gimple_stmt_iterator gsi;

  gcc_assert (exit);

  gather_scalar_reductions (loop, reduction_list);

  for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      struct reduction_info *red;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple reduc_phi;
      tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);

      if (is_gimple_reg (val))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "phi is ");
              print_gimple_stmt (dump_file, phi, 0, 0);
              fprintf (dump_file, "arg of phi to exit: value ");
              print_generic_expr (dump_file, val, 0);
              fprintf (dump_file, " used outside loop\n");
              fprintf (dump_file,
                       " checking if it is part of a reduction pattern:\n");
            }
          if (htab_elements (reduction_list) == 0)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         " FAILED: it is not part of a reduction.\n");
              return false;
            }
          reduc_phi = NULL;
          FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
            {
              if (flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
                {
                  reduc_phi = USE_STMT (use_p);
                  break;
                }
            }
          red = reduction_phi (reduction_list, reduc_phi);
          if (red == NULL)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         " FAILED: it is not part of a reduction.\n");
              return false;
            }
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "reduction phi is ");
              print_gimple_stmt (dump_file, red->reduc_phi, 0, 0);
              fprintf (dump_file, "reduction stmt is ");
              print_gimple_stmt (dump_file, red->reduc_stmt, 0, 0);
            }
        }
    }

  /* The iterations of the loop may communicate only through bivs whose
     iteration space can be distributed efficiently.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree def = PHI_RESULT (phi);
      affine_iv iv;

      if (is_gimple_reg (def) && !simple_iv (loop, loop, def, &iv, true))
        {
          struct reduction_info *red;

          red = reduction_phi (reduction_list, phi);
          if (red == NULL)
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         " FAILED: scalar dependency between iterations\n");
              return false;
            }
        }
    }

  return true;
}
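
/* An example of the kind of scalar cycle that the second check above
   rejects (SSA names invented for illustration):

     x_1 = PHI <x_init(preheader), x_2(latch)>
     ...
     x_2 = x_1 * x_1 + a_val;

   x_1 is neither a simple affine induction variable nor a phi recorded by
   gather_scalar_reductions, so reduction_phi returns NULL and we give up
   with "scalar dependency between iterations".  */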

/* Detect parallel loops and generate parallel code using libgomp
   primitives.  Returns true if some loop was parallelized, false
   otherwise.  */

bool
parallelize_loops (void)
{
  unsigned n_threads = flag_tree_parallelize_loops;
  bool changed = false;
  struct loop *loop;
  struct tree_niter_desc niter_desc;
  loop_iterator li;
  htab_t reduction_list;

  /* Do not parallelize loops in the functions created by parallelization.  */
  if (parallelized_function_p (cfun->decl))
    return false;

  reduction_list = htab_create (10, reduction_info_hash,
                                reduction_info_eq, free);
  init_stmt_vec_info_vec ();

  FOR_EACH_LOOP (li, loop, 0)
    {
      htab_empty (reduction_list);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Trying loop %d as candidate\n", loop->num);
          if (loop->inner)
            fprintf (dump_file, "loop %d is not innermost\n", loop->num);
          else
            fprintf (dump_file, "loop %d is innermost\n", loop->num);
        }

      /* When autopar is driven by the graphite pass, rely on the dependency
         checking results graphite recorded on the loop.  */
      if (flag_loop_parallelize_all && !loop->can_be_parallel)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "loop is not parallel according to graphite\n");
          continue;
        }

      if (!single_dom_exit (loop))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "loop is !single_dom_exit\n");
          continue;
        }

      if (/* And of course, the loop must be parallelizable.  */
          !can_duplicate_loop_p (loop)
          || loop_has_blocks_with_irreducible_flag (loop)
          /* FIXME: the check for vector phi nodes could be removed.  */
          || loop_has_vector_phi_nodes (loop))
        continue;

      /* FIXME: Bypass this check as graphite doesn't update the
         count and frequency correctly now.  */
      if (!flag_loop_parallelize_all
          && ((estimated_loop_iterations_int (loop, false)
               <= (HOST_WIDE_INT) n_threads * MIN_PER_THREAD)
              /* Do not bother with loops in cold areas.  */
              || optimize_loop_nest_for_size_p (loop)))
        continue;

      if (!try_get_loop_niter (loop, &niter_desc))
        continue;

      if (!try_create_reduction_list (loop, reduction_list))
        continue;

      if (!flag_loop_parallelize_all && !loop_parallel_p (loop))
        continue;

      changed = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "parallelizing ");
          if (loop->inner)
            fprintf (dump_file, "outer loop\n");
          else
            fprintf (dump_file, "inner loop\n");
        }
      gen_parallel_loop (loop, reduction_list,
                         n_threads, &niter_desc);
      verify_flow_info ();
      verify_dominators (CDI_DOMINATORS);
      verify_loop_structure ();
      verify_loop_closed_ssa ();
    }

  free_stmt_vec_info_vec ();
  htab_delete (reduction_list);

  /* Parallelization will cause new function calls to be inserted through
     which local variables will escape.  Reset the points-to solutions
     for ESCAPED and CALLUSED.  */
  if (changed)
    {
      pt_solution_reset (&cfun->gimple_df->escaped);
      pt_solution_reset (&cfun->gimple_df->callused);
    }

  return changed;
}
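
/* Usage note (a sketch only, not part of the pass itself): this entry point
   is normally exercised by compiling with -ftree-parallelize-loops=N, which
   sets flag_tree_parallelize_loops (and therefore n_threads above) to N,
   e.g.

     gcc -O2 -ftree-parallelize-loops=4 file.c

   With -floop-parallelize-all, graphite additionally marks loops it proved
   independent via loop->can_be_parallel, and some of the profitability
   checks above are bypassed.  */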

#include "gt-tree-parloops.h"