1 /* Passes for transactional memory support.
2 Copyright (C) 2008-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "hash-table.h"
24 #include "tree.h"
25 #include "predict.h"
26 #include "vec.h"
27 #include "hashtab.h"
28 #include "hash-set.h"
29 #include "machmode.h"
30 #include "tm.h"
31 #include "hard-reg-set.h"
32 #include "input.h"
33 #include "function.h"
34 #include "dominance.h"
35 #include "cfg.h"
36 #include "basic-block.h"
37 #include "tree-ssa-alias.h"
38 #include "internal-fn.h"
39 #include "tree-eh.h"
40 #include "gimple-expr.h"
41 #include "is-a.h"
42 #include "gimple.h"
43 #include "calls.h"
44 #include "rtl.h"
45 #include "emit-rtl.h"
46 #include "gimplify.h"
47 #include "gimple-iterator.h"
48 #include "gimplify-me.h"
49 #include "gimple-walk.h"
50 #include "gimple-ssa.h"
51 #include "cgraph.h"
52 #include "tree-cfg.h"
53 #include "stringpool.h"
54 #include "tree-ssanames.h"
55 #include "tree-into-ssa.h"
56 #include "tree-pass.h"
57 #include "tree-inline.h"
58 #include "diagnostic-core.h"
59 #include "demangle.h"
60 #include "output.h"
61 #include "trans-mem.h"
62 #include "params.h"
63 #include "target.h"
64 #include "langhooks.h"
65 #include "gimple-pretty-print.h"
66 #include "cfgloop.h"
67 #include "tree-ssa-address.h"
68
69
70 #define A_RUNINSTRUMENTEDCODE 0x0001
71 #define A_RUNUNINSTRUMENTEDCODE 0x0002
72 #define A_SAVELIVEVARIABLES 0x0004
73 #define A_RESTORELIVEVARIABLES 0x0008
74 #define A_ABORTTRANSACTION 0x0010
75
76 #define AR_USERABORT 0x0001
77 #define AR_USERRETRY 0x0002
78 #define AR_TMCONFLICT 0x0004
79 #define AR_EXCEPTIONBLOCKABORT 0x0008
80 #define AR_OUTERABORT 0x0010
81
82 #define MODE_SERIALIRREVOCABLE 0x0000
83
84
85 /* The representation of a transaction changes several times during the
86 lowering process. In the beginning, in the front-end we have the
87 GENERIC tree TRANSACTION_EXPR. For example,
88
89 __transaction {
90 local++;
91 if (++global == 10)
92 __tm_abort;
93 }
94
95 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
96 trivially replaced with a GIMPLE_TRANSACTION node.
97
98 During pass_lower_tm, we examine the body of transactions looking
99 for aborts. Transactions that do not contain an abort may be
100 merged into an outer transaction. We also add a TRY-FINALLY node
101 to arrange for the transaction to be committed on any exit.
102
103 [??? Think about how this arrangement affects throw-with-commit
104 and throw-with-abort operations. In this case we want the TRY to
105 handle gotos, but not to catch any exceptions because the transaction
106 will already be closed.]
107
108 GIMPLE_TRANSACTION [label=NULL] {
109 try {
110 local = local + 1;
111 t0 = global;
112 t1 = t0 + 1;
113 global = t1;
114 if (t1 == 10)
115 __builtin___tm_abort ();
116 } finally {
117 __builtin___tm_commit ();
118 }
119 }
120
121 During pass_lower_eh, we create EH regions for the transactions,
122 intermixed with the regular EH stuff. This gives us a nice persistent
123 mapping (all the way through rtl) from transactional memory operation
124 back to the transaction, which allows us to get the abnormal edges
125 correct to model transaction aborts and restarts:
126
127 GIMPLE_TRANSACTION [label=over]
128 local = local + 1;
129 t0 = global;
130 t1 = t0 + 1;
131 global = t1;
132 if (t1 == 10)
133 __builtin___tm_abort ();
134 __builtin___tm_commit ();
135 over:
136
137 This is the end of all_lowering_passes, and so is what is present
138 during the IPA passes, and through all of the optimization passes.
139
140 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
141 functions and mark functions for cloning.
142
143 At the end of gimple optimization, before exiting SSA form,
144 pass_tm_edges replaces statements that perform transactional
145 memory operations with the appropriate TM builtins, and swaps
146 out function calls with their transactional clones. At this
147 point we introduce the abnormal transaction restart edges and
148 complete lowering of the GIMPLE_TRANSACTION node.
149
150 x = __builtin___tm_start (MAY_ABORT);
151 eh_label:
152 if (x & abort_transaction)
153 goto over;
154 local = local + 1;
155 t0 = __builtin___tm_load (global);
156 t1 = t0 + 1;
157 __builtin___tm_store (&global, t1);
158 if (t1 == 10)
159 __builtin___tm_abort ();
160 __builtin___tm_commit ();
161 over:
162 */
163
164 static void *expand_regions (struct tm_region *,
165 void *(*callback)(struct tm_region *, void *),
166 void *, bool);
167
168 \f
169 /* Return the attributes we want to examine for X, or NULL if it's not
170 something we examine. We look at function types, but allow pointers
171 to function types and function decls and peek through. */
172
173 static tree
174 get_attrs_for (const_tree x)
175 {
176 switch (TREE_CODE (x))
177 {
178 case FUNCTION_DECL:
179 return TYPE_ATTRIBUTES (TREE_TYPE (x));
180 break;
181
182 default:
183 if (TYPE_P (x))
184 return NULL;
185 x = TREE_TYPE (x);
186 if (TREE_CODE (x) != POINTER_TYPE)
187 return NULL;
188 /* FALLTHRU */
189
190 case POINTER_TYPE:
191 x = TREE_TYPE (x);
192 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
193 return NULL;
194 /* FALLTHRU */
195
196 case FUNCTION_TYPE:
197 case METHOD_TYPE:
198 return TYPE_ATTRIBUTES (x);
199 }
200 }
201
202 /* Return true if X has been marked TM_PURE. */
203
204 bool
205 is_tm_pure (const_tree x)
206 {
207 unsigned flags;
208
209 switch (TREE_CODE (x))
210 {
211 case FUNCTION_DECL:
212 case FUNCTION_TYPE:
213 case METHOD_TYPE:
214 break;
215
216 default:
217 if (TYPE_P (x))
218 return false;
219 x = TREE_TYPE (x);
220 if (TREE_CODE (x) != POINTER_TYPE)
221 return false;
222 /* FALLTHRU */
223
224 case POINTER_TYPE:
225 x = TREE_TYPE (x);
226 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
227 return false;
228 break;
229 }
230
231 flags = flags_from_decl_or_type (x);
232 return (flags & ECF_TM_PURE) != 0;
233 }
234
235 /* Return true if X has been marked TM_IRREVOCABLE. */
236
237 static bool
238 is_tm_irrevocable (tree x)
239 {
240 tree attrs = get_attrs_for (x);
241
242 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
243 return true;
244
245 /* A call to the irrevocable builtin is, by definition,
246 irrevocable. */
247 if (TREE_CODE (x) == ADDR_EXPR)
248 x = TREE_OPERAND (x, 0);
249 if (TREE_CODE (x) == FUNCTION_DECL
250 && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
251 && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
252 return true;
253
254 return false;
255 }
256
257 /* Return true if X has been marked TM_SAFE. */
258
259 bool
260 is_tm_safe (const_tree x)
261 {
262 if (flag_tm)
263 {
264 tree attrs = get_attrs_for (x);
265 if (attrs)
266 {
267 if (lookup_attribute ("transaction_safe", attrs))
268 return true;
269 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
270 return true;
271 }
272 }
273 return false;
274 }
275
276 /* Return true if CALL is const, or tm_pure. */
277
278 static bool
279 is_tm_pure_call (gimple call)
280 {
281 tree fn = gimple_call_fn (call);
282
283 if (TREE_CODE (fn) == ADDR_EXPR)
284 {
285 fn = TREE_OPERAND (fn, 0);
286 gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
287 }
288 else
289 fn = TREE_TYPE (fn);
290
291 return is_tm_pure (fn);
292 }
293
294 /* Return true if X has been marked TM_CALLABLE. */
295
296 static bool
297 is_tm_callable (tree x)
298 {
299 tree attrs = get_attrs_for (x);
300 if (attrs)
301 {
302 if (lookup_attribute ("transaction_callable", attrs))
303 return true;
304 if (lookup_attribute ("transaction_safe", attrs))
305 return true;
306 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
307 return true;
308 }
309 return false;
310 }
311
312 /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
313
314 bool
315 is_tm_may_cancel_outer (tree x)
316 {
317 tree attrs = get_attrs_for (x);
318 if (attrs)
319 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
320 return false;
321 }
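/* For illustration only (a sketch, not part of the pass): the
   user-level attributes that the predicates above detect.  The
   function names below are hypothetical.

     __attribute__((transaction_pure))              int get_val (void);
     __attribute__((transaction_safe))              void safe_fn (void);
     __attribute__((transaction_callable))          void callable_fn (void);
     __attribute__((transaction_may_cancel_outer))  void cancel_fn (void);

   get_attrs_for looks through FUNCTION_DECLs, function and method
   types, and pointers to such types, so these predicates also apply
   to calls made through function pointers.  */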
322
323 /* Return true for built-in functions that "end" a transaction. */
324
325 bool
326 is_tm_ending_fndecl (tree fndecl)
327 {
328 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
329 switch (DECL_FUNCTION_CODE (fndecl))
330 {
331 case BUILT_IN_TM_COMMIT:
332 case BUILT_IN_TM_COMMIT_EH:
333 case BUILT_IN_TM_ABORT:
334 case BUILT_IN_TM_IRREVOCABLE:
335 return true;
336 default:
337 break;
338 }
339
340 return false;
341 }
342
343 /* Return true if STMT is a built-in function call that "ends" a
344 transaction. */
345
346 bool
347 is_tm_ending (gimple stmt)
348 {
349 tree fndecl;
350
351 if (gimple_code (stmt) != GIMPLE_CALL)
352 return false;
353
354 fndecl = gimple_call_fndecl (stmt);
355 return (fndecl != NULL_TREE
356 && is_tm_ending_fndecl (fndecl));
357 }
358
359 /* Return true if STMT is a TM load. */
360
361 static bool
362 is_tm_load (gimple stmt)
363 {
364 tree fndecl;
365
366 if (gimple_code (stmt) != GIMPLE_CALL)
367 return false;
368
369 fndecl = gimple_call_fndecl (stmt);
370 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
371 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
372 }
373
374 /* Same as above, but for simple TM loads, that is, not the
375 after-write, after-read, etc optimized variants. */
376
377 static bool
378 is_tm_simple_load (gimple stmt)
379 {
380 tree fndecl;
381
382 if (gimple_code (stmt) != GIMPLE_CALL)
383 return false;
384
385 fndecl = gimple_call_fndecl (stmt);
386 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
387 {
388 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
389 return (fcode == BUILT_IN_TM_LOAD_1
390 || fcode == BUILT_IN_TM_LOAD_2
391 || fcode == BUILT_IN_TM_LOAD_4
392 || fcode == BUILT_IN_TM_LOAD_8
393 || fcode == BUILT_IN_TM_LOAD_FLOAT
394 || fcode == BUILT_IN_TM_LOAD_DOUBLE
395 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
396 || fcode == BUILT_IN_TM_LOAD_M64
397 || fcode == BUILT_IN_TM_LOAD_M128
398 || fcode == BUILT_IN_TM_LOAD_M256);
399 }
400 return false;
401 }
402
403 /* Return true if STMT is a TM store. */
404
405 static bool
406 is_tm_store (gimple stmt)
407 {
408 tree fndecl;
409
410 if (gimple_code (stmt) != GIMPLE_CALL)
411 return false;
412
413 fndecl = gimple_call_fndecl (stmt);
414 return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
415 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
416 }
417
418 /* Same as above, but for simple TM stores, that is, not the
419 after-write, after-read, etc optimized variants. */
420
421 static bool
422 is_tm_simple_store (gimple stmt)
423 {
424 tree fndecl;
425
426 if (gimple_code (stmt) != GIMPLE_CALL)
427 return false;
428
429 fndecl = gimple_call_fndecl (stmt);
430 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
431 {
432 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
433 return (fcode == BUILT_IN_TM_STORE_1
434 || fcode == BUILT_IN_TM_STORE_2
435 || fcode == BUILT_IN_TM_STORE_4
436 || fcode == BUILT_IN_TM_STORE_8
437 || fcode == BUILT_IN_TM_STORE_FLOAT
438 || fcode == BUILT_IN_TM_STORE_DOUBLE
439 || fcode == BUILT_IN_TM_STORE_LDOUBLE
440 || fcode == BUILT_IN_TM_STORE_M64
441 || fcode == BUILT_IN_TM_STORE_M128
442 || fcode == BUILT_IN_TM_STORE_M256);
443 }
444 return false;
445 }
446
447 /* Return true if FNDECL is BUILT_IN_TM_ABORT. */
448
449 static bool
450 is_tm_abort (tree fndecl)
451 {
452 return (fndecl
453 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
454 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
455 }
456
457 /* Build a GENERIC tree for a user abort. This is called by front ends
458 while transforming the __tm_abort statement. */
459
460 tree
461 build_tm_abort_call (location_t loc, bool is_outer)
462 {
463 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
464 build_int_cst (integer_type_node,
465 AR_USERABORT
466 | (is_outer ? AR_OUTERABORT : 0)));
467 }
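/* A sketch of the GENERIC this produces (assuming the usual TM
   cancel syntax):

     __transaction_cancel;            ->  __builtin___tm_abort (AR_USERABORT)
     __transaction_cancel [[outer]];  ->  __builtin___tm_abort (AR_USERABORT
                                                                | AR_OUTERABORT)
   */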
468 \f
469 /* Map for arbitrary function replacement under TM, as created
470 by the tm_wrap attribute. */
471
472 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
473 htab_t tm_wrap_map;
474
475 void
476 record_tm_replacement (tree from, tree to)
477 {
478 struct tree_map **slot, *h;
479
480 /* Do not inline wrapper functions that will get replaced in the TM
481 pass.
482
483 Suppose you have foo() that will get replaced into tmfoo(). Make
484 sure the inliner doesn't try to outsmart us and inline foo()
485 before we get a chance to do the TM replacement. */
486 DECL_UNINLINABLE (from) = 1;
487
488 if (tm_wrap_map == NULL)
489 tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
490
491 h = ggc_alloc<tree_map> ();
492 h->hash = htab_hash_pointer (from);
493 h->base.from = from;
494 h->to = to;
495
496 slot = (struct tree_map **)
497 htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
498 *slot = h;
499 }
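/* A sketch of how a replacement is typically registered from user
   code (the function names here are made up):

     void foo (void);
     void tm_foo (void) __attribute__((transaction_wrap (foo)));

   The attribute handler calls record_tm_replacement (foo, tm_foo),
   and find_tm_replacement_function below then redirects calls to foo
   made inside a transaction to tm_foo.  */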
500
501 /* Return a TM-aware replacement function for DECL. */
502
503 static tree
504 find_tm_replacement_function (tree fndecl)
505 {
506 if (tm_wrap_map)
507 {
508 struct tree_map *h, in;
509
510 in.base.from = fndecl;
511 in.hash = htab_hash_pointer (fndecl);
512 h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
513 if (h)
514 return h->to;
515 }
516
517 /* ??? We may well want TM versions of most of the common <string.h>
518 functions. For now, we already have the following defined. */
519 /* Adjust expand_call_tm() attributes as necessary for the cases
520 handled here: */
521 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
522 switch (DECL_FUNCTION_CODE (fndecl))
523 {
524 case BUILT_IN_MEMCPY:
525 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
526 case BUILT_IN_MEMMOVE:
527 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
528 case BUILT_IN_MEMSET:
529 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
530 default:
531 return NULL;
532 }
533
534 return NULL;
535 }
536
537 /* When appropriate, record TM replacement for memory allocation functions.
538
539 FROM is the FNDECL to wrap. */
540 void
541 tm_malloc_replacement (tree from)
542 {
543 const char *str;
544 tree to;
545
546 if (TREE_CODE (from) != FUNCTION_DECL)
547 return;
548
549 /* If we have a previous replacement, the user must be explicitly
550 wrapping malloc/calloc/free. They better know what they're
551 doing... */
552 if (find_tm_replacement_function (from))
553 return;
554
555 str = IDENTIFIER_POINTER (DECL_NAME (from));
556
557 if (!strcmp (str, "malloc"))
558 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
559 else if (!strcmp (str, "calloc"))
560 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
561 else if (!strcmp (str, "free"))
562 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
563 else
564 return;
565
566 TREE_NOTHROW (to) = 0;
567
568 record_tm_replacement (from, to);
569 }
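/* For example (a sketch of the effect): given ordinary declarations
   of the standard allocation functions, tm_malloc_replacement records

     malloc  ->  BUILT_IN_TM_MALLOC  (the _ITM_malloc entry point in libitm)
     calloc  ->  BUILT_IN_TM_CALLOC  (_ITM_calloc)
     free    ->  BUILT_IN_TM_FREE    (_ITM_free)

   Any other function name is left alone.  */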
570 \f
571 /* Diagnostics for tm_safe functions/regions. Called by the front end
572 once we've lowered the function to high-gimple. */
573
574 /* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
575 Process exactly one statement. WI->INFO is set to non-null when in
576 the context of a tm_safe function, and null for a __transaction block. */
577
578 #define DIAG_TM_OUTER 1
579 #define DIAG_TM_SAFE 2
580 #define DIAG_TM_RELAXED 4
581
582 struct diagnose_tm
583 {
584 unsigned int summary_flags : 8;
585 unsigned int block_flags : 8;
586 unsigned int func_flags : 8;
587 unsigned int saw_volatile : 1;
588 gimple stmt;
589 };
590
591 /* Return true if T is a volatile variable of some kind. */
592
593 static bool
594 volatile_var_p (tree t)
595 {
596 return (SSA_VAR_P (t)
597 && TREE_THIS_VOLATILE (TREE_TYPE (t)));
598 }
599
600 /* Tree callback function for diagnose_tm pass. */
601
602 static tree
603 diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
604 void *data)
605 {
606 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
607 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
608
609 if (volatile_var_p (*tp)
610 && d->block_flags & DIAG_TM_SAFE
611 && !d->saw_volatile)
612 {
613 d->saw_volatile = 1;
614 error_at (gimple_location (d->stmt),
615 "invalid volatile use of %qD inside transaction",
616 *tp);
617 }
618
619 return NULL_TREE;
620 }
621
622 static inline bool
623 is_tm_safe_or_pure (const_tree x)
624 {
625 return is_tm_safe (x) || is_tm_pure (x);
626 }
627
628 static tree
629 diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
630 struct walk_stmt_info *wi)
631 {
632 gimple stmt = gsi_stmt (*gsi);
633 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
634
635 /* Save stmt for use in leaf analysis. */
636 d->stmt = stmt;
637
638 switch (gimple_code (stmt))
639 {
640 case GIMPLE_CALL:
641 {
642 tree fn = gimple_call_fn (stmt);
643
644 if ((d->summary_flags & DIAG_TM_OUTER) == 0
645 && is_tm_may_cancel_outer (fn))
646 error_at (gimple_location (stmt),
647 "%<transaction_may_cancel_outer%> function call not within"
648 " outer transaction or %<transaction_may_cancel_outer%>");
649
650 if (d->summary_flags & DIAG_TM_SAFE)
651 {
652 bool is_safe, direct_call_p;
653 tree replacement;
654
655 if (TREE_CODE (fn) == ADDR_EXPR
656 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
657 {
658 direct_call_p = true;
659 replacement = TREE_OPERAND (fn, 0);
660 replacement = find_tm_replacement_function (replacement);
661 if (replacement)
662 fn = replacement;
663 }
664 else
665 {
666 direct_call_p = false;
667 replacement = NULL_TREE;
668 }
669
670 if (is_tm_safe_or_pure (fn))
671 is_safe = true;
672 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
673 {
674 /* A function explicitly marked transaction_callable as
675 opposed to transaction_safe is being defined to be
676 unsafe as part of its ABI, regardless of its contents. */
677 is_safe = false;
678 }
679 else if (direct_call_p)
680 {
681 if (IS_TYPE_OR_DECL_P (fn)
682 && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
683 is_safe = true;
684 else if (replacement)
685 {
686 /* ??? At present we've been considering replacements
687 merely transaction_callable, and therefore might
688 enter irrevocable. The tm_wrap attribute has not
689 yet made it into the new language spec. */
690 is_safe = false;
691 }
692 else
693 {
694 /* ??? Diagnostics for unmarked direct calls moved into
695 the IPA pass. Section 3.2 of the spec details how
696 functions not marked should be considered "implicitly
697 safe" based on having examined the function body. */
698 is_safe = true;
699 }
700 }
701 else
702 {
703 /* An unmarked indirect call. Consider it unsafe even
704 though optimization may yet figure out how to inline. */
705 is_safe = false;
706 }
707
708 if (!is_safe)
709 {
710 if (TREE_CODE (fn) == ADDR_EXPR)
711 fn = TREE_OPERAND (fn, 0);
712 if (d->block_flags & DIAG_TM_SAFE)
713 {
714 if (direct_call_p)
715 error_at (gimple_location (stmt),
716 "unsafe function call %qD within "
717 "atomic transaction", fn);
718 else
719 {
720 if (!DECL_P (fn) || DECL_NAME (fn))
721 error_at (gimple_location (stmt),
722 "unsafe function call %qE within "
723 "atomic transaction", fn);
724 else
725 error_at (gimple_location (stmt),
726 "unsafe indirect function call within "
727 "atomic transaction");
728 }
729 }
730 else
731 {
732 if (direct_call_p)
733 error_at (gimple_location (stmt),
734 "unsafe function call %qD within "
735 "%<transaction_safe%> function", fn);
736 else
737 {
738 if (!DECL_P (fn) || DECL_NAME (fn))
739 error_at (gimple_location (stmt),
740 "unsafe function call %qE within "
741 "%<transaction_safe%> function", fn);
742 else
743 error_at (gimple_location (stmt),
744 "unsafe indirect function call within "
745 "%<transaction_safe%> function");
746 }
747 }
748 }
749 }
750 }
751 break;
752
753 case GIMPLE_ASM:
754 /* ??? We ought to come up with a way to add attributes to
755 asm statements, and then add "transaction_safe" to it.
756 Either that or get the language spec to resurrect __tm_waiver. */
757 if (d->block_flags & DIAG_TM_SAFE)
758 error_at (gimple_location (stmt),
759 "asm not allowed in atomic transaction");
760 else if (d->func_flags & DIAG_TM_SAFE)
761 error_at (gimple_location (stmt),
762 "asm not allowed in %<transaction_safe%> function");
763 break;
764
765 case GIMPLE_TRANSACTION:
766 {
767 unsigned char inner_flags = DIAG_TM_SAFE;
768
769 if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
770 {
771 if (d->block_flags & DIAG_TM_SAFE)
772 error_at (gimple_location (stmt),
773 "relaxed transaction in atomic transaction");
774 else if (d->func_flags & DIAG_TM_SAFE)
775 error_at (gimple_location (stmt),
776 "relaxed transaction in %<transaction_safe%> function");
777 inner_flags = DIAG_TM_RELAXED;
778 }
779 else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
780 {
781 if (d->block_flags)
782 error_at (gimple_location (stmt),
783 "outer transaction in transaction");
784 else if (d->func_flags & DIAG_TM_OUTER)
785 error_at (gimple_location (stmt),
786 "outer transaction in "
787 "%<transaction_may_cancel_outer%> function");
788 else if (d->func_flags & DIAG_TM_SAFE)
789 error_at (gimple_location (stmt),
790 "outer transaction in %<transaction_safe%> function");
791 inner_flags |= DIAG_TM_OUTER;
792 }
793
794 *handled_ops_p = true;
795 if (gimple_transaction_body (stmt))
796 {
797 struct walk_stmt_info wi_inner;
798 struct diagnose_tm d_inner;
799
800 memset (&d_inner, 0, sizeof (d_inner));
801 d_inner.func_flags = d->func_flags;
802 d_inner.block_flags = d->block_flags | inner_flags;
803 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
804
805 memset (&wi_inner, 0, sizeof (wi_inner));
806 wi_inner.info = &d_inner;
807
808 walk_gimple_seq (gimple_transaction_body (stmt),
809 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
810 }
811 }
812 break;
813
814 default:
815 break;
816 }
817
818 return NULL_TREE;
819 }
820
821 static unsigned int
822 diagnose_tm_blocks (void)
823 {
824 struct walk_stmt_info wi;
825 struct diagnose_tm d;
826
827 memset (&d, 0, sizeof (d));
828 if (is_tm_may_cancel_outer (current_function_decl))
829 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
830 else if (is_tm_safe (current_function_decl))
831 d.func_flags = DIAG_TM_SAFE;
832 d.summary_flags = d.func_flags;
833
834 memset (&wi, 0, sizeof (wi));
835 wi.info = &d;
836
837 walk_gimple_seq (gimple_body (current_function_decl),
838 diagnose_tm_1, diagnose_tm_1_op, &wi);
839
840 return 0;
841 }
842
843 namespace {
844
845 const pass_data pass_data_diagnose_tm_blocks =
846 {
847 GIMPLE_PASS, /* type */
848 "*diagnose_tm_blocks", /* name */
849 OPTGROUP_NONE, /* optinfo_flags */
850 TV_TRANS_MEM, /* tv_id */
851 PROP_gimple_any, /* properties_required */
852 0, /* properties_provided */
853 0, /* properties_destroyed */
854 0, /* todo_flags_start */
855 0, /* todo_flags_finish */
856 };
857
858 class pass_diagnose_tm_blocks : public gimple_opt_pass
859 {
860 public:
861 pass_diagnose_tm_blocks (gcc::context *ctxt)
862 : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
863 {}
864
865 /* opt_pass methods: */
866 virtual bool gate (function *) { return flag_tm; }
867 virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }
868
869 }; // class pass_diagnose_tm_blocks
870
871 } // anon namespace
872
873 gimple_opt_pass *
874 make_pass_diagnose_tm_blocks (gcc::context *ctxt)
875 {
876 return new pass_diagnose_tm_blocks (ctxt);
877 }
878 \f
879 /* Instead of instrumenting thread private memory, we save the
880 addresses in a log which we later use to save/restore the addresses
881 upon transaction start/restart.
882
883 The log is keyed by address, where each element contains individual
884 statements among different code paths that perform the store.
885
886 This log is later used to generate either plain save/restore of the
887 addresses upon transaction start/restart, or calls to the ITM_L*
888 logging functions.
889
890 So for something like:
891
892 struct large { int x[1000]; };
893 struct large lala = { 0 };
894 __transaction {
895 lala.x[i] = 123;
896 ...
897 }
898
899 We can either save/restore:
900
901 lala = { 0 };
902 trxn = _ITM_startTransaction ();
903 if (trxn & a_saveLiveVariables)
904 tmp_lala1 = lala.x[i];
905 else if (a & a_restoreLiveVariables)
906 lala.x[i] = tmp_lala1;
907
908 or use the logging functions:
909
910 lala = { 0 };
911 trxn = _ITM_startTransaction ();
912 _ITM_LU4 (&lala.x[i]);
913
914 Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
915 far up the dominator tree as possible, so that a single call shadows
916 all of the writes to a given location (thus reducing the total number
917 of logging calls), but not so high that it is called on a path that
918 does not perform a write. */
919
920 /* One individual log entry. We may have multiple statements for the
921 same location if none of them dominates the others (they occur on
922 different execution paths). */
923 typedef struct tm_log_entry
924 {
925 /* Address to save. */
926 tree addr;
927 /* Entry block for the transaction this address occurs in. */
928 basic_block entry_block;
929 /* Dominating statements the store occurs in. */
930 gimple_vec stmts;
931 /* Initially, while we are building the log, we place a nonzero
932 value here to mean that this address *will* be saved with a
933 save/restore sequence. Later, when generating the save sequence
934 we place the SSA temp generated here. */
935 tree save_var;
936 } *tm_log_entry_t;
937
938
939 /* Log entry hashtable helpers. */
940
941 struct log_entry_hasher
942 {
943 typedef tm_log_entry value_type;
944 typedef tm_log_entry compare_type;
945 static inline hashval_t hash (const value_type *);
946 static inline bool equal (const value_type *, const compare_type *);
947 static inline void remove (value_type *);
948 };
949
950 /* Htab support. Return hash value for a `tm_log_entry'. */
951 inline hashval_t
952 log_entry_hasher::hash (const value_type *log)
953 {
954 return iterative_hash_expr (log->addr, 0);
955 }
956
957 /* Htab support. Return true if two log entries are the same. */
958 inline bool
959 log_entry_hasher::equal (const value_type *log1, const compare_type *log2)
960 {
961 /* FIXME:
962
963 rth: I suggest that we get rid of the component refs etc.
964 I.e. resolve the reference to base + offset.
965
966 We may need to actually finish a merge with mainline for this,
967 since we'd like to be presented with Richi's MEM_REF_EXPRs more
968 often than not. But in the meantime your tm_log_entry could save
969 the results of get_inner_reference.
970
971 See: g++.dg/tm/pr46653.C
972 */
973
974 /* Special case plain equality because operand_equal_p() below will
975 return FALSE if the addresses are equal but they have
976 side-effects (e.g. a volatile address). */
977 if (log1->addr == log2->addr)
978 return true;
979
980 return operand_equal_p (log1->addr, log2->addr, 0);
981 }
982
983 /* Htab support. Free one tm_log_entry. */
984 inline void
985 log_entry_hasher::remove (value_type *lp)
986 {
987 lp->stmts.release ();
988 free (lp);
989 }
990
991
992 /* The actual log. */
993 static hash_table<log_entry_hasher> *tm_log;
994
995 /* Addresses to log with a save/restore sequence. These should be in
996 dominator order. */
997 static vec<tree> tm_log_save_addresses;
998
999 enum thread_memory_type
1000 {
1001 mem_non_local = 0,
1002 mem_thread_local,
1003 mem_transaction_local,
1004 mem_max
1005 };
1006
1007 typedef struct tm_new_mem_map
1008 {
1009 /* SSA_NAME being dereferenced. */
1010 tree val;
1011 enum thread_memory_type local_new_memory;
1012 } tm_new_mem_map_t;
1013
1014 /* Hashtable helpers. */
1015
1016 struct tm_mem_map_hasher : typed_free_remove <tm_new_mem_map_t>
1017 {
1018 typedef tm_new_mem_map_t value_type;
1019 typedef tm_new_mem_map_t compare_type;
1020 static inline hashval_t hash (const value_type *);
1021 static inline bool equal (const value_type *, const compare_type *);
1022 };
1023
1024 inline hashval_t
1025 tm_mem_map_hasher::hash (const value_type *v)
1026 {
1027 return (intptr_t)v->val >> 4;
1028 }
1029
1030 inline bool
1031 tm_mem_map_hasher::equal (const value_type *v, const compare_type *c)
1032 {
1033 return v->val == c->val;
1034 }
1035
1036 /* Map for an SSA_NAME originally pointing to a non-aliased new piece
1037 of memory (malloc, alloca, etc.). */
1038 static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
1039
1040 /* Initialize logging data structures. */
1041 static void
1042 tm_log_init (void)
1043 {
1044 tm_log = new hash_table<log_entry_hasher> (10);
1045 tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
1046 tm_log_save_addresses.create (5);
1047 }
1048
1049 /* Free logging data structures. */
1050 static void
1051 tm_log_delete (void)
1052 {
1053 delete tm_log;
1054 tm_log = NULL;
1055 delete tm_new_mem_hash;
1056 tm_new_mem_hash = NULL;
1057 tm_log_save_addresses.release ();
1058 }
1059
1060 /* Return true if MEM is a transaction-invariant memory reference for
1061 the TM region starting at REGION_ENTRY_BLOCK. */
1062 static bool
1063 transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
1064 {
1065 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
1066 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
1067 {
1068 basic_block def_bb;
1069
1070 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
1071 return def_bb != region_entry_block
1072 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
1073 }
1074
1075 mem = strip_invariant_refs (mem);
1076 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
1077 }
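/* Illustrative example (a sketch): within

     __transaction_atomic { g = 1; *p = 2; }

   the address of the global 'g' is transaction invariant, and '*p'
   is as well when the definition of the SSA_NAME 'p' dominates the
   transaction's entry block; a dereference of a pointer computed
   inside the transaction is not.  */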
1078
1079 /* Given an address ADDR in STMT, find it in the memory log or add it,
1080 making sure to keep only the addresses highest in the dominator
1081 tree.
1082
1083 ENTRY_BLOCK is the entry_block for the transaction.
1084
1085 If we find the address in the log, make sure it's either the same
1086 address, or an equivalent one that dominates ADDR.
1087
1088 If we find the address, but neither ADDR dominates the found
1089 address, nor the found one dominates ADDR, we're on different
1090 execution paths. Add it.
1091
1092 If known, ENTRY_BLOCK is the entry block for the region, otherwise
1093 NULL. */
1094 static void
1095 tm_log_add (basic_block entry_block, tree addr, gimple stmt)
1096 {
1097 tm_log_entry **slot;
1098 struct tm_log_entry l, *lp;
1099
1100 l.addr = addr;
1101 slot = tm_log->find_slot (&l, INSERT);
1102 if (!*slot)
1103 {
1104 tree type = TREE_TYPE (addr);
1105
1106 lp = XNEW (struct tm_log_entry);
1107 lp->addr = addr;
1108 *slot = lp;
1109
1110 /* Small invariant addresses can be handled as save/restores. */
1111 if (entry_block
1112 && transaction_invariant_address_p (lp->addr, entry_block)
1113 && TYPE_SIZE_UNIT (type) != NULL
1114 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
1115 && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
1116 < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
1117 /* We must be able to copy this type normally. I.e., no
1118 special constructors and the like. */
1119 && !TREE_ADDRESSABLE (type))
1120 {
1121 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
1122 lp->stmts.create (0);
1123 lp->entry_block = entry_block;
1124 /* Save addresses separately in dominator order so we don't
1125 get confused by overlapping addresses in the save/restore
1126 sequence. */
1127 tm_log_save_addresses.safe_push (lp->addr);
1128 }
1129 else
1130 {
1131 /* Use the logging functions. */
1132 lp->stmts.create (5);
1133 lp->stmts.quick_push (stmt);
1134 lp->save_var = NULL;
1135 }
1136 }
1137 else
1138 {
1139 size_t i;
1140 gimple oldstmt;
1141
1142 lp = *slot;
1143
1144 /* If we're generating a save/restore sequence, we don't care
1145 about statements. */
1146 if (lp->save_var)
1147 return;
1148
1149 for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
1150 {
1151 if (stmt == oldstmt)
1152 return;
1153 /* We already have a store to the same address, higher up the
1154 dominator tree. Nothing to do. */
1155 if (dominated_by_p (CDI_DOMINATORS,
1156 gimple_bb (stmt), gimple_bb (oldstmt)))
1157 return;
1158 /* We should be processing blocks in dominator tree order. */
1159 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1160 gimple_bb (oldstmt), gimple_bb (stmt)));
1161 }
1162 /* Store is on a different code path. */
1163 lp->stmts.safe_push (stmt);
1164 }
1165 }
1166
1167 /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1168 result, insert the new statements before GSI. */
1169
1170 static tree
1171 gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1172 {
1173 if (TREE_CODE (x) == TARGET_MEM_REF)
1174 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1175 else
1176 x = build_fold_addr_expr (x);
1177 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1178 }
1179
1180 /* Instrument one address with the logging functions.
1181 ADDR is the address to save.
1182 STMT is the statement before which to place it. */
1183 static void
1184 tm_log_emit_stmt (tree addr, gimple stmt)
1185 {
1186 tree type = TREE_TYPE (addr);
1187 tree size = TYPE_SIZE_UNIT (type);
1188 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1189 gimple log;
1190 enum built_in_function code = BUILT_IN_TM_LOG;
1191
1192 if (type == float_type_node)
1193 code = BUILT_IN_TM_LOG_FLOAT;
1194 else if (type == double_type_node)
1195 code = BUILT_IN_TM_LOG_DOUBLE;
1196 else if (type == long_double_type_node)
1197 code = BUILT_IN_TM_LOG_LDOUBLE;
1198 else if (tree_fits_uhwi_p (size))
1199 {
1200 unsigned int n = tree_to_uhwi (size);
1201 switch (n)
1202 {
1203 case 1:
1204 code = BUILT_IN_TM_LOG_1;
1205 break;
1206 case 2:
1207 code = BUILT_IN_TM_LOG_2;
1208 break;
1209 case 4:
1210 code = BUILT_IN_TM_LOG_4;
1211 break;
1212 case 8:
1213 code = BUILT_IN_TM_LOG_8;
1214 break;
1215 default:
1216 code = BUILT_IN_TM_LOG;
1217 if (TREE_CODE (type) == VECTOR_TYPE)
1218 {
1219 if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
1220 code = BUILT_IN_TM_LOG_M64;
1221 else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
1222 code = BUILT_IN_TM_LOG_M128;
1223 else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
1224 code = BUILT_IN_TM_LOG_M256;
1225 }
1226 break;
1227 }
1228 }
1229
1230 addr = gimplify_addr (&gsi, addr);
1231 if (code == BUILT_IN_TM_LOG)
1232 log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
1233 else
1234 log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
1235 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1236 }
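/* A sketch of the selection above: logging a 4-byte scalar such as
   an 'int' emits a call to the BUILT_IN_TM_LOG_4 builtin, e.g.

     _ITM_LU4 (&x);

   while a type with no matching specialized size falls back to the
   generic BUILT_IN_TM_LOG form, which also passes the size:

     _ITM_LB (&x, sizeof (x));

   (Entry point names follow the libitm convention used in the
   overview comment near the top of this file.)  */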
1237
1238 /* Go through the log and instrument the addresses that must be instrumented
1239 with the logging functions. Leave the save/restore addresses for
1240 later. */
1241 static void
1242 tm_log_emit (void)
1243 {
1244 hash_table<log_entry_hasher>::iterator hi;
1245 struct tm_log_entry *lp;
1246
1247 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
1248 {
1249 size_t i;
1250 gimple stmt;
1251
1252 if (dump_file)
1253 {
1254 fprintf (dump_file, "TM thread private mem logging: ");
1255 print_generic_expr (dump_file, lp->addr, 0);
1256 fprintf (dump_file, "\n");
1257 }
1258
1259 if (lp->save_var)
1260 {
1261 if (dump_file)
1262 fprintf (dump_file, "DUMPING to variable\n");
1263 continue;
1264 }
1265 else
1266 {
1267 if (dump_file)
1268 fprintf (dump_file, "DUMPING with logging functions\n");
1269 for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
1270 tm_log_emit_stmt (lp->addr, stmt);
1271 }
1272 }
1273 }
1274
1275 /* Emit the save sequence for the corresponding addresses in the log.
1276 ENTRY_BLOCK is the entry block for the transaction.
1277 BB is the basic block to insert the code in. */
1278 static void
1279 tm_log_emit_saves (basic_block entry_block, basic_block bb)
1280 {
1281 size_t i;
1282 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1283 gimple stmt;
1284 struct tm_log_entry l, *lp;
1285
1286 for (i = 0; i < tm_log_save_addresses.length (); ++i)
1287 {
1288 l.addr = tm_log_save_addresses[i];
1289 lp = *(tm_log->find_slot (&l, NO_INSERT));
1290 gcc_assert (lp->save_var != NULL);
1291
1292 /* We only care about variables in the current transaction. */
1293 if (lp->entry_block != entry_block)
1294 continue;
1295
1296 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1297
1298 /* Make sure we can create an SSA_NAME for this type. For
1299 instance, aggregates aren't allowed, in which case the system
1300 will create a VOP for us and everything will just work. */
1301 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1302 {
1303 lp->save_var = make_ssa_name (lp->save_var, stmt);
1304 gimple_assign_set_lhs (stmt, lp->save_var);
1305 }
1306
1307 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1308 }
1309 }
1310
1311 /* Emit the restore sequence for the corresponding addresses in the log.
1312 ENTRY_BLOCK is the entry block for the transaction.
1313 BB is the basic block to insert the code in. */
1314 static void
1315 tm_log_emit_restores (basic_block entry_block, basic_block bb)
1316 {
1317 int i;
1318 struct tm_log_entry l, *lp;
1319 gimple_stmt_iterator gsi;
1320 gimple stmt;
1321
1322 for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
1323 {
1324 l.addr = tm_log_save_addresses[i];
1325 lp = *(tm_log->find_slot (&l, NO_INSERT));
1326 gcc_assert (lp->save_var != NULL);
1327
1328 /* We only care about variables in the current transaction. */
1329 if (lp->entry_block != entry_block)
1330 continue;
1331
1332 /* Restores are in LIFO order from the saves in case we have
1333 overlaps. */
1334 gsi = gsi_start_bb (bb);
1335
1336 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1337 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1338 }
1339 }
1340
1341 \f
1342 static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1343 struct walk_stmt_info *);
1344 static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1345 struct walk_stmt_info *);
1346
1347 /* Evaluate an address X being dereferenced and determine if it
1348 originally points to a non-aliased new chunk of memory (malloc,
1349 alloca, etc.).
1350
1351 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1352 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1353 Return MEM_NON_LOCAL otherwise.
1354
1355 ENTRY_BLOCK is the entry block to the transaction containing the
1356 dereference of X. */
1357 static enum thread_memory_type
1358 thread_private_new_memory (basic_block entry_block, tree x)
1359 {
1360 gimple stmt = NULL;
1361 enum tree_code code;
1362 tm_new_mem_map_t **slot;
1363 tm_new_mem_map_t elt, *elt_p;
1364 tree val = x;
1365 enum thread_memory_type retval = mem_transaction_local;
1366
1367 if (!entry_block
1368 || TREE_CODE (x) != SSA_NAME
1369 /* Possible uninitialized use, or a function argument. In
1370 either case, we don't care. */
1371 || SSA_NAME_IS_DEFAULT_DEF (x))
1372 return mem_non_local;
1373
1374 /* Look in cache first. */
1375 elt.val = x;
1376 slot = tm_new_mem_hash->find_slot (&elt, INSERT);
1377 elt_p = *slot;
1378 if (elt_p)
1379 return elt_p->local_new_memory;
1380
1381 /* Optimistically assume the memory is transaction local during
1382 processing. This catches recursion into this variable. */
1383 *slot = elt_p = XNEW (tm_new_mem_map_t);
1384 elt_p->val = val;
1385 elt_p->local_new_memory = mem_transaction_local;
1386
1387 /* Search DEF chain to find the original definition of this address. */
1388 do
1389 {
1390 if (ptr_deref_may_alias_global_p (x))
1391 {
1392 /* Address escapes. This is not thread-private. */
1393 retval = mem_non_local;
1394 goto new_memory_ret;
1395 }
1396
1397 stmt = SSA_NAME_DEF_STMT (x);
1398
1399 /* If the malloc call is outside the transaction, this is
1400 thread-local. */
1401 if (retval != mem_thread_local
1402 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1403 retval = mem_thread_local;
1404
1405 if (is_gimple_assign (stmt))
1406 {
1407 code = gimple_assign_rhs_code (stmt);
1408 /* x = foo ==> foo */
1409 if (code == SSA_NAME)
1410 x = gimple_assign_rhs1 (stmt);
1411 /* x = foo + n ==> foo */
1412 else if (code == POINTER_PLUS_EXPR)
1413 x = gimple_assign_rhs1 (stmt);
1414 /* x = (cast*) foo ==> foo */
1415 else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
1416 x = gimple_assign_rhs1 (stmt);
1417 /* x = c ? op1 : op2 ==> op1 or op2 just like a PHI */
1418 else if (code == COND_EXPR)
1419 {
1420 tree op1 = gimple_assign_rhs2 (stmt);
1421 tree op2 = gimple_assign_rhs3 (stmt);
1422 enum thread_memory_type mem;
1423 retval = thread_private_new_memory (entry_block, op1);
1424 if (retval == mem_non_local)
1425 goto new_memory_ret;
1426 mem = thread_private_new_memory (entry_block, op2);
1427 retval = MIN (retval, mem);
1428 goto new_memory_ret;
1429 }
1430 else
1431 {
1432 retval = mem_non_local;
1433 goto new_memory_ret;
1434 }
1435 }
1436 else
1437 {
1438 if (gimple_code (stmt) == GIMPLE_PHI)
1439 {
1440 unsigned int i;
1441 enum thread_memory_type mem;
1442 tree phi_result = gimple_phi_result (stmt);
1443
1444 /* If any of the ancestors are non-local, we are sure to
1445 be non-local. Otherwise we can avoid doing anything
1446 and inherit what has already been generated. */
1447 retval = mem_max;
1448 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1449 {
1450 tree op = PHI_ARG_DEF (stmt, i);
1451
1452 /* Exclude self-assignment. */
1453 if (phi_result == op)
1454 continue;
1455
1456 mem = thread_private_new_memory (entry_block, op);
1457 if (mem == mem_non_local)
1458 {
1459 retval = mem;
1460 goto new_memory_ret;
1461 }
1462 retval = MIN (retval, mem);
1463 }
1464 goto new_memory_ret;
1465 }
1466 break;
1467 }
1468 }
1469 while (TREE_CODE (x) == SSA_NAME);
1470
1471 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1472 /* Thread-local or transaction-local. */
1473 ;
1474 else
1475 retval = mem_non_local;
1476
1477 new_memory_ret:
1478 elt_p->local_new_memory = retval;
1479 return retval;
1480 }
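/* For example (illustrative only):

     __transaction_atomic {
       int *p = malloc (sizeof (int));
       *p = 1;
     }

   Here '*p' is classified mem_transaction_local, because the
   allocation happens inside the transaction (a restart simply
   reallocates).  If the allocation happened before the transaction
   (its block is not dominated by the transaction entry), it would
   instead be mem_thread_local; anything that may alias global memory
   is mem_non_local.  */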
1481
1482 /* Determine whether X has to be instrumented using a read
1483 or write barrier.
1484
1485 ENTRY_BLOCK is the entry block for the region in which STMT
1486 resides, or NULL if unknown.
1487
1488 STMT is the statement in which X occurs. It is used for thread
1489 private memory instrumentation. If no TPM instrumentation is
1490 desired, STMT should be null. */
1491 static bool
1492 requires_barrier (basic_block entry_block, tree x, gimple stmt)
1493 {
1494 tree orig = x;
1495 while (handled_component_p (x))
1496 x = TREE_OPERAND (x, 0);
1497
1498 switch (TREE_CODE (x))
1499 {
1500 case INDIRECT_REF:
1501 case MEM_REF:
1502 {
1503 enum thread_memory_type ret;
1504
1505 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1506 if (ret == mem_non_local)
1507 return true;
1508 if (stmt && ret == mem_thread_local)
1509 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1510 tm_log_add (entry_block, orig, stmt);
1511
1512 /* Transaction-locals require nothing at all. For malloc, a
1513 transaction restart frees the memory and we reallocate.
1514 For alloca, the stack pointer gets reset by the retry and
1515 we reallocate. */
1516 return false;
1517 }
1518
1519 case TARGET_MEM_REF:
1520 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1521 return true;
1522 x = TREE_OPERAND (TMR_BASE (x), 0);
1523 if (TREE_CODE (x) == PARM_DECL)
1524 return false;
1525 gcc_assert (TREE_CODE (x) == VAR_DECL);
1526 /* FALLTHRU */
1527
1528 case PARM_DECL:
1529 case RESULT_DECL:
1530 case VAR_DECL:
1531 if (DECL_BY_REFERENCE (x))
1532 {
1533 /* ??? This value is a pointer, but aggregate_value_p has been
1534 jigged to return true which confuses needs_to_live_in_memory.
1535 This ought to be cleaned up generically.
1536
1537 FIXME: Verify this still happens after the next mainline
1538 merge. Testcase ie g++.dg/tm/pr47554.C.
1539 */
1540 return false;
1541 }
1542
1543 if (is_global_var (x))
1544 return !TREE_READONLY (x);
1545 if (/* FIXME: This condition should actually go below in the
1546 tm_log_add() call, however is_call_clobbered() depends on
1547 aliasing info which is not available during
1548 gimplification. Since requires_barrier() gets called
1549 during lower_sequence_tm/gimplification, leave the call
1550 to needs_to_live_in_memory until we eliminate
1551 lower_sequence_tm altogether. */
1552 needs_to_live_in_memory (x))
1553 return true;
1554 else
1555 {
1556 /* For local memory that doesn't escape (aka thread private
1557 memory), we can either save the value at the beginning of
1558 the transaction and restore on restart, or call a tm
1559 function to dynamically save and restore on restart
1560 (ITM_L*). */
1561 if (stmt)
1562 tm_log_add (entry_block, orig, stmt);
1563 return false;
1564 }
1565
1566 default:
1567 return false;
1568 }
1569 }
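/* Illustrative summary (a sketch, not exhaustive): inside a
   transaction,

     g = 1;       where 'g' is a non-readonly global  -> barrier required
     x = c;       where 'c' is a read-only global     -> no barrier
     local = 1;   where 'local' must live in memory   -> barrier required
     local = 1;   where 'local' is register-like      -> no barrier; the
                                                         address is added
                                                         to the save/restore
                                                         log (tm_log_add)
   */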
1570
1571 /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1572 a transaction region. */
1573
1574 static void
1575 examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1576 {
1577 gimple stmt = gsi_stmt (*gsi);
1578
1579 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1580 *state |= GTMA_HAVE_LOAD;
1581 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1582 *state |= GTMA_HAVE_STORE;
1583 }
1584
1585 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1586
1587 static void
1588 examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1589 {
1590 gimple stmt = gsi_stmt (*gsi);
1591 tree fn;
1592
1593 if (is_tm_pure_call (stmt))
1594 return;
1595
1596 /* Check if this call is a transaction abort. */
1597 fn = gimple_call_fndecl (stmt);
1598 if (is_tm_abort (fn))
1599 *state |= GTMA_HAVE_ABORT;
1600
1601 /* Note that something may happen. */
1602 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1603 }
1604
1605 /* Lower a GIMPLE_TRANSACTION statement. */
1606
1607 static void
1608 lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1609 {
1610 gimple g, stmt = gsi_stmt (*gsi);
1611 unsigned int *outer_state = (unsigned int *) wi->info;
1612 unsigned int this_state = 0;
1613 struct walk_stmt_info this_wi;
1614
1615 /* First, lower the body. The scanning that we do inside gives
1616 us some idea of what we're dealing with. */
1617 memset (&this_wi, 0, sizeof (this_wi));
1618 this_wi.info = (void *) &this_state;
1619 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1620 lower_sequence_tm, NULL, &this_wi);
1621
1622 /* If there was absolutely nothing transaction related inside the
1623 transaction, we may elide it. Likewise if this is a nested
1624 transaction and does not contain an abort. */
1625 if (this_state == 0
1626 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1627 {
1628 if (outer_state)
1629 *outer_state |= this_state;
1630
1631 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1632 GSI_SAME_STMT);
1633 gimple_transaction_set_body (stmt, NULL);
1634
1635 gsi_remove (gsi, true);
1636 wi->removed_stmt = true;
1637 return;
1638 }
1639
1640 /* Wrap the body of the transaction in a try-finally node so that
1641 the commit call is always properly called. */
1642 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1643 if (flag_exceptions)
1644 {
1645 tree ptr;
1646 gimple_seq n_seq, e_seq;
1647
1648 n_seq = gimple_seq_alloc_with_stmt (g);
1649 e_seq = NULL;
1650
1651 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1652 1, integer_zero_node);
1653 ptr = create_tmp_var (ptr_type_node, NULL);
1654 gimple_call_set_lhs (g, ptr);
1655 gimple_seq_add_stmt (&e_seq, g);
1656
1657 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1658 1, ptr);
1659 gimple_seq_add_stmt (&e_seq, g);
1660
1661 g = gimple_build_eh_else (n_seq, e_seq);
1662 }
1663
1664 g = gimple_build_try (gimple_transaction_body (stmt),
1665 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
1666 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1667
1668 gimple_transaction_set_body (stmt, NULL);
1669
1670 /* If the transaction calls abort or if this is an outer transaction,
1671 add an "over" label afterwards. */
1672 if ((this_state & (GTMA_HAVE_ABORT))
1673 || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
1674 {
1675 tree label = create_artificial_label (UNKNOWN_LOCATION);
1676 gimple_transaction_set_label (stmt, label);
1677 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
1678 }
1679
1680 /* Record the set of operations found for use later. */
1681 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1682 gimple_transaction_set_subcode (stmt, this_state);
1683 }
1684
1685 /* Iterate through the statements in the sequence, lowering them all
1686 as appropriate for being in a transaction. */
1687
1688 static tree
1689 lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1690 struct walk_stmt_info *wi)
1691 {
1692 unsigned int *state = (unsigned int *) wi->info;
1693 gimple stmt = gsi_stmt (*gsi);
1694
1695 *handled_ops_p = true;
1696 switch (gimple_code (stmt))
1697 {
1698 case GIMPLE_ASSIGN:
1699 /* Only memory reads/writes need to be instrumented. */
1700 if (gimple_assign_single_p (stmt))
1701 examine_assign_tm (state, gsi);
1702 break;
1703
1704 case GIMPLE_CALL:
1705 examine_call_tm (state, gsi);
1706 break;
1707
1708 case GIMPLE_ASM:
1709 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1710 break;
1711
1712 case GIMPLE_TRANSACTION:
1713 lower_transaction (gsi, wi);
1714 break;
1715
1716 default:
1717 *handled_ops_p = !gimple_has_substatements (stmt);
1718 break;
1719 }
1720
1721 return NULL_TREE;
1722 }
1723
1724 /* Iterate through the statements in the sequence, lowering them all
1725 as appropriate for being outside of a transaction. */
1726
1727 static tree
1728 lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1729 struct walk_stmt_info * wi)
1730 {
1731 gimple stmt = gsi_stmt (*gsi);
1732
1733 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1734 {
1735 *handled_ops_p = true;
1736 lower_transaction (gsi, wi);
1737 }
1738 else
1739 *handled_ops_p = !gimple_has_substatements (stmt);
1740
1741 return NULL_TREE;
1742 }
1743
1744 /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1745 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1746 been moved out, and all the data required for constructing a proper
1747 CFG has been recorded. */
1748
1749 static unsigned int
1750 execute_lower_tm (void)
1751 {
1752 struct walk_stmt_info wi;
1753 gimple_seq body;
1754
1755 /* Transactional clones aren't created until a later pass. */
1756 gcc_assert (!decl_is_tm_clone (current_function_decl));
1757
1758 body = gimple_body (current_function_decl);
1759 memset (&wi, 0, sizeof (wi));
1760 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1761 gimple_set_body (current_function_decl, body);
1762
1763 return 0;
1764 }
1765
1766 namespace {
1767
1768 const pass_data pass_data_lower_tm =
1769 {
1770 GIMPLE_PASS, /* type */
1771 "tmlower", /* name */
1772 OPTGROUP_NONE, /* optinfo_flags */
1773 TV_TRANS_MEM, /* tv_id */
1774 PROP_gimple_lcf, /* properties_required */
1775 0, /* properties_provided */
1776 0, /* properties_destroyed */
1777 0, /* todo_flags_start */
1778 0, /* todo_flags_finish */
1779 };
1780
1781 class pass_lower_tm : public gimple_opt_pass
1782 {
1783 public:
1784 pass_lower_tm (gcc::context *ctxt)
1785 : gimple_opt_pass (pass_data_lower_tm, ctxt)
1786 {}
1787
1788 /* opt_pass methods: */
1789 virtual bool gate (function *) { return flag_tm; }
1790 virtual unsigned int execute (function *) { return execute_lower_tm (); }
1791
1792 }; // class pass_lower_tm
1793
1794 } // anon namespace
1795
1796 gimple_opt_pass *
1797 make_pass_lower_tm (gcc::context *ctxt)
1798 {
1799 return new pass_lower_tm (ctxt);
1800 }
1801 \f
1802 /* Collect region information for each transaction. */
1803
1804 struct tm_region
1805 {
1806 /* Link to the next unnested transaction. */
1807 struct tm_region *next;
1808
1809 /* Link to the next inner transaction. */
1810 struct tm_region *inner;
1811
1812 /* Link to the next outer transaction. */
1813 struct tm_region *outer;
1814
1815 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1816 After TM_MARK, this gets replaced by a call to
1817 BUILT_IN_TM_START. */
1818 gimple transaction_stmt;
1819
1820 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1821 BUILT_IN_TM_START, this field is true if the transaction is an
1822 outer transaction. */
1823 bool original_transaction_was_outer;
1824
1825 /* Return value from BUILT_IN_TM_START. */
1826 tree tm_state;
1827
1828 /* The entry block to this region. This will always be the first
1829 block of the body of the transaction. */
1830 basic_block entry_block;
1831
1832 /* The first block after an expanded call to _ITM_beginTransaction. */
1833 basic_block restart_block;
1834
1835 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1836 These blocks are still a part of the region (i.e., the border is
1837 inclusive). Note that this set is only complete for paths in the CFG
1838 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1839 the edge to the "over" label. */
1840 bitmap exit_blocks;
1841
1842 /* The set of all blocks that have a TM_IRREVOCABLE call. */
1843 bitmap irr_blocks;
1844 };
1845
1846 typedef struct tm_region *tm_region_p;
1847
1848 /* True if there are pending edge statements to be committed for the
1849 current function being scanned in the tmmark pass. */
1850 bool pending_edge_inserts_p;
1851
1852 static struct tm_region *all_tm_regions;
1853 static bitmap_obstack tm_obstack;
1854
1855
1856 /* A subroutine of tm_region_init. Record the existence of the
1857 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1858
1859 static struct tm_region *
1860 tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
1861 {
1862 struct tm_region *region;
1863
1864 region = (struct tm_region *)
1865 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1866
1867 if (outer)
1868 {
1869 region->next = outer->inner;
1870 outer->inner = region;
1871 }
1872 else
1873 {
1874 region->next = all_tm_regions;
1875 all_tm_regions = region;
1876 }
1877 region->inner = NULL;
1878 region->outer = outer;
1879
1880 region->transaction_stmt = stmt;
1881 region->original_transaction_was_outer = false;
1882 region->tm_state = NULL;
1883
1884 /* There are either one or two edges out of the block containing
1885 the GIMPLE_TRANSACTION, one to the actual region and one to the
1886 "over" label if the region contains an abort. The former will
1887 always be the one marked FALLTHRU. */
1888 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1889
1890 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1891 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1892
1893 return region;
1894 }
1895
1896 /* A subroutine of tm_region_init. Record all the exit and
1897 irrevocable blocks in BB into the region's exit_blocks and
1898 irr_blocks bitmaps. Returns the new region being scanned. */
1899
1900 static struct tm_region *
1901 tm_region_init_1 (struct tm_region *region, basic_block bb)
1902 {
1903 gimple_stmt_iterator gsi;
1904 gimple g;
1905
1906 if (!region
1907 || (!region->irr_blocks && !region->exit_blocks))
1908 return region;
1909
1910 /* Check to see if this is the end of a region by seeing if it
1911 contains a call to __builtin_tm_commit{,_eh}. Note that the
1912 outermost region for DECL_IS_TM_CLONE need not collect this. */
1913 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1914 {
1915 g = gsi_stmt (gsi);
1916 if (gimple_code (g) == GIMPLE_CALL)
1917 {
1918 tree fn = gimple_call_fndecl (g);
1919 if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
1920 {
1921 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
1922 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
1923 && region->exit_blocks)
1924 {
1925 bitmap_set_bit (region->exit_blocks, bb->index);
1926 region = region->outer;
1927 break;
1928 }
1929 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
1930 bitmap_set_bit (region->irr_blocks, bb->index);
1931 }
1932 }
1933 }
1934 return region;
1935 }
1936
1937 /* Collect all of the transaction regions within the current function
1938 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1939 an "outermost" region for use by tm clones. */
1940
1941 static void
1942 tm_region_init (struct tm_region *region)
1943 {
1944 gimple g;
1945 edge_iterator ei;
1946 edge e;
1947 basic_block bb;
1948 auto_vec<basic_block> queue;
1949 bitmap visited_blocks = BITMAP_ALLOC (NULL);
1950 struct tm_region *old_region;
1951 auto_vec<tm_region_p> bb_regions;
1952
1953 all_tm_regions = region;
1954 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
1955
1956   /* We could store this information in bb->aux, but we may get called
1957      through get_all_tm_blocks() from another pass that may already be
1958      using bb->aux.  */
1959 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
1960
1961 queue.safe_push (bb);
1962 bb_regions[bb->index] = region;
1963 do
1964 {
1965 bb = queue.pop ();
1966 region = bb_regions[bb->index];
1967 bb_regions[bb->index] = NULL;
1968
1969 /* Record exit and irrevocable blocks. */
1970 region = tm_region_init_1 (region, bb);
1971
1972       /* Check whether the last statement in the block begins a new region.  */
1973 g = last_stmt (bb);
1974 old_region = region;
1975 if (g && gimple_code (g) == GIMPLE_TRANSACTION)
1976 region = tm_region_init_0 (region, bb, g);
1977
1978 /* Process subsequent blocks. */
1979 FOR_EACH_EDGE (e, ei, bb->succs)
1980 if (!bitmap_bit_p (visited_blocks, e->dest->index))
1981 {
1982 bitmap_set_bit (visited_blocks, e->dest->index);
1983 queue.safe_push (e->dest);
1984
1985 	    /* If the current block started a new region, make sure that only
1986 	       the entry block of the new region is associated with the new region.
1987 	       Other successors are still part of the old region.  */
1988 if (old_region != region && e->dest != region->entry_block)
1989 bb_regions[e->dest->index] = old_region;
1990 else
1991 bb_regions[e->dest->index] = region;
1992 }
1993 }
1994 while (!queue.is_empty ());
1995 BITMAP_FREE (visited_blocks);
1996 }
1997
1998 /* The "gate" function for all transactional memory expansion and optimization
1999 passes. We collect region information for each top-level transaction, and
2000 if we don't find any, we skip all of the TM passes. Each region will have
2001 all of the exit blocks recorded, and the originating statement. */
2002
2003 static bool
2004 gate_tm_init (void)
2005 {
2006 if (!flag_tm)
2007 return false;
2008
2009 calculate_dominance_info (CDI_DOMINATORS);
2010 bitmap_obstack_initialize (&tm_obstack);
2011
2012 /* If the function is a TM_CLONE, then the entire function is the region. */
2013 if (decl_is_tm_clone (current_function_decl))
2014 {
2015 struct tm_region *region = (struct tm_region *)
2016 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2017 memset (region, 0, sizeof (*region));
2018 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2019 /* For a clone, the entire function is the region. But even if
2020 we don't need to record any exit blocks, we may need to
2021 record irrevocable blocks. */
2022 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2023
2024 tm_region_init (region);
2025 }
2026 else
2027 {
2028 tm_region_init (NULL);
2029
2030 /* If we didn't find any regions, cleanup and skip the whole tree
2031 of tm-related optimizations. */
2032 if (all_tm_regions == NULL)
2033 {
2034 bitmap_obstack_release (&tm_obstack);
2035 return false;
2036 }
2037 }
2038
2039 return true;
2040 }
2041
2042 namespace {
2043
2044 const pass_data pass_data_tm_init =
2045 {
2046 GIMPLE_PASS, /* type */
2047 "*tminit", /* name */
2048 OPTGROUP_NONE, /* optinfo_flags */
2049 TV_TRANS_MEM, /* tv_id */
2050 ( PROP_ssa | PROP_cfg ), /* properties_required */
2051 0, /* properties_provided */
2052 0, /* properties_destroyed */
2053 0, /* todo_flags_start */
2054 0, /* todo_flags_finish */
2055 };
2056
2057 class pass_tm_init : public gimple_opt_pass
2058 {
2059 public:
2060 pass_tm_init (gcc::context *ctxt)
2061 : gimple_opt_pass (pass_data_tm_init, ctxt)
2062 {}
2063
2064 /* opt_pass methods: */
2065 virtual bool gate (function *) { return gate_tm_init (); }
2066
2067 }; // class pass_tm_init
2068
2069 } // anon namespace
2070
2071 gimple_opt_pass *
2072 make_pass_tm_init (gcc::context *ctxt)
2073 {
2074 return new pass_tm_init (ctxt);
2075 }
2076 \f
2077 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2078    represented by REGION.  */
2079
2080 static inline void
2081 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2082 {
2083 if (region && region->transaction_stmt)
2084 {
2085 flags |= gimple_transaction_subcode (region->transaction_stmt);
2086 gimple_transaction_set_subcode (region->transaction_stmt, flags);
2087 }
2088 }
2089
2090 /* Construct a memory load in a transactional context. Return the
2091 gimple statement performing the load, or NULL if there is no
2092 TM_LOAD builtin of the appropriate size to do the load.
2093
2094 LOC is the location to use for the new statement(s). */
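/* A rough sketch of the intended transformation (the exact builtin
   names come from gtm-builtins.def or the target hook): a 4-byte load

     x = *p;

   becomes a call to the BUILT_IN_TM_LOAD_4 builtin applied to the
   address of *p, plus a VIEW_CONVERT_EXPR assignment through a
   temporary when the builtin's return type differs from TYPE.  */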
2095
2096 static gimple
2097 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2098 {
2099 enum built_in_function code = END_BUILTINS;
2100 tree t, type = TREE_TYPE (rhs), decl;
2101 gimple gcall;
2102
2103 if (type == float_type_node)
2104 code = BUILT_IN_TM_LOAD_FLOAT;
2105 else if (type == double_type_node)
2106 code = BUILT_IN_TM_LOAD_DOUBLE;
2107 else if (type == long_double_type_node)
2108 code = BUILT_IN_TM_LOAD_LDOUBLE;
2109 else if (TYPE_SIZE_UNIT (type) != NULL
2110 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2111 {
2112 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2113 {
2114 case 1:
2115 code = BUILT_IN_TM_LOAD_1;
2116 break;
2117 case 2:
2118 code = BUILT_IN_TM_LOAD_2;
2119 break;
2120 case 4:
2121 code = BUILT_IN_TM_LOAD_4;
2122 break;
2123 case 8:
2124 code = BUILT_IN_TM_LOAD_8;
2125 break;
2126 }
2127 }
2128
2129 if (code == END_BUILTINS)
2130 {
2131 decl = targetm.vectorize.builtin_tm_load (type);
2132 if (!decl)
2133 return NULL;
2134 }
2135 else
2136 decl = builtin_decl_explicit (code);
2137
2138 t = gimplify_addr (gsi, rhs);
2139 gcall = gimple_build_call (decl, 1, t);
2140 gimple_set_location (gcall, loc);
2141
2142 t = TREE_TYPE (TREE_TYPE (decl));
2143 if (useless_type_conversion_p (type, t))
2144 {
2145 gimple_call_set_lhs (gcall, lhs);
2146 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2147 }
2148 else
2149 {
2150 gimple g;
2151 tree temp;
2152
2153 temp = create_tmp_reg (t, NULL);
2154 gimple_call_set_lhs (gcall, temp);
2155 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2156
2157 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2158 g = gimple_build_assign (lhs, t);
2159 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2160 }
2161
2162 return gcall;
2163 }
2164
2165
2166 /* Similarly for storing TYPE in a transactional context. */
2167
2168 static gimple
2169 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2170 {
2171 enum built_in_function code = END_BUILTINS;
2172 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2173 gimple gcall;
2174
2175 if (type == float_type_node)
2176 code = BUILT_IN_TM_STORE_FLOAT;
2177 else if (type == double_type_node)
2178 code = BUILT_IN_TM_STORE_DOUBLE;
2179 else if (type == long_double_type_node)
2180 code = BUILT_IN_TM_STORE_LDOUBLE;
2181 else if (TYPE_SIZE_UNIT (type) != NULL
2182 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2183 {
2184 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2185 {
2186 case 1:
2187 code = BUILT_IN_TM_STORE_1;
2188 break;
2189 case 2:
2190 code = BUILT_IN_TM_STORE_2;
2191 break;
2192 case 4:
2193 code = BUILT_IN_TM_STORE_4;
2194 break;
2195 case 8:
2196 code = BUILT_IN_TM_STORE_8;
2197 break;
2198 }
2199 }
2200
2201 if (code == END_BUILTINS)
2202 {
2203 fn = targetm.vectorize.builtin_tm_store (type);
2204 if (!fn)
2205 return NULL;
2206 }
2207 else
2208 fn = builtin_decl_explicit (code);
2209
2210 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2211
2212 if (TREE_CODE (rhs) == CONSTRUCTOR)
2213 {
2214 /* Handle the easy initialization to zero. */
2215 if (!CONSTRUCTOR_ELTS (rhs))
2216 rhs = build_int_cst (simple_type, 0);
2217 else
2218 {
2219 /* ...otherwise punt to the caller and probably use
2220 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2221 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2222 valid gimple. */
2223 return NULL;
2224 }
2225 }
2226 else if (!useless_type_conversion_p (simple_type, type))
2227 {
2228 gimple g;
2229 tree temp;
2230
2231 temp = create_tmp_reg (simple_type, NULL);
2232 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2233 g = gimple_build_assign (temp, t);
2234 gimple_set_location (g, loc);
2235 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2236
2237 rhs = temp;
2238 }
2239
2240 t = gimplify_addr (gsi, lhs);
2241 gcall = gimple_build_call (fn, 2, t, rhs);
2242 gimple_set_location (gcall, loc);
2243 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2244
2245 return gcall;
2246 }
2247
2248
2249 /* Expand an assignment statement into transactional builtins. */
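/* A sketch of the dispatch below: a pure load or pure store goes
   through build_tm_load / build_tm_store; an assignment where both
   sides need barriers, or where no suitably sized builtin exists,
   falls back to a BUILT_IN_TM_MEMMOVE call on the two addresses.  */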
2250
2251 static void
2252 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2253 {
2254 gimple stmt = gsi_stmt (*gsi);
2255 location_t loc = gimple_location (stmt);
2256 tree lhs = gimple_assign_lhs (stmt);
2257 tree rhs = gimple_assign_rhs1 (stmt);
2258 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2259 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2260 gimple gcall = NULL;
2261
2262 if (!load_p && !store_p)
2263 {
2264 /* Add thread private addresses to log if applicable. */
2265 requires_barrier (region->entry_block, lhs, stmt);
2266 gsi_next (gsi);
2267 return;
2268 }
2269
2270 // Remove original load/store statement.
2271 gsi_remove (gsi, true);
2272
2273 if (load_p && !store_p)
2274 {
2275 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2276 gcall = build_tm_load (loc, lhs, rhs, gsi);
2277 }
2278 else if (store_p && !load_p)
2279 {
2280 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2281 gcall = build_tm_store (loc, lhs, rhs, gsi);
2282 }
2283 if (!gcall)
2284 {
2285 tree lhs_addr, rhs_addr, tmp;
2286
2287 if (load_p)
2288 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2289 if (store_p)
2290 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2291
2292 /* ??? Figure out if there's any possible overlap between the LHS
2293 and the RHS and if not, use MEMCPY. */
2294
2295 if (load_p && is_gimple_reg (lhs))
2296 {
2297 tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
2298 lhs_addr = build_fold_addr_expr (tmp);
2299 }
2300 else
2301 {
2302 tmp = NULL_TREE;
2303 lhs_addr = gimplify_addr (gsi, lhs);
2304 }
2305 rhs_addr = gimplify_addr (gsi, rhs);
2306 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2307 3, lhs_addr, rhs_addr,
2308 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2309 gimple_set_location (gcall, loc);
2310 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2311
2312 if (tmp)
2313 {
2314 gcall = gimple_build_assign (lhs, tmp);
2315 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2316 }
2317 }
2318
2319 /* Now that we have the load/store in its instrumented form, add
2320 thread private addresses to the log if applicable. */
2321 if (!store_p)
2322 requires_barrier (region->entry_block, lhs, gcall);
2323
2324 // The calls to build_tm_{store,load} above inserted the instrumented
2325 // call into the stream.
2326 // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2327 }
2328
2329
2330 /* Expand a call statement as appropriate for a transaction. That is,
2331 either verify that the call does not affect the transaction, or
2332 redirect the call to a clone that handles transactions, or change
2333 the transaction state to IRREVOCABLE. Return true if the call is
2334 one of the builtins that end a transaction. */
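/* A sketch of the main cases handled below: the TM memcpy/memmove/
   memset builtins only update the region subcode; an unsafe indirect
   call marks the region GTMA_MAY_ENTER_IRREVOCABLE; and a call whose
   LHS needs a barrier is rewritten so the return value lands in a
   temporary that is then stored through the instrumented path.  */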
2335
2336 static bool
2337 expand_call_tm (struct tm_region *region,
2338 gimple_stmt_iterator *gsi)
2339 {
2340 gimple stmt = gsi_stmt (*gsi);
2341 tree lhs = gimple_call_lhs (stmt);
2342 tree fn_decl;
2343 struct cgraph_node *node;
2344 bool retval = false;
2345
2346 fn_decl = gimple_call_fndecl (stmt);
2347
2348 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2349 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2350 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2351 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2352 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2353
2354 if (is_tm_pure_call (stmt))
2355 return false;
2356
2357 if (fn_decl)
2358 retval = is_tm_ending_fndecl (fn_decl);
2359 if (!retval)
2360 {
2361 /* Assume all non-const/pure calls write to memory, except
2362 transaction ending builtins. */
2363 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2364 }
2365
2366 /* For indirect calls, we already generated a call into the runtime. */
2367 if (!fn_decl)
2368 {
2369 tree fn = gimple_call_fn (stmt);
2370
2371 /* We are guaranteed never to go irrevocable on a safe or pure
2372 call, and the pure call was handled above. */
2373 if (is_tm_safe (fn))
2374 return false;
2375 else
2376 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2377
2378 return false;
2379 }
2380
2381 node = cgraph_node::get (fn_decl);
2382 /* All calls should have cgraph here. */
2383 if (!node)
2384 {
2385 /* We can have a nodeless call here if some pass after IPA-tm
2386 added uninstrumented calls. For example, loop distribution
2387 can transform certain loop constructs into __builtin_mem*
2388 calls. In this case, see if we have a suitable TM
2389 replacement and fill in the gaps. */
2390 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2391 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2392 gcc_assert (code == BUILT_IN_MEMCPY
2393 || code == BUILT_IN_MEMMOVE
2394 || code == BUILT_IN_MEMSET);
2395
2396 tree repl = find_tm_replacement_function (fn_decl);
2397 if (repl)
2398 {
2399 gimple_call_set_fndecl (stmt, repl);
2400 update_stmt (stmt);
2401 node = cgraph_node::create (repl);
2402 node->local.tm_may_enter_irr = false;
2403 return expand_call_tm (region, gsi);
2404 }
2405 gcc_unreachable ();
2406 }
2407 if (node->local.tm_may_enter_irr)
2408 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2409
2410 if (is_tm_abort (fn_decl))
2411 {
2412 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2413 return true;
2414 }
2415
2416 /* Instrument the store if needed.
2417
2418 If the assignment happens inside the function call (return slot
2419 optimization), there is no instrumentation to be done, since
2420 the callee should have done the right thing. */
2421 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2422 && !gimple_call_return_slot_opt_p (stmt))
2423 {
2424 tree tmp = create_tmp_reg (TREE_TYPE (lhs), NULL);
2425 location_t loc = gimple_location (stmt);
2426 edge fallthru_edge = NULL;
2427
2428 /* Remember if the call was going to throw. */
2429 if (stmt_can_throw_internal (stmt))
2430 {
2431 edge_iterator ei;
2432 edge e;
2433 basic_block bb = gimple_bb (stmt);
2434
2435 FOR_EACH_EDGE (e, ei, bb->succs)
2436 if (e->flags & EDGE_FALLTHRU)
2437 {
2438 fallthru_edge = e;
2439 break;
2440 }
2441 }
2442
2443 gimple_call_set_lhs (stmt, tmp);
2444 update_stmt (stmt);
2445 stmt = gimple_build_assign (lhs, tmp);
2446 gimple_set_location (stmt, loc);
2447
2448 /* We cannot throw in the middle of a BB. If the call was going
2449 to throw, place the instrumentation on the fallthru edge, so
2450 the call remains the last statement in the block. */
2451 if (fallthru_edge)
2452 {
2453 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
2454 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2455 expand_assign_tm (region, &fallthru_gsi);
2456 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2457 pending_edge_inserts_p = true;
2458 }
2459 else
2460 {
2461 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
2462 expand_assign_tm (region, gsi);
2463 }
2464
2465 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2466 }
2467
2468 return retval;
2469 }
2470
2471
2472 /* Expand all statements in BB as appropriate for being inside
2473 a transaction. */
2474
2475 static void
2476 expand_block_tm (struct tm_region *region, basic_block bb)
2477 {
2478 gimple_stmt_iterator gsi;
2479
2480 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2481 {
2482 gimple stmt = gsi_stmt (gsi);
2483 switch (gimple_code (stmt))
2484 {
2485 case GIMPLE_ASSIGN:
2486 /* Only memory reads/writes need to be instrumented. */
2487 if (gimple_assign_single_p (stmt)
2488 && !gimple_clobber_p (stmt))
2489 {
2490 expand_assign_tm (region, &gsi);
2491 continue;
2492 }
2493 break;
2494
2495 case GIMPLE_CALL:
2496 if (expand_call_tm (region, &gsi))
2497 return;
2498 break;
2499
2500 case GIMPLE_ASM:
2501 gcc_unreachable ();
2502
2503 default:
2504 break;
2505 }
2506 if (!gsi_end_p (gsi))
2507 gsi_next (&gsi);
2508 }
2509 }
2510
2511 /* Return the list of basic-blocks in REGION.
2512
2513    STOP_AT_IRREVOCABLE_P is true if the caller is uninterested in blocks
2514    following a TM_IRREVOCABLE call.
2515
2516    INCLUDE_UNINSTRUMENTED_P is true if we should include the
2517    uninstrumented code path blocks in the list of basic blocks
2518    returned, false otherwise.  */
2519
2520 static vec<basic_block>
2521 get_tm_region_blocks (basic_block entry_block,
2522 bitmap exit_blocks,
2523 bitmap irr_blocks,
2524 bitmap all_region_blocks,
2525 bool stop_at_irrevocable_p,
2526 bool include_uninstrumented_p = true)
2527 {
2528 vec<basic_block> bbs = vNULL;
2529 unsigned i;
2530 edge e;
2531 edge_iterator ei;
2532 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2533
2534 i = 0;
2535 bbs.safe_push (entry_block);
2536 bitmap_set_bit (visited_blocks, entry_block->index);
2537
2538 do
2539 {
2540 basic_block bb = bbs[i++];
2541
2542 if (exit_blocks &&
2543 bitmap_bit_p (exit_blocks, bb->index))
2544 continue;
2545
2546 if (stop_at_irrevocable_p
2547 && irr_blocks
2548 && bitmap_bit_p (irr_blocks, bb->index))
2549 continue;
2550
2551 FOR_EACH_EDGE (e, ei, bb->succs)
2552 if ((include_uninstrumented_p
2553 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2554 && !bitmap_bit_p (visited_blocks, e->dest->index))
2555 {
2556 bitmap_set_bit (visited_blocks, e->dest->index);
2557 bbs.safe_push (e->dest);
2558 }
2559 }
2560 while (i < bbs.length ());
2561
2562 if (all_region_blocks)
2563 bitmap_ior_into (all_region_blocks, visited_blocks);
2564
2565 BITMAP_FREE (visited_blocks);
2566 return bbs;
2567 }
2568
2569 // Callback data for collect_bb2reg.
2570 struct bb2reg_stuff
2571 {
2572 vec<tm_region_p> *bb2reg;
2573 bool include_uninstrumented_p;
2574 };
2575
2576 // Callback for expand_regions, collect innermost region data for each bb.
2577 static void *
2578 collect_bb2reg (struct tm_region *region, void *data)
2579 {
2580 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2581 vec<tm_region_p> *bb2reg = stuff->bb2reg;
2582 vec<basic_block> queue;
2583 unsigned int i;
2584 basic_block bb;
2585
2586 queue = get_tm_region_blocks (region->entry_block,
2587 region->exit_blocks,
2588 region->irr_blocks,
2589 NULL,
2590 /*stop_at_irr_p=*/true,
2591 stuff->include_uninstrumented_p);
2592
2593   // We expect expand_regions to visit each outer region before its inner
2594   // regions.  Therefore the last region seen for any bb is the innermost.
2595 FOR_EACH_VEC_ELT (queue, i, bb)
2596 (*bb2reg)[bb->index] = region;
2597
2598 queue.release ();
2599 return NULL;
2600 }
2601
2602 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2603 // which a basic block belongs. Note that we only consider the instrumented
2604 // code paths for the region; the uninstrumented code paths are ignored if
2605 // INCLUDE_UNINSTRUMENTED_P is false.
2606 //
2607 // ??? This data is very similar to the bb_regions array that is collected
2608 // during tm_region_init. Or, rather, this data is similar to what could
2609 // be used within tm_region_init. The actual computation in tm_region_init
2610 // begins and ends with bb_regions entirely full of NULL pointers, due to
2611 // the way in which pointers are swapped in and out of the array.
2612 //
2613 // ??? Our callers expect that blocks are not shared between transactions.
2614 // When the optimizers get too smart, and blocks are shared, then during
2615 // the tm_mark phase we'll add log entries to only one of the two transactions,
2616 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2617 // cycles.  The symptom is SSA defs that do not dominate their uses.
2618 // Note that the optimizers were locally correct with their transformation,
2619 // as we have no info within the program that suggests that the blocks cannot
2620 // be shared.
2621 //
2622 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2623 // only known instance of this block sharing.
2624
2625 static vec<tm_region_p>
2626 get_bb_regions_instrumented (bool traverse_clones,
2627 bool include_uninstrumented_p)
2628 {
2629 unsigned n = last_basic_block_for_fn (cfun);
2630 struct bb2reg_stuff stuff;
2631 vec<tm_region_p> ret;
2632
2633 ret.create (n);
2634 ret.safe_grow_cleared (n);
2635 stuff.bb2reg = &ret;
2636 stuff.include_uninstrumented_p = include_uninstrumented_p;
2637 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2638
2639 return ret;
2640 }
2641
2642 /* Set BB_IN_TRANSACTION for all basic blocks that appear in a
2643    transaction.  */
2644
2645 void
2646 compute_transaction_bits (void)
2647 {
2648 struct tm_region *region;
2649 vec<basic_block> queue;
2650 unsigned int i;
2651 basic_block bb;
2652
2653   /* ??? Perhaps we need to abstract gate_tm_init further, because we
2654      certainly don't need it to calculate CDI_DOMINATORS info.  */
2655 gate_tm_init ();
2656
2657 FOR_EACH_BB_FN (bb, cfun)
2658 bb->flags &= ~BB_IN_TRANSACTION;
2659
2660 for (region = all_tm_regions; region; region = region->next)
2661 {
2662 queue = get_tm_region_blocks (region->entry_block,
2663 region->exit_blocks,
2664 region->irr_blocks,
2665 NULL,
2666 /*stop_at_irr_p=*/true);
2667 for (i = 0; queue.iterate (i, &bb); ++i)
2668 bb->flags |= BB_IN_TRANSACTION;
2669 queue.release ();
2670 }
2671
2672 if (all_tm_regions)
2673 bitmap_obstack_release (&tm_obstack);
2674 }
2675
2676 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2677 call to BUILT_IN_TM_START. */
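/* Roughly, and depending on which edges and subcode bits are present
   (a sketch, not literal GIMPLE):

     tm_state = call to BUILT_IN_TM_START (PR_* properties);
     if (tm_state & A_RESTORELIVEVARIABLES)  restore logged values;
     if (tm_state & A_ABORTTRANSACTION)      goto over;
     if (tm_state & A_RUNUNINSTRUMENTEDCODE) goto uninstrumented path;
     ... instrumented body ...  */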
2678
2679 static void *
2680 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2681 {
2682 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2683 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2684 tree tm_state = region->tm_state;
2685 tree tm_state_type = TREE_TYPE (tm_state);
2686 edge abort_edge = NULL;
2687 edge inst_edge = NULL;
2688 edge uninst_edge = NULL;
2689 edge fallthru_edge = NULL;
2690
2691 // Identify the various successors of the transaction start.
2692 {
2693 edge_iterator i;
2694 edge e;
2695 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2696 {
2697 if (e->flags & EDGE_TM_ABORT)
2698 abort_edge = e;
2699 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2700 uninst_edge = e;
2701 else
2702 inst_edge = e;
2703 if (e->flags & EDGE_FALLTHRU)
2704 fallthru_edge = e;
2705 }
2706 }
2707
2708 /* ??? There are plenty of bits here we're not computing. */
2709 {
2710 int subcode = gimple_transaction_subcode (region->transaction_stmt);
2711 int flags = 0;
2712 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2713 flags |= PR_DOESGOIRREVOCABLE;
2714 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2715 flags |= PR_HASNOIRREVOCABLE;
2716 /* If the transaction does not have an abort in lexical scope and is not
2717 marked as an outer transaction, then it will never abort. */
2718 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2719 flags |= PR_HASNOABORT;
2720 if ((subcode & GTMA_HAVE_STORE) == 0)
2721 flags |= PR_READONLY;
2722 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2723 flags |= PR_INSTRUMENTEDCODE;
2724 if (uninst_edge)
2725 flags |= PR_UNINSTRUMENTEDCODE;
2726 if (subcode & GTMA_IS_OUTER)
2727 region->original_transaction_was_outer = true;
2728 tree t = build_int_cst (tm_state_type, flags);
2729 gimple call = gimple_build_call (tm_start, 1, t);
2730 gimple_call_set_lhs (call, tm_state);
2731 gimple_set_location (call, gimple_location (region->transaction_stmt));
2732
2733 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2734 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2735 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2736 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2737 gsi_remove (&gsi, true);
2738 region->transaction_stmt = call;
2739 }
2740
2741 // Generate log saves.
2742 if (!tm_log_save_addresses.is_empty ())
2743 tm_log_emit_saves (region->entry_block, transaction_bb);
2744
2745 // In the beginning, we've no tests to perform on transaction restart.
2746 // Note that after this point, transaction_bb becomes the "most recent
2747 // block containing tests for the transaction".
2748 region->restart_block = region->entry_block;
2749
2750 // Generate log restores.
2751 if (!tm_log_save_addresses.is_empty ())
2752 {
2753 basic_block test_bb = create_empty_bb (transaction_bb);
2754 basic_block code_bb = create_empty_bb (test_bb);
2755 basic_block join_bb = create_empty_bb (code_bb);
2756 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2757 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2758 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2759 if (region->restart_block == region->entry_block)
2760 region->restart_block = test_bb;
2761
2762 tree t1 = create_tmp_reg (tm_state_type, NULL);
2763 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2764 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2765 tm_state, t2);
2766 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2767 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2768
2769 t2 = build_int_cst (tm_state_type, 0);
2770 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2771 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2772
2773 tm_log_emit_restores (region->entry_block, code_bb);
2774
2775 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2776 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2777 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2778 redirect_edge_pred (fallthru_edge, join_bb);
2779
2780 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2781 join_bb->count = test_bb->count = transaction_bb->count;
2782
2783 ei->probability = PROB_ALWAYS;
2784 et->probability = PROB_LIKELY;
2785 ef->probability = PROB_UNLIKELY;
2786 et->count = apply_probability (test_bb->count, et->probability);
2787 ef->count = apply_probability (test_bb->count, ef->probability);
2788
2789 code_bb->count = et->count;
2790 code_bb->frequency = EDGE_FREQUENCY (et);
2791
2792 transaction_bb = join_bb;
2793 }
2794
2795 // If we have an ABORT edge, create a test to perform the abort.
2796 if (abort_edge)
2797 {
2798 basic_block test_bb = create_empty_bb (transaction_bb);
2799 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2800 if (region->restart_block == region->entry_block)
2801 region->restart_block = test_bb;
2802
2803 tree t1 = create_tmp_reg (tm_state_type, NULL);
2804 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2805 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2806 tm_state, t2);
2807 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2808 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2809
2810 t2 = build_int_cst (tm_state_type, 0);
2811 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2812 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2813
2814 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2815 test_bb->frequency = transaction_bb->frequency;
2816 test_bb->count = transaction_bb->count;
2817 ei->probability = PROB_ALWAYS;
2818
2819       // Not abort edge.  If both are live, choose one at random, as we'll
2820       // be fixing that up below.
2821 redirect_edge_pred (fallthru_edge, test_bb);
2822 fallthru_edge->flags = EDGE_FALSE_VALUE;
2823 fallthru_edge->probability = PROB_VERY_LIKELY;
2824 fallthru_edge->count
2825 = apply_probability (test_bb->count, fallthru_edge->probability);
2826
2827 // Abort/over edge.
2828 redirect_edge_pred (abort_edge, test_bb);
2829 abort_edge->flags = EDGE_TRUE_VALUE;
2830 abort_edge->probability = PROB_VERY_UNLIKELY;
2831 abort_edge->count
2832 = apply_probability (test_bb->count, abort_edge->probability);
2833
2834 transaction_bb = test_bb;
2835 }
2836
2837 // If we have both instrumented and uninstrumented code paths, select one.
2838 if (inst_edge && uninst_edge)
2839 {
2840 basic_block test_bb = create_empty_bb (transaction_bb);
2841 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2842 if (region->restart_block == region->entry_block)
2843 region->restart_block = test_bb;
2844
2845 tree t1 = create_tmp_reg (tm_state_type, NULL);
2846 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2847
2848 gimple stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1,
2849 tm_state, t2);
2850 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2851 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2852
2853 t2 = build_int_cst (tm_state_type, 0);
2854 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2855 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2856
2857 // Create the edge into test_bb first, as we want to copy values
2858 // out of the fallthru edge.
2859 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2860 e->probability = fallthru_edge->probability;
2861 test_bb->count = e->count = fallthru_edge->count;
2862 test_bb->frequency = EDGE_FREQUENCY (e);
2863
2864       // Now update the edges to the inst/uninst implementations.
2865       // For now assume that the paths are equally likely.  When using HTM,
2866       // we'll try the uninst path first and fall back to the inst path if HTM
2867       // buffers are exceeded.  Without HTM we start with the inst path and
2868       // use the uninst path when falling back to serial mode.
2869 redirect_edge_pred (inst_edge, test_bb);
2870 inst_edge->flags = EDGE_FALSE_VALUE;
2871 inst_edge->probability = REG_BR_PROB_BASE / 2;
2872 inst_edge->count
2873 = apply_probability (test_bb->count, inst_edge->probability);
2874
2875 redirect_edge_pred (uninst_edge, test_bb);
2876 uninst_edge->flags = EDGE_TRUE_VALUE;
2877 uninst_edge->probability = REG_BR_PROB_BASE / 2;
2878 uninst_edge->count
2879 = apply_probability (test_bb->count, uninst_edge->probability);
2880 }
2881
2882 // If we have no previous special cases, and we have PHIs at the beginning
2883 // of the atomic region, this means we have a loop at the beginning of the
2884   // atomic region that shares the first block.  This can cause problems when
2885   // the transaction restart abnormal edges are added in the tm_edges pass.
2886 // Solve this by adding a new empty block to receive the abnormal edges.
2887 if (region->restart_block == region->entry_block
2888 && phi_nodes (region->entry_block))
2889 {
2890 basic_block empty_bb = create_empty_bb (transaction_bb);
2891 region->restart_block = empty_bb;
2892 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
2893
2894 redirect_edge_pred (fallthru_edge, empty_bb);
2895 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
2896 }
2897
2898 return NULL;
2899 }
2900
2901 /* Generate the temporary to be used for the return value of
2902 BUILT_IN_TM_START. */
2903
2904 static void *
2905 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2906 {
2907 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2908 region->tm_state =
2909 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2910
2911 // Reset the subcode, post optimizations. We'll fill this in
2912 // again as we process blocks.
2913 if (region->exit_blocks)
2914 {
2915 unsigned int subcode
2916 = gimple_transaction_subcode (region->transaction_stmt);
2917
2918 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2919 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2920 | GTMA_MAY_ENTER_IRREVOCABLE
2921 | GTMA_HAS_NO_INSTRUMENTATION);
2922 else
2923 subcode &= GTMA_DECLARATION_MASK;
2924 gimple_transaction_set_subcode (region->transaction_stmt, subcode);
2925 }
2926
2927 return NULL;
2928 }
2929
2930 // Propagate flags from inner transactions outwards.
2931 static void
2932 propagate_tm_flags_out (struct tm_region *region)
2933 {
2934 if (region == NULL)
2935 return;
2936 propagate_tm_flags_out (region->inner);
2937
2938 if (region->outer && region->outer->transaction_stmt)
2939 {
2940 unsigned s = gimple_transaction_subcode (region->transaction_stmt);
2941 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
2942 | GTMA_MAY_ENTER_IRREVOCABLE);
2943 s |= gimple_transaction_subcode (region->outer->transaction_stmt);
2944 gimple_transaction_set_subcode (region->outer->transaction_stmt, s);
2945 }
2946
2947 propagate_tm_flags_out (region->next);
2948 }
2949
2950 /* Entry point to the MARK phase of TM expansion. Here we replace
2951 transactional memory statements with calls to builtins, and function
2952 calls with their transactional clones (if available). But we don't
2953 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
2954
2955 static unsigned int
2956 execute_tm_mark (void)
2957 {
2958 pending_edge_inserts_p = false;
2959
2960 expand_regions (all_tm_regions, generate_tm_state, NULL,
2961 /*traverse_clones=*/true);
2962
2963 tm_log_init ();
2964
2965 vec<tm_region_p> bb_regions
2966 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2967 /*include_uninstrumented_p=*/false);
2968 struct tm_region *r;
2969 unsigned i;
2970
2971 // Expand memory operations into calls into the runtime.
2972 // This collects log entries as well.
2973 FOR_EACH_VEC_ELT (bb_regions, i, r)
2974 {
2975 if (r != NULL)
2976 {
2977 if (r->transaction_stmt)
2978 {
2979 unsigned sub = gimple_transaction_subcode (r->transaction_stmt);
2980
2981 /* If we're sure to go irrevocable, there won't be
2982 anything to expand, since the run-time will go
2983 irrevocable right away. */
2984 if (sub & GTMA_DOES_GO_IRREVOCABLE
2985 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
2986 continue;
2987 }
2988 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
2989 }
2990 }
2991
2992 bb_regions.release ();
2993
2994 // Propagate flags from inner transactions outwards.
2995 propagate_tm_flags_out (all_tm_regions);
2996
2997 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
2998 expand_regions (all_tm_regions, expand_transaction, NULL,
2999 /*traverse_clones=*/false);
3000
3001 tm_log_emit ();
3002 tm_log_delete ();
3003
3004 if (pending_edge_inserts_p)
3005 gsi_commit_edge_inserts ();
3006 free_dominance_info (CDI_DOMINATORS);
3007 return 0;
3008 }
3009
3010 namespace {
3011
3012 const pass_data pass_data_tm_mark =
3013 {
3014 GIMPLE_PASS, /* type */
3015 "tmmark", /* name */
3016 OPTGROUP_NONE, /* optinfo_flags */
3017 TV_TRANS_MEM, /* tv_id */
3018 ( PROP_ssa | PROP_cfg ), /* properties_required */
3019 0, /* properties_provided */
3020 0, /* properties_destroyed */
3021 0, /* todo_flags_start */
3022 TODO_update_ssa, /* todo_flags_finish */
3023 };
3024
3025 class pass_tm_mark : public gimple_opt_pass
3026 {
3027 public:
3028 pass_tm_mark (gcc::context *ctxt)
3029 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3030 {}
3031
3032 /* opt_pass methods: */
3033 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3034
3035 }; // class pass_tm_mark
3036
3037 } // anon namespace
3038
3039 gimple_opt_pass *
3040 make_pass_tm_mark (gcc::context *ctxt)
3041 {
3042 return new pass_tm_mark (ctxt);
3043 }
3044 \f
3045
3046 /* Create an abnormal edge from STMT at ITER to DEST_BB, splitting the
3047    block as necessary.  Adjust *PNEXT as needed for the split block.  */
3048
3049 static inline void
3050 split_bb_make_tm_edge (gimple stmt, basic_block dest_bb,
3051 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3052 {
3053 basic_block bb = gimple_bb (stmt);
3054 if (!gsi_one_before_end_p (iter))
3055 {
3056 edge e = split_block (bb, stmt);
3057 *pnext = gsi_start_bb (e->dest);
3058 }
3059 make_edge (bb, dest_bb, EDGE_ABNORMAL);
3060
3061 // Record the need for the edge for the benefit of the rtl passes.
3062 if (cfun->gimple_df->tm_restart == NULL)
3063 cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
3064 struct_ptr_eq, ggc_free);
3065
3066 struct tm_restart_node dummy;
3067 dummy.stmt = stmt;
3068 dummy.label_or_list = gimple_block_label (dest_bb);
3069
3070 void **slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
3071 struct tm_restart_node *n = (struct tm_restart_node *) *slot;
3072 if (n == NULL)
3073 {
3074 n = ggc_alloc<tm_restart_node> ();
3075 *n = dummy;
3076 }
3077 else
3078 {
3079 tree old = n->label_or_list;
3080 if (TREE_CODE (old) == LABEL_DECL)
3081 old = tree_cons (NULL, old, NULL);
3082 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3083 }
3084 }
3085
3086 /* Split block BB as necessary for every builtin function we added, and
3087 wire up the abnormal back edges implied by the transaction restart. */
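// A sketch of the effect: after this runs, a block ending in, say, a
// BUILT_IN_TM_COMMIT call also has an EDGE_ABNORMAL edge back to the
// restart_block of the outermost enclosing region (or, for an outer
// abort, to the region originally marked outer), so a runtime-driven
// transaction restart has a path in the CFG.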
3088
3089 static void
3090 expand_block_edges (struct tm_region *const region, basic_block bb)
3091 {
3092 gimple_stmt_iterator gsi, next_gsi;
3093
3094 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3095 {
3096 gimple stmt = gsi_stmt (gsi);
3097
3098 next_gsi = gsi;
3099 gsi_next (&next_gsi);
3100
3101 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3102 if (gimple_code (stmt) != GIMPLE_CALL
3103 || (gimple_call_flags (stmt) & ECF_TM_BUILTIN) == 0)
3104 continue;
3105
3106 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)) == BUILT_IN_TM_ABORT)
3107 {
3108 	  // If we have a ``__transaction_cancel [[outer]]'', there is only
3109 // one abnormal edge: to the transaction marked OUTER.
3110 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3111 // constant argument, which we can examine here. Users invoking
3112 // TM_ABORT directly get what they deserve.
3113 tree arg = gimple_call_arg (stmt, 0);
3114 if (TREE_CODE (arg) == INTEGER_CST
3115 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3116 && !decl_is_tm_clone (current_function_decl))
3117 {
3118 // Find the GTMA_IS_OUTER transaction.
3119 for (struct tm_region *o = region; o; o = o->outer)
3120 if (o->original_transaction_was_outer)
3121 {
3122 split_bb_make_tm_edge (stmt, o->restart_block,
3123 gsi, &next_gsi);
3124 break;
3125 }
3126
3127 // Otherwise, the front-end should have semantically checked
3128 // outer aborts, but in either case the target region is not
3129 // within this function.
3130 continue;
3131 }
3132
3133 	  // Non-outer TM aborts have an abnormal edge to the innermost
3134 	  // transaction, the one being aborted.
3135 split_bb_make_tm_edge (stmt, region->restart_block, gsi, &next_gsi);
3136 }
3137
3138 // All TM builtins have an abnormal edge to the outer-most transaction.
3139       // We never restart inner transactions.  For tm clones, we know a priori
3140 // that the outer-most transaction is outside the function.
3141 if (decl_is_tm_clone (current_function_decl))
3142 continue;
3143
3144 if (cfun->gimple_df->tm_restart == NULL)
3145 cfun->gimple_df->tm_restart
3146 = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free);
3147
3148 // All TM builtins have an abnormal edge to the outer-most transaction.
3149 // We never restart inner transactions.
3150 for (struct tm_region *o = region; o; o = o->outer)
3151 if (!o->outer)
3152 {
3153 split_bb_make_tm_edge (stmt, o->restart_block, gsi, &next_gsi);
3154 break;
3155 }
3156
3157 // Delete any tail-call annotation that may have been added.
3158 // The tail-call pass may have mis-identified the commit as being
3159 // a candidate because we had not yet added this restart edge.
3160 gimple_call_set_tail (stmt, false);
3161 }
3162 }
3163
3164 /* Entry point to the final expansion of transactional nodes. */
3165
3166 namespace {
3167
3168 const pass_data pass_data_tm_edges =
3169 {
3170 GIMPLE_PASS, /* type */
3171 "tmedge", /* name */
3172 OPTGROUP_NONE, /* optinfo_flags */
3173 TV_TRANS_MEM, /* tv_id */
3174 ( PROP_ssa | PROP_cfg ), /* properties_required */
3175 0, /* properties_provided */
3176 0, /* properties_destroyed */
3177 0, /* todo_flags_start */
3178 TODO_update_ssa, /* todo_flags_finish */
3179 };
3180
3181 class pass_tm_edges : public gimple_opt_pass
3182 {
3183 public:
3184 pass_tm_edges (gcc::context *ctxt)
3185 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3186 {}
3187
3188 /* opt_pass methods: */
3189 virtual unsigned int execute (function *);
3190
3191 }; // class pass_tm_edges
3192
3193 unsigned int
3194 pass_tm_edges::execute (function *fun)
3195 {
3196 vec<tm_region_p> bb_regions
3197 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3198 /*include_uninstrumented_p=*/true);
3199 struct tm_region *r;
3200 unsigned i;
3201
3202 FOR_EACH_VEC_ELT (bb_regions, i, r)
3203 if (r != NULL)
3204 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3205
3206 bb_regions.release ();
3207
3208 /* We've got to release the dominance info now, to indicate that it
3209 must be rebuilt completely. Otherwise we'll crash trying to update
3210 the SSA web in the TODO section following this pass. */
3211 free_dominance_info (CDI_DOMINATORS);
3212 bitmap_obstack_release (&tm_obstack);
3213 all_tm_regions = NULL;
3214
3215 return 0;
3216 }
3217
3218 } // anon namespace
3219
3220 gimple_opt_pass *
3221 make_pass_tm_edges (gcc::context *ctxt)
3222 {
3223 return new pass_tm_edges (ctxt);
3224 }
3225 \f
3226 /* Helper function for expand_regions. Expand REGION and recurse to
3227 the inner region. Call CALLBACK on each region. CALLBACK returns
3228 NULL to continue the traversal, otherwise a non-null value which
3229 this function will return as well. TRAVERSE_CLONES is true if we
3230 should traverse transactional clones. */
3231
3232 static void *
3233 expand_regions_1 (struct tm_region *region,
3234 void *(*callback)(struct tm_region *, void *),
3235 void *data,
3236 bool traverse_clones)
3237 {
3238 void *retval = NULL;
3239 if (region->exit_blocks
3240 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3241 {
3242 retval = callback (region, data);
3243 if (retval)
3244 return retval;
3245 }
3246 if (region->inner)
3247 {
3248 retval = expand_regions (region->inner, callback, data, traverse_clones);
3249 if (retval)
3250 return retval;
3251 }
3252 return retval;
3253 }
3254
3255 /* Traverse the regions enclosed and including REGION. Execute
3256 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3257 continue the traversal, otherwise a non-null value which this
3258 function will return as well. TRAVERSE_CLONES is true if we should
3259 traverse transactional clones. */
3260
3261 static void *
3262 expand_regions (struct tm_region *region,
3263 void *(*callback)(struct tm_region *, void *),
3264 void *data,
3265 bool traverse_clones)
3266 {
3267 void *retval = NULL;
3268 while (region)
3269 {
3270 retval = expand_regions_1 (region, callback, data, traverse_clones);
3271 if (retval)
3272 return retval;
3273 region = region->next;
3274 }
3275 return retval;
3276 }
3277
3278 \f
3279 /* A unique TM memory operation. */
3280 typedef struct tm_memop
3281 {
3282 /* Unique ID that all memory operations to the same location have. */
3283 unsigned int value_id;
3284 /* Address of load/store. */
3285 tree addr;
3286 } *tm_memop_t;
3287
3288 /* TM memory operation hashtable helpers. */
3289
3290 struct tm_memop_hasher : typed_free_remove <tm_memop>
3291 {
3292 typedef tm_memop value_type;
3293 typedef tm_memop compare_type;
3294 static inline hashval_t hash (const value_type *);
3295 static inline bool equal (const value_type *, const compare_type *);
3296 };
3297
3298 /* Htab support. Return a hash value for a `tm_memop'. */
3299 inline hashval_t
3300 tm_memop_hasher::hash (const value_type *mem)
3301 {
3302 tree addr = mem->addr;
3303 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3304      actually done with operand_equal_p (see tm_memop_hasher::equal).  */
3305 if (TREE_CODE (addr) == ADDR_EXPR)
3306 addr = TREE_OPERAND (addr, 0);
3307 return iterative_hash_expr (addr, 0);
3308 }
3309
3310 /* Htab support. Return true if two tm_memop's are the same. */
3311 inline bool
3312 tm_memop_hasher::equal (const value_type *mem1, const compare_type *mem2)
3313 {
3314 return operand_equal_p (mem1->addr, mem2->addr, 0);
3315 }
3316
3317 /* Sets for solving data flow equations in the memory optimization pass. */
3318 struct tm_memopt_bitmaps
3319 {
3320 /* Stores available to this BB upon entry. Basically, stores that
3321 dominate this BB. */
3322 bitmap store_avail_in;
3323 /* Stores available at the end of this BB. */
3324 bitmap store_avail_out;
3325 bitmap store_antic_in;
3326 bitmap store_antic_out;
3327 /* Reads available to this BB upon entry. Basically, reads that
3328 dominate this BB. */
3329 bitmap read_avail_in;
3330 /* Reads available at the end of this BB. */
3331 bitmap read_avail_out;
3332 /* Reads performed in this BB. */
3333 bitmap read_local;
3334 /* Writes performed in this BB. */
3335 bitmap store_local;
3336
3337 /* Temporary storage for pass. */
3338 /* Is the current BB in the worklist? */
3339 bool avail_in_worklist_p;
3340 /* Have we visited this BB? */
3341 bool visited_p;
3342 };
3343
3344 static bitmap_obstack tm_memopt_obstack;
3345
3346 /* Unique counter for TM loads and stores. Loads and stores of the
3347 same address get the same ID. */
3348 static unsigned int tm_memopt_value_id;
3349 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3350
3351 #define STORE_AVAIL_IN(BB) \
3352 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3353 #define STORE_AVAIL_OUT(BB) \
3354 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3355 #define STORE_ANTIC_IN(BB) \
3356 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3357 #define STORE_ANTIC_OUT(BB) \
3358 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3359 #define READ_AVAIL_IN(BB) \
3360 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3361 #define READ_AVAIL_OUT(BB) \
3362 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3363 #define READ_LOCAL(BB) \
3364 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3365 #define STORE_LOCAL(BB) \
3366 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3367 #define AVAIL_IN_WORKLIST_P(BB) \
3368 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3369 #define BB_VISITED_P(BB) \
3370 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3371
3372 /* Given a TM load/store in STMT, return the value number for the address
3373 it accesses. */
3374
3375 static unsigned int
3376 tm_memopt_value_number (gimple stmt, enum insert_option op)
3377 {
3378 struct tm_memop tmpmem, *mem;
3379 tm_memop **slot;
3380
3381 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3382 tmpmem.addr = gimple_call_arg (stmt, 0);
3383 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3384 if (*slot)
3385 mem = *slot;
3386 else if (op == INSERT)
3387 {
3388 mem = XNEW (struct tm_memop);
3389 *slot = mem;
3390 mem->value_id = tm_memopt_value_id++;
3391 mem->addr = tmpmem.addr;
3392 }
3393 else
3394 gcc_unreachable ();
3395 return mem->value_id;
3396 }
3397
3398 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3399
3400 static void
3401 tm_memopt_accumulate_memops (basic_block bb)
3402 {
3403 gimple_stmt_iterator gsi;
3404
3405 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3406 {
3407 gimple stmt = gsi_stmt (gsi);
3408 bitmap bits;
3409 unsigned int loc;
3410
3411 if (is_tm_store (stmt))
3412 bits = STORE_LOCAL (bb);
3413 else if (is_tm_load (stmt))
3414 bits = READ_LOCAL (bb);
3415 else
3416 continue;
3417
3418 loc = tm_memopt_value_number (stmt, INSERT);
3419 bitmap_set_bit (bits, loc);
3420 if (dump_file)
3421 {
3422 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3423 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3424 gimple_bb (stmt)->index);
3425 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3426 fprintf (dump_file, "\n");
3427 }
3428 }
3429 }
3430
3431 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3432
3433 static void
3434 dump_tm_memopt_set (const char *set_name, bitmap bits)
3435 {
3436 unsigned i;
3437 bitmap_iterator bi;
3438 const char *comma = "";
3439
3440 fprintf (dump_file, "TM memopt: %s: [", set_name);
3441 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3442 {
3443 hash_table<tm_memop_hasher>::iterator hi;
3444 struct tm_memop *mem = NULL;
3445
3446 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3447 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3448 if (mem->value_id == i)
3449 break;
3450 gcc_assert (mem->value_id == i);
3451 fprintf (dump_file, "%s", comma);
3452 comma = ", ";
3453 print_generic_expr (dump_file, mem->addr, 0);
3454 }
3455 fprintf (dump_file, "]\n");
3456 }
3457
3458 /* Prettily dump all of the memopt sets in BLOCKS. */
3459
3460 static void
3461 dump_tm_memopt_sets (vec<basic_block> blocks)
3462 {
3463 size_t i;
3464 basic_block bb;
3465
3466 for (i = 0; blocks.iterate (i, &bb); ++i)
3467 {
3468 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3469 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3470 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3471 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3472 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3473 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3474 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3475 }
3476 }
3477
3478 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3479
3480 static void
3481 tm_memopt_compute_avin (basic_block bb)
3482 {
3483 edge e;
3484 unsigned ix;
3485
3486 /* Seed with the AVOUT of any predecessor. */
3487 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3488 {
3489 e = EDGE_PRED (bb, ix);
3490       /* Make sure we have already visited this BB, and that it is thus
3491 	 initialized.
3492
3493 If e->src->aux is NULL, this predecessor is actually on an
3494 enclosing transaction. We only care about the current
3495 transaction, so ignore it. */
3496 if (e->src->aux && BB_VISITED_P (e->src))
3497 {
3498 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3499 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3500 break;
3501 }
3502 }
3503
3504 for (; ix < EDGE_COUNT (bb->preds); ix++)
3505 {
3506 e = EDGE_PRED (bb, ix);
3507 if (e->src->aux && BB_VISITED_P (e->src))
3508 {
3509 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3510 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3511 }
3512 }
3513
3514 BB_VISITED_P (bb) = true;
3515 }
3516
3517 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3518
3519 static void
3520 tm_memopt_compute_antin (basic_block bb)
3521 {
3522 edge e;
3523 unsigned ix;
3524
3525 /* Seed with the ANTIC_OUT of any successor. */
3526 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3527 {
3528 e = EDGE_SUCC (bb, ix);
3529       /* Make sure we have already visited this BB, and that it is thus
3530 	 initialized.  */
3531 if (BB_VISITED_P (e->dest))
3532 {
3533 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3534 break;
3535 }
3536 }
3537
3538 for (; ix < EDGE_COUNT (bb->succs); ix++)
3539 {
3540 e = EDGE_SUCC (bb, ix);
3541 if (BB_VISITED_P (e->dest))
3542 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3543 }
3544
3545 BB_VISITED_P (bb) = true;
3546 }
3547
3548 /* Compute the AVAIL sets for every basic block in BLOCKS.
3549
3550 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3551
3552 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3553 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3554
3555 This is basically what we do in lcm's compute_available(), but here
3556 we calculate two sets of sets (one for STOREs and one for READs),
3557 and we work on a region instead of the entire CFG.
3558
3559 REGION is the TM region.
3560 BLOCKS are the basic blocks in the region. */
3561
3562 static void
3563 tm_memopt_compute_available (struct tm_region *region,
3564 vec<basic_block> blocks)
3565 {
3566 edge e;
3567 basic_block *worklist, *qin, *qout, *qend, bb;
3568 unsigned int qlen, i;
3569 edge_iterator ei;
3570 bool changed;
3571
3572 /* Allocate a worklist array/queue. Entries are only added to the
3573 list if they were not already on the list. So the size is
3574 bounded by the number of basic blocks in the region. */
3575 qlen = blocks.length () - 1;
3576 qin = qout = worklist =
3577 XNEWVEC (basic_block, qlen);
3578
3579 /* Put every block in the region on the worklist. */
3580 for (i = 0; blocks.iterate (i, &bb); ++i)
3581 {
3582 /* Seed AVAIL_OUT with the LOCAL set. */
3583 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3584 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3585
3586 AVAIL_IN_WORKLIST_P (bb) = true;
3587 /* No need to insert the entry block, since it has an AVIN of
3588 null, and an AVOUT that has already been seeded in. */
3589 if (bb != region->entry_block)
3590 *qin++ = bb;
3591 }
3592
3593 /* The entry block has been initialized with the local sets. */
3594 BB_VISITED_P (region->entry_block) = true;
3595
3596 qin = worklist;
3597 qend = &worklist[qlen];
3598
3599 /* Iterate until the worklist is empty. */
3600 while (qlen)
3601 {
3602 /* Take the first entry off the worklist. */
3603 bb = *qout++;
3604 qlen--;
3605
3606 if (qout >= qend)
3607 qout = worklist;
3608
3609 /* This block can be added to the worklist again if necessary. */
3610 AVAIL_IN_WORKLIST_P (bb) = false;
3611 tm_memopt_compute_avin (bb);
3612
3613 /* Note: We do not add the LOCAL sets here because we already
3614 seeded the AVAIL_OUT sets with them. */
3615 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3616 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3617 if (changed
3618 && (region->exit_blocks == NULL
3619 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3620 /* If the out state of this block changed, then we need to add
3621 its successors to the worklist if they are not already in. */
3622 FOR_EACH_EDGE (e, ei, bb->succs)
3623 if (!AVAIL_IN_WORKLIST_P (e->dest)
3624 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3625 {
3626 *qin++ = e->dest;
3627 AVAIL_IN_WORKLIST_P (e->dest) = true;
3628 qlen++;
3629
3630 if (qin >= qend)
3631 qin = worklist;
3632 }
3633 }
3634
3635 free (worklist);
3636
3637 if (dump_file)
3638 dump_tm_memopt_sets (blocks);
3639 }
3640
3641 /* Compute ANTIC sets for every basic block in BLOCKS.
3642
3643 We compute STORE_ANTIC_OUT as follows:
3644
3645 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3646 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3647
3648 REGION is the TM region.
3649 BLOCKS are the basic blocks in the region. */
3650
3651 static void
3652 tm_memopt_compute_antic (struct tm_region *region,
3653 vec<basic_block> blocks)
3654 {
3655 edge e;
3656 basic_block *worklist, *qin, *qout, *qend, bb;
3657 unsigned int qlen;
3658 int i;
3659 edge_iterator ei;
3660
3661 /* Allocate a worklist array/queue. Entries are only added to the
3662 list if they were not already on the list. So the size is
3663 bounded by the number of basic blocks in the region. */
3664 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3665
3666 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3667 {
3668 bb = blocks[i];
3669
3670 /* Seed ANTIC_OUT with the LOCAL set. */
3671 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3672
3673 /* Put every block in the region on the worklist. */
3674 AVAIL_IN_WORKLIST_P (bb) = true;
3675 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3676 and their ANTIC_OUT has already been seeded in. */
3677 if (region->exit_blocks
3678 && !bitmap_bit_p (region->exit_blocks, bb->index))
3679 {
3680 qlen++;
3681 *qin++ = bb;
3682 }
3683 }
3684
3685 /* The exit blocks have been initialized with the local sets. */
3686 if (region->exit_blocks)
3687 {
3688 unsigned int i;
3689 bitmap_iterator bi;
3690 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3691 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3692 }
3693
3694 qin = worklist;
3695 qend = &worklist[qlen];
3696
3697 /* Iterate until the worklist is empty. */
3698 while (qlen)
3699 {
3700 /* Take the first entry off the worklist. */
3701 bb = *qout++;
3702 qlen--;
3703
3704 if (qout >= qend)
3705 qout = worklist;
3706
3707 /* This block can be added to the worklist again if necessary. */
3708 AVAIL_IN_WORKLIST_P (bb) = false;
3709 tm_memopt_compute_antin (bb);
3710
3711 /* Note: We do not add the LOCAL sets here because we already
3712 seeded the ANTIC_OUT sets with them. */
3713 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3714 && bb != region->entry_block)
3715 /* If the out state of this block changed, then we need to add
3716 its predecessors to the worklist if they are not already in. */
3717 FOR_EACH_EDGE (e, ei, bb->preds)
3718 if (!AVAIL_IN_WORKLIST_P (e->src))
3719 {
3720 *qin++ = e->src;
3721 AVAIL_IN_WORKLIST_P (e->src) = true;
3722 qlen++;
3723
3724 if (qin >= qend)
3725 qin = worklist;
3726 }
3727 }
3728
3729 free (worklist);
3730
3731 if (dump_file)
3732 dump_tm_memopt_sets (blocks);
3733 }
3734
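/* Worked example of the equations above (illustrative only, not part of
   the original sources): for a block B with successors S1 and S2,

     STORE_ANTIC_IN[B]  = intersect (STORE_ANTIC_OUT[S1], STORE_ANTIC_OUT[S2])
     STORE_ANTIC_OUT[B] = union (STORE_ANTIC_IN[B], STORE_LOCAL[B])

   so a store location is anticipated at B's exit only if B performs the
   store itself or it is anticipated along every path leaving B.  */
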
3735 /* Offsets of load variants from TM_LOAD. For example,
3736 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3737 See gtm-builtins.def. */
3738 #define TRANSFORM_RAR 1
3739 #define TRANSFORM_RAW 2
3740 #define TRANSFORM_RFW 3
3741 /* Offsets of store variants from TM_STORE. */
3742 #define TRANSFORM_WAR 1
3743 #define TRANSFORM_WAW 2
3744
3745 /* Inform about a load/store optimization. */
3746
3747 static void
3748 dump_tm_memopt_transform (gimple stmt)
3749 {
3750 if (dump_file)
3751 {
3752 fprintf (dump_file, "TM memopt: transforming: ");
3753 print_gimple_stmt (dump_file, stmt, 0, 0);
3754 fprintf (dump_file, "\n");
3755 }
3756 }
3757
3758 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3759 by a builtin that is OFFSET entries down in the builtins table in
3760 gtm-builtins.def. */
3761
3762 static void
3763 tm_memopt_transform_stmt (unsigned int offset,
3764 gimple stmt,
3765 gimple_stmt_iterator *gsi)
3766 {
3767 tree fn = gimple_call_fn (stmt);
3768 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3769 TREE_OPERAND (fn, 0)
3770 = builtin_decl_explicit ((enum built_in_function)
3771 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3772 + offset));
3773 gimple_call_set_fn (stmt, fn);
3774 gsi_replace (gsi, stmt, true);
3775 dump_tm_memopt_transform (stmt);
3776 }
3777
3778 /* Perform the actual TM memory optimization transformations in the
3779 basic blocks in BLOCKS. */
3780
3781 static void
3782 tm_memopt_transform_blocks (vec<basic_block> blocks)
3783 {
3784 size_t i;
3785 basic_block bb;
3786 gimple_stmt_iterator gsi;
3787
3788 for (i = 0; blocks.iterate (i, &bb); ++i)
3789 {
3790 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3791 {
3792 gimple stmt = gsi_stmt (gsi);
3793 bitmap read_avail = READ_AVAIL_IN (bb);
3794 bitmap store_avail = STORE_AVAIL_IN (bb);
3795 bitmap store_antic = STORE_ANTIC_OUT (bb);
3796 unsigned int loc;
3797
3798 if (is_tm_simple_load (stmt))
3799 {
3800 loc = tm_memopt_value_number (stmt, NO_INSERT);
3801 if (store_avail && bitmap_bit_p (store_avail, loc))
3802 tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
3803 else if (store_antic && bitmap_bit_p (store_antic, loc))
3804 {
3805 tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
3806 bitmap_set_bit (store_avail, loc);
3807 }
3808 else if (read_avail && bitmap_bit_p (read_avail, loc))
3809 tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
3810 else
3811 bitmap_set_bit (read_avail, loc);
3812 }
3813 else if (is_tm_simple_store (stmt))
3814 {
3815 loc = tm_memopt_value_number (stmt, NO_INSERT);
3816 if (store_avail && bitmap_bit_p (store_avail, loc))
3817 tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
3818 else
3819 {
3820 if (read_avail && bitmap_bit_p (read_avail, loc))
3821 tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
3822 bitmap_set_bit (store_avail, loc);
3823 }
3824 }
3825 }
3826 }
3827 }
3828
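/* Illustrative example of the transformation above (not from the original
   sources; the runtime effect is described only loosely): given

     __transaction_atomic { g = 1; t = g; g = 2; }

   the first store makes G store-available, so the subsequent load of G is
   rewritten to the read-after-write (TRANSFORM_RAW) builtin variant and the
   second store to the write-after-write (TRANSFORM_WAW) variant, hinting to
   the TM runtime that the location is already owned for writing.  */
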
3829 /* Return a new set of bitmaps for a BB. */
3830
3831 static struct tm_memopt_bitmaps *
3832 tm_memopt_init_sets (void)
3833 {
3834 struct tm_memopt_bitmaps *b
3835 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3836 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3837 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3838 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3839 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3841 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3842 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3843 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3844 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3845 return b;
3846 }
3847
3848 /* Free sets computed for each BB. */
3849
3850 static void
3851 tm_memopt_free_sets (vec<basic_block> blocks)
3852 {
3853 size_t i;
3854 basic_block bb;
3855
3856 for (i = 0; blocks.iterate (i, &bb); ++i)
3857 bb->aux = NULL;
3858 }
3859
3860 /* Clear the visited bit for every basic block in BLOCKS. */
3861
3862 static void
3863 tm_memopt_clear_visited (vec<basic_block> blocks)
3864 {
3865 size_t i;
3866 basic_block bb;
3867
3868 for (i = 0; blocks.iterate (i, &bb); ++i)
3869 BB_VISITED_P (bb) = false;
3870 }
3871
3872 /* Replace TM load/stores with hints for the runtime. We handle
3873 things like read-after-write, write-after-read, read-after-read,
3874 read-for-write, etc. */
3875
3876 static unsigned int
3877 execute_tm_memopt (void)
3878 {
3879 struct tm_region *region;
3880 vec<basic_block> bbs;
3881
3882 tm_memopt_value_id = 0;
3883 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
3884
3885 for (region = all_tm_regions; region; region = region->next)
3886 {
3887 /* All the TM stores/loads in the current region. */
3888 size_t i;
3889 basic_block bb;
3890
3891 bitmap_obstack_initialize (&tm_memopt_obstack);
3892
3893 /* Save all BBs for the current region. */
3894 bbs = get_tm_region_blocks (region->entry_block,
3895 region->exit_blocks,
3896 region->irr_blocks,
3897 NULL,
3898 false);
3899
3900 /* Collect all the memory operations. */
3901 for (i = 0; bbs.iterate (i, &bb); ++i)
3902 {
3903 bb->aux = tm_memopt_init_sets ();
3904 tm_memopt_accumulate_memops (bb);
3905 }
3906
3907 /* Solve data flow equations and transform each block accordingly. */
3908 tm_memopt_clear_visited (bbs);
3909 tm_memopt_compute_available (region, bbs);
3910 tm_memopt_clear_visited (bbs);
3911 tm_memopt_compute_antic (region, bbs);
3912 tm_memopt_transform_blocks (bbs);
3913
3914 tm_memopt_free_sets (bbs);
3915 bbs.release ();
3916 bitmap_obstack_release (&tm_memopt_obstack);
3917 tm_memopt_value_numbers->empty ();
3918 }
3919
3920 delete tm_memopt_value_numbers;
3921 tm_memopt_value_numbers = NULL;
3922 return 0;
3923 }
3924
3925 namespace {
3926
3927 const pass_data pass_data_tm_memopt =
3928 {
3929 GIMPLE_PASS, /* type */
3930 "tmmemopt", /* name */
3931 OPTGROUP_NONE, /* optinfo_flags */
3932 TV_TRANS_MEM, /* tv_id */
3933 ( PROP_ssa | PROP_cfg ), /* properties_required */
3934 0, /* properties_provided */
3935 0, /* properties_destroyed */
3936 0, /* todo_flags_start */
3937 0, /* todo_flags_finish */
3938 };
3939
3940 class pass_tm_memopt : public gimple_opt_pass
3941 {
3942 public:
3943 pass_tm_memopt (gcc::context *ctxt)
3944 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
3945 {}
3946
3947 /* opt_pass methods: */
3948 virtual bool gate (function *) { return flag_tm && optimize > 0; }
3949 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
3950
3951 }; // class pass_tm_memopt
3952
3953 } // anon namespace
3954
3955 gimple_opt_pass *
3956 make_pass_tm_memopt (gcc::context *ctxt)
3957 {
3958 return new pass_tm_memopt (ctxt);
3959 }
3960
3961 \f
3962 /* Interprocedural analysis for the creation of transactional clones.
3963 The aim of this pass is to find which functions are referenced in
3964 a non-irrevocable transaction context, and for those over which
3965 we have control (or user directive), create a version of the
3966 function which uses only the transactional interface to reference
3967 protected memories. This analysis proceeds in several steps:
3968
3969 (1) Collect the set of all possible transactional clones:
3970
3971 (a) For all local public functions marked tm_callable, push
3972 them onto the tm_callee queue.
3973
3974 (b) For all local functions, scan for calls in transaction blocks.
3975 Push the caller and callee onto the tm_caller and tm_callee
3976 queues. Count the number of callers for each callee.
3977
3978 (c) For each local function on the callee list, assume we will
3979 create a transactional clone. Push *all* calls onto the
3980 callee queues; count the number of clone callers separately
3981 from the number of original callers.
3982
3983 (2) Propagate irrevocable status up the dominator tree:
3984
3985 (a) Any external function on the callee list that is not marked
3986 tm_callable is irrevocable. Push all callers of such onto
3987 a worklist.
3988
3989 (b) For each function on the worklist, mark each block that
3990 contains an irrevocable call. Use the AND operator to
3991 propagate that mark up the dominator tree.
3992
3993 (c) If we reach the entry block for a possible transactional
3994 clone, then the transactional clone is irrevocable, and
3995 we should not create the clone after all. Push all
3996 callers onto the worklist.
3997
3998 (d) Place tm_irrevocable calls at the beginning of the relevant
3999 blocks. A special case is the entry block for the entire
4000 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4001 the library to begin the region in serial mode. Decrement
4002 the call count for all callees in the irrevocable region.
4003
4004 (3) Create the transactional clones:
4005
4006 Any tm_callee that still has a non-zero call count is cloned.
4007 */
4008
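/* Illustrative example of the overall scheme (not part of the original
   sources; the identifiers F and G are hypothetical):

     void f (void) { ... }
     void g (void) { __transaction_relaxed { f (); } }

   Step (1b) records F as a callee with one caller inside a transaction.
   Provided F is neither irrevocable nor unavailable, step (3) creates a
   transactional clone whose assembler name is the tm-mangled form (see
   tm_mangle below; an unmangled "f" becomes "_ZGTt1f"), and the call
   inside the transaction is later redirected to that clone.  */
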
4009 /* This structure is stored in the AUX field of each cgraph_node. */
4010 struct tm_ipa_cg_data
4011 {
4012 /* The clone of the function that got created. */
4013 struct cgraph_node *clone;
4014
4015 /* The tm regions in the normal function. */
4016 struct tm_region *all_tm_regions;
4017
4018 /* The blocks of the normal/clone functions that contain irrevocable
4019 calls, or blocks that are post-dominated by irrevocable calls. */
4020 bitmap irrevocable_blocks_normal;
4021 bitmap irrevocable_blocks_clone;
4022
4023 /* The blocks of the normal function that are involved in transactions. */
4024 bitmap transaction_blocks_normal;
4025
4026 /* The number of callers to the transactional clone of this function
4027 from normal and transactional clones respectively. */
4028 unsigned tm_callers_normal;
4029 unsigned tm_callers_clone;
4030
4031 /* True if all calls to this function's transactional clone
4032 are irrevocable. Also automatically true if the function
4033 has no transactional clone. */
4034 bool is_irrevocable;
4035
4036 /* Flags indicating the presence of this function in various queues. */
4037 bool in_callee_queue;
4038 bool in_worklist;
4039
4040 /* Flags indicating the kind of scan desired while in the worklist. */
4041 bool want_irr_scan_normal;
4042 };
4043
4044 typedef vec<cgraph_node *> cgraph_node_queue;
4045
4046 /* Return the ipa data associated with NODE, allocating zeroed memory
4047 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4048 and set *NODE accordingly. */
4049
4050 static struct tm_ipa_cg_data *
4051 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4052 {
4053 struct tm_ipa_cg_data *d;
4054
4055 if (traverse_aliases && (*node)->alias)
4056 *node = (*node)->get_alias_target ();
4057
4058 d = (struct tm_ipa_cg_data *) (*node)->aux;
4059
4060 if (d == NULL)
4061 {
4062 d = (struct tm_ipa_cg_data *)
4063 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4064 (*node)->aux = (void *) d;
4065 memset (d, 0, sizeof (*d));
4066 }
4067
4068 return d;
4069 }
4070
4071 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4072 it is already present. */
4073
4074 static void
4075 maybe_push_queue (struct cgraph_node *node,
4076 cgraph_node_queue *queue_p, bool *in_queue_p)
4077 {
4078 if (!*in_queue_p)
4079 {
4080 *in_queue_p = true;
4081 queue_p->safe_push (node);
4082 }
4083 }
4084
4085 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
4086 code path. QUEUE contains the basic blocks inside the transaction
4087 represented by REGION.
4088
4089 Later in split_code_paths() we will add the conditional to choose
4090 between the two alternatives. */
4091
4092 static void
4093 ipa_uninstrument_transaction (struct tm_region *region,
4094 vec<basic_block> queue)
4095 {
4096 gimple transaction = region->transaction_stmt;
4097 basic_block transaction_bb = gimple_bb (transaction);
4098 int n = queue.length ();
4099 basic_block *new_bbs = XNEWVEC (basic_block, n);
4100
4101 copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
4102 true);
4103 edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
4104 add_phi_args_after_copy (new_bbs, n, e);
4105
4106 // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it.
4107 // a) EDGE_FALLTHRU into the transaction
4108 // b) EDGE_TM_ABORT out of the transaction
4109 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
4110
4111 free (new_bbs);
4112 }
4113
4114 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4115 Queue all callees within block BB. */
4116
4117 static void
4118 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4119 basic_block bb, bool for_clone)
4120 {
4121 gimple_stmt_iterator gsi;
4122
4123 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4124 {
4125 gimple stmt = gsi_stmt (gsi);
4126 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4127 {
4128 tree fndecl = gimple_call_fndecl (stmt);
4129 if (fndecl)
4130 {
4131 struct tm_ipa_cg_data *d;
4132 unsigned *pcallers;
4133 struct cgraph_node *node;
4134
4135 if (is_tm_ending_fndecl (fndecl))
4136 continue;
4137 if (find_tm_replacement_function (fndecl))
4138 continue;
4139
4140 node = cgraph_node::get (fndecl);
4141 gcc_assert (node != NULL);
4142 d = get_cg_data (&node, true);
4143
4144 pcallers = (for_clone ? &d->tm_callers_clone
4145 : &d->tm_callers_normal);
4146 *pcallers += 1;
4147
4148 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4149 }
4150 }
4151 }
4152 }
4153
4154 /* Scan all calls in NODE that are within a transaction region,
4155 and push the resulting nodes into the callee queue. */
4156
4157 static void
4158 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4159 cgraph_node_queue *callees_p)
4160 {
4161 struct tm_region *r;
4162
4163 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4164 d->all_tm_regions = all_tm_regions;
4165
4166 for (r = all_tm_regions; r; r = r->next)
4167 {
4168 vec<basic_block> bbs;
4169 basic_block bb;
4170 unsigned i;
4171
4172 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4173 d->transaction_blocks_normal, false);
4174
4175 // Generate the uninstrumented code path for this transaction.
4176 ipa_uninstrument_transaction (r, bbs);
4177
4178 FOR_EACH_VEC_ELT (bbs, i, bb)
4179 ipa_tm_scan_calls_block (callees_p, bb, false);
4180
4181 bbs.release ();
4182 }
4183
4184 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4185 // copying them, rather than forcing us to do this externally.
4186 cgraph_edge::rebuild_edges ();
4187
4188 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4189 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4190 // Instead, just release dominators here so update_ssa recomputes them.
4191 free_dominance_info (CDI_DOMINATORS);
4192
4193 // When building the uninstrumented code path, copy_bbs will have invoked
4194 // create_new_def_for starting an "ssa update context". There is only one
4195 // instance of this context, so resolve ssa updates before moving on to
4196 // the next function.
4197 update_ssa (TODO_update_ssa);
4198 }
4199
4200 /* Scan all calls in NODE as if this is the transactional clone,
4201 and push the destinations into the callee queue. */
4202
4203 static void
4204 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4205 cgraph_node_queue *callees_p)
4206 {
4207 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4208 basic_block bb;
4209
4210 FOR_EACH_BB_FN (bb, fn)
4211 ipa_tm_scan_calls_block (callees_p, bb, true);
4212 }
4213
4214 /* The function NODE has been detected to be irrevocable. Push all
4215 of its callers onto WORKLIST for the purpose of re-scanning them. */
4216
4217 static void
4218 ipa_tm_note_irrevocable (struct cgraph_node *node,
4219 cgraph_node_queue *worklist_p)
4220 {
4221 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4222 struct cgraph_edge *e;
4223
4224 d->is_irrevocable = true;
4225
4226 for (e = node->callers; e ; e = e->next_caller)
4227 {
4228 basic_block bb;
4229 struct cgraph_node *caller;
4230
4231 /* Don't examine recursive calls. */
4232 if (e->caller == node)
4233 continue;
4234 /* Even if we think we can go irrevocable, believe the user
4235 above all. */
4236 if (is_tm_safe_or_pure (e->caller->decl))
4237 continue;
4238
4239 caller = e->caller;
4240 d = get_cg_data (&caller, true);
4241
4242 /* Check if the callee is in a transactional region. If so,
4243 schedule the function for normal re-scan as well. */
4244 bb = gimple_bb (e->call_stmt);
4245 gcc_assert (bb != NULL);
4246 if (d->transaction_blocks_normal
4247 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4248 d->want_irr_scan_normal = true;
4249
4250 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4251 }
4252 }
4253
4254 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4255 within the block is irrevocable. */
4256
4257 static bool
4258 ipa_tm_scan_irr_block (basic_block bb)
4259 {
4260 gimple_stmt_iterator gsi;
4261 tree fn;
4262
4263 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4264 {
4265 gimple stmt = gsi_stmt (gsi);
4266 switch (gimple_code (stmt))
4267 {
4268 case GIMPLE_ASSIGN:
4269 if (gimple_assign_single_p (stmt))
4270 {
4271 tree lhs = gimple_assign_lhs (stmt);
4272 tree rhs = gimple_assign_rhs1 (stmt);
4273 if (volatile_var_p (lhs) || volatile_var_p (rhs))
4274 return true;
4275 }
4276 break;
4277
4278 case GIMPLE_CALL:
4279 {
4280 tree lhs = gimple_call_lhs (stmt);
4281 if (lhs && volatile_var_p (lhs))
4282 return true;
4283
4284 if (is_tm_pure_call (stmt))
4285 break;
4286
4287 fn = gimple_call_fn (stmt);
4288
4289 /* Functions carrying the transaction_unsafe attribute are by definition irrevocable. */
4290 if (is_tm_irrevocable (fn))
4291 return true;
4292
4293 /* For direct function calls, go ahead and check for replacement
4294 functions, or transitive irrevocable functions. For indirect
4295 functions, we'll ask the runtime. */
4296 if (TREE_CODE (fn) == ADDR_EXPR)
4297 {
4298 struct tm_ipa_cg_data *d;
4299 struct cgraph_node *node;
4300
4301 fn = TREE_OPERAND (fn, 0);
4302 if (is_tm_ending_fndecl (fn))
4303 break;
4304 if (find_tm_replacement_function (fn))
4305 break;
4306
4307 node = cgraph_node::get (fn);
4308 d = get_cg_data (&node, true);
4309
4310 /* Return true if irrevocable, but above all, believe
4311 the user. */
4312 if (d->is_irrevocable
4313 && !is_tm_safe_or_pure (fn))
4314 return true;
4315 }
4316 break;
4317 }
4318
4319 case GIMPLE_ASM:
4320 /* ??? The Approved Method of indicating that an inline
4321 assembly statement is not relevant to the transaction
4322 is to wrap it in a __tm_waiver block. This is not
4323 yet implemented, so we can't check for it. */
4324 if (is_tm_safe (current_function_decl))
4325 {
4326 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4327 SET_EXPR_LOCATION (t, gimple_location (stmt));
4328 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4329 }
4330 return true;
4331
4332 default:
4333 break;
4334 }
4335 }
4336
4337 return false;
4338 }
4339
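/* Illustrative examples of statements that the scan above treats as
   irrevocable (not part of the original sources; declarations assumed):

     extern volatile int v;

     v = 1;           // volatile variable on either side of an assignment
     asm ("");        // GIMPLE_ASM, pending __tm_waiver support
     unsafe_fn ();    // callee carrying the transaction_unsafe attribute
 */
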
4340 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4341 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4342 scanning past OLD_IRR or EXIT_BLOCKS. */
4343
4344 static bool
4345 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4346 bitmap old_irr, bitmap exit_blocks)
4347 {
4348 bool any_new_irr = false;
4349 edge e;
4350 edge_iterator ei;
4351 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4352
4353 do
4354 {
4355 basic_block bb = pqueue->pop ();
4356
4357 /* Don't re-scan blocks we know already are irrevocable. */
4358 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4359 continue;
4360
4361 if (ipa_tm_scan_irr_block (bb))
4362 {
4363 bitmap_set_bit (new_irr, bb->index);
4364 any_new_irr = true;
4365 }
4366 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4367 {
4368 FOR_EACH_EDGE (e, ei, bb->succs)
4369 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4370 {
4371 bitmap_set_bit (visited_blocks, e->dest->index);
4372 pqueue->safe_push (e->dest);
4373 }
4374 }
4375 }
4376 while (!pqueue->is_empty ());
4377
4378 BITMAP_FREE (visited_blocks);
4379
4380 return any_new_irr;
4381 }
4382
4383 /* Propagate the irrevocable property both up and down the dominator tree.
4384 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4385 are the exit blocks of the TM region; OLD_IRR are the results of a previous,
4386 fully propagated scan of the dominator tree; NEW_IRR is the set of new
4387 blocks which are gaining the irrevocable property during the current scan. */
4388
4389 static void
4390 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4391 bitmap old_irr, bitmap exit_blocks)
4392 {
4393 vec<basic_block> bbs;
4394 bitmap all_region_blocks;
4395
4396 /* If this block is in the old set, no need to rescan. */
4397 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4398 return;
4399
4400 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4401 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4402 all_region_blocks, false);
4403 do
4404 {
4405 basic_block bb = bbs.pop ();
4406 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4407 bool all_son_irr = false;
4408 edge_iterator ei;
4409 edge e;
4410
4411 /* Propagate up. If all of my children are irrevocable, I am too, but
4412 we must have at least one child that is. */
4413 if (!this_irr)
4414 {
4415 FOR_EACH_EDGE (e, ei, bb->succs)
4416 {
4417 if (!bitmap_bit_p (new_irr, e->dest->index))
4418 {
4419 all_son_irr = false;
4420 break;
4421 }
4422 else
4423 all_son_irr = true;
4424 }
4425 if (all_son_irr)
4426 {
4427 /* Add block to new_irr if it hasn't already been processed. */
4428 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4429 {
4430 bitmap_set_bit (new_irr, bb->index);
4431 this_irr = true;
4432 }
4433 }
4434 }
4435
4436 /* Propagate down to everyone we immediately dominate. */
4437 if (this_irr)
4438 {
4439 basic_block son;
4440 for (son = first_dom_son (CDI_DOMINATORS, bb);
4441 son;
4442 son = next_dom_son (CDI_DOMINATORS, son))
4443 {
4444 /* Make sure block is actually in a TM region, and it
4445 isn't already in old_irr. */
4446 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4447 && bitmap_bit_p (all_region_blocks, son->index))
4448 bitmap_set_bit (new_irr, son->index);
4449 }
4450 }
4451 }
4452 while (!bbs.is_empty ());
4453
4454 BITMAP_FREE (all_region_blocks);
4455 bbs.release ();
4456 }
4457
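/* Illustrative example of the propagation above (not part of the original
   sources): in a diamond-shaped region

        A
       / \
      B   C
       \ /
        D

   if the initial scan marked B and C irrevocable, the upward step marks A
   as well once both of its successors are known to be irrevocable, and the
   downward step then marks the blocks A immediately dominates within the
   region, here D.  */
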
4458 static void
4459 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4460 {
4461 gimple_stmt_iterator gsi;
4462
4463 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4464 {
4465 gimple stmt = gsi_stmt (gsi);
4466 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4467 {
4468 tree fndecl = gimple_call_fndecl (stmt);
4469 if (fndecl)
4470 {
4471 struct tm_ipa_cg_data *d;
4472 unsigned *pcallers;
4473 struct cgraph_node *tnode;
4474
4475 if (is_tm_ending_fndecl (fndecl))
4476 continue;
4477 if (find_tm_replacement_function (fndecl))
4478 continue;
4479
4480 tnode = cgraph_node::get (fndecl);
4481 d = get_cg_data (&tnode, true);
4482
4483 pcallers = (for_clone ? &d->tm_callers_clone
4484 : &d->tm_callers_normal);
4485
4486 gcc_assert (*pcallers > 0);
4487 *pcallers -= 1;
4488 }
4489 }
4490 }
4491 }
4492
4493 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4494 as well as other irrevocable actions such as inline assembly. Mark all
4495 such blocks as irrevocable and decrement the number of calls to
4496 transactional clones. Return true if, for the transactional clone, the
4497 entire function is irrevocable. */
4498
4499 static bool
4500 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4501 {
4502 struct tm_ipa_cg_data *d;
4503 bitmap new_irr, old_irr;
4504 bool ret = false;
4505
4506 /* Builtin operators (operator new, and such). */
4507 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4508 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4509 return false;
4510
4511 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4512 calculate_dominance_info (CDI_DOMINATORS);
4513
4514 d = get_cg_data (&node, true);
4515 auto_vec<basic_block, 10> queue;
4516 new_irr = BITMAP_ALLOC (&tm_obstack);
4517
4518 /* Scan each tm region, propagating irrevocable status through the tree. */
4519 if (for_clone)
4520 {
4521 old_irr = d->irrevocable_blocks_clone;
4522 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4523 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4524 {
4525 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4526 new_irr,
4527 old_irr, NULL);
4528 ret = bitmap_bit_p (new_irr,
4529 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4530 }
4531 }
4532 else
4533 {
4534 struct tm_region *region;
4535
4536 old_irr = d->irrevocable_blocks_normal;
4537 for (region = d->all_tm_regions; region; region = region->next)
4538 {
4539 queue.quick_push (region->entry_block);
4540 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4541 region->exit_blocks))
4542 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4543 region->exit_blocks);
4544 }
4545 }
4546
4547 /* If we found any new irrevocable blocks, reduce the call count for
4548 transactional clones within the irrevocable blocks. Save the new
4549 set of irrevocable blocks for next time. */
4550 if (!bitmap_empty_p (new_irr))
4551 {
4552 bitmap_iterator bmi;
4553 unsigned i;
4554
4555 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4556 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4557 for_clone);
4558
4559 if (old_irr)
4560 {
4561 bitmap_ior_into (old_irr, new_irr);
4562 BITMAP_FREE (new_irr);
4563 }
4564 else if (for_clone)
4565 d->irrevocable_blocks_clone = new_irr;
4566 else
4567 d->irrevocable_blocks_normal = new_irr;
4568
4569 if (dump_file && new_irr)
4570 {
4571 const char *dname;
4572 bitmap_iterator bmi;
4573 unsigned i;
4574
4575 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4576 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4577 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4578 }
4579 }
4580 else
4581 BITMAP_FREE (new_irr);
4582
4583 pop_cfun ();
4584
4585 return ret;
4586 }
4587
4588 /* Return true if, for the transactional clone of NODE, any call
4589 may enter irrevocable mode. */
4590
4591 static bool
4592 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4593 {
4594 struct tm_ipa_cg_data *d;
4595 tree decl;
4596 unsigned flags;
4597
4598 d = get_cg_data (&node, true);
4599 decl = node->decl;
4600 flags = flags_from_decl_or_type (decl);
4601
4602 /* Handle some TM builtins. Ordinarily these aren't actually generated
4603 at this point, but handling these functions when written in by the
4604 user makes it easier to build unit tests. */
4605 if (flags & ECF_TM_BUILTIN)
4606 return false;
4607
4608 /* Filter out all functions that are marked. */
4609 if (flags & ECF_TM_PURE)
4610 return false;
4611 if (is_tm_safe (decl))
4612 return false;
4613 if (is_tm_irrevocable (decl))
4614 return true;
4615 if (is_tm_callable (decl))
4616 return true;
4617 if (find_tm_replacement_function (decl))
4618 return true;
4619
4620 /* If we aren't seeing the final version of the function we don't
4621 know what it will contain at runtime. */
4622 if (node->get_availability () < AVAIL_AVAILABLE)
4623 return true;
4624
4625 /* If the function must go irrevocable, then of course true. */
4626 if (d->is_irrevocable)
4627 return true;
4628
4629 /* If there are any blocks marked irrevocable, then the function
4630 as a whole may enter irrevocable. */
4631 if (d->irrevocable_blocks_clone)
4632 return true;
4633
4634 /* We may have previously marked this function as tm_may_enter_irr;
4635 see pass_diagnose_tm_blocks. */
4636 if (node->local.tm_may_enter_irr)
4637 return true;
4638
4639 /* Recurse on the main body for aliases. In general, this will
4640 result in one of the bits above being set so that we will not
4641 have to recurse next time. */
4642 if (node->alias)
4643 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
4644
4645 /* What remain are unmarked local functions with nothing that forces
4646 them to go irrevocable. */
4647 return false;
4648 }
4649
4650 /* Diagnose calls from transaction_safe functions to unmarked
4651 functions that are determined to not be safe. */
4652
4653 static void
4654 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4655 {
4656 struct cgraph_edge *e;
4657
4658 for (e = node->callees; e ; e = e->next_callee)
4659 if (!is_tm_callable (e->callee->decl)
4660 && e->callee->local.tm_may_enter_irr)
4661 error_at (gimple_location (e->call_stmt),
4662 "unsafe function call %qD within "
4663 "%<transaction_safe%> function", e->callee->decl);
4664 }
4665
4666 /* Diagnose calls from atomic transactions to unmarked functions
4667 that are determined to not be safe. */
4668
4669 static void
4670 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4671 struct tm_region *all_tm_regions)
4672 {
4673 struct tm_region *r;
4674
4675 for (r = all_tm_regions; r ; r = r->next)
4676 if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
4677 {
4678 /* Atomic transactions can be nested inside relaxed. */
4679 if (r->inner)
4680 ipa_tm_diagnose_transaction (node, r->inner);
4681 }
4682 else
4683 {
4684 vec<basic_block> bbs;
4685 gimple_stmt_iterator gsi;
4686 basic_block bb;
4687 size_t i;
4688
4689 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4690 r->irr_blocks, NULL, false);
4691
4692 for (i = 0; bbs.iterate (i, &bb); ++i)
4693 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4694 {
4695 gimple stmt = gsi_stmt (gsi);
4696 tree fndecl;
4697
4698 if (gimple_code (stmt) == GIMPLE_ASM)
4699 {
4700 error_at (gimple_location (stmt),
4701 "asm not allowed in atomic transaction");
4702 continue;
4703 }
4704
4705 if (!is_gimple_call (stmt))
4706 continue;
4707 fndecl = gimple_call_fndecl (stmt);
4708
4709 /* Indirect function calls have been diagnosed already. */
4710 if (!fndecl)
4711 continue;
4712
4713 /* Stop at the end of the transaction. */
4714 if (is_tm_ending_fndecl (fndecl))
4715 {
4716 if (bitmap_bit_p (r->exit_blocks, bb->index))
4717 break;
4718 continue;
4719 }
4720
4721 /* Marked functions have been diagnosed already. */
4722 if (is_tm_pure_call (stmt))
4723 continue;
4724 if (is_tm_callable (fndecl))
4725 continue;
4726
4727 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4728 error_at (gimple_location (stmt),
4729 "unsafe function call %qD within "
4730 "atomic transaction", fndecl);
4731 }
4732
4733 bbs.release ();
4734 }
4735 }
4736
4737 /* Return a transactionally mangled name for the DECL_ASSEMBLER_NAME
4738 identifier OLD_ASM_ID. The result is an IDENTIFIER_NODE obtained from
4739 get_identifier, so the caller need not free it. */
4740
4741 static tree
4742 tm_mangle (tree old_asm_id)
4743 {
4744 const char *old_asm_name;
4745 char *tm_name;
4746 void *alloc = NULL;
4747 struct demangle_component *dc;
4748 tree new_asm_id;
4749
4750 /* Determine if the symbol is already a valid C++ mangled name. Do this
4751 even for C, which might be interfacing with C++ code via appropriately
4752 ugly identifiers. */
4753 /* ??? We could probably do just as well checking for "_Z" and be done. */
4754 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4755 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4756
4757 if (dc == NULL)
4758 {
4759 char length[8];
4760
4761 do_unencoded:
4762 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4763 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4764 }
4765 else
4766 {
4767 old_asm_name += 2; /* Skip _Z */
4768
4769 switch (dc->type)
4770 {
4771 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4772 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4773 /* Don't play silly games, you! */
4774 goto do_unencoded;
4775
4776 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4777 /* I'd really like to know if we can ever be passed one of
4778 these from the C++ front end. The Logical Thing would
4779 seem that hidden-alias should be outer-most, so that we
4780 get hidden-alias of a transaction-clone and not vice-versa. */
4781 old_asm_name += 2;
4782 break;
4783
4784 default:
4785 break;
4786 }
4787
4788 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4789 }
4790 free (alloc);
4791
4792 new_asm_id = get_identifier (tm_name);
4793 free (tm_name);
4794
4795 return new_asm_id;
4796 }
4797
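/* Illustrative examples of the mangling above (not part of the original
   sources): an unmangled C identifier such as "frobnicate" (length 10)
   becomes "_ZGTt10frobnicate", while an already mangled C++ name such as
   "_Z3foov" has its "_Z" prefix replaced by "_ZGTt", giving "_ZGTt3foov".  */
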
4798 static inline void
4799 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4800 {
4801 node->mark_force_output ();
4802 node->analyzed = true;
4803 }
4804
4805 static inline void
4806 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4807 {
4808 node->forced_by_abi = true;
4809 node->analyzed = true;
4810 }
4811
4812 /* Callback data for ipa_tm_create_version_alias. */
4813 struct create_version_alias_info
4814 {
4815 struct cgraph_node *old_node;
4816 tree new_decl;
4817 };
4818
4819 /* A subroutine of ipa_tm_create_version, called via
4820 cgraph_for_node_and_aliases. Create new tm clones for each of
4821 the existing aliases. */
4822 static bool
4823 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4824 {
4825 struct create_version_alias_info *info
4826 = (struct create_version_alias_info *)data;
4827 tree old_decl, new_decl, tm_name;
4828 struct cgraph_node *new_node;
4829
4830 if (!node->cpp_implicit_alias)
4831 return false;
4832
4833 old_decl = node->decl;
4834 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4835 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4836 TREE_CODE (old_decl), tm_name,
4837 TREE_TYPE (old_decl));
4838
4839 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4840 SET_DECL_RTL (new_decl, NULL);
4841
4842 /* Based loosely on C++'s make_alias_for(). */
4843 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4844 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4845 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4846 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4847 DECL_EXTERNAL (new_decl) = 0;
4848 DECL_ARTIFICIAL (new_decl) = 1;
4849 TREE_ADDRESSABLE (new_decl) = 1;
4850 TREE_USED (new_decl) = 1;
4851 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4852
4853 /* Perform the same remapping to the comdat group. */
4854 if (DECL_ONE_ONLY (new_decl))
4855 varpool_node::get (new_decl)->set_comdat_group
4856 (tm_mangle (decl_comdat_group_id (old_decl)));
4857
4858 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4859 new_node->tm_clone = true;
4860 new_node->externally_visible = info->old_node->externally_visible;
4861 new_node->no_reorder = info->old_node->no_reorder;
4862 /* ?? Do not traverse aliases here. */
4863 get_cg_data (&node, false)->clone = new_node;
4864
4865 record_tm_clone_pair (old_decl, new_decl);
4866
4867 if (info->old_node->force_output
4868 || info->old_node->ref_list.first_referring ())
4869 ipa_tm_mark_force_output_node (new_node);
4870 if (info->old_node->forced_by_abi)
4871 ipa_tm_mark_forced_by_abi_node (new_node);
4872 return false;
4873 }
4874
4875 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4876 appropriate for the transactional clone. */
4877
4878 static void
4879 ipa_tm_create_version (struct cgraph_node *old_node)
4880 {
4881 tree new_decl, old_decl, tm_name;
4882 struct cgraph_node *new_node;
4883
4884 old_decl = old_node->decl;
4885 new_decl = copy_node (old_decl);
4886
4887 /* DECL_ASSEMBLER_NAME needs to be set before we call
4888 cgraph_copy_node_for_versioning below, because cgraph_node will
4889 fill the assembler_name_hash. */
4890 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4891 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4892 SET_DECL_RTL (new_decl, NULL);
4893 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4894
4895 /* Perform the same remapping to the comdat group. */
4896 if (DECL_ONE_ONLY (new_decl))
4897 varpool_node::get (new_decl)->set_comdat_group
4898 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
4899
4900 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
4901 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
4902 new_node->local.local = false;
4903 new_node->externally_visible = old_node->externally_visible;
4904 new_node->lowered = true;
4905 new_node->tm_clone = 1;
4906 get_cg_data (&old_node, true)->clone = new_node;
4907
4908 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
4909 {
4910 /* Remap extern inline to static inline. */
4911 /* ??? Is it worth trying to use make_decl_one_only? */
4912 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4913 {
4914 DECL_EXTERNAL (new_decl) = 0;
4915 TREE_PUBLIC (new_decl) = 0;
4916 DECL_WEAK (new_decl) = 0;
4917 }
4918
4919 tree_function_versioning (old_decl, new_decl,
4920 NULL, false, NULL,
4921 false, NULL, NULL);
4922 }
4923
4924 record_tm_clone_pair (old_decl, new_decl);
4925
4926 symtab->call_cgraph_insertion_hooks (new_node);
4927 if (old_node->force_output
4928 || old_node->ref_list.first_referring ())
4929 ipa_tm_mark_force_output_node (new_node);
4930 if (old_node->forced_by_abi)
4931 ipa_tm_mark_forced_by_abi_node (new_node);
4932
4933 /* Do the same thing, but for any aliases of the original node. */
4934 {
4935 struct create_version_alias_info data;
4936 data.old_node = old_node;
4937 data.new_decl = new_decl;
4938 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
4939 &data, true);
4940 }
4941 }
4942
4943 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
4944
4945 static void
4946 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4947 basic_block bb)
4948 {
4949 gimple_stmt_iterator gsi;
4950 gimple g;
4951
4952 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4953
4954 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4955 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4956
4957 split_block_after_labels (bb);
4958 gsi = gsi_after_labels (bb);
4959 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4960
4961 node->create_edge (cgraph_node::get_create
4962 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4963 g, 0,
4964 compute_call_stmt_bb_frequency (node->decl,
4965 gimple_bb (g)));
4966 }
4967
4968 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
4969
4970 static bool
4971 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
4972 struct tm_region *region,
4973 gimple_stmt_iterator *gsi, gimple stmt)
4974 {
4975 tree gettm_fn, ret, old_fn, callfn;
4976 gimple g, g2;
4977 bool safe;
4978
4979 old_fn = gimple_call_fn (stmt);
4980
4981 if (TREE_CODE (old_fn) == ADDR_EXPR)
4982 {
4983 tree fndecl = TREE_OPERAND (old_fn, 0);
4984 tree clone = get_tm_clone_pair (fndecl);
4985
4986 /* By transforming the call into a TM_GETTMCLONE, we are
4987 technically taking the address of the original function and
4988 its clone. Explain this so inlining will know this function
4989 is needed. */
4990 cgraph_node::get (fndecl)->mark_address_taken () ;
4991 if (clone)
4992 cgraph_node::get (clone)->mark_address_taken ();
4993 }
4994
4995 safe = is_tm_safe (TREE_TYPE (old_fn));
4996 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
4997 : BUILT_IN_TM_GETTMCLONE_IRR);
4998 ret = create_tmp_var (ptr_type_node, NULL);
4999
5000 if (!safe)
5001 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5002
5003 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5004 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5005 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5006
5007 g = gimple_build_call (gettm_fn, 1, old_fn);
5008 ret = make_ssa_name (ret, g);
5009 gimple_call_set_lhs (g, ret);
5010
5011 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5012
5013 node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
5014 compute_call_stmt_bb_frequency (node->decl,
5015 gimple_bb (g)));
5016
5017 /* Cast return value from tm_gettmclone* into appropriate function
5018 pointer. */
5019 callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
5020 g2 = gimple_build_assign (callfn,
5021 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5022 callfn = make_ssa_name (callfn, g2);
5023 gimple_assign_set_lhs (g2, callfn);
5024 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5025
5026 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5027 which we would have derived from the decl. Failure to save
5028 this bit means we might have to split the basic block. */
5029 if (gimple_call_nothrow_p (stmt))
5030 gimple_call_set_nothrow (stmt, true);
5031
5032 gimple_call_set_fn (stmt, callfn);
5033
5034 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5035 for a call statement. Fix it. */
5036 {
5037 tree lhs = gimple_call_lhs (stmt);
5038 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5039 if (lhs
5040 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5041 {
5042 tree temp;
5043
5044 temp = create_tmp_reg (rettype, 0);
5045 gimple_call_set_lhs (stmt, temp);
5046
5047 g2 = gimple_build_assign (lhs,
5048 fold_build1 (VIEW_CONVERT_EXPR,
5049 TREE_TYPE (lhs), temp));
5050 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5051 }
5052 }
5053
5054 update_stmt (stmt);
5055 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5056 if (e && e->indirect_info)
5057 e->indirect_info->polymorphic = false;
5058
5059 return true;
5060 }
5061
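/* Rough sketch of the rewrite performed above (not part of the original
   sources; TM_GETTMCLONE stands loosely for the builtin selected from
   BUILT_IN_TM_GETTMCLONE_SAFE/IRR): an indirect call inside a transaction

     fnptr (args);

   becomes, approximately,

     ret = TM_GETTMCLONE (fnptr);      // runtime lookup of the clone
     callfn = (fnptr_type) ret;        // cast back to the call type
     callfn (args);

   and, when the function type is not transaction_safe, the region is also
   marked GTMA_MAY_ENTER_IRREVOCABLE.  */
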
5062 /* Helper function for ipa_tm_transform_calls*. Given a call
5063 statement in GSI which resides inside transaction REGION, redirect
5064 the call to either its wrapper function, or its clone. */
5065
5066 static void
5067 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5068 struct tm_region *region,
5069 gimple_stmt_iterator *gsi,
5070 bool *need_ssa_rename_p)
5071 {
5072 gimple stmt = gsi_stmt (*gsi);
5073 struct cgraph_node *new_node;
5074 struct cgraph_edge *e = node->get_edge (stmt);
5075 tree fndecl = gimple_call_fndecl (stmt);
5076
5077 /* For indirect calls, pass the address through the runtime. */
5078 if (fndecl == NULL)
5079 {
5080 *need_ssa_rename_p |=
5081 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5082 return;
5083 }
5084
5085 /* Handle some TM builtins. Ordinarily these aren't actually generated
5086 at this point, but handling these functions when written in by the
5087 user makes it easier to build unit tests. */
5088 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5089 return;
5090
5091 /* Fixup recursive calls inside clones. */
5092 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5093 for recursion but not update the call statements themselves? */
5094 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5095 {
5096 gimple_call_set_fndecl (stmt, current_function_decl);
5097 return;
5098 }
5099
5100 /* If there is a replacement, use it. */
5101 fndecl = find_tm_replacement_function (fndecl);
5102 if (fndecl)
5103 {
5104 new_node = cgraph_node::get_create (fndecl);
5105
5106 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5107
5108 We can't do this earlier in record_tm_replacement because
5109 cgraph_remove_unreachable_nodes is called before we inject
5110 references to the node. Further, we can't do this in some
5111 nice central place in ipa_tm_execute because we don't have
5112 the exact list of wrapper functions that would be used.
5113 Marking more wrappers than necessary results in the creation
5114 of unnecessary cgraph_nodes, which can cause some of the
5115 other IPA passes to crash.
5116
5117 We do need to mark these nodes so that we get the proper
5118 result in expand_call_tm. */
5119 /* ??? This seems broken. How is it that we're marking the
5120 CALLEE as may_enter_irr? Surely we should be marking the
5121 CALLER. Also note that find_tm_replacement_function also
5122 contains mappings into the TM runtime, e.g. memcpy. These
5123 we know won't go irrevocable. */
5124 new_node->local.tm_may_enter_irr = 1;
5125 }
5126 else
5127 {
5128 struct tm_ipa_cg_data *d;
5129 struct cgraph_node *tnode = e->callee;
5130
5131 d = get_cg_data (&tnode, true);
5132 new_node = d->clone;
5133
5134 /* As we've already skipped pure calls and appropriate builtins,
5135 and we've already marked irrevocable blocks, if we can't come
5136 up with a static replacement, then ask the runtime. */
5137 if (new_node == NULL)
5138 {
5139 *need_ssa_rename_p |=
5140 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5141 return;
5142 }
5143
5144 fndecl = new_node->decl;
5145 }
5146
5147 e->redirect_callee (new_node);
5148 gimple_call_set_fndecl (stmt, fndecl);
5149 }
5150
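/* Illustrative summary of the redirection cases above (not part of the
   original sources): a call with a registered replacement (e.g. one set up
   via the transaction_wrap attribute) is redirected to the wrapper; failing
   that, a direct call to a function with a transactional clone goes to the
   clone; indirect calls and callees without a clone fall back to the runtime
   lookup inserted by ipa_tm_insert_gettmclone_call.  */
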
5151 /* Helper function for ipa_tm_transform_calls. For a given BB,
5152 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5153 redirect other calls to the generated transactional clone. */
5154
5155 static bool
5156 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5157 basic_block bb, bitmap irr_blocks)
5158 {
5159 gimple_stmt_iterator gsi;
5160 bool need_ssa_rename = false;
5161
5162 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5163 {
5164 ipa_tm_insert_irr_call (node, region, bb);
5165 return true;
5166 }
5167
5168 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5169 {
5170 gimple stmt = gsi_stmt (gsi);
5171
5172 if (!is_gimple_call (stmt))
5173 continue;
5174 if (is_tm_pure_call (stmt))
5175 continue;
5176
5177 /* Redirect edges to the appropriate replacement or clone. */
5178 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5179 }
5180
5181 return need_ssa_rename;
5182 }
5183
5184 /* Walk the CFG for REGION, beginning at BB. Install calls to
5185 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5186 the generated transactional clone. */
5187
5188 static bool
5189 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5190 basic_block bb, bitmap irr_blocks)
5191 {
5192 bool need_ssa_rename = false;
5193 edge e;
5194 edge_iterator ei;
5195 auto_vec<basic_block> queue;
5196 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5197
5198 queue.safe_push (bb);
5199 do
5200 {
5201 bb = queue.pop ();
5202
5203 need_ssa_rename |=
5204 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5205
5206 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5207 continue;
5208
5209 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5210 continue;
5211
5212 FOR_EACH_EDGE (e, ei, bb->succs)
5213 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5214 {
5215 bitmap_set_bit (visited_blocks, e->dest->index);
5216 queue.safe_push (e->dest);
5217 }
5218 }
5219 while (!queue.is_empty ());
5220
5221 BITMAP_FREE (visited_blocks);
5222
5223 return need_ssa_rename;
5224 }
5225
5226 /* Transform the calls within the TM regions within NODE. */
5227
5228 static void
5229 ipa_tm_transform_transaction (struct cgraph_node *node)
5230 {
5231 struct tm_ipa_cg_data *d;
5232 struct tm_region *region;
5233 bool need_ssa_rename = false;
5234
5235 d = get_cg_data (&node, true);
5236
5237 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5238 calculate_dominance_info (CDI_DOMINATORS);
5239
5240 for (region = d->all_tm_regions; region; region = region->next)
5241 {
5242 /* If we're sure to go irrevocable, don't transform anything. */
5243 if (d->irrevocable_blocks_normal
5244 && bitmap_bit_p (d->irrevocable_blocks_normal,
5245 region->entry_block->index))
5246 {
5247 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5248 | GTMA_MAY_ENTER_IRREVOCABLE
5249 | GTMA_HAS_NO_INSTRUMENTATION);
5250 continue;
5251 }
5252
5253 need_ssa_rename |=
5254 ipa_tm_transform_calls (node, region, region->entry_block,
5255 d->irrevocable_blocks_normal);
5256 }
5257
5258 if (need_ssa_rename)
5259 update_ssa (TODO_update_ssa_only_virtuals);
5260
5261 pop_cfun ();
5262 }
5263
5264 /* Transform the calls within the transactional clone of NODE. */
5265
5266 static void
5267 ipa_tm_transform_clone (struct cgraph_node *node)
5268 {
5269 struct tm_ipa_cg_data *d;
5270 bool need_ssa_rename;
5271
5272 d = get_cg_data (&node, true);
5273
5274 /* If this function makes no calls and has no irrevocable blocks,
5275 then there's nothing to do. */
5276 /* ??? Remove non-aborting top-level transactions. */
5277 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5278 return;
5279
5280 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5281 calculate_dominance_info (CDI_DOMINATORS);
5282
5283 need_ssa_rename =
5284 ipa_tm_transform_calls (d->clone, NULL,
5285 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5286 d->irrevocable_blocks_clone);
5287
5288 if (need_ssa_rename)
5289 update_ssa (TODO_update_ssa_only_virtuals);
5290
5291 pop_cfun ();
5292 }
5293
5294 /* Main entry point for the transactional memory IPA pass. */
5295
5296 static unsigned int
5297 ipa_tm_execute (void)
5298 {
5299 cgraph_node_queue tm_callees = cgraph_node_queue ();
5300 /* List of functions that will go irrevocable. */
5301 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5302
5303 struct cgraph_node *node;
5304 struct tm_ipa_cg_data *d;
5305 enum availability a;
5306 unsigned int i;
5307
5308 #ifdef ENABLE_CHECKING
5309 cgraph_node::verify_cgraph_nodes ();
5310 #endif
5311
5312 bitmap_obstack_initialize (&tm_obstack);
5313 initialize_original_copy_tables ();
5314
5315 /* For all local functions marked tm_callable, queue them. */
5316 FOR_EACH_DEFINED_FUNCTION (node)
5317 if (is_tm_callable (node->decl)
5318 && node->get_availability () >= AVAIL_INTERPOSABLE)
5319 {
5320 d = get_cg_data (&node, true);
5321 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5322 }
5323
5324 /* For all local reachable functions... */
5325 FOR_EACH_DEFINED_FUNCTION (node)
5326 if (node->lowered
5327 && node->get_availability () >= AVAIL_INTERPOSABLE)
5328 {
5329 /* ... marked tm_pure, record that fact for the runtime by
5330 indicating that the pure function is its own tm_callable.
5331 No need to do this if the function's address can't be taken. */
5332 if (is_tm_pure (node->decl))
5333 {
5334 if (!node->local.local)
5335 record_tm_clone_pair (node->decl, node->decl);
5336 continue;
5337 }
5338
5339 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5340 calculate_dominance_info (CDI_DOMINATORS);
5341
5342 tm_region_init (NULL);
5343 if (all_tm_regions)
5344 {
5345 d = get_cg_data (&node, true);
5346
5347 /* Scan for calls that are in each transaction, and
5348 generate the uninstrumented code path. */
5349 ipa_tm_scan_calls_transaction (d, &tm_callees);
5350
5351 /* Put it in the worklist so we can scan the function
5352 later (ipa_tm_scan_irr_function) and mark the
5353 irrevocable blocks. */
5354 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5355 d->want_irr_scan_normal = true;
5356 }
5357
5358 pop_cfun ();
5359 }
5360
5361 /* For every local function on the callee list, scan as if we will be
5362 creating a transactional clone, queueing all new functions we find
5363 along the way. */
5364 for (i = 0; i < tm_callees.length (); ++i)
5365 {
5366 node = tm_callees[i];
5367 a = node->get_availability ();
5368 d = get_cg_data (&node, true);
5369
5370 /* Put it in the worklist so we can scan the function later
5371 (ipa_tm_scan_irr_function) and mark the irrevocable
5372 blocks. */
5373 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5374
5375 /* Some callees cannot be arbitrarily cloned. These will always be
5376 irrevocable. Mark these now, so that we need not scan them. */
5377 if (is_tm_irrevocable (node->decl))
5378 ipa_tm_note_irrevocable (node, &irr_worklist);
5379 else if (a <= AVAIL_NOT_AVAILABLE
5380 && !is_tm_safe_or_pure (node->decl))
5381 ipa_tm_note_irrevocable (node, &irr_worklist);
5382 else if (a >= AVAIL_INTERPOSABLE)
5383 {
5384 if (!tree_versionable_function_p (node->decl))
5385 ipa_tm_note_irrevocable (node, &irr_worklist);
5386 else if (!d->is_irrevocable)
5387 {
5388 /* If this is an alias, make sure its base is queued as well.
5389 We need not scan the callees now, as the base will do. */
5390 if (node->alias)
5391 {
5392 node = cgraph_node::get (node->thunk.alias);
5393 d = get_cg_data (&node, true);
5394 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5395 continue;
5396 }
5397
5398 /* Add all nodes called by this function into
5399 tm_callees as well. */
5400 ipa_tm_scan_calls_clone (node, &tm_callees);
5401 }
5402 }
5403 }
5404
5405 /* Iterate scans until no more work to be done. Prefer not to use
5406 vec::pop because the worklist tends to follow a breadth-first
5407 search of the callgraph, which should allow convergence with a
5408 minimum number of scans. But we also don't want the worklist
5409 array to grow without bound, so we shift the array up periodically. */
5410 for (i = 0; i < irr_worklist.length (); ++i)
5411 {
5412 if (i > 256 && i == irr_worklist.length () / 8)
5413 {
5414 irr_worklist.block_remove (0, i);
5415 i = 0;
5416 }
5417
5418 node = irr_worklist[i];
5419 d = get_cg_data (&node, true);
5420 d->in_worklist = false;
5421
5422 if (d->want_irr_scan_normal)
5423 {
5424 d->want_irr_scan_normal = false;
5425 ipa_tm_scan_irr_function (node, false);
5426 }
5427 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5428 ipa_tm_note_irrevocable (node, &irr_worklist);
5429 }
5430
5431 /* For every function on the callee list, collect the tm_may_enter_irr
5432 bit on the node. */
5433 irr_worklist.truncate (0);
5434 for (i = 0; i < tm_callees.length (); ++i)
5435 {
5436 node = tm_callees[i];
5437 if (ipa_tm_mayenterirr_function (node))
5438 {
5439 d = get_cg_data (&node, true);
5440 gcc_assert (d->in_worklist == false);
5441 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5442 }
5443 }
5444
5445 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5446 for (i = 0; i < irr_worklist.length (); ++i)
5447 {
5448 struct cgraph_node *caller;
5449 struct cgraph_edge *e;
5450 struct ipa_ref *ref;
5451
5452 if (i > 256 && i == irr_worklist.length () / 8)
5453 {
5454 irr_worklist.block_remove (0, i);
5455 i = 0;
5456 }
5457
5458 node = irr_worklist[i];
5459 d = get_cg_data (&node, true);
5460 d->in_worklist = false;
5461 node->local.tm_may_enter_irr = true;
5462
5463 /* Propagate back to normal callers. */
5464 for (e = node->callers; e ; e = e->next_caller)
5465 {
5466 caller = e->caller;
5467 if (!is_tm_safe_or_pure (caller->decl)
5468 && !caller->local.tm_may_enter_irr)
5469 {
5470 d = get_cg_data (&caller, true);
5471 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5472 }
5473 }
5474
5475 /* Propagate back to referring aliases as well. */
5476 FOR_EACH_ALIAS (node, ref)
5477 {
5478 caller = dyn_cast<cgraph_node *> (ref->referring);
5479 if (!caller->local.tm_may_enter_irr)
5480 {
5481 /* ?? Do not traverse aliases here. */
5482 d = get_cg_data (&caller, false);
5483 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5484 }
5485 }
5486 }
5487
5488 /* Now validate all tm_safe functions, and all atomic regions in
5489 other functions. */
5490 FOR_EACH_DEFINED_FUNCTION (node)
5491 if (node->lowered
5492 && node->get_availability () >= AVAIL_INTERPOSABLE)
5493 {
5494 d = get_cg_data (&node, true);
5495 if (is_tm_safe (node->decl))
5496 ipa_tm_diagnose_tm_safe (node);
5497 else if (d->all_tm_regions)
5498 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5499 }
5500
5501 /* Create clones. Do those that are not irrevocable and have a
5502 positive call count. Do those publicly visible functions that
5503 the user directed us to clone. */
5504 for (i = 0; i < tm_callees.length (); ++i)
5505 {
5506 bool doit = false;
5507
5508 node = tm_callees[i];
5509 if (node->cpp_implicit_alias)
5510 continue;
5511
5512 a = node->get_availability ();
5513 d = get_cg_data (&node, true);
5514
5515 if (a <= AVAIL_NOT_AVAILABLE)
5516 doit = is_tm_callable (node->decl);
5517 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5518 doit = true;
5519 else if (!d->is_irrevocable
5520 && d->tm_callers_normal + d->tm_callers_clone > 0)
5521 doit = true;
5522
5523 if (doit)
5524 ipa_tm_create_version (node);
5525 }
5526
5527 /* Redirect calls to the new clones, and insert irrevocable marks. */
5528 for (i = 0; i < tm_callees.length (); ++i)
5529 {
5530 node = tm_callees[i];
5531 if (node->analyzed)
5532 {
5533 d = get_cg_data (&node, true);
5534 if (d->clone)
5535 ipa_tm_transform_clone (node);
5536 }
5537 }
5538 FOR_EACH_DEFINED_FUNCTION (node)
5539 if (node->lowered
5540 && node->get_availability () >= AVAIL_INTERPOSABLE)
5541 {
5542 d = get_cg_data (&node, true);
5543 if (d->all_tm_regions)
5544 ipa_tm_transform_transaction (node);
5545 }
5546
5547 /* Free and clear all data structures. */
5548 tm_callees.release ();
5549 irr_worklist.release ();
5550 bitmap_obstack_release (&tm_obstack);
5551 free_original_copy_tables ();
5552
5553 FOR_EACH_FUNCTION (node)
5554 node->aux = NULL;
5555
5556 #ifdef ENABLE_CHECKING
5557 cgraph_node::verify_cgraph_nodes ();
5558 #endif
5559
5560 return 0;
5561 }
5562
5563 namespace {
5564
5565 const pass_data pass_data_ipa_tm =
5566 {
5567 SIMPLE_IPA_PASS, /* type */
5568 "tmipa", /* name */
5569 OPTGROUP_NONE, /* optinfo_flags */
5570 TV_TRANS_MEM, /* tv_id */
5571 ( PROP_ssa | PROP_cfg ), /* properties_required */
5572 0, /* properties_provided */
5573 0, /* properties_destroyed */
5574 0, /* todo_flags_start */
5575 0, /* todo_flags_finish */
5576 };
5577
5578 class pass_ipa_tm : public simple_ipa_opt_pass
5579 {
5580 public:
5581 pass_ipa_tm (gcc::context *ctxt)
5582 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5583 {}
5584
5585 /* opt_pass methods: */
5586 virtual bool gate (function *) { return flag_tm; }
5587 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5588
5589 }; // class pass_ipa_tm
5590
5591 } // anon namespace
5592
5593 simple_ipa_opt_pass *
5594 make_pass_ipa_tm (gcc::context *ctxt)
5595 {
5596 return new pass_ipa_tm (ctxt);
5597 }
5598
5599 #include "gt-trans-mem.h"