/* Passes for transactional memory support.
   Copyright (C) 2008-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "alias.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "ssa.h"
#include "options.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "calls.h"
#include "emit-rtl.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "gimple-pretty-print.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"


#define A_RUNINSTRUMENTEDCODE		0x0001
#define A_RUNUNINSTRUMENTEDCODE		0x0002
#define A_SAVELIVEVARIABLES		0x0004
#define A_RESTORELIVEVARIABLES		0x0008
#define A_ABORTTRANSACTION		0x0010

#define AR_USERABORT			0x0001
#define AR_USERRETRY			0x0002
#define AR_TMCONFLICT			0x0004
#define AR_EXCEPTIONBLOCKABORT		0x0008
#define AR_OUTERABORT			0x0010

#define MODE_SERIALIRREVOCABLE		0x0000


/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

     __transaction {
       local++;
       if (++global == 10)
         __tm_abort;
     }

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

     GIMPLE_TRANSACTION [label=NULL] {
       try {
         local = local + 1;
         t0 = global;
         t1 = t0 + 1;
         global = t1;
         if (t1 == 10)
           __builtin___tm_abort ();
       } finally {
         __builtin___tm_commit ();
       }
     }

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

     GIMPLE_TRANSACTION [label=over]
     local = local + 1;
     t0 = global;
     t1 = t0 + 1;
     global = t1;
     if (t1 == 10)
       __builtin___tm_abort ();
     __builtin___tm_commit ();
     over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.
   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

     x = __builtin___tm_start (MAY_ABORT);
     eh_label:
     if (x & abort_transaction)
       goto over;
     local = local + 1;
     t0 = __builtin___tm_load (global);
     t1 = t0 + 1;
     __builtin___tm_store (&global, t1);
     if (t1 == 10)
       __builtin___tm_abort ();
     __builtin___tm_commit ();
     over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

\f
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
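
/* For illustration: given a declaration such as
     void f (void) __attribute__((transaction_safe));
   get_attrs_for (f) returns the attribute list of f's function type;
   it likewise peeks through a pointer-to-function type, which is how
   indirect calls through "transaction_safe" function pointers are
   recognized.  */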

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const or tm_pure.  */

static bool
is_tm_pure_call (gimple *call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
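
/* For illustration: a plain __tm_abort inside a transaction becomes
   the equivalent of
     __builtin___tm_abort (AR_USERABORT);
   while an abort of an outer transaction passes
     AR_USERABORT | AR_OUTERABORT
   instead, so the runtime knows to unwind to the outermost
   transaction.  */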
\f
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }
  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}
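
/* For illustration: this map is what implements source like
     void tmfoo (void);
     void foo (void) __attribute__((transaction_wrap (tmfoo)));
   after which calls to foo inside transactions are redirected to
   tmfoo; the attribute is parsed in the front ends, which then call
   record_tm_replacement (foo, tmfoo).  */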

/* Return a TM-aware replacement function for FNDECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we only have these few defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They'd better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
\f
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple *stmt;
};

/* Return true if T is a volatile variable of some kind.  */

static bool
volatile_var_p (tree t)
{
  return (SSA_VAR_P (t)
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (volatile_var_p (*tp)
      && d->block_flags & DIAG_TM_SAFE
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
		"invalid volatile use of %qD inside transaction",
		*tp);
    }

  return NULL_TREE;
}
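
/* For illustration: the check above is what rejects code such as
     volatile int v;
     __transaction_atomic { v = 1; }
   with "invalid volatile use of 'v' inside transaction", since a
   volatile access cannot be given transactional semantics.  */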

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline
		   it.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
\f
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (trxn & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, to shadow all of the writes
   to a given location (thus reducing the total number of logging
   calls), but not so high as to be called on a path that does not
   perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple *> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence,
     we place the SSA temp generated here.  */
  tree save_var;
};


/* Log entry hashtable helpers.  */

struct log_entry_hasher : pointer_hash <tm_log_entry>
{
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
{
  mem_non_local = 0,
  mem_thread_local,
  mem_transaction_local,
  mem_max
};

struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
};

/* Hashtable helpers.  */

struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
  static inline hashval_t hash (const tm_new_mem_map *);
  static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc.).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction-invariant memory access for the
   TM region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction's region, if
   known, otherwise NULL.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple *oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
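
/* For illustration: a logged store to a 4-byte scalar is instrumented
   as the equivalent of
     _ITM_LU4 (&x);
   via BUILT_IN_TM_LOG_4, while a type of unusual size falls back to
   the generic BUILT_IN_TM_LOG form, which also passes the size, i.e.
     _ITM_LB (&agg, sizeof (agg));  */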

/* Go through the log and instrument the addresses that must be
   instrumented with the logging functions.  Leave the save/restore
   addresses for later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry, hi)
    {
      size_t i;
      gimple *stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

\f
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc.).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple *stmt = NULL;
  enum tree_code code;
  tm_new_mem_map **slot;
  tm_new_mem_map elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
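
/* For illustration:
     __transaction_atomic {
       int *p = malloc (sizeof (int));
       *p = 1;
     }
   Here *p is mem_transaction_local and needs no barrier or logging; a
   restart re-executes the malloc.  If the malloc call instead
   dominated the transaction entry, *p would be mem_thread_local and
   merely need to be logged, and any pointer that may alias global
   memory yields mem_non_local.  */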

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides;
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase: g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
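
/* For illustration: within a transaction,
     extern int g;
     int local;
     g = 1;	// barrier: global and not read-only
     local = 1;	// no barrier: stack memory, logged for restart
   assuming LOCAL does not need to live in memory; statics are treated
   like globals by is_global_var.  */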

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple *g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple *stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

namespace {

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

} // anon namespace

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}
\f
/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

public:

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple *transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};

/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;


/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb,
		  gtransaction *stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;
  region->original_transaction_was_outer = false;
  region->tm_state = NULL;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple *g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
	{
	  tree fn = gimple_call_fndecl (g);
	  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
	    {
	      if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
		   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
		  && region->exit_blocks)
		{
		  bitmap_set_bit (region->exit_blocks, bb->index);
		  region = region->outer;
		  break;
		}
	      if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
		bitmap_set_bit (region->irr_blocks, bb->index);
	    }
	}
    }
  return region;
}
1956
1957 /* Collect all of the transaction regions within the current function
1958 and record them in ALL_TM_REGIONS. The REGION parameter may specify
1959 an "outermost" region for use by tm clones. */
1960
1961 static void
1962 tm_region_init (struct tm_region *region)
1963 {
1964 gimple *g;
1965 edge_iterator ei;
1966 edge e;
1967 basic_block bb;
1968 auto_vec<basic_block> queue;
1969 bitmap visited_blocks = BITMAP_ALLOC (NULL);
1970 struct tm_region *old_region;
1971 auto_vec<tm_region *> bb_regions;
1972
1973 all_tm_regions = region;
1974 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
1975
1976 /* We could store this information in bb->aux, but we may get called
1977 through get_all_tm_blocks() from another pass that may already be
1978 using bb->aux. */
1979 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
1980
1981 queue.safe_push (bb);
1982 bb_regions[bb->index] = region;
1983 do
1984 {
1985 bb = queue.pop ();
1986 region = bb_regions[bb->index];
1987 bb_regions[bb->index] = NULL;
1988
1989 /* Record exit and irrevocable blocks. */
1990 region = tm_region_init_1 (region, bb);
1991
1992 /* Check whether the last statement in the block begins a new region. */
1993 g = last_stmt (bb);
1994 old_region = region;
1995 if (g)
1996 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
1997 region = tm_region_init_0 (region, bb, trans_stmt);
1998
1999 /* Process subsequent blocks. */
2000 FOR_EACH_EDGE (e, ei, bb->succs)
2001 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2002 {
2003 bitmap_set_bit (visited_blocks, e->dest->index);
2004 queue.safe_push (e->dest);
2005
2006 /* If the current block started a new region, make sure that only
2007 the entry block of the new region is associated with this region.
2008 Other successors are still part of the old region. */
2009 if (old_region != region && e->dest != region->entry_block)
2010 bb_regions[e->dest->index] = old_region;
2011 else
2012 bb_regions[e->dest->index] = region;
2013 }
2014 }
2015 while (!queue.is_empty ());
2016 BITMAP_FREE (visited_blocks);
2017 }
2018
2019 /* The "gate" function for all transactional memory expansion and optimization
2020 passes. We collect region information for each top-level transaction, and
2021 if we don't find any, we skip all of the TM passes. Each region will have
2022 all of its exit blocks recorded, along with the originating statement. */
2023
2024 static bool
2025 gate_tm_init (void)
2026 {
2027 if (!flag_tm)
2028 return false;
2029
2030 calculate_dominance_info (CDI_DOMINATORS);
2031 bitmap_obstack_initialize (&tm_obstack);
2032
2033 /* If the function is a TM_CLONE, then the entire function is the region. */
2034 if (decl_is_tm_clone (current_function_decl))
2035 {
2036 struct tm_region *region = (struct tm_region *)
2037 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2038 memset (region, 0, sizeof (*region));
2039 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2040 /* Even though the entire function is the region and there are no
2041 exit blocks to record, we may still need to record
2042 irrevocable blocks. */
2043 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2044
2045 tm_region_init (region);
2046 }
2047 else
2048 {
2049 tm_region_init (NULL);
2050
2051 /* If we didn't find any regions, clean up and skip the whole tree
2052 of tm-related optimizations. */
2053 if (all_tm_regions == NULL)
2054 {
2055 bitmap_obstack_release (&tm_obstack);
2056 return false;
2057 }
2058 }
2059
2060 return true;
2061 }
2062
2063 namespace {
2064
2065 const pass_data pass_data_tm_init =
2066 {
2067 GIMPLE_PASS, /* type */
2068 "*tminit", /* name */
2069 OPTGROUP_NONE, /* optinfo_flags */
2070 TV_TRANS_MEM, /* tv_id */
2071 ( PROP_ssa | PROP_cfg ), /* properties_required */
2072 0, /* properties_provided */
2073 0, /* properties_destroyed */
2074 0, /* todo_flags_start */
2075 0, /* todo_flags_finish */
2076 };
2077
2078 class pass_tm_init : public gimple_opt_pass
2079 {
2080 public:
2081 pass_tm_init (gcc::context *ctxt)
2082 : gimple_opt_pass (pass_data_tm_init, ctxt)
2083 {}
2084
2085 /* opt_pass methods: */
2086 virtual bool gate (function *) { return gate_tm_init (); }
2087
2088 }; // class pass_tm_init
2089
2090 } // anon namespace
2091
2092 gimple_opt_pass *
2093 make_pass_tm_init (gcc::context *ctxt)
2094 {
2095 return new pass_tm_init (ctxt);
2096 }
2097 \f
2098 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2099 REGION. */
2100
2101 static inline void
2102 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2103 {
2104 if (region && region->transaction_stmt)
2105 {
2106 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2107 flags |= gimple_transaction_subcode (transaction_stmt);
2108 gimple_transaction_set_subcode (transaction_stmt, flags);
2109 }
2110 }
2111
2112 /* Construct a memory load in a transactional context. Return the
2113 gimple statement performing the load, or NULL if there is no
2114 TM_LOAD builtin of the appropriate size to do the load.
2115
2116 LOC is the location to use for the new statement(s). */
2117
2118 static gcall *
2119 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2120 {
2121 enum built_in_function code = END_BUILTINS;
2122 tree t, type = TREE_TYPE (rhs), decl;
2123 gcall *gcall;
2124
2125 if (type == float_type_node)
2126 code = BUILT_IN_TM_LOAD_FLOAT;
2127 else if (type == double_type_node)
2128 code = BUILT_IN_TM_LOAD_DOUBLE;
2129 else if (type == long_double_type_node)
2130 code = BUILT_IN_TM_LOAD_LDOUBLE;
2131 else if (TYPE_SIZE_UNIT (type) != NULL
2132 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2133 {
2134 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2135 {
2136 case 1:
2137 code = BUILT_IN_TM_LOAD_1;
2138 break;
2139 case 2:
2140 code = BUILT_IN_TM_LOAD_2;
2141 break;
2142 case 4:
2143 code = BUILT_IN_TM_LOAD_4;
2144 break;
2145 case 8:
2146 code = BUILT_IN_TM_LOAD_8;
2147 break;
2148 }
2149 }
2150
2151 if (code == END_BUILTINS)
2152 {
2153 decl = targetm.vectorize.builtin_tm_load (type);
2154 if (!decl)
2155 return NULL;
2156 }
2157 else
2158 decl = builtin_decl_explicit (code);
2159
2160 t = gimplify_addr (gsi, rhs);
2161 gcall = gimple_build_call (decl, 1, t);
2162 gimple_set_location (gcall, loc);
2163
2164 t = TREE_TYPE (TREE_TYPE (decl));
2165 if (useless_type_conversion_p (type, t))
2166 {
2167 gimple_call_set_lhs (gcall, lhs);
2168 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2169 }
2170 else
2171 {
2172 gimple *g;
2173 tree temp;
2174
2175 temp = create_tmp_reg (t);
2176 gimple_call_set_lhs (gcall, temp);
2177 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2178
2179 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2180 g = gimple_build_assign (lhs, t);
2181 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2182 }
2183
2184 return gcall;
2185 }
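
/* An illustrative example of the transformation above, using made-up
   variable names: inside a transaction, a 4-byte load such as

     x = global;

   becomes, roughly,

     x = _ITM_RU4 (&global);

   assuming BUILT_IN_TM_LOAD_4 maps to the libitm entry point _ITM_RU4.
   If the builtin's return type does not match the loaded type, the
   result is passed through a temporary and a VIEW_CONVERT_EXPR, as
   coded above.  */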
2186
2187
2188 /* Similarly for storing TYPE in a transactional context. */
2189
2190 static gcall *
2191 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2192 {
2193 enum built_in_function code = END_BUILTINS;
2194 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2195 gcall *gcall;
2196
2197 if (type == float_type_node)
2198 code = BUILT_IN_TM_STORE_FLOAT;
2199 else if (type == double_type_node)
2200 code = BUILT_IN_TM_STORE_DOUBLE;
2201 else if (type == long_double_type_node)
2202 code = BUILT_IN_TM_STORE_LDOUBLE;
2203 else if (TYPE_SIZE_UNIT (type) != NULL
2204 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
2205 {
2206 switch (tree_to_uhwi (TYPE_SIZE_UNIT (type)))
2207 {
2208 case 1:
2209 code = BUILT_IN_TM_STORE_1;
2210 break;
2211 case 2:
2212 code = BUILT_IN_TM_STORE_2;
2213 break;
2214 case 4:
2215 code = BUILT_IN_TM_STORE_4;
2216 break;
2217 case 8:
2218 code = BUILT_IN_TM_STORE_8;
2219 break;
2220 }
2221 }
2222
2223 if (code == END_BUILTINS)
2224 {
2225 fn = targetm.vectorize.builtin_tm_store (type);
2226 if (!fn)
2227 return NULL;
2228 }
2229 else
2230 fn = builtin_decl_explicit (code);
2231
2232 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2233
2234 if (TREE_CODE (rhs) == CONSTRUCTOR)
2235 {
2236 /* Handle the easy initialization to zero. */
2237 if (!CONSTRUCTOR_ELTS (rhs))
2238 rhs = build_int_cst (simple_type, 0);
2239 else
2240 {
2241 /* ...otherwise punt to the caller and probably use
2242 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2243 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2244 valid gimple. */
2245 return NULL;
2246 }
2247 }
2248 else if (!useless_type_conversion_p (simple_type, type))
2249 {
2250 gimple *g;
2251 tree temp;
2252
2253 temp = create_tmp_reg (simple_type);
2254 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2255 g = gimple_build_assign (temp, t);
2256 gimple_set_location (g, loc);
2257 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2258
2259 rhs = temp;
2260 }
2261
2262 t = gimplify_addr (gsi, lhs);
2263 gcall = gimple_build_call (fn, 2, t, rhs);
2264 gimple_set_location (gcall, loc);
2265 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2266
2267 return gcall;
2268 }
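
/* An illustrative example, using made-up names: inside a transaction,
   a 4-byte store such as

     global = x;

   becomes, roughly,

     _ITM_WU4 (&global, x);

   assuming BUILT_IN_TM_STORE_4 maps to the libitm entry point _ITM_WU4.
   A zero-initializing CONSTRUCTOR on the RHS is first replaced with the
   integer constant 0 of the builtin's argument type, as coded above.  */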
2269
2270
2271 /* Expand an assignment statement into transactional builtins. */
2272
2273 static void
2274 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2275 {
2276 gimple *stmt = gsi_stmt (*gsi);
2277 location_t loc = gimple_location (stmt);
2278 tree lhs = gimple_assign_lhs (stmt);
2279 tree rhs = gimple_assign_rhs1 (stmt);
2280 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2281 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2282 gimple *gcall = NULL;
2283
2284 if (!load_p && !store_p)
2285 {
2286 /* Add thread-private addresses to the log if applicable. */
2287 requires_barrier (region->entry_block, lhs, stmt);
2288 gsi_next (gsi);
2289 return;
2290 }
2291
2292 // Remove original load/store statement.
2293 gsi_remove (gsi, true);
2294
2295 if (load_p && !store_p)
2296 {
2297 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2298 gcall = build_tm_load (loc, lhs, rhs, gsi);
2299 }
2300 else if (store_p && !load_p)
2301 {
2302 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2303 gcall = build_tm_store (loc, lhs, rhs, gsi);
2304 }
2305 if (!gcall)
2306 {
2307 tree lhs_addr, rhs_addr, tmp;
2308
2309 if (load_p)
2310 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2311 if (store_p)
2312 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2313
2314 /* ??? Figure out if there's any possible overlap between the LHS
2315 and the RHS and if not, use MEMCPY. */
2316
2317 if (load_p && is_gimple_reg (lhs))
2318 {
2319 tmp = create_tmp_var (TREE_TYPE (lhs));
2320 lhs_addr = build_fold_addr_expr (tmp);
2321 }
2322 else
2323 {
2324 tmp = NULL_TREE;
2325 lhs_addr = gimplify_addr (gsi, lhs);
2326 }
2327 rhs_addr = gimplify_addr (gsi, rhs);
2328 gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
2329 3, lhs_addr, rhs_addr,
2330 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2331 gimple_set_location (gcall, loc);
2332 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2333
2334 if (tmp)
2335 {
2336 gcall = gimple_build_assign (lhs, tmp);
2337 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2338 }
2339 }
2340
2341 /* Now that we have the load/store in its instrumented form, add
2342 thread private addresses to the log if applicable. */
2343 if (!store_p)
2344 requires_barrier (region->entry_block, lhs, gcall);
2345
2346 // The calls to build_tm_{store,load} above inserted the instrumented
2347 // call into the stream.
2348 // gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2349 }
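
/* An illustrative example of the memmove fallback above, using made-up
   names: a block copy such as

     big_a = big_b;

   for which no sized TM_LOAD/TM_STORE builtin exists becomes, roughly,

     _ITM_memmoveRtWt (&big_a, &big_b, sizeof (big_a));

   assuming BUILT_IN_TM_MEMMOVE maps to the libitm entry point
   _ITM_memmoveRtWt.  */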
2350
2351
2352 /* Expand a call statement as appropriate for a transaction. That is,
2353 either verify that the call does not affect the transaction, or
2354 redirect the call to a clone that handles transactions, or change
2355 the transaction state to IRREVOCABLE. Return true if the call is
2356 one of the builtins that end a transaction. */
2357
2358 static bool
2359 expand_call_tm (struct tm_region *region,
2360 gimple_stmt_iterator *gsi)
2361 {
2362 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
2363 tree lhs = gimple_call_lhs (stmt);
2364 tree fn_decl;
2365 struct cgraph_node *node;
2366 bool retval = false;
2367
2368 fn_decl = gimple_call_fndecl (stmt);
2369
2370 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2371 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2372 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2373 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2374 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2375
2376 if (is_tm_pure_call (stmt))
2377 return false;
2378
2379 if (fn_decl)
2380 retval = is_tm_ending_fndecl (fn_decl);
2381 if (!retval)
2382 {
2383 /* Assume all non-const/pure calls write to memory, except
2384 transaction-ending builtins. */
2385 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2386 }
2387
2388 /* For indirect calls, we already generated a call into the runtime. */
2389 if (!fn_decl)
2390 {
2391 tree fn = gimple_call_fn (stmt);
2392
2393 /* We are guaranteed never to go irrevocable on a safe or pure
2394 call, and the pure call was handled above. */
2395 if (is_tm_safe (fn))
2396 return false;
2397 else
2398 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2399
2400 return false;
2401 }
2402
2403 node = cgraph_node::get (fn_decl);
2404 /* All calls should have a cgraph node here. */
2405 if (!node)
2406 {
2407 /* We can have a nodeless call here if some pass after IPA-tm
2408 added uninstrumented calls. For example, loop distribution
2409 can transform certain loop constructs into __builtin_mem*
2410 calls. In this case, see if we have a suitable TM
2411 replacement and fill in the gaps. */
2412 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2413 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2414 gcc_assert (code == BUILT_IN_MEMCPY
2415 || code == BUILT_IN_MEMMOVE
2416 || code == BUILT_IN_MEMSET);
2417
2418 tree repl = find_tm_replacement_function (fn_decl);
2419 if (repl)
2420 {
2421 gimple_call_set_fndecl (stmt, repl);
2422 update_stmt (stmt);
2423 node = cgraph_node::create (repl);
2424 node->local.tm_may_enter_irr = false;
2425 return expand_call_tm (region, gsi);
2426 }
2427 gcc_unreachable ();
2428 }
2429 if (node->local.tm_may_enter_irr)
2430 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2431
2432 if (is_tm_abort (fn_decl))
2433 {
2434 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2435 return true;
2436 }
2437
2438 /* Instrument the store if needed.
2439
2440 If the assignment happens inside the function call (return slot
2441 optimization), there is no instrumentation to be done, since
2442 the callee should have done the right thing. */
2443 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2444 && !gimple_call_return_slot_opt_p (stmt))
2445 {
2446 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
2447 location_t loc = gimple_location (stmt);
2448 edge fallthru_edge = NULL;
2449 gassign *assign_stmt;
2450
2451 /* Remember if the call was going to throw. */
2452 if (stmt_can_throw_internal (stmt))
2453 {
2454 edge_iterator ei;
2455 edge e;
2456 basic_block bb = gimple_bb (stmt);
2457
2458 FOR_EACH_EDGE (e, ei, bb->succs)
2459 if (e->flags & EDGE_FALLTHRU)
2460 {
2461 fallthru_edge = e;
2462 break;
2463 }
2464 }
2465
2466 gimple_call_set_lhs (stmt, tmp);
2467 update_stmt (stmt);
2468 assign_stmt = gimple_build_assign (lhs, tmp);
2469 gimple_set_location (assign_stmt, loc);
2470
2471 /* We cannot throw in the middle of a BB. If the call was going
2472 to throw, place the instrumentation on the fallthru edge, so
2473 the call remains the last statement in the block. */
2474 if (fallthru_edge)
2475 {
2476 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
2477 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2478 expand_assign_tm (region, &fallthru_gsi);
2479 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2480 pending_edge_inserts_p = true;
2481 }
2482 else
2483 {
2484 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
2485 expand_assign_tm (region, gsi);
2486 }
2487
2488 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2489 }
2490
2491 return retval;
2492 }
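
/* An illustrative example of the LHS instrumentation above, using
   made-up names: when the result of a call must be stored through a
   barrier,

     x = foo ();

   is rewritten, roughly, as

     tmp = foo ();
     x = tmp;

   and the new assignment is then expanded by expand_assign_tm into a
   TM store.  If the call could throw, the assignment is placed on the
   fallthru edge instead, so the call remains the last statement in its
   block.  */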
2493
2494
2495 /* Expand all statements in BB as appropriate for being inside
2496 a transaction. */
2497
2498 static void
2499 expand_block_tm (struct tm_region *region, basic_block bb)
2500 {
2501 gimple_stmt_iterator gsi;
2502
2503 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2504 {
2505 gimple *stmt = gsi_stmt (gsi);
2506 switch (gimple_code (stmt))
2507 {
2508 case GIMPLE_ASSIGN:
2509 /* Only memory reads/writes need to be instrumented. */
2510 if (gimple_assign_single_p (stmt)
2511 && !gimple_clobber_p (stmt))
2512 {
2513 expand_assign_tm (region, &gsi);
2514 continue;
2515 }
2516 break;
2517
2518 case GIMPLE_CALL:
2519 if (expand_call_tm (region, &gsi))
2520 return;
2521 break;
2522
2523 case GIMPLE_ASM:
2524 gcc_unreachable ();
2525
2526 default:
2527 break;
2528 }
2529 if (!gsi_end_p (gsi))
2530 gsi_next (&gsi);
2531 }
2532 }
2533
2534 /* Return the list of basic blocks in the region rooted at ENTRY_BLOCK.
2535
2536 STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
2537 blocks following a TM_IRREVOCABLE call.
2538
2539 INCLUDE_UNINSTRUMENTED_P is true if we should include the
2540 uninstrumented code path blocks in the list of basic blocks
2541 returned, false otherwise. */
2542
2543 static vec<basic_block>
2544 get_tm_region_blocks (basic_block entry_block,
2545 bitmap exit_blocks,
2546 bitmap irr_blocks,
2547 bitmap all_region_blocks,
2548 bool stop_at_irrevocable_p,
2549 bool include_uninstrumented_p = true)
2550 {
2551 vec<basic_block> bbs = vNULL;
2552 unsigned i;
2553 edge e;
2554 edge_iterator ei;
2555 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2556
2557 i = 0;
2558 bbs.safe_push (entry_block);
2559 bitmap_set_bit (visited_blocks, entry_block->index);
2560
2561 do
2562 {
2563 basic_block bb = bbs[i++];
2564
2565 if (exit_blocks
2566 && bitmap_bit_p (exit_blocks, bb->index))
2567 continue;
2568
2569 if (stop_at_irrevocable_p
2570 && irr_blocks
2571 && bitmap_bit_p (irr_blocks, bb->index))
2572 continue;
2573
2574 FOR_EACH_EDGE (e, ei, bb->succs)
2575 if ((include_uninstrumented_p
2576 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2577 && !bitmap_bit_p (visited_blocks, e->dest->index))
2578 {
2579 bitmap_set_bit (visited_blocks, e->dest->index);
2580 bbs.safe_push (e->dest);
2581 }
2582 }
2583 while (i < bbs.length ());
2584
2585 if (all_region_blocks)
2586 bitmap_ior_into (all_region_blocks, visited_blocks);
2587
2588 BITMAP_FREE (visited_blocks);
2589 return bbs;
2590 }
2591
2592 // Callback data for collect_bb2reg.
2593 struct bb2reg_stuff
2594 {
2595 vec<tm_region *> *bb2reg;
2596 bool include_uninstrumented_p;
2597 };
2598
2599 // Callback for expand_regions, collect innermost region data for each bb.
2600 static void *
2601 collect_bb2reg (struct tm_region *region, void *data)
2602 {
2603 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2604 vec<tm_region *> *bb2reg = stuff->bb2reg;
2605 vec<basic_block> queue;
2606 unsigned int i;
2607 basic_block bb;
2608
2609 queue = get_tm_region_blocks (region->entry_block,
2610 region->exit_blocks,
2611 region->irr_blocks,
2612 NULL,
2613 /*stop_at_irr_p=*/true,
2614 stuff->include_uninstrumented_p);
2615
2616 // We expect expand_regions to visit outer regions before their inner
2617 // regions (pre-order), so the last region seen for any bb is the innermost.
2618 FOR_EACH_VEC_ELT (queue, i, bb)
2619 (*bb2reg)[bb->index] = region;
2620
2621 queue.release ();
2622 return NULL;
2623 }
2624
2625 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2626 // which a basic block belongs. Note that we only consider the instrumented
2627 // code paths for the region; the uninstrumented code paths are ignored if
2628 // INCLUDE_UNINSTRUMENTED_P is false.
2629 //
2630 // ??? This data is very similar to the bb_regions array that is collected
2631 // during tm_region_init. Or, rather, this data is similar to what could
2632 // be used within tm_region_init. The actual computation in tm_region_init
2633 // begins and ends with bb_regions entirely full of NULL pointers, due to
2634 // the way in which pointers are swapped in and out of the array.
2635 //
2636 // ??? Our callers expect that blocks are not shared between transactions.
2637 // When the optimizers get too smart, and blocks are shared, then during
2638 // the tm_mark phase we'll add log entries to only one of the two transactions,
2639 // and in the tm_edges phase we'll add edges to the CFG that create invalid
2640 // cycles. The symptom is SSA defs that do not dominate their uses.
2641 // Note that the optimizers were locally correct with their transformation,
2642 // as we have no info within the program that suggests that the blocks cannot
2643 // be shared.
2644 //
2645 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2646 // only known instance of this block sharing.
2647
2648 static vec<tm_region *>
2649 get_bb_regions_instrumented (bool traverse_clones,
2650 bool include_uninstrumented_p)
2651 {
2652 unsigned n = last_basic_block_for_fn (cfun);
2653 struct bb2reg_stuff stuff;
2654 vec<tm_region *> ret;
2655
2656 ret.create (n);
2657 ret.safe_grow_cleared (n);
2658 stuff.bb2reg = &ret;
2659 stuff.include_uninstrumented_p = include_uninstrumented_p;
2660 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2661
2662 return ret;
2663 }
2664
2665 /* Set the BB_IN_TRANSACTION flag for all basic blocks that appear in a
2666 transaction; this marks the statements they contain as transactional. */
2667
2668 void
2669 compute_transaction_bits (void)
2670 {
2671 struct tm_region *region;
2672 vec<basic_block> queue;
2673 unsigned int i;
2674 basic_block bb;
2675
2676 /* ??? Perhaps we need to abstract gate_tm_init further, because we
2677 certainly don't need it to calculate CDI_DOMINATORS info. */
2678 gate_tm_init ();
2679
2680 FOR_EACH_BB_FN (bb, cfun)
2681 bb->flags &= ~BB_IN_TRANSACTION;
2682
2683 for (region = all_tm_regions; region; region = region->next)
2684 {
2685 queue = get_tm_region_blocks (region->entry_block,
2686 region->exit_blocks,
2687 region->irr_blocks,
2688 NULL,
2689 /*stop_at_irr_p=*/true);
2690 for (i = 0; queue.iterate (i, &bb); ++i)
2691 bb->flags |= BB_IN_TRANSACTION;
2692 queue.release ();
2693 }
2694
2695 if (all_tm_regions)
2696 bitmap_obstack_release (&tm_obstack);
2697 }
2698
2699 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2700 call to BUILT_IN_TM_START. */
2701
2702 static void *
2703 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2704 {
2705 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2706 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2707 tree tm_state = region->tm_state;
2708 tree tm_state_type = TREE_TYPE (tm_state);
2709 edge abort_edge = NULL;
2710 edge inst_edge = NULL;
2711 edge uninst_edge = NULL;
2712 edge fallthru_edge = NULL;
2713
2714 // Identify the various successors of the transaction start.
2715 {
2716 edge_iterator i;
2717 edge e;
2718 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2719 {
2720 if (e->flags & EDGE_TM_ABORT)
2721 abort_edge = e;
2722 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2723 uninst_edge = e;
2724 else
2725 inst_edge = e;
2726 if (e->flags & EDGE_FALLTHRU)
2727 fallthru_edge = e;
2728 }
2729 }
2730
2731 /* ??? There are plenty of bits here we're not computing. */
2732 {
2733 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
2734 int flags = 0;
2735 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2736 flags |= PR_DOESGOIRREVOCABLE;
2737 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2738 flags |= PR_HASNOIRREVOCABLE;
2739 /* If the transaction does not have an abort in lexical scope and is not
2740 marked as an outer transaction, then it will never abort. */
2741 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2742 flags |= PR_HASNOABORT;
2743 if ((subcode & GTMA_HAVE_STORE) == 0)
2744 flags |= PR_READONLY;
2745 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2746 flags |= PR_INSTRUMENTEDCODE;
2747 if (uninst_edge)
2748 flags |= PR_UNINSTRUMENTEDCODE;
2749 if (subcode & GTMA_IS_OUTER)
2750 region->original_transaction_was_outer = true;
2751 tree t = build_int_cst (tm_state_type, flags);
2752 gcall *call = gimple_build_call (tm_start, 1, t);
2753 gimple_call_set_lhs (call, tm_state);
2754 gimple_set_location (call, gimple_location (region->transaction_stmt));
2755
2756 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2757 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2758 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2759 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2760 gsi_remove (&gsi, true);
2761 region->transaction_stmt = call;
2762 }
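
// An illustrative sketch (not from the original commentary): for a
// transaction with no lexical abort, no stores, and only an instrumented
// code path, the flags computed above would include
// PR_INSTRUMENTEDCODE | PR_HASNOABORT | PR_READONLY, and the
// GIMPLE_TRANSACTION is replaced by, roughly,
//
//   tm_state = _ITM_beginTransaction (flags);
//
// assuming BUILT_IN_TM_START maps to the libitm entry point
// _ITM_beginTransaction.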
2763
2764 // Generate log saves.
2765 if (!tm_log_save_addresses.is_empty ())
2766 tm_log_emit_saves (region->entry_block, transaction_bb);
2767
2768 // In the beginning, we have no tests to perform on transaction restart.
2769 // Note that after this point, transaction_bb becomes the "most recent
2770 // block containing tests for the transaction".
2771 region->restart_block = region->entry_block;
2772
2773 // Generate log restores.
2774 if (!tm_log_save_addresses.is_empty ())
2775 {
2776 basic_block test_bb = create_empty_bb (transaction_bb);
2777 basic_block code_bb = create_empty_bb (test_bb);
2778 basic_block join_bb = create_empty_bb (code_bb);
2779 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2780 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2781 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2782 if (region->restart_block == region->entry_block)
2783 region->restart_block = test_bb;
2784
2785 tree t1 = create_tmp_reg (tm_state_type);
2786 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2787 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2788 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2789 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2790
2791 t2 = build_int_cst (tm_state_type, 0);
2792 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2793 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2794
2795 tm_log_emit_restores (region->entry_block, code_bb);
2796
2797 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2798 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2799 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2800 redirect_edge_pred (fallthru_edge, join_bb);
2801
2802 join_bb->frequency = test_bb->frequency = transaction_bb->frequency;
2803 join_bb->count = test_bb->count = transaction_bb->count;
2804
2805 ei->probability = PROB_ALWAYS;
2806 et->probability = PROB_LIKELY;
2807 ef->probability = PROB_UNLIKELY;
2808 et->count = apply_probability (test_bb->count, et->probability);
2809 ef->count = apply_probability (test_bb->count, ef->probability);
2810
2811 code_bb->count = et->count;
2812 code_bb->frequency = EDGE_FREQUENCY (et);
2813
2814 transaction_bb = join_bb;
2815 }
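
  // An illustrative sketch of the control flow just built (edge labels
  // only; block contents abbreviated):
  //
  //   transaction_bb --(fallthru)--> test_bb
  //   test_bb:  t1 = tm_state & A_RESTORELIVEVARIABLES
  //             if (t1 != 0) --(true)--> code_bb   // the log restores
  //                          --(false)-> join_bb   // rejoins the original
  //                                                // fallthru destination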
2816
2817 // If we have an ABORT edge, create a test to perform the abort.
2818 if (abort_edge)
2819 {
2820 basic_block test_bb = create_empty_bb (transaction_bb);
2821 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2822 if (region->restart_block == region->entry_block)
2823 region->restart_block = test_bb;
2824
2825 tree t1 = create_tmp_reg (tm_state_type);
2826 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2827 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2828 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2829 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2830
2831 t2 = build_int_cst (tm_state_type, 0);
2832 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2833 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2834
2835 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2836 test_bb->frequency = transaction_bb->frequency;
2837 test_bb->count = transaction_bb->count;
2838 ei->probability = PROB_ALWAYS;
2839
2840 // Not the abort edge. If both are live, choose one at random, as
2841 // we'll be fixing that up below.
2842 redirect_edge_pred (fallthru_edge, test_bb);
2843 fallthru_edge->flags = EDGE_FALSE_VALUE;
2844 fallthru_edge->probability = PROB_VERY_LIKELY;
2845 fallthru_edge->count
2846 = apply_probability (test_bb->count, fallthru_edge->probability);
2847
2848 // Abort/over edge.
2849 redirect_edge_pred (abort_edge, test_bb);
2850 abort_edge->flags = EDGE_TRUE_VALUE;
2851 abort_edge->probability = PROB_VERY_UNLIKELY;
2852 abort_edge->count
2853 = apply_probability (test_bb->count, abort_edge->probability);
2854
2855 transaction_bb = test_bb;
2856 }
2857
2858 // If we have both instrumented and uninstrumented code paths, select one.
2859 if (inst_edge && uninst_edge)
2860 {
2861 basic_block test_bb = create_empty_bb (transaction_bb);
2862 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2863 if (region->restart_block == region->entry_block)
2864 region->restart_block = test_bb;
2865
2866 tree t1 = create_tmp_reg (tm_state_type);
2867 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2868
2869 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2870 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2871 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2872
2873 t2 = build_int_cst (tm_state_type, 0);
2874 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2875 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2876
2877 // Create the edge into test_bb first, as we want to copy values
2878 // out of the fallthru edge.
2879 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2880 e->probability = fallthru_edge->probability;
2881 test_bb->count = e->count = fallthru_edge->count;
2882 test_bb->frequency = EDGE_FREQUENCY (e);
2883
2884 // Now update the edges to the inst/uninst implementations.
2885 // For now assume that the paths are equally likely. When using HTM,
2886 // we'll try the uninst path first and fall back to the inst path if HTM
2887 // buffers are exceeded. Without HTM we start with the inst path and
2888 // use the uninst path when falling back to serial mode.
2889 redirect_edge_pred (inst_edge, test_bb);
2890 inst_edge->flags = EDGE_FALSE_VALUE;
2891 inst_edge->probability = REG_BR_PROB_BASE / 2;
2892 inst_edge->count
2893 = apply_probability (test_bb->count, inst_edge->probability);
2894
2895 redirect_edge_pred (uninst_edge, test_bb);
2896 uninst_edge->flags = EDGE_TRUE_VALUE;
2897 uninst_edge->probability = REG_BR_PROB_BASE / 2;
2898 uninst_edge->count
2899 = apply_probability (test_bb->count, uninst_edge->probability);
2900 }
2901
2902 // If we have no previous special cases, and we have PHIs at the beginning
2903 // of the atomic region, this means we have a loop at the beginning of the
2904 // atomic region that shares the first block. This can cause problems with
2905 // the abnormal transaction-restart edges to be added in the tm_edges pass.
2906 // Solve this by adding a new empty block to receive the abnormal edges.
2907 if (region->restart_block == region->entry_block
2908 && phi_nodes (region->entry_block))
2909 {
2910 basic_block empty_bb = create_empty_bb (transaction_bb);
2911 region->restart_block = empty_bb;
2912 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
2913
2914 redirect_edge_pred (fallthru_edge, empty_bb);
2915 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
2916 }
2917
2918 return NULL;
2919 }
2920
2921 /* Generate the temporary to be used for the return value of
2922 BUILT_IN_TM_START. */
2923
2924 static void *
2925 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2926 {
2927 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2928 region->tm_state =
2929 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
2930
2931 // Reset the subcode, post optimizations. We'll fill this in
2932 // again as we process blocks.
2933 if (region->exit_blocks)
2934 {
2935 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2936 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
2937
2938 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2939 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
2940 | GTMA_MAY_ENTER_IRREVOCABLE
2941 | GTMA_HAS_NO_INSTRUMENTATION);
2942 else
2943 subcode &= GTMA_DECLARATION_MASK;
2944 gimple_transaction_set_subcode (transaction_stmt, subcode);
2945 }
2946
2947 return NULL;
2948 }
2949
2950 // Propagate flags from inner transactions outwards.
2951 static void
2952 propagate_tm_flags_out (struct tm_region *region)
2953 {
2954 if (region == NULL)
2955 return;
2956 propagate_tm_flags_out (region->inner);
2957
2958 if (region->outer && region->outer->transaction_stmt)
2959 {
2960 unsigned s
2961 = gimple_transaction_subcode (region->get_transaction_stmt ());
2962 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
2963 | GTMA_MAY_ENTER_IRREVOCABLE);
2964 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
2965 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
2966 s);
2967 }
2968
2969 propagate_tm_flags_out (region->next);
2970 }
2971
2972 /* Entry point to the MARK phase of TM expansion. Here we replace
2973 transactional memory statements with calls to builtins, function calls
2974 with their transactional clones (if available), and GIMPLE_TRANSACTIONs
2975 with calls to BUILT_IN_TM_START; restart back-edges are added later. */
2976
2977 static unsigned int
2978 execute_tm_mark (void)
2979 {
2980 pending_edge_inserts_p = false;
2981
2982 expand_regions (all_tm_regions, generate_tm_state, NULL,
2983 /*traverse_clones=*/true);
2984
2985 tm_log_init ();
2986
2987 vec<tm_region *> bb_regions
2988 = get_bb_regions_instrumented (/*traverse_clones=*/true,
2989 /*include_uninstrumented_p=*/false);
2990 struct tm_region *r;
2991 unsigned i;
2992
2993 // Expand memory operations into calls into the runtime.
2994 // This collects log entries as well.
2995 FOR_EACH_VEC_ELT (bb_regions, i, r)
2996 {
2997 if (r != NULL)
2998 {
2999 if (r->transaction_stmt)
3000 {
3001 unsigned sub
3002 = gimple_transaction_subcode (r->get_transaction_stmt ());
3003
3004 /* If we're sure to go irrevocable, there won't be
3005 anything to expand, since the run-time will go
3006 irrevocable right away. */
3007 if (sub & GTMA_DOES_GO_IRREVOCABLE
3008 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3009 continue;
3010 }
3011 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
3012 }
3013 }
3014
3015 bb_regions.release ();
3016
3017 // Propagate flags from inner transactions outwards.
3018 propagate_tm_flags_out (all_tm_regions);
3019
3020 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3021 expand_regions (all_tm_regions, expand_transaction, NULL,
3022 /*traverse_clones=*/false);
3023
3024 tm_log_emit ();
3025 tm_log_delete ();
3026
3027 if (pending_edge_inserts_p)
3028 gsi_commit_edge_inserts ();
3029 free_dominance_info (CDI_DOMINATORS);
3030 return 0;
3031 }
3032
3033 namespace {
3034
3035 const pass_data pass_data_tm_mark =
3036 {
3037 GIMPLE_PASS, /* type */
3038 "tmmark", /* name */
3039 OPTGROUP_NONE, /* optinfo_flags */
3040 TV_TRANS_MEM, /* tv_id */
3041 ( PROP_ssa | PROP_cfg ), /* properties_required */
3042 0, /* properties_provided */
3043 0, /* properties_destroyed */
3044 0, /* todo_flags_start */
3045 TODO_update_ssa, /* todo_flags_finish */
3046 };
3047
3048 class pass_tm_mark : public gimple_opt_pass
3049 {
3050 public:
3051 pass_tm_mark (gcc::context *ctxt)
3052 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3053 {}
3054
3055 /* opt_pass methods: */
3056 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3057
3058 }; // class pass_tm_mark
3059
3060 } // anon namespace
3061
3062 gimple_opt_pass *
3063 make_pass_tm_mark (gcc::context *ctxt)
3064 {
3065 return new pass_tm_mark (ctxt);
3066 }
3067 \f
3068
3069 /* Create an abnormal edge from STMT (at ITER) to DEST_BB, splitting
3070 the block as necessary. Adjust *PNEXT as needed for the split block. */
3071
3072 static inline void
3073 split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
3074 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3075 {
3076 basic_block bb = gimple_bb (stmt);
3077 if (!gsi_one_before_end_p (iter))
3078 {
3079 edge e = split_block (bb, stmt);
3080 *pnext = gsi_start_bb (e->dest);
3081 }
3082 make_edge (bb, dest_bb, EDGE_ABNORMAL);
3083
3084 // Record the need for the edge for the benefit of the rtl passes.
3085 if (cfun->gimple_df->tm_restart == NULL)
3086 cfun->gimple_df->tm_restart
3087 = hash_table<tm_restart_hasher>::create_ggc (31);
3088
3089 struct tm_restart_node dummy;
3090 dummy.stmt = stmt;
3091 dummy.label_or_list = gimple_block_label (dest_bb);
3092
3093 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3094 INSERT);
3095 struct tm_restart_node *n = *slot;
3096 if (n == NULL)
3097 {
3098 n = ggc_alloc<tm_restart_node> ();
3099 *n = dummy;
3100 }
3101 else
3102 {
3103 tree old = n->label_or_list;
3104 if (TREE_CODE (old) == LABEL_DECL)
3105 old = tree_cons (NULL, old, NULL);
3106 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3107 }
3108 }
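
/* An illustrative example: if BB contains

     ...
     __builtin_tm_commit ();
     other_stmt;

   the block is split after the commit, an EDGE_ABNORMAL is made from
   the first half to DEST_BB (typically a region's restart block), and
   *PNEXT is repointed at the start of the second half so the caller's
   iteration continues correctly.  */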
3109
3110 /* Split block BB as necessary for every builtin function we added, and
3111 wire up the abnormal back edges implied by the transaction restart. */
3112
3113 static void
3114 expand_block_edges (struct tm_region *const region, basic_block bb)
3115 {
3116 gimple_stmt_iterator gsi, next_gsi;
3117
3118 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3119 {
3120 gimple *stmt = gsi_stmt (gsi);
3121 gcall *call_stmt;
3122
3123 next_gsi = gsi;
3124 gsi_next (&next_gsi);
3125
3126 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3127 call_stmt = dyn_cast <gcall *> (stmt);
3128 if (!call_stmt
3129 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
3130 continue;
3131
3132 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
3133 == BUILT_IN_TM_ABORT)
3134 {
3135 // If we have a ``__transaction_cancel [[outer]]'', there is only
3136 // one abnormal edge: to the transaction marked OUTER.
3137 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3138 // constant argument, which we can examine here. Users invoking
3139 // TM_ABORT directly get what they deserve.
3140 tree arg = gimple_call_arg (call_stmt, 0);
3141 if (TREE_CODE (arg) == INTEGER_CST
3142 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3143 && !decl_is_tm_clone (current_function_decl))
3144 {
3145 // Find the GTMA_IS_OUTER transaction.
3146 for (struct tm_region *o = region; o; o = o->outer)
3147 if (o->original_transaction_was_outer)
3148 {
3149 split_bb_make_tm_edge (call_stmt, o->restart_block,
3150 gsi, &next_gsi);
3151 break;
3152 }
3153
3154 // Otherwise, the front-end should have semantically checked
3155 // outer aborts, but in either case the target region is not
3156 // within this function.
3157 continue;
3158 }
3159
3160 // Non-outer TM aborts have an abnormal edge to the innermost
3161 // transaction, the one being aborted.
3162 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3163 &next_gsi);
3164 }
3165
3166 // All TM builtins have an abnormal edge to the outermost transaction.
3167 // We never restart inner transactions. For tm clones, we know a priori
3168 // that the outermost transaction is outside the function.
3169 if (decl_is_tm_clone (current_function_decl))
3170 continue;
3171
3172 if (cfun->gimple_df->tm_restart == NULL)
3173 cfun->gimple_df->tm_restart
3174 = hash_table<tm_restart_hasher>::create_ggc (31);
3175
3178 for (struct tm_region *o = region; o; o = o->outer)
3179 if (!o->outer)
3180 {
3181 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
3182 break;
3183 }
3184
3185 // Delete any tail-call annotation that may have been added.
3186 // The tail-call pass may have mis-identified the commit as being
3187 // a candidate because we had not yet added this restart edge.
3188 gimple_call_set_tail (call_stmt, false);
3189 }
3190 }
3191
3192 /* Entry point to the final expansion of transactional nodes. */
3193
3194 namespace {
3195
3196 const pass_data pass_data_tm_edges =
3197 {
3198 GIMPLE_PASS, /* type */
3199 "tmedge", /* name */
3200 OPTGROUP_NONE, /* optinfo_flags */
3201 TV_TRANS_MEM, /* tv_id */
3202 ( PROP_ssa | PROP_cfg ), /* properties_required */
3203 0, /* properties_provided */
3204 0, /* properties_destroyed */
3205 0, /* todo_flags_start */
3206 TODO_update_ssa, /* todo_flags_finish */
3207 };
3208
3209 class pass_tm_edges : public gimple_opt_pass
3210 {
3211 public:
3212 pass_tm_edges (gcc::context *ctxt)
3213 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3214 {}
3215
3216 /* opt_pass methods: */
3217 virtual unsigned int execute (function *);
3218
3219 }; // class pass_tm_edges
3220
3221 unsigned int
3222 pass_tm_edges::execute (function *fun)
3223 {
3224 vec<tm_region *> bb_regions
3225 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3226 /*include_uninstrumented_p=*/true);
3227 struct tm_region *r;
3228 unsigned i;
3229
3230 FOR_EACH_VEC_ELT (bb_regions, i, r)
3231 if (r != NULL)
3232 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3233
3234 bb_regions.release ();
3235
3236 /* We've got to release the dominance info now, to indicate that it
3237 must be rebuilt completely. Otherwise we'll crash trying to update
3238 the SSA web in the TODO section following this pass. */
3239 free_dominance_info (CDI_DOMINATORS);
3240 bitmap_obstack_release (&tm_obstack);
3241 all_tm_regions = NULL;
3242
3243 return 0;
3244 }
3245
3246 } // anon namespace
3247
3248 gimple_opt_pass *
3249 make_pass_tm_edges (gcc::context *ctxt)
3250 {
3251 return new pass_tm_edges (ctxt);
3252 }
3253 \f
3254 /* Helper function for expand_regions. Expand REGION and recurse to
3255 the inner region. Call CALLBACK on each region. CALLBACK returns
3256 NULL to continue the traversal, otherwise a non-null value which
3257 this function will return as well. TRAVERSE_CLONES is true if we
3258 should traverse transactional clones. */
3259
3260 static void *
3261 expand_regions_1 (struct tm_region *region,
3262 void *(*callback)(struct tm_region *, void *),
3263 void *data,
3264 bool traverse_clones)
3265 {
3266 void *retval = NULL;
3267 if (region->exit_blocks
3268 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3269 {
3270 retval = callback (region, data);
3271 if (retval)
3272 return retval;
3273 }
3274 if (region->inner)
3275 {
3276 retval = expand_regions (region->inner, callback, data, traverse_clones);
3277 if (retval)
3278 return retval;
3279 }
3280 return retval;
3281 }
3282
3283 /* Traverse the regions enclosed and including REGION. Execute
3284 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3285 continue the traversal, otherwise a non-null value which this
3286 function will return as well. TRAVERSE_CLONES is true if we should
3287 traverse transactional clones. */
3288
3289 static void *
3290 expand_regions (struct tm_region *region,
3291 void *(*callback)(struct tm_region *, void *),
3292 void *data,
3293 bool traverse_clones)
3294 {
3295 void *retval = NULL;
3296 while (region)
3297 {
3298 retval = expand_regions_1 (region, callback, data, traverse_clones);
3299 if (retval)
3300 return retval;
3301 region = region->next;
3302 }
3303 return retval;
3304 }
3305
3306 \f
3307 /* A unique TM memory operation. */
3308 struct tm_memop
3309 {
3310 /* Unique ID that all memory operations to the same location have. */
3311 unsigned int value_id;
3312 /* Address of load/store. */
3313 tree addr;
3314 };
3315
3316 /* TM memory operation hashtable helpers. */
3317
3318 struct tm_memop_hasher : free_ptr_hash <tm_memop>
3319 {
3320 static inline hashval_t hash (const tm_memop *);
3321 static inline bool equal (const tm_memop *, const tm_memop *);
3322 };
3323
3324 /* Htab support. Return a hash value for a `tm_memop'. */
3325 inline hashval_t
3326 tm_memop_hasher::hash (const tm_memop *mem)
3327 {
3328 tree addr = mem->addr;
3329 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3330 actually done with operand_equal_p (see tm_memop_hasher::equal). */
3331 if (TREE_CODE (addr) == ADDR_EXPR)
3332 addr = TREE_OPERAND (addr, 0);
3333 return iterative_hash_expr (addr, 0);
3334 }
3335
3336 /* Htab support. Return true if two tm_memop's are the same. */
3337 inline bool
3338 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
3339 {
3340 return operand_equal_p (mem1->addr, mem2->addr, 0);
3341 }
3342
3343 /* Sets for solving data flow equations in the memory optimization pass. */
3344 struct tm_memopt_bitmaps
3345 {
3346 /* Stores available to this BB upon entry. Basically, stores that
3347 dominate this BB. */
3348 bitmap store_avail_in;
3349 /* Stores available at the end of this BB. */
3350 bitmap store_avail_out;
3351 bitmap store_antic_in;
3352 bitmap store_antic_out;
3353 /* Reads available to this BB upon entry. Basically, reads that
3354 dominate this BB. */
3355 bitmap read_avail_in;
3356 /* Reads available at the end of this BB. */
3357 bitmap read_avail_out;
3358 /* Reads performed in this BB. */
3359 bitmap read_local;
3360 /* Writes performed in this BB. */
3361 bitmap store_local;
3362
3363 /* Temporary storage for pass. */
3364 /* Is the current BB in the worklist? */
3365 bool avail_in_worklist_p;
3366 /* Have we visited this BB? */
3367 bool visited_p;
3368 };
3369
3370 static bitmap_obstack tm_memopt_obstack;
3371
3372 /* Unique counter for TM loads and stores. Loads and stores of the
3373 same address get the same ID. */
3374 static unsigned int tm_memopt_value_id;
3375 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3376
3377 #define STORE_AVAIL_IN(BB) \
3378 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3379 #define STORE_AVAIL_OUT(BB) \
3380 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3381 #define STORE_ANTIC_IN(BB) \
3382 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3383 #define STORE_ANTIC_OUT(BB) \
3384 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3385 #define READ_AVAIL_IN(BB) \
3386 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3387 #define READ_AVAIL_OUT(BB) \
3388 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3389 #define READ_LOCAL(BB) \
3390 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3391 #define STORE_LOCAL(BB) \
3392 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3393 #define AVAIL_IN_WORKLIST_P(BB) \
3394 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3395 #define BB_VISITED_P(BB) \
3396 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3397
3398 /* Given a TM load/store in STMT, return the value number for the address
3399 it accesses. */
3400
3401 static unsigned int
3402 tm_memopt_value_number (gimple *stmt, enum insert_option op)
3403 {
3404 struct tm_memop tmpmem, *mem;
3405 tm_memop **slot;
3406
3407 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3408 tmpmem.addr = gimple_call_arg (stmt, 0);
3409 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3410 if (*slot)
3411 mem = *slot;
3412 else if (op == INSERT)
3413 {
3414 mem = XNEW (struct tm_memop);
3415 *slot = mem;
3416 mem->value_id = tm_memopt_value_id++;
3417 mem->addr = tmpmem.addr;
3418 }
3419 else
3420 gcc_unreachable ();
3421 return mem->value_id;
3422 }
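
/* For example, the two instrumented accesses

     _ITM_RU4 (&x);
     _ITM_WU4 (&x, 1);

   share one value number, since both hash down to the DECL `x' and
   equality is operand_equal_p on the address argument (the builtin
   names shown are illustrative).  */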
3423
3424 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3425
3426 static void
3427 tm_memopt_accumulate_memops (basic_block bb)
3428 {
3429 gimple_stmt_iterator gsi;
3430
3431 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3432 {
3433 gimple *stmt = gsi_stmt (gsi);
3434 bitmap bits;
3435 unsigned int loc;
3436
3437 if (is_tm_store (stmt))
3438 bits = STORE_LOCAL (bb);
3439 else if (is_tm_load (stmt))
3440 bits = READ_LOCAL (bb);
3441 else
3442 continue;
3443
3444 loc = tm_memopt_value_number (stmt, INSERT);
3445 bitmap_set_bit (bits, loc);
3446 if (dump_file)
3447 {
3448 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3449 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3450 gimple_bb (stmt)->index);
3451 print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
3452 fprintf (dump_file, "\n");
3453 }
3454 }
3455 }
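
/* With a dump file enabled, the loop above emits lines such as
   (illustrative):

     TM memopt (STORE): value num=0, BB=5, addr=x
     TM memopt (LOAD): value num=0, BB=6, addr=x

   where the shared value number indicates that both operations access
   the same address.  */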
3456
3457 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3458
3459 static void
3460 dump_tm_memopt_set (const char *set_name, bitmap bits)
3461 {
3462 unsigned i;
3463 bitmap_iterator bi;
3464 const char *comma = "";
3465
3466 fprintf (dump_file, "TM memopt: %s: [", set_name);
3467 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3468 {
3469 hash_table<tm_memop_hasher>::iterator hi;
3470 struct tm_memop *mem = NULL;
3471
3472 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3473 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3474 if (mem->value_id == i)
3475 break;
3476 gcc_assert (mem->value_id == i);
3477 fprintf (dump_file, "%s", comma);
3478 comma = ", ";
3479 print_generic_expr (dump_file, mem->addr, 0);
3480 }
3481 fprintf (dump_file, "]\n");
3482 }
3483
3484 /* Prettily dump all of the memopt sets in BLOCKS. */
3485
3486 static void
3487 dump_tm_memopt_sets (vec<basic_block> blocks)
3488 {
3489 size_t i;
3490 basic_block bb;
3491
3492 for (i = 0; blocks.iterate (i, &bb); ++i)
3493 {
3494 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3495 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3496 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3497 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3498 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3499 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3500 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3501 }
3502 }
3503
3504 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3505
3506 static void
3507 tm_memopt_compute_avin (basic_block bb)
3508 {
3509 edge e;
3510 unsigned ix;
3511
3512 /* Seed with the AVOUT of any predecessor. */
3513 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3514 {
3515 e = EDGE_PRED (bb, ix);
3516 /* Make sure we have already visited this BB, and that it is thus
3517 initialized.
3518
3519 If e->src->aux is NULL, this predecessor is actually on an
3520 enclosing transaction. We only care about the current
3521 transaction, so ignore it. */
3522 if (e->src->aux && BB_VISITED_P (e->src))
3523 {
3524 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3525 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3526 break;
3527 }
3528 }
3529
3530 for (; ix < EDGE_COUNT (bb->preds); ix++)
3531 {
3532 e = EDGE_PRED (bb, ix);
3533 if (e->src->aux && BB_VISITED_P (e->src))
3534 {
3535 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3536 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3537 }
3538 }
3539
3540 BB_VISITED_P (bb) = true;
3541 }
3542
3543 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3544
3545 static void
3546 tm_memopt_compute_antin (basic_block bb)
3547 {
3548 edge e;
3549 unsigned ix;
3550
3551 /* Seed with the ANTIC_OUT of any successor. */
3552 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3553 {
3554 e = EDGE_SUCC (bb, ix);
3555 /* Make sure we have already visited this BB, and that it is thus
3556 initialized. */
3557 if (BB_VISITED_P (e->dest))
3558 {
3559 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3560 break;
3561 }
3562 }
3563
3564 for (; ix < EDGE_COUNT (bb->succs); ix++)
3565 {
3566 e = EDGE_SUCC (bb, ix);
3567 if (BB_VISITED_P (e->dest))
3568 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3569 }
3570
3571 BB_VISITED_P (bb) = true;
3572 }
3573
3574 /* Compute the AVAIL sets for every basic block in BLOCKS.
3575
3576 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3577
3578 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3579 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3580
3581 This is basically what we do in lcm's compute_available(), but here
3582 we calculate two sets of sets (one for STOREs and one for READs),
3583 and we work on a region instead of the entire CFG.
3584
3585 REGION is the TM region.
3586 BLOCKS are the basic blocks in the region. */
3587
3588 static void
3589 tm_memopt_compute_available (struct tm_region *region,
3590 vec<basic_block> blocks)
3591 {
3592 edge e;
3593 basic_block *worklist, *qin, *qout, *qend, bb;
3594 unsigned int qlen, i;
3595 edge_iterator ei;
3596 bool changed;
3597
3598 /* Allocate a worklist array/queue. Entries are only added to the
3599 list if they were not already on the list. So the size is
3600 bounded by the number of basic blocks in the region. */
3601 qlen = blocks.length () - 1;
3602 qin = qout = worklist = XNEWVEC (basic_block, qlen);
3604
3605 /* Put every block in the region on the worklist. */
3606 for (i = 0; blocks.iterate (i, &bb); ++i)
3607 {
3608 /* Seed AVAIL_OUT with the LOCAL set. */
3609 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3610 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3611
3612 AVAIL_IN_WORKLIST_P (bb) = true;
3613 /* No need to insert the entry block, since it has an AVIN of
3614 null, and an AVOUT that has already been seeded in. */
3615 if (bb != region->entry_block)
3616 *qin++ = bb;
3617 }
3618
3619 /* The entry block has been initialized with the local sets. */
3620 BB_VISITED_P (region->entry_block) = true;
3621
3622 qin = worklist;
3623 qend = &worklist[qlen];
3624
3625 /* Iterate until the worklist is empty. */
3626 while (qlen)
3627 {
3628 /* Take the first entry off the worklist. */
3629 bb = *qout++;
3630 qlen--;
3631
3632 if (qout >= qend)
3633 qout = worklist;
3634
3635 /* This block can be added to the worklist again if necessary. */
3636 AVAIL_IN_WORKLIST_P (bb) = false;
3637 tm_memopt_compute_avin (bb);
3638
3639 /* Note: We do not add the LOCAL sets here because we already
3640 seeded the AVAIL_OUT sets with them. */
3641 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3642 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3643 if (changed
3644 && (region->exit_blocks == NULL
3645 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3646 /* If the out state of this block changed, then we need to add
3647 its successors to the worklist if they are not already in. */
3648 FOR_EACH_EDGE (e, ei, bb->succs)
3649 if (!AVAIL_IN_WORKLIST_P (e->dest)
3650 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3651 {
3652 *qin++ = e->dest;
3653 AVAIL_IN_WORKLIST_P (e->dest) = true;
3654 qlen++;
3655
3656 if (qin >= qend)
3657 qin = worklist;
3658 }
3659 }
3660
3661 free (worklist);
3662
3663 if (dump_file)
3664 dump_tm_memopt_sets (blocks);
3665 }
3666
3667 /* Compute ANTIC sets for every basic block in BLOCKS.
3668
3669 We compute STORE_ANTIC_OUT as follows:
3670
3671 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3672 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3673
3674 REGION is the TM region.
3675 BLOCKS are the basic blocks in the region. */
3676
3677 static void
3678 tm_memopt_compute_antic (struct tm_region *region,
3679 vec<basic_block> blocks)
3680 {
3681 edge e;
3682 basic_block *worklist, *qin, *qout, *qend, bb;
3683 unsigned int qlen;
3684 int i;
3685 edge_iterator ei;
3686
3687 /* Allocate a worklist array/queue. Entries are only added to the
3688 list if they were not already on the list. So the size is
3689 bounded by the number of basic blocks in the region. */
3690 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3691
3692 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3693 {
3694 bb = blocks[i];
3695
3696 /* Seed ANTIC_OUT with the LOCAL set. */
3697 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3698
3699 /* Put every block in the region on the worklist. */
3700 AVAIL_IN_WORKLIST_P (bb) = true;
3701 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3702 and their ANTIC_OUT has already been seeded in. */
3703 if (region->exit_blocks
3704 && !bitmap_bit_p (region->exit_blocks, bb->index))
3705 {
3706 qlen++;
3707 *qin++ = bb;
3708 }
3709 }
3710
3711 /* The exit blocks have been initialized with the local sets. */
3712 if (region->exit_blocks)
3713 {
3714 unsigned int i;
3715 bitmap_iterator bi;
3716 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3717 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3718 }
3719
3720 qin = worklist;
3721 qend = &worklist[qlen];
3722
3723 /* Iterate until the worklist is empty. */
3724 while (qlen)
3725 {
3726 /* Take the first entry off the worklist. */
3727 bb = *qout++;
3728 qlen--;
3729
3730 if (qout >= qend)
3731 qout = worklist;
3732
3733 /* This block can be added to the worklist again if necessary. */
3734 AVAIL_IN_WORKLIST_P (bb) = false;
3735 tm_memopt_compute_antin (bb);
3736
3737 /* Note: We do not add the LOCAL sets here because we already
3738 seeded the ANTIC_OUT sets with them. */
3739 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3740 && bb != region->entry_block)
3741 /* If the out state of this block changed, then we need to add
3742 its predecessors to the worklist if they are not already in. */
3743 FOR_EACH_EDGE (e, ei, bb->preds)
3744 if (!AVAIL_IN_WORKLIST_P (e->src))
3745 {
3746 *qin++ = e->src;
3747 AVAIL_IN_WORKLIST_P (e->src) = true;
3748 qlen++;
3749
3750 if (qin >= qend)
3751 qin = worklist;
3752 }
3753 }
3754
3755 free (worklist);
3756
3757 if (dump_file)
3758 dump_tm_memopt_sets (blocks);
3759 }
3760
3761 /* Offsets of load variants from TM_LOAD. For example,
3762 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3763 See gtm-builtins.def. */
3764 #define TRANSFORM_RAR 1
3765 #define TRANSFORM_RAW 2
3766 #define TRANSFORM_RFW 3
3767 /* Offsets of store variants from TM_STORE. */
3768 #define TRANSFORM_WAR 1
3769 #define TRANSFORM_WAW 2
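/* A sketch of how these offsets are applied (this assumes the layout
   in gtm-builtins.def, where each plain load builtin is immediately
   followed by its RAR/RAW/RFW variants and each plain store builtin
   by its WAR/WAW variants):

     BUILT_IN_TM_LOAD_4  + TRANSFORM_RAW  ->  BUILT_IN_TM_LOAD_RAW_4
     BUILT_IN_TM_STORE_4 + TRANSFORM_WAW  ->  BUILT_IN_TM_STORE_WAW_4

   tm_memopt_transform_stmt below relies on exactly this ordering. */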
3770
3771 /* Inform about a load/store optimization. */
3772
3773 static void
3774 dump_tm_memopt_transform (gimple *stmt)
3775 {
3776 if (dump_file)
3777 {
3778 fprintf (dump_file, "TM memopt: transforming: ");
3779 print_gimple_stmt (dump_file, stmt, 0, 0);
3780 fprintf (dump_file, "\n");
3781 }
3782 }
3783
3784 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3785 by a builtin that is OFFSET entries down in the builtins table in
3786 gtm-builtins.def. */
3787
3788 static void
3789 tm_memopt_transform_stmt (unsigned int offset,
3790 gcall *stmt,
3791 gimple_stmt_iterator *gsi)
3792 {
3793 tree fn = gimple_call_fn (stmt);
3794 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3795 TREE_OPERAND (fn, 0)
3796 = builtin_decl_explicit ((enum built_in_function)
3797 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3798 + offset));
3799 gimple_call_set_fn (stmt, fn);
3800 gsi_replace (gsi, stmt, true);
3801 dump_tm_memopt_transform (stmt);
3802 }
3803
3804 /* Perform the actual TM memory optimization transformations in the
3805 basic blocks in BLOCKS. */
3806
3807 static void
3808 tm_memopt_transform_blocks (vec<basic_block> blocks)
3809 {
3810 size_t i;
3811 basic_block bb;
3812 gimple_stmt_iterator gsi;
3813
3814 for (i = 0; blocks.iterate (i, &bb); ++i)
3815 {
3816 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3817 {
3818 gimple *stmt = gsi_stmt (gsi);
3819 bitmap read_avail = READ_AVAIL_IN (bb);
3820 bitmap store_avail = STORE_AVAIL_IN (bb);
3821 bitmap store_antic = STORE_ANTIC_OUT (bb);
3822 unsigned int loc;
3823
3824 if (is_tm_simple_load (stmt))
3825 {
3826 gcall *call_stmt = as_a <gcall *> (stmt);
3827 loc = tm_memopt_value_number (stmt, NO_INSERT);
3828 if (store_avail && bitmap_bit_p (store_avail, loc))
3829 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
3830 else if (store_antic && bitmap_bit_p (store_antic, loc))
3831 {
3832 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
3833 bitmap_set_bit (store_avail, loc);
3834 }
3835 else if (read_avail && bitmap_bit_p (read_avail, loc))
3836 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
3837 else
3838 bitmap_set_bit (read_avail, loc);
3839 }
3840 else if (is_tm_simple_store (stmt))
3841 {
3842 gcall *call_stmt = as_a <gcall *> (stmt);
3843 loc = tm_memopt_value_number (stmt, NO_INSERT);
3844 if (store_avail && bitmap_bit_p (store_avail, loc))
3845 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
3846 else
3847 {
3848 if (read_avail && bitmap_bit_p (read_avail, loc))
3849 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
3850 bitmap_set_bit (store_avail, loc);
3851 }
3852 }
3853 }
3854 }
3855 }
3856
3857 /* Return a new set of bitmaps for a BB. */
3858
3859 static struct tm_memopt_bitmaps *
3860 tm_memopt_init_sets (void)
3861 {
3862 struct tm_memopt_bitmaps *b
3863 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3864 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3865 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3866 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3867 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3869 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3870 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3871 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3872 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3873 return b;
3874 }
3875
3876 /* Free sets computed for each BB. */
3877
3878 static void
3879 tm_memopt_free_sets (vec<basic_block> blocks)
3880 {
3881 size_t i;
3882 basic_block bb;
3883
3884 for (i = 0; blocks.iterate (i, &bb); ++i)
3885 bb->aux = NULL;
3886 }
3887
3888 /* Clear the visited bit for every basic block in BLOCKS. */
3889
3890 static void
3891 tm_memopt_clear_visited (vec<basic_block> blocks)
3892 {
3893 size_t i;
3894 basic_block bb;
3895
3896 for (i = 0; blocks.iterate (i, &bb); ++i)
3897 BB_VISITED_P (bb) = false;
3898 }
3899
3900 /* Replace TM load/stores with hints for the runtime. We handle
3901 things like read-after-write, write-after-read, read-after-read,
3902 read-for-write, etc. */
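/* For instance, a sketch only (the _ITM_* names follow the libitm
   ABI, e.g. _ITM_WU4/_ITM_RU4 for plain 4-byte accesses):

     __transaction_atomic {
       x = 1;      // _ITM_WU4 (&x, 1)
       t = x;      // store available: becomes _ITM_RaWU4 (&x)
       x = t + 1;  // store available: becomes _ITM_WaWU4 (&x, ...)
     }

   The runtime may then skip redundant logging for the hinted calls. */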
3903
3904 static unsigned int
3905 execute_tm_memopt (void)
3906 {
3907 struct tm_region *region;
3908 vec<basic_block> bbs;
3909
3910 tm_memopt_value_id = 0;
3911 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
3912
3913 for (region = all_tm_regions; region; region = region->next)
3914 {
3915 /* All the TM stores/loads in the current region. */
3916 size_t i;
3917 basic_block bb;
3918
3919 bitmap_obstack_initialize (&tm_memopt_obstack);
3920
3921 /* Save all BBs for the current region. */
3922 bbs = get_tm_region_blocks (region->entry_block,
3923 region->exit_blocks,
3924 region->irr_blocks,
3925 NULL,
3926 false);
3927
3928 /* Collect all the memory operations. */
3929 for (i = 0; bbs.iterate (i, &bb); ++i)
3930 {
3931 bb->aux = tm_memopt_init_sets ();
3932 tm_memopt_accumulate_memops (bb);
3933 }
3934
3935 /* Solve data flow equations and transform each block accordingly. */
3936 tm_memopt_clear_visited (bbs);
3937 tm_memopt_compute_available (region, bbs);
3938 tm_memopt_clear_visited (bbs);
3939 tm_memopt_compute_antic (region, bbs);
3940 tm_memopt_transform_blocks (bbs);
3941
3942 tm_memopt_free_sets (bbs);
3943 bbs.release ();
3944 bitmap_obstack_release (&tm_memopt_obstack);
3945 tm_memopt_value_numbers->empty ();
3946 }
3947
3948 delete tm_memopt_value_numbers;
3949 tm_memopt_value_numbers = NULL;
3950 return 0;
3951 }
3952
3953 namespace {
3954
3955 const pass_data pass_data_tm_memopt =
3956 {
3957 GIMPLE_PASS, /* type */
3958 "tmmemopt", /* name */
3959 OPTGROUP_NONE, /* optinfo_flags */
3960 TV_TRANS_MEM, /* tv_id */
3961 ( PROP_ssa | PROP_cfg ), /* properties_required */
3962 0, /* properties_provided */
3963 0, /* properties_destroyed */
3964 0, /* todo_flags_start */
3965 0, /* todo_flags_finish */
3966 };
3967
3968 class pass_tm_memopt : public gimple_opt_pass
3969 {
3970 public:
3971 pass_tm_memopt (gcc::context *ctxt)
3972 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
3973 {}
3974
3975 /* opt_pass methods: */
3976 virtual bool gate (function *) { return flag_tm && optimize > 0; }
3977 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
3978
3979 }; // class pass_tm_memopt
3980
3981 } // anon namespace
3982
3983 gimple_opt_pass *
3984 make_pass_tm_memopt (gcc::context *ctxt)
3985 {
3986 return new pass_tm_memopt (ctxt);
3987 }
3988
3989 \f
3990 /* Interprocedural analysis for the creation of transactional clones.
3991 The aim of this pass is to find which functions are referenced in
3992 a non-irrevocable transaction context, and for those over which
3993 we have control (or user directive), create a version of the
3994 function which uses only the transactional interface to reference
3995 protected memories. This analysis proceeds in several steps:
3996
3997 (1) Collect the set of all possible transactional clones:
3998
3999 (a) For each local public function marked tm_callable, push
4000 it onto the tm_callee queue.
4001
4002 (b) For all local functions, scan for calls in transaction blocks.
4003 Push the caller and callee onto the tm_caller and tm_callee
4004 queues. Count the number of callers for each callee.
4005
4006 (c) For each local function on the callee list, assume we will
4007 create a transactional clone. Push *all* calls onto the
4008 callee queues; count the number of clone callers separately
4009 from the number of original callers.
4010
4011 (2) Propagate irrevocable status up the dominator tree:
4012
4013 (a) Any external function on the callee list that is not marked
4014 tm_callable is irrevocable. Push all callers of such onto
4015 a worklist.
4016
4017 (b) For each function on the worklist, mark each block that
4018 contains an irrevocable call. Use the AND operator to
4019 propagate that mark up the dominator tree.
4020
4021 (c) If we reach the entry block for a possible transactional
4022 clone, then the transactional clone is irrevocable, and
4023 we should not create the clone after all. Push all
4024 callers onto the worklist.
4025
4026 (d) Place tm_irrevocable calls at the beginning of the relevant
4027 blocks. A special case is the entry block for the entire
4028 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE so
4029 that the library begins the region in serial mode. Decrement
4030 the call count for all callees in the irrevocable region.
4031
4032 (3) Create the transactional clones:
4033
4034 Any tm_callee that still has a non-zero call count is cloned.
4035 */
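/* For example, a sketch assuming the GNU TM attribute syntax:

     __attribute__((transaction_callable)) void f (void) { ... }
     void g (void) { __transaction_atomic { f (); } }

   Step (1) queues f, both as tm_callable and as a callee inside g's
   transaction.  Assuming f contains nothing irrevocable, step (3)
   creates a transactional clone named _ZGTt1f (see tm_mangle below),
   and the call inside g's transaction is redirected to it. */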
4036
4037 /* This structure is stored in the AUX field of each cgraph_node. */
4038 struct tm_ipa_cg_data
4039 {
4040 /* The clone of the function that got created. */
4041 struct cgraph_node *clone;
4042
4043 /* The tm regions in the normal function. */
4044 struct tm_region *all_tm_regions;
4045
4046 /* The blocks of the normal/clone functions that contain irrevocable
4047 calls, or blocks that are post-dominated by irrevocable calls. */
4048 bitmap irrevocable_blocks_normal;
4049 bitmap irrevocable_blocks_clone;
4050
4051 /* The blocks of the normal function that are involved in transactions. */
4052 bitmap transaction_blocks_normal;
4053
4054 /* The number of callers to the transactional clone of this function
4055 from normal and transactional clones respectively. */
4056 unsigned tm_callers_normal;
4057 unsigned tm_callers_clone;
4058
4059 /* True if all calls to this function's transactional clone
4060 are irrevocable. Also automatically true if the function
4061 has no transactional clone. */
4062 bool is_irrevocable;
4063
4064 /* Flags indicating the presence of this function in various queues. */
4065 bool in_callee_queue;
4066 bool in_worklist;
4067
4068 /* Flags indicating the kind of scan desired while in the worklist. */
4069 bool want_irr_scan_normal;
4070 };
4071
4072 typedef vec<cgraph_node *> cgraph_node_queue;
4073
4074 /* Return the ipa data associated with NODE, allocating zeroed memory
4075 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4076 and set *NODE accordingly. */
4077
4078 static struct tm_ipa_cg_data *
4079 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4080 {
4081 struct tm_ipa_cg_data *d;
4082
4083 if (traverse_aliases && (*node)->alias)
4084 *node = (*node)->get_alias_target ();
4085
4086 d = (struct tm_ipa_cg_data *) (*node)->aux;
4087
4088 if (d == NULL)
4089 {
4090 d = (struct tm_ipa_cg_data *)
4091 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4092 (*node)->aux = (void *) d;
4093 memset (d, 0, sizeof (*d));
4094 }
4095
4096 return d;
4097 }
4098
4099 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4100 it is already present. */
4101
4102 static void
4103 maybe_push_queue (struct cgraph_node *node,
4104 cgraph_node_queue *queue_p, bool *in_queue_p)
4105 {
4106 if (!*in_queue_p)
4107 {
4108 *in_queue_p = true;
4109 queue_p->safe_push (node);
4110 }
4111 }
4112
4113 /* Duplicate the basic blocks in QUEUE for use in the uninstrumented
4114 code path. QUEUE holds the basic blocks inside the transaction
4115 represented in REGION.
4116
4117 Later in split_code_paths() we will add the conditional to choose
4118 between the two alternatives. */
4119
4120 static void
4121 ipa_uninstrument_transaction (struct tm_region *region,
4122 vec<basic_block> queue)
4123 {
4124 gimple *transaction = region->transaction_stmt;
4125 basic_block transaction_bb = gimple_bb (transaction);
4126 int n = queue.length ();
4127 basic_block *new_bbs = XNEWVEC (basic_block, n);
4128
4129 copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb,
4130 true);
4131 edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED);
4132 add_phi_args_after_copy (new_bbs, n, e);
4133
4134 // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it.
4135 // a) EDGE_FALLTHRU into the transaction
4136 // b) EDGE_TM_ABORT out of the transaction
4137 // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks.
4138
4139 free (new_bbs);
4140 }
4141
4142 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4143 Queue all callees within block BB. */
4144
4145 static void
4146 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4147 basic_block bb, bool for_clone)
4148 {
4149 gimple_stmt_iterator gsi;
4150
4151 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4152 {
4153 gimple *stmt = gsi_stmt (gsi);
4154 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4155 {
4156 tree fndecl = gimple_call_fndecl (stmt);
4157 if (fndecl)
4158 {
4159 struct tm_ipa_cg_data *d;
4160 unsigned *pcallers;
4161 struct cgraph_node *node;
4162
4163 if (is_tm_ending_fndecl (fndecl))
4164 continue;
4165 if (find_tm_replacement_function (fndecl))
4166 continue;
4167
4168 node = cgraph_node::get (fndecl);
4169 gcc_assert (node != NULL);
4170 d = get_cg_data (&node, true);
4171
4172 pcallers = (for_clone ? &d->tm_callers_clone
4173 : &d->tm_callers_normal);
4174 *pcallers += 1;
4175
4176 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4177 }
4178 }
4179 }
4180 }
4181
4182 /* Scan all calls in NODE that are within a transaction region,
4183 and push the resulting nodes into the callee queue. */
4184
4185 static void
4186 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4187 cgraph_node_queue *callees_p)
4188 {
4189 struct tm_region *r;
4190
4191 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4192 d->all_tm_regions = all_tm_regions;
4193
4194 for (r = all_tm_regions; r; r = r->next)
4195 {
4196 vec<basic_block> bbs;
4197 basic_block bb;
4198 unsigned i;
4199
4200 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4201 d->transaction_blocks_normal, false);
4202
4203 // Generate the uninstrumented code path for this transaction.
4204 ipa_uninstrument_transaction (r, bbs);
4205
4206 FOR_EACH_VEC_ELT (bbs, i, bb)
4207 ipa_tm_scan_calls_block (callees_p, bb, false);
4208
4209 bbs.release ();
4210 }
4211
4212 // ??? copy_bbs should maintain cgraph edges for the blocks as it is
4213 // copying them, rather than forcing us to do this externally.
4214 cgraph_edge::rebuild_edges ();
4215
4216 // ??? In ipa_uninstrument_transaction we don't try to update dominators
4217 // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects.
4218 // Instead, just release dominators here so update_ssa recomputes them.
4219 free_dominance_info (CDI_DOMINATORS);
4220
4221 // When building the uninstrumented code path, copy_bbs will have invoked
4222 // create_new_def_for starting an "ssa update context". There is only one
4223 // instance of this context, so resolve ssa updates before moving on to
4224 // the next function.
4225 update_ssa (TODO_update_ssa);
4226 }
4227
4228 /* Scan all calls in NODE as if this is the transactional clone,
4229 and push the destinations into the callee queue. */
4230
4231 static void
4232 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4233 cgraph_node_queue *callees_p)
4234 {
4235 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4236 basic_block bb;
4237
4238 FOR_EACH_BB_FN (bb, fn)
4239 ipa_tm_scan_calls_block (callees_p, bb, true);
4240 }
4241
4242 /* The function NODE has been detected to be irrevocable. Push all
4243 of its callers onto WORKLIST for the purpose of re-scanning them. */
4244
4245 static void
4246 ipa_tm_note_irrevocable (struct cgraph_node *node,
4247 cgraph_node_queue *worklist_p)
4248 {
4249 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4250 struct cgraph_edge *e;
4251
4252 d->is_irrevocable = true;
4253
4254 for (e = node->callers; e ; e = e->next_caller)
4255 {
4256 basic_block bb;
4257 struct cgraph_node *caller;
4258
4259 /* Don't examine recursive calls. */
4260 if (e->caller == node)
4261 continue;
4262 /* Even if we think we can go irrevocable, believe the user
4263 above all. */
4264 if (is_tm_safe_or_pure (e->caller->decl))
4265 continue;
4266
4267 caller = e->caller;
4268 d = get_cg_data (&caller, true);
4269
4270 /* Check if the call is within a transactional region of the
4271 caller. If so, schedule the caller for normal re-scan as well. */
4272 bb = gimple_bb (e->call_stmt);
4273 gcc_assert (bb != NULL);
4274 if (d->transaction_blocks_normal
4275 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4276 d->want_irr_scan_normal = true;
4277
4278 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4279 }
4280 }
4281
4282 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4283 within the block is irrevocable. */
4284
4285 static bool
4286 ipa_tm_scan_irr_block (basic_block bb)
4287 {
4288 gimple_stmt_iterator gsi;
4289 tree fn;
4290
4291 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4292 {
4293 gimple *stmt = gsi_stmt (gsi);
4294 switch (gimple_code (stmt))
4295 {
4296 case GIMPLE_ASSIGN:
4297 if (gimple_assign_single_p (stmt))
4298 {
4299 tree lhs = gimple_assign_lhs (stmt);
4300 tree rhs = gimple_assign_rhs1 (stmt);
4301 if (volatile_var_p (lhs) || volatile_var_p (rhs))
4302 return true;
4303 }
4304 break;
4305
4306 case GIMPLE_CALL:
4307 {
4308 tree lhs = gimple_call_lhs (stmt);
4309 if (lhs && volatile_var_p (lhs))
4310 return true;
4311
4312 if (is_tm_pure_call (stmt))
4313 break;
4314
4315 fn = gimple_call_fn (stmt);
4316
4317 /* Functions with the attribute are by definition irrevocable. */
4318 if (is_tm_irrevocable (fn))
4319 return true;
4320
4321 /* For direct function calls, go ahead and check for replacement
4322 functions, or transitive irrevocable functions. For indirect
4323 functions, we'll ask the runtime. */
4324 if (TREE_CODE (fn) == ADDR_EXPR)
4325 {
4326 struct tm_ipa_cg_data *d;
4327 struct cgraph_node *node;
4328
4329 fn = TREE_OPERAND (fn, 0);
4330 if (is_tm_ending_fndecl (fn))
4331 break;
4332 if (find_tm_replacement_function (fn))
4333 break;
4334
4335 node = cgraph_node::get (fn);
4336 d = get_cg_data (&node, true);
4337
4338 /* Return true if irrevocable, but above all, believe
4339 the user. */
4340 if (d->is_irrevocable
4341 && !is_tm_safe_or_pure (fn))
4342 return true;
4343 }
4344 break;
4345 }
4346
4347 case GIMPLE_ASM:
4348 /* ??? The Approved Method of indicating that an inline
4349 assembly statement is not relevant to the transaction
4350 is to wrap it in a __tm_waiver block. This is not
4351 yet implemented, so we can't check for it. */
4352 if (is_tm_safe (current_function_decl))
4353 {
4354 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4355 SET_EXPR_LOCATION (t, gimple_location (stmt));
4356 error ("%Kasm not allowed in %<transaction_safe%> function", t);
4357 }
4358 return true;
4359
4360 default:
4361 break;
4362 }
4363 }
4364
4365 return false;
4366 }
4367
4368 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4369 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4370 scanning past OLD_IRR or EXIT_BLOCKS. */
4371
4372 static bool
4373 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4374 bitmap old_irr, bitmap exit_blocks)
4375 {
4376 bool any_new_irr = false;
4377 edge e;
4378 edge_iterator ei;
4379 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4380
4381 do
4382 {
4383 basic_block bb = pqueue->pop ();
4384
4385 /* Don't re-scan blocks we know already are irrevocable. */
4386 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4387 continue;
4388
4389 if (ipa_tm_scan_irr_block (bb))
4390 {
4391 bitmap_set_bit (new_irr, bb->index);
4392 any_new_irr = true;
4393 }
4394 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4395 {
4396 FOR_EACH_EDGE (e, ei, bb->succs)
4397 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4398 {
4399 bitmap_set_bit (visited_blocks, e->dest->index);
4400 pqueue->safe_push (e->dest);
4401 }
4402 }
4403 }
4404 while (!pqueue->is_empty ());
4405
4406 BITMAP_FREE (visited_blocks);
4407
4408 return any_new_irr;
4409 }
4410
4411 /* Propagate the irrevocable property both up and down the dominator tree.
4412 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4413 are the exit blocks of the TM region; OLD_IRR is the result of a previous
4414 scan of the dominator tree which has been fully propagated; NEW_IRR is
4415 the set of new blocks gaining the irrevocable property during this scan. */
4416
4417 static void
4418 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4419 bitmap old_irr, bitmap exit_blocks)
4420 {
4421 vec<basic_block> bbs;
4422 bitmap all_region_blocks;
4423
4424 /* If this block is in the old set, no need to rescan. */
4425 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4426 return;
4427
4428 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4429 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4430 all_region_blocks, false);
4431 do
4432 {
4433 basic_block bb = bbs.pop ();
4434 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4435 bool all_son_irr = false;
4436 edge_iterator ei;
4437 edge e;
4438
4439 /* Propagate up. If all my successors are irrevocable, I am too,
4440 but there must be at least one successor that is. */
4441 if (!this_irr)
4442 {
4443 FOR_EACH_EDGE (e, ei, bb->succs)
4444 {
4445 if (!bitmap_bit_p (new_irr, e->dest->index))
4446 {
4447 all_son_irr = false;
4448 break;
4449 }
4450 else
4451 all_son_irr = true;
4452 }
4453 if (all_son_irr)
4454 {
4455 /* Add block to new_irr if it hasn't already been processed. */
4456 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4457 {
4458 bitmap_set_bit (new_irr, bb->index);
4459 this_irr = true;
4460 }
4461 }
4462 }
4463
4464 /* Propagate down to everyone we immediately dominate. */
4465 if (this_irr)
4466 {
4467 basic_block son;
4468 for (son = first_dom_son (CDI_DOMINATORS, bb);
4469 son;
4470 son = next_dom_son (CDI_DOMINATORS, son))
4471 {
4472 /* Make sure block is actually in a TM region, and it
4473 isn't already in old_irr. */
4474 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4475 && bitmap_bit_p (all_region_blocks, son->index))
4476 bitmap_set_bit (new_irr, son->index);
4477 }
4478 }
4479 }
4480 while (!bbs.is_empty ());
4481
4482 BITMAP_FREE (all_region_blocks);
4483 bbs.release ();
4484 }
4485
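/* Decrement the recorded number of callers of the transactional clone
   of each function called from BB.  FOR_CLONE selects the clone-caller
   counter rather than the normal-caller counter, mirroring the counts
   taken in ipa_tm_scan_calls_block above. */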
4486 static void
4487 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4488 {
4489 gimple_stmt_iterator gsi;
4490
4491 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4492 {
4493 gimple *stmt = gsi_stmt (gsi);
4494 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4495 {
4496 tree fndecl = gimple_call_fndecl (stmt);
4497 if (fndecl)
4498 {
4499 struct tm_ipa_cg_data *d;
4500 unsigned *pcallers;
4501 struct cgraph_node *tnode;
4502
4503 if (is_tm_ending_fndecl (fndecl))
4504 continue;
4505 if (find_tm_replacement_function (fndecl))
4506 continue;
4507
4508 tnode = cgraph_node::get (fndecl);
4509 d = get_cg_data (&tnode, true);
4510
4511 pcallers = (for_clone ? &d->tm_callers_clone
4512 : &d->tm_callers_normal);
4513
4514 gcc_assert (*pcallers > 0);
4515 *pcallers -= 1;
4516 }
4517 }
4518 }
4519 }
4520
4521 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4522 as well as other irrevocable actions such as inline assembly. Mark all
4523 such blocks as irrevocable and decrement the number of calls to
4524 transactional clones. Return true if, for the transactional clone, the
4525 entire function is irrevocable. */
4526
4527 static bool
4528 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4529 {
4530 struct tm_ipa_cg_data *d;
4531 bitmap new_irr, old_irr;
4532 bool ret = false;
4533
4534 /* Builtin operators (operator new, and such). */
4535 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4536 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4537 return false;
4538
4539 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4540 calculate_dominance_info (CDI_DOMINATORS);
4541
4542 d = get_cg_data (&node, true);
4543 auto_vec<basic_block, 10> queue;
4544 new_irr = BITMAP_ALLOC (&tm_obstack);
4545
4546 /* Scan each tm region, propagating irrevocable status through the tree. */
4547 if (for_clone)
4548 {
4549 old_irr = d->irrevocable_blocks_clone;
4550 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4551 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4552 {
4553 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4554 new_irr,
4555 old_irr, NULL);
4556 ret = bitmap_bit_p (new_irr,
4557 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4558 }
4559 }
4560 else
4561 {
4562 struct tm_region *region;
4563
4564 old_irr = d->irrevocable_blocks_normal;
4565 for (region = d->all_tm_regions; region; region = region->next)
4566 {
4567 queue.quick_push (region->entry_block);
4568 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4569 region->exit_blocks))
4570 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4571 region->exit_blocks);
4572 }
4573 }
4574
4575 /* If we found any new irrevocable blocks, reduce the call count for
4576 transactional clones within the irrevocable blocks. Save the new
4577 set of irrevocable blocks for next time. */
4578 if (!bitmap_empty_p (new_irr))
4579 {
4580 bitmap_iterator bmi;
4581 unsigned i;
4582
4583 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4584 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4585 for_clone);
4586
4587 if (old_irr)
4588 {
4589 bitmap_ior_into (old_irr, new_irr);
4590 BITMAP_FREE (new_irr);
4591 }
4592 else if (for_clone)
4593 d->irrevocable_blocks_clone = new_irr;
4594 else
4595 d->irrevocable_blocks_normal = new_irr;
4596
4597 if (dump_file && new_irr)
4598 {
4599 const char *dname;
4600 bitmap_iterator bmi;
4601 unsigned i;
4602
4603 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4604 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4605 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4606 }
4607 }
4608 else
4609 BITMAP_FREE (new_irr);
4610
4611 pop_cfun ();
4612
4613 return ret;
4614 }
4615
4616 /* Return true if, for the transactional clone of NODE, any call
4617 may enter irrevocable mode. */
4618
4619 static bool
4620 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4621 {
4622 struct tm_ipa_cg_data *d;
4623 tree decl;
4624 unsigned flags;
4625
4626 d = get_cg_data (&node, true);
4627 decl = node->decl;
4628 flags = flags_from_decl_or_type (decl);
4629
4630 /* Handle some TM builtins. Ordinarily these aren't actually generated
4631 at this point, but handling these functions when written in by the
4632 user makes it easier to build unit tests. */
4633 if (flags & ECF_TM_BUILTIN)
4634 return false;
4635
4636 /* Filter out all functions that are marked. */
4637 if (flags & ECF_TM_PURE)
4638 return false;
4639 if (is_tm_safe (decl))
4640 return false;
4641 if (is_tm_irrevocable (decl))
4642 return true;
4643 if (is_tm_callable (decl))
4644 return true;
4645 if (find_tm_replacement_function (decl))
4646 return true;
4647
4648 /* If we aren't seeing the final version of the function we don't
4649 know what it will contain at runtime. */
4650 if (node->get_availability () < AVAIL_AVAILABLE)
4651 return true;
4652
4653 /* If the function must go irrevocable, then of course true. */
4654 if (d->is_irrevocable)
4655 return true;
4656
4657 /* If there are any blocks marked irrevocable, then the function
4658 as a whole may enter irrevocable. */
4659 if (d->irrevocable_blocks_clone)
4660 return true;
4661
4662 /* We may have previously marked this function as tm_may_enter_irr;
4663 see pass_diagnose_tm_blocks. */
4664 if (node->local.tm_may_enter_irr)
4665 return true;
4666
4667 /* Recurse on the main body for aliases. In general, this will
4668 result in one of the bits above being set so that we will not
4669 have to recurse next time. */
4670 if (node->alias)
4671 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
4672
4673 /* What remains are unmarked local functions with nothing that forces
4674 them to go irrevocable. */
4675 return false;
4676 }
4677
4678 /* Diagnose calls from transaction_safe functions to unmarked
4679 functions that are determined not to be safe. */
4680
4681 static void
4682 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4683 {
4684 struct cgraph_edge *e;
4685
4686 for (e = node->callees; e ; e = e->next_callee)
4687 if (!is_tm_callable (e->callee->decl)
4688 && e->callee->local.tm_may_enter_irr)
4689 error_at (gimple_location (e->call_stmt),
4690 "unsafe function call %qD within "
4691 "%<transaction_safe%> function", e->callee->decl);
4692 }
4693
4694 /* Diagnose calls from atomic transactions to unmarked functions
4695 that are determined not to be safe. */
4696
4697 static void
4698 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4699 struct tm_region *all_tm_regions)
4700 {
4701 struct tm_region *r;
4702
4703 for (r = all_tm_regions; r ; r = r->next)
4704 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4705 & GTMA_IS_RELAXED)
4706 {
4707 /* Atomic transactions can be nested inside relaxed. */
4708 if (r->inner)
4709 ipa_tm_diagnose_transaction (node, r->inner);
4710 }
4711 else
4712 {
4713 vec<basic_block> bbs;
4714 gimple_stmt_iterator gsi;
4715 basic_block bb;
4716 size_t i;
4717
4718 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4719 r->irr_blocks, NULL, false);
4720
4721 for (i = 0; bbs.iterate (i, &bb); ++i)
4722 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4723 {
4724 gimple *stmt = gsi_stmt (gsi);
4725 tree fndecl;
4726
4727 if (gimple_code (stmt) == GIMPLE_ASM)
4728 {
4729 error_at (gimple_location (stmt),
4730 "asm not allowed in atomic transaction");
4731 continue;
4732 }
4733
4734 if (!is_gimple_call (stmt))
4735 continue;
4736 fndecl = gimple_call_fndecl (stmt);
4737
4738 /* Indirect function calls have been diagnosed already. */
4739 if (!fndecl)
4740 continue;
4741
4742 /* Stop at the end of the transaction. */
4743 if (is_tm_ending_fndecl (fndecl))
4744 {
4745 if (bitmap_bit_p (r->exit_blocks, bb->index))
4746 break;
4747 continue;
4748 }
4749
4750 /* Marked functions have been diagnosed already. */
4751 if (is_tm_pure_call (stmt))
4752 continue;
4753 if (is_tm_callable (fndecl))
4754 continue;
4755
4756 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4757 error_at (gimple_location (stmt),
4758 "unsafe function call %qD within "
4759 "atomic transaction", fndecl);
4760 }
4761
4762 bbs.release ();
4763 }
4764 }
4765
4766 /* Return a transactional mangled name for the assembler name
4767 OLD_ASM_ID. The returned value is an identifier obtained via
4768 get_identifier, so the caller need not free it. */
4769
4770 static tree
4771 tm_mangle (tree old_asm_id)
4772 {
4773 const char *old_asm_name;
4774 char *tm_name;
4775 void *alloc = NULL;
4776 struct demangle_component *dc;
4777 tree new_asm_id;
4778
4779 /* Determine if the symbol is already a valid C++ mangled name. Do this
4780 even for C, which might be interfacing with C++ code via appropriately
4781 ugly identifiers. */
4782 /* ??? We could probably do just as well checking for "_Z" and be done. */
4783 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4784 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4785
4786 if (dc == NULL)
4787 {
4788 char length[8];
4789
4790 do_unencoded:
4791 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4792 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4793 }
4794 else
4795 {
4796 old_asm_name += 2; /* Skip _Z */
4797
4798 switch (dc->type)
4799 {
4800 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4801 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4802 /* Don't play silly games, you! */
4803 goto do_unencoded;
4804
4805 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4806 /* I'd really like to know if we can ever be passed one of
4807 these from the C++ front end. The Logical Thing would
4808 seem that hidden-alias should be outer-most, so that we
4809 get hidden-alias of a transaction-clone and not vice-versa. */
4810 old_asm_name += 2;
4811 break;
4812
4813 default:
4814 break;
4815 }
4816
4817 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4818 }
4819 free (alloc);
4820
4821 new_asm_id = get_identifier (tm_name);
4822 free (tm_name);
4823
4824 return new_asm_id;
4825 }
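/* Two illustrative results, derived from the code above:

     "foo"      ->  "_ZGTt3foo"   (plain C identifier, length-encoded)
     "_Z3foov"  ->  "_ZGTt3foov"  (valid C++ mangled name, "_Z" skipped)

   "_ZGTt" is the transaction-clone prefix used by the transactional
   memory extension to the C++ name mangling. */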
4826
4827 static inline void
4828 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4829 {
4830 node->mark_force_output ();
4831 node->analyzed = true;
4832 }
4833
4834 static inline void
4835 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4836 {
4837 node->forced_by_abi = true;
4838 node->analyzed = true;
4839 }
4840
4841 /* Callback data for ipa_tm_create_version_alias. */
4842 struct create_version_alias_info
4843 {
4844 struct cgraph_node *old_node;
4845 tree new_decl;
4846 };
4847
4848 /* A subroutine of ipa_tm_create_version, called via
4849 call_for_symbol_thunks_and_aliases. Create new tm clones for each of
4850 the existing aliases. */
4851 static bool
4852 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4853 {
4854 struct create_version_alias_info *info
4855 = (struct create_version_alias_info *)data;
4856 tree old_decl, new_decl, tm_name;
4857 struct cgraph_node *new_node;
4858
4859 if (!node->cpp_implicit_alias)
4860 return false;
4861
4862 old_decl = node->decl;
4863 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4864 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4865 TREE_CODE (old_decl), tm_name,
4866 TREE_TYPE (old_decl));
4867
4868 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4869 SET_DECL_RTL (new_decl, NULL);
4870
4871 /* Based loosely on C++'s make_alias_for(). */
4872 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4873 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4874 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4875 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4876 DECL_EXTERNAL (new_decl) = 0;
4877 DECL_ARTIFICIAL (new_decl) = 1;
4878 TREE_ADDRESSABLE (new_decl) = 1;
4879 TREE_USED (new_decl) = 1;
4880 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4881
4882 /* Perform the same remapping to the comdat group. */
4883 if (DECL_ONE_ONLY (new_decl))
4884 varpool_node::get (new_decl)->set_comdat_group
4885 (tm_mangle (decl_comdat_group_id (old_decl)));
4886
4887 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4888 new_node->tm_clone = true;
4889 new_node->externally_visible = info->old_node->externally_visible;
4890 new_node->no_reorder = info->old_node->no_reorder;
4891 /* ?? Do not traverse aliases here. */
4892 get_cg_data (&node, false)->clone = new_node;
4893
4894 record_tm_clone_pair (old_decl, new_decl);
4895
4896 if (info->old_node->force_output
4897 || info->old_node->ref_list.first_referring ())
4898 ipa_tm_mark_force_output_node (new_node);
4899 if (info->old_node->forced_by_abi)
4900 ipa_tm_mark_forced_by_abi_node (new_node);
4901 return false;
4902 }
4903
4904 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4905 appropriate for the transactional clone. */
4906
4907 static void
4908 ipa_tm_create_version (struct cgraph_node *old_node)
4909 {
4910 tree new_decl, old_decl, tm_name;
4911 struct cgraph_node *new_node;
4912
4913 old_decl = old_node->decl;
4914 new_decl = copy_node (old_decl);
4915
4916 /* DECL_ASSEMBLER_NAME needs to be set before we call
4917 create_version_clone below, because cgraph_node will
4918 fill the assembler_name_hash. */
4919 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4920 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4921 SET_DECL_RTL (new_decl, NULL);
4922 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4923
4924 /* Perform the same remapping to the comdat group. */
4925 if (DECL_ONE_ONLY (new_decl))
4926 varpool_node::get (new_decl)->set_comdat_group
4927 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
4928
4929 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
4930 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
4931 new_node->local.local = false;
4932 new_node->externally_visible = old_node->externally_visible;
4933 new_node->lowered = true;
4934 new_node->tm_clone = 1;
4935 if (!old_node->implicit_section)
4936 new_node->set_section (old_node->get_section ());
4937 get_cg_data (&old_node, true)->clone = new_node;
4938
4939 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
4940 {
4941 /* Remap extern inline to static inline. */
4942 /* ??? Is it worth trying to use make_decl_one_only? */
4943 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
4944 {
4945 DECL_EXTERNAL (new_decl) = 0;
4946 TREE_PUBLIC (new_decl) = 0;
4947 DECL_WEAK (new_decl) = 0;
4948 }
4949
4950 tree_function_versioning (old_decl, new_decl,
4951 NULL, false, NULL,
4952 false, NULL, NULL);
4953 }
4954
4955 record_tm_clone_pair (old_decl, new_decl);
4956
4957 symtab->call_cgraph_insertion_hooks (new_node);
4958 if (old_node->force_output
4959 || old_node->ref_list.first_referring ())
4960 ipa_tm_mark_force_output_node (new_node);
4961 if (old_node->forced_by_abi)
4962 ipa_tm_mark_forced_by_abi_node (new_node);
4963
4964 /* Do the same thing, but for any aliases of the original node. */
4965 {
4966 struct create_version_alias_info data;
4967 data.old_node = old_node;
4968 data.new_decl = new_decl;
4969 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
4970 &data, true);
4971 }
4972 }
4973
4974 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
4975
4976 static void
4977 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
4978 basic_block bb)
4979 {
4980 gimple_stmt_iterator gsi;
4981 gcall *g;
4982
4983 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
4984
4985 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
4986 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
4987
4988 split_block_after_labels (bb);
4989 gsi = gsi_after_labels (bb);
4990 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
4991
4992 node->create_edge (cgraph_node::get_create
4993 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
4994 g, 0,
4995 compute_call_stmt_bb_frequency (node->decl,
4996 gimple_bb (g)));
4997 }
4998
4999 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
5000
5001 static bool
5002 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5003 struct tm_region *region,
5004 gimple_stmt_iterator *gsi, gcall *stmt)
5005 {
5006 tree gettm_fn, ret, old_fn, callfn;
5007 gcall *g;
5008 gassign *g2;
5009 bool safe;
5010
5011 old_fn = gimple_call_fn (stmt);
5012
5013 if (TREE_CODE (old_fn) == ADDR_EXPR)
5014 {
5015 tree fndecl = TREE_OPERAND (old_fn, 0);
5016 tree clone = get_tm_clone_pair (fndecl);
5017
5018 /* By transforming the call into a TM_GETTMCLONE, we are
5019 technically taking the address of the original function and
5020 its clone. Explain this so inlining will know this function
5021 is needed. */
5022 cgraph_node::get (fndecl)->mark_address_taken ();
5023 if (clone)
5024 cgraph_node::get (clone)->mark_address_taken ();
5025 }
5026
5027 safe = is_tm_safe (TREE_TYPE (old_fn));
5028 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5029 : BUILT_IN_TM_GETTMCLONE_IRR);
5030 ret = create_tmp_var (ptr_type_node);
5031
5032 if (!safe)
5033 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5034
5035 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5036 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5037 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5038
5039 g = gimple_build_call (gettm_fn, 1, old_fn);
5040 ret = make_ssa_name (ret, g);
5041 gimple_call_set_lhs (g, ret);
5042
5043 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5044
5045 node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
5046 compute_call_stmt_bb_frequency (node->decl,
5047 gimple_bb (g)));
5048
5049 /* Cast return value from tm_gettmclone* into appropriate function
5050 pointer. */
5051 callfn = create_tmp_var (TREE_TYPE (old_fn));
5052 g2 = gimple_build_assign (callfn,
5053 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5054 callfn = make_ssa_name (callfn, g2);
5055 gimple_assign_set_lhs (g2, callfn);
5056 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5057
5058 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5059 which we would have derived from the decl. Failure to save
5060 this bit means we might have to split the basic block. */
5061 if (gimple_call_nothrow_p (stmt))
5062 gimple_call_set_nothrow (stmt, true);
5063
5064 gimple_call_set_fn (stmt, callfn);
5065
5066 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5067 for a call statement. Fix it. */
5068 {
5069 tree lhs = gimple_call_lhs (stmt);
5070 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5071 if (lhs
5072 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5073 {
5074 tree temp;
5075
5076 temp = create_tmp_reg (rettype);
5077 gimple_call_set_lhs (stmt, temp);
5078
5079 g2 = gimple_build_assign (lhs,
5080 fold_build1 (VIEW_CONVERT_EXPR,
5081 TREE_TYPE (lhs), temp));
5082 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5083 }
5084 }
5085
5086 update_stmt (stmt);
5087 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5088 if (e && e->indirect_info)
5089 e->indirect_info->polymorphic = false;
5090
5091 return true;
5092 }
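/* A sketch of the resulting code for an indirect call inside a
   transaction; the libitm entry points _ITM_getTMCloneSafe and
   _ITM_getTMCloneOrIrrevocable correspond to the two builtins chosen
   above:

     before:  lhs = (*fnptr) (args...);
     after:   tmp = _ITM_getTMCloneOrIrrevocable (fnptr);
	      callfn = (fntype) tmp;
	      lhs = (*callfn) (args...);
*/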
5093
5094 /* Helper function for ipa_tm_transform_calls*. Given a call
5095 statement in GSI which resides inside transaction REGION, redirect
5096 the call to either its wrapper function, or its clone. */
5097
5098 static void
5099 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5100 struct tm_region *region,
5101 gimple_stmt_iterator *gsi,
5102 bool *need_ssa_rename_p)
5103 {
5104 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
5105 struct cgraph_node *new_node;
5106 struct cgraph_edge *e = node->get_edge (stmt);
5107 tree fndecl = gimple_call_fndecl (stmt);
5108
5109 /* For indirect calls, pass the address through the runtime. */
5110 if (fndecl == NULL)
5111 {
5112 *need_ssa_rename_p |=
5113 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5114 return;
5115 }
5116
5117 /* Handle some TM builtins. Ordinarily these aren't actually generated
5118 at this point, but handling these functions when written in by the
5119 user makes it easier to build unit tests. */
5120 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5121 return;
5122
5123 /* Fixup recursive calls inside clones. */
5124 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5125 for recursion but not update the call statements themselves? */
5126 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5127 {
5128 gimple_call_set_fndecl (stmt, current_function_decl);
5129 return;
5130 }
5131
5132 /* If there is a replacement, use it. */
5133 fndecl = find_tm_replacement_function (fndecl);
5134 if (fndecl)
5135 {
5136 new_node = cgraph_node::get_create (fndecl);
5137
5138 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5139
5140 We can't do this earlier in record_tm_replacement because
5141 cgraph_remove_unreachable_nodes is called before we inject
5142 references to the node. Further, we can't do this in some
5143 nice central place in ipa_tm_execute because we don't have
5144 the exact list of wrapper functions that would be used.
5145 Marking more wrappers than necessary results in the creation
5146 of unnecessary cgraph_nodes, which can cause some of the
5147 other IPA passes to crash.
5148
5149 We do need to mark these nodes so that we get the proper
5150 result in expand_call_tm. */
5151 /* ??? This seems broken. How is it that we're marking the
5152 CALLEE as may_enter_irr? Surely we should be marking the
5153 CALLER. Also note that find_tm_replacement_function also
5154 contains mappings into the TM runtime, e.g. memcpy. These
5155 we know won't go irrevocable. */
5156 new_node->local.tm_may_enter_irr = 1;
5157 }
5158 else
5159 {
5160 struct tm_ipa_cg_data *d;
5161 struct cgraph_node *tnode = e->callee;
5162
5163 d = get_cg_data (&tnode, true);
5164 new_node = d->clone;
5165
5166 /* As we've already skipped pure calls and appropriate builtins,
5167 and we've already marked irrevocable blocks, if we can't come
5168 up with a static replacement, then ask the runtime. */
5169 if (new_node == NULL)
5170 {
5171 *need_ssa_rename_p |=
5172 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5173 return;
5174 }
5175
5176 fndecl = new_node->decl;
5177 }
5178
5179 e->redirect_callee (new_node);
5180 gimple_call_set_fndecl (stmt, fndecl);
5181 }
5182
5183 /* Helper function for ipa_tm_transform_calls. For a given BB,
5184 install calls to tm_irrevocable when IRR_BLOCKS are reached, and
5185 redirect other calls to the generated transactional clone. */
5186
5187 static bool
5188 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5189 basic_block bb, bitmap irr_blocks)
5190 {
5191 gimple_stmt_iterator gsi;
5192 bool need_ssa_rename = false;
5193
5194 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5195 {
5196 ipa_tm_insert_irr_call (node, region, bb);
5197 return true;
5198 }
5199
5200 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5201 {
5202 gimple *stmt = gsi_stmt (gsi);
5203
5204 if (!is_gimple_call (stmt))
5205 continue;
5206 if (is_tm_pure_call (stmt))
5207 continue;
5208
5209 /* Redirect edges to the appropriate replacement or clone. */
5210 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5211 }
5212
5213 return need_ssa_rename;
5214 }
5215
5216 /* Walk the CFG for REGION, beginning at BB. Install calls to
5217 tm_irrevocable when IRR_BLOCKS are reached, and redirect other calls
5218 to the generated transactional clone. */
5219
5220 static bool
5221 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5222 basic_block bb, bitmap irr_blocks)
5223 {
5224 bool need_ssa_rename = false;
5225 edge e;
5226 edge_iterator ei;
5227 auto_vec<basic_block> queue;
5228 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5229
5230 queue.safe_push (bb);
5231 do
5232 {
5233 bb = queue.pop ();
5234
5235 need_ssa_rename |=
5236 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5237
5238 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5239 continue;
5240
5241 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5242 continue;
5243
5244 FOR_EACH_EDGE (e, ei, bb->succs)
5245 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5246 {
5247 bitmap_set_bit (visited_blocks, e->dest->index);
5248 queue.safe_push (e->dest);
5249 }
5250 }
5251 while (!queue.is_empty ());
5252
5253 BITMAP_FREE (visited_blocks);
5254
5255 return need_ssa_rename;
5256 }
5257
5258 /* Transform the calls within the TM regions within NODE. */
5259
5260 static void
5261 ipa_tm_transform_transaction (struct cgraph_node *node)
5262 {
5263 struct tm_ipa_cg_data *d;
5264 struct tm_region *region;
5265 bool need_ssa_rename = false;
5266
5267 d = get_cg_data (&node, true);
5268
5269 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5270 calculate_dominance_info (CDI_DOMINATORS);
5271
5272 for (region = d->all_tm_regions; region; region = region->next)
5273 {
5274 /* If we're sure to go irrevocable, don't transform anything. */
5275 if (d->irrevocable_blocks_normal
5276 && bitmap_bit_p (d->irrevocable_blocks_normal,
5277 region->entry_block->index))
5278 {
5279 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5280 | GTMA_MAY_ENTER_IRREVOCABLE
5281 | GTMA_HAS_NO_INSTRUMENTATION);
5282 continue;
5283 }
5284
5285 need_ssa_rename |=
5286 ipa_tm_transform_calls (node, region, region->entry_block,
5287 d->irrevocable_blocks_normal);
5288 }
5289
5290 if (need_ssa_rename)
5291 update_ssa (TODO_update_ssa_only_virtuals);
5292
5293 pop_cfun ();
5294 }
5295
5296 /* Transform the calls within the transactional clone of NODE. */
5297
5298 static void
5299 ipa_tm_transform_clone (struct cgraph_node *node)
5300 {
5301 struct tm_ipa_cg_data *d;
5302 bool need_ssa_rename;
5303
5304 d = get_cg_data (&node, true);
5305
5306 /* If this function makes no calls and has no irrevocable blocks,
5307 then there's nothing to do. */
5308 /* ??? Remove non-aborting top-level transactions. */
5309 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5310 return;
5311
5312 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5313 calculate_dominance_info (CDI_DOMINATORS);
5314
5315 need_ssa_rename =
5316 ipa_tm_transform_calls (d->clone, NULL,
5317 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5318 d->irrevocable_blocks_clone);
5319
5320 if (need_ssa_rename)
5321 update_ssa (TODO_update_ssa_only_virtuals);
5322
5323 pop_cfun ();
5324 }
5325
5326 /* Main entry point for the transactional memory IPA pass. */
5327
5328 static unsigned int
5329 ipa_tm_execute (void)
5330 {
5331 cgraph_node_queue tm_callees = cgraph_node_queue ();
5332 /* List of functions that will go irrevocable. */
5333 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5334
5335 struct cgraph_node *node;
5336 struct tm_ipa_cg_data *d;
5337 enum availability a;
5338 unsigned int i;
5339
5340 #ifdef ENABLE_CHECKING
5341 cgraph_node::verify_cgraph_nodes ();
5342 #endif
5343
5344 bitmap_obstack_initialize (&tm_obstack);
5345 initialize_original_copy_tables ();
5346
5347 /* For all local functions marked tm_callable, queue them. */
5348 FOR_EACH_DEFINED_FUNCTION (node)
5349 if (is_tm_callable (node->decl)
5350 && node->get_availability () >= AVAIL_INTERPOSABLE)
5351 {
5352 d = get_cg_data (&node, true);
5353 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5354 }
5355
5356 /* For all local reachable functions... */
5357 FOR_EACH_DEFINED_FUNCTION (node)
5358 if (node->lowered
5359 && node->get_availability () >= AVAIL_INTERPOSABLE)
5360 {
5361 /* ... marked tm_pure, record that fact for the runtime by
5362 indicating that the pure function is its own tm_callable.
5363 No need to do this if the function's address can't be taken. */
5364 if (is_tm_pure (node->decl))
5365 {
5366 if (!node->local.local)
5367 record_tm_clone_pair (node->decl, node->decl);
5368 continue;
5369 }
5370
5371 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5372 calculate_dominance_info (CDI_DOMINATORS);
5373
5374 tm_region_init (NULL);
5375 if (all_tm_regions)
5376 {
5377 d = get_cg_data (&node, true);
5378
5379 /* Scan for calls that are in each transaction, and
5380 generate the uninstrumented code path. */
5381 ipa_tm_scan_calls_transaction (d, &tm_callees);
5382
5383 /* Put it in the worklist so we can scan the function
5384 later (ipa_tm_scan_irr_function) and mark the
5385 irrevocable blocks. */
5386 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5387 d->want_irr_scan_normal = true;
5388 }
5389
5390 pop_cfun ();
5391 }
5392
5393 /* For every local function on the callee list, scan as if we will be
5394 creating a transactional clone, queueing all new functions we find
5395 along the way. */
5396 for (i = 0; i < tm_callees.length (); ++i)
5397 {
5398 node = tm_callees[i];
5399 a = node->get_availability ();
5400 d = get_cg_data (&node, true);
5401
5402 /* Put it in the worklist so we can scan the function later
5403 (ipa_tm_scan_irr_function) and mark the irrevocable
5404 blocks. */
5405 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5406
5407 /* Some callees cannot be arbitrarily cloned. These will always be
5408 irrevocable. Mark these now, so that we need not scan them. */
5409 if (is_tm_irrevocable (node->decl))
5410 ipa_tm_note_irrevocable (node, &irr_worklist);
5411 else if (a <= AVAIL_NOT_AVAILABLE
5412 && !is_tm_safe_or_pure (node->decl))
5413 ipa_tm_note_irrevocable (node, &irr_worklist);
5414 else if (a >= AVAIL_INTERPOSABLE)
5415 {
5416 if (!tree_versionable_function_p (node->decl))
5417 ipa_tm_note_irrevocable (node, &irr_worklist);
5418 else if (!d->is_irrevocable)
5419 {
5420 /* If this is an alias, make sure its base is queued as well.
5421 We need not scan the callees now, as the base will do. */
5422 if (node->alias)
5423 {
5424 node = cgraph_node::get (node->thunk.alias);
5425 d = get_cg_data (&node, true);
5426 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5427 continue;
5428 }
5429
5430 /* Add all nodes called by this function into
5431 tm_callees as well. */
5432 ipa_tm_scan_calls_clone (node, &tm_callees);
5433 }
5434 }
5435 }
5436
5437 /* Iterate scans until no more work to be done. Prefer not to use
5438 vec::pop because the worklist tends to follow a breadth-first
5439 search of the callgraph, which should allow convergence with a
5440 minimum number of scans. But we also don't want the worklist
5441 array to grow without bound, so we shift the array up periodically. */
5442 for (i = 0; i < irr_worklist.length (); ++i)
5443 {
5444 if (i > 256 && i == irr_worklist.length () / 8)
5445 {
5446 irr_worklist.block_remove (0, i);
5447 i = 0;
5448 }
5449
5450 node = irr_worklist[i];
5451 d = get_cg_data (&node, true);
5452 d->in_worklist = false;
5453
5454 if (d->want_irr_scan_normal)
5455 {
5456 d->want_irr_scan_normal = false;
5457 ipa_tm_scan_irr_function (node, false);
5458 }
5459 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5460 ipa_tm_note_irrevocable (node, &irr_worklist);
5461 }
5462
5463 /* For every function on the callee list, collect the tm_may_enter_irr
5464 bit on the node. */
5465 irr_worklist.truncate (0);
5466 for (i = 0; i < tm_callees.length (); ++i)
5467 {
5468 node = tm_callees[i];
5469 if (ipa_tm_mayenterirr_function (node))
5470 {
5471 d = get_cg_data (&node, true);
5472 gcc_assert (d->in_worklist == false);
5473 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5474 }
5475 }
5476
5477 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5478 for (i = 0; i < irr_worklist.length (); ++i)
5479 {
5480 struct cgraph_node *caller;
5481 struct cgraph_edge *e;
5482 struct ipa_ref *ref;
5483
5484 if (i > 256 && i == irr_worklist.length () / 8)
5485 {
5486 irr_worklist.block_remove (0, i);
5487 i = 0;
5488 }
5489
5490 node = irr_worklist[i];
5491 d = get_cg_data (&node, true);
5492 d->in_worklist = false;
5493 node->local.tm_may_enter_irr = true;
5494
5495 /* Propagate back to normal callers. */
5496 for (e = node->callers; e ; e = e->next_caller)
5497 {
5498 caller = e->caller;
5499 if (!is_tm_safe_or_pure (caller->decl)
5500 && !caller->local.tm_may_enter_irr)
5501 {
5502 d = get_cg_data (&caller, true);
5503 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5504 }
5505 }
5506
5507 /* Propagate back to referring aliases as well. */
5508 FOR_EACH_ALIAS (node, ref)
5509 {
5510 caller = dyn_cast<cgraph_node *> (ref->referring);
5511 if (!caller->local.tm_may_enter_irr)
5512 {
5513 /* ?? Do not traverse aliases here. */
5514 d = get_cg_data (&caller, false);
5515 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5516 }
5517 }
5518 }
5519
5520 /* Now validate all tm_safe functions, and all atomic regions in
5521 other functions. */
5522 FOR_EACH_DEFINED_FUNCTION (node)
5523 if (node->lowered
5524 && node->get_availability () >= AVAIL_INTERPOSABLE)
5525 {
5526 d = get_cg_data (&node, true);
5527 if (is_tm_safe (node->decl))
5528 ipa_tm_diagnose_tm_safe (node);
5529 else if (d->all_tm_regions)
5530 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5531 }
5532
5533 /* Create clones. Do those that are not irrevocable and have a
5534 positive call count. Do those publicly visible functions that
5535 the user directed us to clone. */
5536 for (i = 0; i < tm_callees.length (); ++i)
5537 {
5538 bool doit = false;
5539
5540 node = tm_callees[i];
5541 if (node->cpp_implicit_alias)
5542 continue;
5543
5544 a = node->get_availability ();
5545 d = get_cg_data (&node, true);
5546
5547 if (a <= AVAIL_NOT_AVAILABLE)
5548 doit = is_tm_callable (node->decl);
5549 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5550 doit = true;
5551 else if (!d->is_irrevocable
5552 && d->tm_callers_normal + d->tm_callers_clone > 0)
5553 doit = true;
5554
5555 if (doit)
5556 ipa_tm_create_version (node);
5557 }
5558
5559 /* Redirect calls to the new clones, and insert irrevocable marks. */
5560 for (i = 0; i < tm_callees.length (); ++i)
5561 {
5562 node = tm_callees[i];
5563 if (node->analyzed)
5564 {
5565 d = get_cg_data (&node, true);
5566 if (d->clone)
5567 ipa_tm_transform_clone (node);
5568 }
5569 }
5570 FOR_EACH_DEFINED_FUNCTION (node)
5571 if (node->lowered
5572 && node->get_availability () >= AVAIL_INTERPOSABLE)
5573 {
5574 d = get_cg_data (&node, true);
5575 if (d->all_tm_regions)
5576 ipa_tm_transform_transaction (node);
5577 }
5578
5579 /* Free and clear all data structures. */
5580 tm_callees.release ();
5581 irr_worklist.release ();
5582 bitmap_obstack_release (&tm_obstack);
5583 free_original_copy_tables ();
5584
5585 FOR_EACH_FUNCTION (node)
5586 node->aux = NULL;
5587
5588 #ifdef ENABLE_CHECKING
5589 cgraph_node::verify_cgraph_nodes ();
5590 #endif
5591
5592 return 0;
5593 }
5594
5595 namespace {
5596
5597 const pass_data pass_data_ipa_tm =
5598 {
5599 SIMPLE_IPA_PASS, /* type */
5600 "tmipa", /* name */
5601 OPTGROUP_NONE, /* optinfo_flags */
5602 TV_TRANS_MEM, /* tv_id */
5603 ( PROP_ssa | PROP_cfg ), /* properties_required */
5604 0, /* properties_provided */
5605 0, /* properties_destroyed */
5606 0, /* todo_flags_start */
5607 0, /* todo_flags_finish */
5608 };
5609
5610 class pass_ipa_tm : public simple_ipa_opt_pass
5611 {
5612 public:
5613 pass_ipa_tm (gcc::context *ctxt)
5614 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5615 {}
5616
5617 /* opt_pass methods: */
5618 virtual bool gate (function *) { return flag_tm; }
5619 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5620
5621 }; // class pass_ipa_tm
5622
5623 } // anon namespace
5624
5625 simple_ipa_opt_pass *
5626 make_pass_ipa_tm (gcc::context *ctxt)
5627 {
5628 return new pass_ipa_tm (ctxt);
5629 }
5630
5631 #include "gt-trans-mem.h"