/* Passes for transactional memory support.
   Copyright (C) 2008-2019 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "tree-eh.h"
#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"
#include "stringpool.h"
#include "attribs.h"

#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000


/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.
   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

\f
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
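
/* For illustration (hypothetical declarations, not from the sources
   above): given

       void f (void) __attribute__ ((transaction_safe));
       void (*pf) (void) = f;

   get_attrs_for peeks through both the FUNCTION_DECL for f and the
   pointer-typed decl pf down to the same underlying FUNCTION_TYPE,
   whose TYPE_ATTRIBUTES carry "transaction_safe".  */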

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE))
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple *call)
{
  return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl
	  && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc., optimized variants.  */

static bool
is_tm_simple_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl
	  && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc., optimized variants.  */

static bool
is_tm_simple_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl
      && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT));
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
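
/* For illustration: with the file-level example above, lowering the
   user statement

       __tm_abort;

   produces the equivalent of

       __builtin___tm_abort (AR_USERABORT);

   and a front end handling a cancel-outer form passes IS_OUTER,
   ORing AR_OUTERABORT into the flags argument.  */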
\f
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }
  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, only these few are defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
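
/* For illustration (hypothetical user code): the machinery above is
   what gives a wrapper declaration like

       void *my_alloc (size_t);
       void *my_txn_alloc (size_t)
	 __attribute__ ((transaction_wrap (my_alloc)));

   its effect; it ends up here as record_tm_replacement (my_alloc,
   my_txn_alloc).  tm_malloc_replacement installs the BUILT_IN_TM_*
   replacements for plain malloc, calloc and free the same way,
   unless the user has already wrapped them explicitly.  */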
\f
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple *stmt;
};

/* Return true if T is a volatile lvalue of some kind.  */

static bool
volatile_lvalue_p (tree t)
{
  return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = false;
  else if (volatile_lvalue_p (*tp)
	   && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside %<transaction_safe%> "
		  "function");
    }

  return NULL_TREE;
}

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if ((!DECL_P (fn) || DECL_NAME (fn))
			    && TREE_CODE (fn) != SSA_NAME)
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "%<asm%> not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "%<asm%> not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
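
/* For illustration (hypothetical): given

       void f (void (*fp) (void))
       {
	 __transaction_atomic
	   {
	     fp ();
	   }
       }

   the unmarked indirect call through FP is rejected above with
   "unsafe indirect function call within atomic transaction", while
   the same call inside __transaction_relaxed is accepted, since a
   relaxed transaction may simply go irrevocable.  */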

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
\f
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, so that one call shadows all
   of the writes to a given location (thus reducing the total number
   of logging calls), but not so high as to be called on a path that
   does not perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple *> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
};


/* Log entry hashtable helpers.  */

struct log_entry_hasher : pointer_hash <tm_log_entry>
{
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
{
  mem_non_local = 0,
  mem_thread_local,
  mem_transaction_local,
  mem_max
};

struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
};

/* Hashtable helpers.  */

struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
  static inline hashval_t hash (const tm_new_mem_map *);
  static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc.).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	&& dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry_block for the transaction.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.

   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple *oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}
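
/* A worked (illustrative) example of the dominator filtering above,
   reusing the "lala" example from the overview comment:

       __transaction {
	 lala.x[i] = 1;		(A: entry block)
	 if (cond)
	   lala.x[i] = 2;	(B)
	 else
	   lala.x[i] = 3;	(C)
       }

   (A) is seen first and dominates (B) and (C), so only (A) is kept
   and a single logging call can shadow all three stores.  If only (B)
   and (C) existed, neither dominates the other, so both statements
   would end up in LP->STMTS.  */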

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
  tree type = TREE_TYPE (addr);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (TYPE_SIZE (type) != NULL
	   && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));

      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  switch (type_size)
	    {
	    case 64:
	      code = BUILT_IN_TM_LOG_M64;
	      break;
	    case 128:
	      code = BUILT_IN_TM_LOG_M128;
	      break;
	    case 256:
	      code = BUILT_IN_TM_LOG_M256;
	      break;
	    default:
	      goto unhandled_vec;
	    }
	  if (!builtin_decl_explicit_p (code))
	    goto unhandled_vec;
	}
      else
	{
	unhandled_vec:
	  switch (type_size)
	    {
	    case 8:
	      code = BUILT_IN_TM_LOG_1;
	      break;
	    case 16:
	      code = BUILT_IN_TM_LOG_2;
	      break;
	    case 32:
	      code = BUILT_IN_TM_LOG_4;
	      break;
	    case 64:
	      code = BUILT_IN_TM_LOG_8;
	      break;
	    }
	}
    }

  if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
    code = BUILT_IN_TM_LOG;
  tree decl = builtin_decl_explicit (code);

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
  else
    log = gimple_build_call (decl, 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple *stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

\f
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc.).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple *stmt = NULL;
  enum tree_code code;
  tm_new_mem_map **slot;
  tm_new_mem_map elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
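
/* An illustrative classification (a sketch; exact results depend on
   alias analysis): within

       __transaction_atomic
	 {
	   int *p = malloc (8);
	   *p = 1;
	 }

   P is mem_transaction_local, so the store through it needs no
   barrier at all; a pointer whose malloc happened before the
   transaction's entry block is mem_thread_local (eligible for
   logging); and anything that may alias global memory
   (ptr_deref_may_alias_global_p) is mem_non_local.  */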

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides,
   or NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple *stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
	enum thread_memory_type ret;

	ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
	if (ret == mem_non_local)
	  return true;
	if (stmt && ret == mem_thread_local)
	  /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
	  tm_log_add (entry_block, orig, stmt);

	/* Transaction-locals require nothing at all.  For malloc, a
	   transaction restart frees the memory and we reallocate.
	   For alloca, the stack pointer gets reset by the retry and
	   we reallocate.  */
	return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
	return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
	return false;
      gcc_assert (VAR_P (x));
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
	{
	  /* ??? This value is a pointer, but aggregate_value_p has been
	     jigged to return true which confuses needs_to_live_in_memory.
	     This ought to be cleaned up generically.

	     FIXME: Verify this still happens after the next mainline
	     merge.  Testcase: g++.dg/tm/pr47554.C.  */
	  return false;
	}

      if (is_global_var (x))
	return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
	     tm_log_add() call, however is_call_clobbered() depends on
	     aliasing info which is not available during
	     gimplification.  Since requires_barrier() gets called
	     during lower_sequence_tm/gimplification, leave the call
	     to needs_to_live_in_memory until we eliminate
	     lower_sequence_tm altogether.  */
	  needs_to_live_in_memory (x))
	return true;
      else
	{
	  /* For local memory that doesn't escape (aka thread private
	     memory), we can either save the value at the beginning of
	     the transaction and restore on restart, or call a tm
	     function to dynamically save and restore on restart
	     (ITM_L*).  */
	  if (stmt)
	    tm_log_add (entry_block, orig, stmt);
	  return false;
	}

    default:
      return false;
    }
}
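
/* Putting the cases above together (an illustrative sketch):

       int g;
       void f (int x)
       {
	 int local = 0;
	 static int s;
	 __transaction_atomic
	   {
	     g = x;		(barrier: global)
	     s = x;		(barrier: static storage)
	     local = x;		(no barrier: logged via tm_log_add,
				 assuming its address never escapes)
	   }
       }

   so only the accesses that other threads could observe pay for full
   instrumentation.  */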

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Iterate through the statements in the sequence, moving labels
   (and thus edges) of transactions from "label_norm" to "label_uninst".  */

static tree
make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
    {
      *handled_ops_p = true;
      txn->label_uninst = txn->label_norm;
      txn->label_norm = NULL;
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple *g;
  gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
		       lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
	*outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
			     GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = NULL;

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
			     1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
			     1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);

  /* For a (potentially) outer transaction, create two paths.  */
  gimple_seq uninst = NULL;
  if (outer_state == NULL)
    {
      uninst = copy_gimple_seq_and_replace_locals (g);
      /* In the uninstrumented copy, reset inner transactions to have only
	 an uninstrumented code path.  */
      memset (&this_wi, 0, sizeof (this_wi));
      walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
    }

  tree label1 = create_artificial_label (UNKNOWN_LOCATION);
  gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
  gimple_transaction_set_label_norm (stmt, label1);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  tree label3 = NULL;
  if ((this_state & GTMA_HAVE_ABORT)
      || outer_state == NULL
      || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
    {
      label3 = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label_over (stmt, label3);
    }

  if (uninst != NULL)
    {
      gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);

      tree label2 = create_artificial_label (UNKNOWN_LOCATION);
      gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
      gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
      gimple_transaction_set_label_uninst (stmt, label2);
    }

  if (label3 != NULL)
    gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
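
/* After the above, a toplevel transaction has roughly this shape
   (an illustrative sketch; with -fexceptions the commit is further
   wrapped in an EH_ELSE using BUILT_IN_TM_COMMIT_EH):

       GIMPLE_TRANSACTION [label_norm=L1, label_uninst=L2, label_over=L3]
       L1:
	 try {
	   ... instrumented body ...
	 } finally {
	   __builtin___tm_commit ();
	 }
	 goto L3;
       L2:
	 ... uninstrumented copy of the body ...
       L3:

   leaving pass_tm_edges to direct control flow to L1 or L2 depending
   on what the call to BUILT_IN_TM_START returns.  */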

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple *stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
	examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		      struct walk_stmt_info * wi)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;
  gimple_seq body;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  body = gimple_body (current_function_decl);
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
  gimple_set_body (current_function_decl, body);

  return 0;
}

namespace {

const pass_data pass_data_lower_tm =
{
  GIMPLE_PASS, /* type */
  "tmlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_tm : public gimple_opt_pass
{
public:
  pass_lower_tm (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return execute_lower_tm (); }

}; // class pass_lower_tm

} // anon namespace

gimple_opt_pass *
make_pass_lower_tm (gcc::context *ctxt)
{
  return new pass_lower_tm (ctxt);
}
\f
/* Collect region information for each transaction.  */

struct tm_region
{
public:

  /* The field "transaction_stmt" is initially a gtransaction *,
     but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).

     Helper method to get it as a gtransaction *, with code-checking
     in a checked build.  */

  gtransaction *
  get_transaction_stmt () const
  {
    return as_a <gtransaction *> (transaction_stmt);
  }

public:

  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.
     After TM_MARK, this gets replaced by a call to
     BUILT_IN_TM_START.
     Hence this will be either a gtransaction * or a gcall *.  */
  gimple *transaction_stmt;

  /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
     BUILT_IN_TM_START, this field is true if the transaction is an
     outer transaction.  */
  bool original_transaction_was_outer;

  /* Return value from BUILT_IN_TM_START.  */
  tree tm_state;

  /* The entry block to this region.  This will always be the first
     block of the body of the transaction.  */
  basic_block entry_block;

  /* The first block after an expanded call to _ITM_beginTransaction.  */
  basic_block restart_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
1922 bitmap irr_blocks;
1923 };
1924
1925 /* True if there are pending edge statements to be committed for the
1926 current function being scanned in the tmmark pass. */
1927 bool pending_edge_inserts_p;
1928
1929 static struct tm_region *all_tm_regions;
1930 static bitmap_obstack tm_obstack;
1931
1932
1933 /* A subroutine of tm_region_init. Record the existence of the
1934 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1935
1936 static struct tm_region *
1937 tm_region_init_0 (struct tm_region *outer, basic_block bb,
1938 gtransaction *stmt)
1939 {
1940 struct tm_region *region;
1941
1942 region = (struct tm_region *)
1943 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1944
1945 if (outer)
1946 {
1947 region->next = outer->inner;
1948 outer->inner = region;
1949 }
1950 else
1951 {
1952 region->next = all_tm_regions;
1953 all_tm_regions = region;
1954 }
1955 region->inner = NULL;
1956 region->outer = outer;
1957
1958 region->transaction_stmt = stmt;
1959 region->original_transaction_was_outer = false;
1960 region->tm_state = NULL;
1961
1962 /* There are either one or two edges out of the block containing
1963 the GIMPLE_TRANSACTION, one to the actual region and one to the
1964 "over" label if the region contains an abort. The former will
1965 always be the one marked FALLTHRU. */
1966 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1967
1968 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1969 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1970
1971 return region;
1972 }
1973
1974 /* A subroutine of tm_region_init. Record all the exit and
1975 irrevocable blocks in BB into the region's exit_blocks and
1976 irr_blocks bitmaps. Returns the new region being scanned. */
1977
1978 static struct tm_region *
1979 tm_region_init_1 (struct tm_region *region, basic_block bb)
1980 {
1981 gimple_stmt_iterator gsi;
1982 gimple *g;
1983
1984 if (!region
1985 || (!region->irr_blocks && !region->exit_blocks))
1986 return region;
1987
1988 /* Check to see if this is the end of a region by seeing if it
1989 contains a call to __builtin_tm_commit{,_eh}. Note that the
1990 outermost region for DECL_IS_TM_CLONE need not collect this. */
1991 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1992 {
1993 g = gsi_stmt (gsi);
1994 if (gimple_code (g) == GIMPLE_CALL)
1995 {
1996 tree fn = gimple_call_fndecl (g);
1997 if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL))
1998 {
1999 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
2000 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
2001 && region->exit_blocks)
2002 {
2003 bitmap_set_bit (region->exit_blocks, bb->index);
2004 region = region->outer;
2005 break;
2006 }
2007 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
2008 bitmap_set_bit (region->irr_blocks, bb->index);
2009 }
2010 }
2011 }
2012 return region;
2013 }
2014
2015 /* Collect all of the transaction regions within the current function
2016 and record them in ALL_TM_REGIONS. The REGION parameter may specify
2017 an "outermost" region for use by tm clones. */
2018
2019 static void
2020 tm_region_init (struct tm_region *region)
2021 {
2022 gimple *g;
2023 edge_iterator ei;
2024 edge e;
2025 basic_block bb;
2026 auto_vec<basic_block> queue;
2027 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2028 struct tm_region *old_region;
2029 auto_vec<tm_region *> bb_regions;
2030
2031 /* We could store this information in bb->aux, but we may get called
2032 through get_all_tm_blocks () from another pass that may already
2033 be using bb->aux. */
2034 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
2035
2036 all_tm_regions = region;
2037 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2038 queue.safe_push (bb);
2039 bitmap_set_bit (visited_blocks, bb->index);
2040 bb_regions[bb->index] = region;
2041
2042 do
2043 {
2044 bb = queue.pop ();
2045 region = bb_regions[bb->index];
2046 bb_regions[bb->index] = NULL;
2047
2048 /* Record exit and irrevocable blocks. */
2049 region = tm_region_init_1 (region, bb);
2050
2051 /* Check for the last statement in the block beginning a new region. */
2052 g = last_stmt (bb);
2053 old_region = region;
2054 if (g)
2055 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
2056 region = tm_region_init_0 (region, bb, trans_stmt);
2057
2058 /* Process subsequent blocks. */
2059 FOR_EACH_EDGE (e, ei, bb->succs)
2060 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2061 {
2062 bitmap_set_bit (visited_blocks, e->dest->index);
2063 queue.safe_push (e->dest);
2064
2065 /* If the current block started a new region, make sure that only
2066 the entry block of the new region is associated with this region.
2067 Other successors are still part of the old region. */
2068 if (old_region != region && e->dest != region->entry_block)
2069 bb_regions[e->dest->index] = old_region;
2070 else
2071 bb_regions[e->dest->index] = region;
2072 }
2073 }
2074 while (!queue.is_empty ());
2075 BITMAP_FREE (visited_blocks);
2076 }
2077
2078 /* The "gate" function for all transactional memory expansion and optimization
2079 passes. We collect region information for each top-level transaction, and
2080 if we don't find any, we skip all of the TM passes. Each region will have
2081 all of the exit blocks recorded, and the originating statement. */
2082
2083 static bool
2084 gate_tm_init (void)
2085 {
2086 if (!flag_tm)
2087 return false;
2088
2089 calculate_dominance_info (CDI_DOMINATORS);
2090 bitmap_obstack_initialize (&tm_obstack);
2091
2092 /* If the function is a TM_CLONE, then the entire function is the region. */
2093 if (decl_is_tm_clone (current_function_decl))
2094 {
2095 struct tm_region *region = (struct tm_region *)
2096 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2097 memset (region, 0, sizeof (*region));
2098 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2099 /* For a clone, the entire function is the region. But even if
2100 we don't need to record any exit blocks, we may need to
2101 record irrevocable blocks. */
2102 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2103
2104 tm_region_init (region);
2105 }
2106 else
2107 {
2108 tm_region_init (NULL);
2109
2110 /* If we didn't find any regions, cleanup and skip the whole tree
2111 of tm-related optimizations. */
2112 if (all_tm_regions == NULL)
2113 {
2114 bitmap_obstack_release (&tm_obstack);
2115 return false;
2116 }
2117 }
2118
2119 return true;
2120 }
2121
2122 namespace {
2123
2124 const pass_data pass_data_tm_init =
2125 {
2126 GIMPLE_PASS, /* type */
2127 "*tminit", /* name */
2128 OPTGROUP_NONE, /* optinfo_flags */
2129 TV_TRANS_MEM, /* tv_id */
2130 ( PROP_ssa | PROP_cfg ), /* properties_required */
2131 0, /* properties_provided */
2132 0, /* properties_destroyed */
2133 0, /* todo_flags_start */
2134 0, /* todo_flags_finish */
2135 };
2136
2137 class pass_tm_init : public gimple_opt_pass
2138 {
2139 public:
2140 pass_tm_init (gcc::context *ctxt)
2141 : gimple_opt_pass (pass_data_tm_init, ctxt)
2142 {}
2143
2144 /* opt_pass methods: */
2145 virtual bool gate (function *) { return gate_tm_init (); }
2146
2147 }; // class pass_tm_init
2148
2149 } // anon namespace
2150
2151 gimple_opt_pass *
2152 make_pass_tm_init (gcc::context *ctxt)
2153 {
2154 return new pass_tm_init (ctxt);
2155 }
2156 \f
2157 /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2158 represented by STATE. */
2159
2160 static inline void
2161 transaction_subcode_ior (struct tm_region *region, unsigned flags)
2162 {
2163 if (region && region->transaction_stmt)
2164 {
2165 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2166 flags |= gimple_transaction_subcode (transaction_stmt);
2167 gimple_transaction_set_subcode (transaction_stmt, flags);
2168 }
2169 }
2170
2171 /* Construct a memory load in a transactional context. Return the
2172 gimple statement performing the load, or NULL if there is no
2173 TM_LOAD builtin of the appropriate size to do the load.
2174
2175 LOC is the location to use for the new statement(s). */
2176
2177 static gcall *
2178 build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2179 {
2180 tree t, type = TREE_TYPE (rhs);
2181 gcall *gcall;
2182
2183 built_in_function code;
2184 if (type == float_type_node)
2185 code = BUILT_IN_TM_LOAD_FLOAT;
2186 else if (type == double_type_node)
2187 code = BUILT_IN_TM_LOAD_DOUBLE;
2188 else if (type == long_double_type_node)
2189 code = BUILT_IN_TM_LOAD_LDOUBLE;
2190 else
2191 {
2192 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2193 return NULL;
2194 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2195
2196 if (TREE_CODE (type) == VECTOR_TYPE)
2197 {
2198 switch (type_size)
2199 {
2200 case 64:
2201 code = BUILT_IN_TM_LOAD_M64;
2202 break;
2203 case 128:
2204 code = BUILT_IN_TM_LOAD_M128;
2205 break;
2206 case 256:
2207 code = BUILT_IN_TM_LOAD_M256;
2208 break;
2209 default:
2210 goto unhandled_vec;
2211 }
2212 if (!builtin_decl_explicit_p (code))
2213 goto unhandled_vec;
2214 }
2215 else
2216 {
2217 unhandled_vec:
2218 switch (type_size)
2219 {
2220 case 8:
2221 code = BUILT_IN_TM_LOAD_1;
2222 break;
2223 case 16:
2224 code = BUILT_IN_TM_LOAD_2;
2225 break;
2226 case 32:
2227 code = BUILT_IN_TM_LOAD_4;
2228 break;
2229 case 64:
2230 code = BUILT_IN_TM_LOAD_8;
2231 break;
2232 default:
2233 return NULL;
2234 }
2235 }
2236 }
2237
2238 tree decl = builtin_decl_explicit (code);
2239 gcc_assert (decl);
2240
2241 t = gimplify_addr (gsi, rhs);
2242 gcall = gimple_build_call (decl, 1, t);
2243 gimple_set_location (gcall, loc);
2244
2245 t = TREE_TYPE (TREE_TYPE (decl));
2246 if (useless_type_conversion_p (type, t))
2247 {
2248 gimple_call_set_lhs (gcall, lhs);
2249 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2250 }
2251 else
2252 {
2253 gimple *g;
2254 tree temp;
2255
2256 temp = create_tmp_reg (t);
2257 gimple_call_set_lhs (gcall, temp);
2258 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2259
2260 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2261 g = gimple_build_assign (lhs, t);
2262 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2263 }
2264
2265 return gcall;
2266 }
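/* As a sketch of the transformation above (assuming the usual _ITM_
   builtin naming; the exact symbol depends on the access size and
   type), a 4-byte transactional load

	x = *p;

   becomes

	x = __builtin__ITM_RU4 (p);

   with an extra VIEW_CONVERT_EXPR through a temporary when the
   builtin's return type does not match the type of the LHS.  */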
2267
2268
2269 /* Similarly for storing TYPE in a transactional context. */
2270
2271 static gcall *
2272 build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2273 {
2274 tree t, fn, type = TREE_TYPE (rhs), simple_type;
2275 gcall *gcall;
2276
2277 built_in_function code;
2278 if (type == float_type_node)
2279 code = BUILT_IN_TM_STORE_FLOAT;
2280 else if (type == double_type_node)
2281 code = BUILT_IN_TM_STORE_DOUBLE;
2282 else if (type == long_double_type_node)
2283 code = BUILT_IN_TM_STORE_LDOUBLE;
2284 else
2285 {
2286 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2287 return NULL;
2288 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2289
2290 if (TREE_CODE (type) == VECTOR_TYPE)
2291 {
2292 switch (type_size)
2293 {
2294 case 64:
2295 code = BUILT_IN_TM_STORE_M64;
2296 break;
2297 case 128:
2298 code = BUILT_IN_TM_STORE_M128;
2299 break;
2300 case 256:
2301 code = BUILT_IN_TM_STORE_M256;
2302 break;
2303 default:
2304 goto unhandled_vec;
2305 }
2306 if (!builtin_decl_explicit_p (code))
2307 goto unhandled_vec;
2308 }
2309 else
2310 {
2311 unhandled_vec:
2312 switch (type_size)
2313 {
2314 case 8:
2315 code = BUILT_IN_TM_STORE_1;
2316 break;
2317 case 16:
2318 code = BUILT_IN_TM_STORE_2;
2319 break;
2320 case 32:
2321 code = BUILT_IN_TM_STORE_4;
2322 break;
2323 case 64:
2324 code = BUILT_IN_TM_STORE_8;
2325 break;
2326 default:
2327 return NULL;
2328 }
2329 }
2330 }
2331
2332 fn = builtin_decl_explicit (code);
2333 gcc_assert (fn);
2334
2335 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2336
2337 if (TREE_CODE (rhs) == CONSTRUCTOR)
2338 {
2339 /* Handle the easy initialization to zero. */
2340 if (!CONSTRUCTOR_ELTS (rhs))
2341 rhs = build_int_cst (simple_type, 0);
2342 else
2343 {
2344 /* ...otherwise punt to the caller and probably use
2345 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2346 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2347 valid gimple. */
2348 return NULL;
2349 }
2350 }
2351 else if (!useless_type_conversion_p (simple_type, type))
2352 {
2353 gimple *g;
2354 tree temp;
2355
2356 temp = create_tmp_reg (simple_type);
2357 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2358 g = gimple_build_assign (temp, t);
2359 gimple_set_location (g, loc);
2360 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2361
2362 rhs = temp;
2363 }
2364
2365 t = gimplify_addr (gsi, lhs);
2366 gcall = gimple_build_call (fn, 2, t, rhs);
2367 gimple_set_location (gcall, loc);
2368 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2369
2370 return gcall;
2371 }
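/* Similarly for stores (again a sketch under the same naming
   assumption), a 4-byte transactional store

	*p = x;

   becomes

	__builtin__ITM_WU4 (p, x);

   going through a VIEW_CONVERT_EXPR into a temporary first when the
   RHS type does not match the builtin's argument type.  */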
2372
2373
2374 /* Expand an assignment statement into transactional builtins. */
2375
2376 static void
2377 expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2378 {
2379 gimple *stmt = gsi_stmt (*gsi);
2380 location_t loc = gimple_location (stmt);
2381 tree lhs = gimple_assign_lhs (stmt);
2382 tree rhs = gimple_assign_rhs1 (stmt);
2383 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2384 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
2385 gimple *gcall = NULL;
2386
2387 if (!load_p && !store_p)
2388 {
2389 /* Add thread private addresses to log if applicable. */
2390 requires_barrier (region->entry_block, lhs, stmt);
2391 gsi_next (gsi);
2392 return;
2393 }
2394
2395 if (load_p)
2396 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2397 if (store_p)
2398 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2399
2400 // Remove original load/store statement.
2401 gsi_remove (gsi, true);
2402
2403 // Attempt to use a simple load/store helper function.
2404 if (load_p && !store_p)
2405 gcall = build_tm_load (loc, lhs, rhs, gsi);
2406 else if (store_p && !load_p)
2407 gcall = build_tm_store (loc, lhs, rhs, gsi);
2408
2409 // If gcall has not been set, then we do not have a simple helper
2410 // function available for the type. This may be true of larger
2411 // structures, vectors, and non-standard float types.
2412 if (!gcall)
2413 {
2414 tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
2415
2416 // If this is a type that we couldn't handle above, but it's
2417 // in a register, we must spill it to memory for the copy.
2418 if (is_gimple_reg (lhs))
2419 {
2420 ltmp = create_tmp_var (TREE_TYPE (lhs));
2421 lhs_addr = build_fold_addr_expr (ltmp);
2422 }
2423 else
2424 lhs_addr = gimplify_addr (gsi, lhs);
2425 if (is_gimple_reg (rhs))
2426 {
2427 tree rtmp = create_tmp_var (TREE_TYPE (rhs));
2428 rhs_addr = build_fold_addr_expr (rtmp);
2429 gcall = gimple_build_assign (rtmp, rhs);
2430 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2431 }
2432 else
2433 rhs_addr = gimplify_addr (gsi, rhs);
2434
2435 // Choose the appropriate memory transfer function.
2436 if (load_p && store_p)
2437 {
2438 // ??? Figure out if there's any possible overlap between
2439 // the LHS and the RHS and if not, use MEMCPY.
2440 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
2441 }
2442 else if (load_p)
2443 {
2444 // Note that the store is non-transactional and cannot overlap.
2445 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
2446 }
2447 else
2448 {
2449 // Note that the load is non-transactional and cannot overlap.
2450 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
2451 }
2452
2453 gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
2454 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2455 gimple_set_location (gcall, loc);
2456 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2457
2458 if (ltmp)
2459 {
2460 gcall = gimple_build_assign (lhs, ltmp);
2461 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2462 }
2463 }
2464
2465 // Now that we have the load/store in its instrumented form, add
2466 // thread private addresses to the log if applicable.
2467 if (!store_p)
2468 requires_barrier (region->entry_block, lhs, gcall);
2469 }
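/* When no simple helper exists, the fallback above produces a bulk
   copy.  E.g. (a sketch) a struct assignment

	s1 = s2;

   where both sides need barriers becomes

	__builtin__ITM_memmoveRtWt (&s1, &s2, sizeof (s1));

   with S1 or S2 spilled to a stack temporary first if either is a
   gimple register.  */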
2470
2471
2472 /* Expand a call statement as appropriate for a transaction. That is,
2473 either verify that the call does not affect the transaction, or
2474 redirect the call to a clone that handles transactions, or change
2475 the transaction state to IRREVOCABLE. Return true if the call is
2476 one of the builtins that end a transaction. */
2477
2478 static bool
2479 expand_call_tm (struct tm_region *region,
2480 gimple_stmt_iterator *gsi)
2481 {
2482 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
2483 tree lhs = gimple_call_lhs (stmt);
2484 tree fn_decl;
2485 struct cgraph_node *node;
2486 bool retval = false;
2487
2488 fn_decl = gimple_call_fndecl (stmt);
2489
2490 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2491 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2492 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2493 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2494 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2495
2496 if (is_tm_pure_call (stmt))
2497 return false;
2498
2499 if (fn_decl)
2500 retval = is_tm_ending_fndecl (fn_decl);
2501 if (!retval)
2502 {
2503 /* Assume all non-const/pure calls write to memory, except
2504 transaction ending builtins. */
2505 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2506 }
2507
2508 /* For indirect calls, we already generated a call into the runtime. */
2509 if (!fn_decl)
2510 {
2511 tree fn = gimple_call_fn (stmt);
2512
2513 /* We are guaranteed never to go irrevocable on a safe or pure
2514 call, and the pure call was handled above. */
2515 if (is_tm_safe (fn))
2516 return false;
2517 else
2518 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2519
2520 return false;
2521 }
2522
2523 node = cgraph_node::get (fn_decl);
2524 /* All calls should have a cgraph node here. */
2525 if (!node)
2526 {
2527 /* We can have a nodeless call here if some pass after IPA-tm
2528 added uninstrumented calls. For example, loop distribution
2529 can transform certain loop constructs into __builtin_mem*
2530 calls. In this case, see if we have a suitable TM
2531 replacement and fill in the gaps. */
2532 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2533 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2534 gcc_assert (code == BUILT_IN_MEMCPY
2535 || code == BUILT_IN_MEMMOVE
2536 || code == BUILT_IN_MEMSET);
2537
2538 tree repl = find_tm_replacement_function (fn_decl);
2539 if (repl)
2540 {
2541 gimple_call_set_fndecl (stmt, repl);
2542 update_stmt (stmt);
2543 node = cgraph_node::create (repl);
2544 node->local.tm_may_enter_irr = false;
2545 return expand_call_tm (region, gsi);
2546 }
2547 gcc_unreachable ();
2548 }
2549 if (node->local.tm_may_enter_irr)
2550 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2551
2552 if (is_tm_abort (fn_decl))
2553 {
2554 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2555 return true;
2556 }
2557
2558 /* Instrument the store if needed.
2559
2560 If the assignment happens inside the function call (return slot
2561 optimization), there is no instrumentation to be done, since
2562 the callee should have done the right thing. */
2563 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2564 && !gimple_call_return_slot_opt_p (stmt))
2565 {
2566 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
2567 location_t loc = gimple_location (stmt);
2568 edge fallthru_edge = NULL;
2569 gassign *assign_stmt;
2570
2571 /* Remember if the call was going to throw. */
2572 if (stmt_can_throw_internal (cfun, stmt))
2573 {
2574 edge_iterator ei;
2575 edge e;
2576 basic_block bb = gimple_bb (stmt);
2577
2578 FOR_EACH_EDGE (e, ei, bb->succs)
2579 if (e->flags & EDGE_FALLTHRU)
2580 {
2581 fallthru_edge = e;
2582 break;
2583 }
2584 }
2585
2586 gimple_call_set_lhs (stmt, tmp);
2587 update_stmt (stmt);
2588 assign_stmt = gimple_build_assign (lhs, tmp);
2589 gimple_set_location (assign_stmt, loc);
2590
2591 /* We cannot throw in the middle of a BB. If the call was going
2592 to throw, place the instrumentation on the fallthru edge, so
2593 the call remains the last statement in the block. */
2594 if (fallthru_edge)
2595 {
2596 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
2597 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2598 expand_assign_tm (region, &fallthru_gsi);
2599 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2600 pending_edge_inserts_p = true;
2601 }
2602 else
2603 {
2604 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
2605 expand_assign_tm (region, gsi);
2606 }
2607
2608 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2609 }
2610
2611 return retval;
2612 }
2613
2614
2615 /* Expand all statements in BB as appropriate for being inside
2616 a transaction. */
2617
2618 static void
2619 expand_block_tm (struct tm_region *region, basic_block bb)
2620 {
2621 gimple_stmt_iterator gsi;
2622
2623 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2624 {
2625 gimple *stmt = gsi_stmt (gsi);
2626 switch (gimple_code (stmt))
2627 {
2628 case GIMPLE_ASSIGN:
2629 /* Only memory reads/writes need to be instrumented. */
2630 if (gimple_assign_single_p (stmt)
2631 && !gimple_clobber_p (stmt))
2632 {
2633 expand_assign_tm (region, &gsi);
2634 continue;
2635 }
2636 break;
2637
2638 case GIMPLE_CALL:
2639 if (expand_call_tm (region, &gsi))
2640 return;
2641 break;
2642
2643 case GIMPLE_ASM:
2644 gcc_unreachable ();
2645
2646 default:
2647 break;
2648 }
2649 if (!gsi_end_p (gsi))
2650 gsi_next (&gsi);
2651 }
2652 }
2653
2654 /* Return the list of basic-blocks in REGION.
2655
2656 STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
2657 blocks following a TM_IRREVOCABLE call.
2658
2659 INCLUDE_UNINSTRUMENTED_P is true if we should include the
2660 uninstrumented code path blocks in the list of basic blocks
2661 returned, false otherwise. */
2662
2663 static vec<basic_block>
2664 get_tm_region_blocks (basic_block entry_block,
2665 bitmap exit_blocks,
2666 bitmap irr_blocks,
2667 bitmap all_region_blocks,
2668 bool stop_at_irrevocable_p,
2669 bool include_uninstrumented_p = true)
2670 {
2671 vec<basic_block> bbs = vNULL;
2672 unsigned i;
2673 edge e;
2674 edge_iterator ei;
2675 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2676
2677 i = 0;
2678 bbs.safe_push (entry_block);
2679 bitmap_set_bit (visited_blocks, entry_block->index);
2680
2681 do
2682 {
2683 basic_block bb = bbs[i++];
2684
2685 if (exit_blocks
2686 && bitmap_bit_p (exit_blocks, bb->index))
2687 continue;
2688
2689 if (stop_at_irrevocable_p
2690 && irr_blocks
2691 && bitmap_bit_p (irr_blocks, bb->index))
2692 continue;
2693
2694 FOR_EACH_EDGE (e, ei, bb->succs)
2695 if ((include_uninstrumented_p
2696 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2697 && !bitmap_bit_p (visited_blocks, e->dest->index))
2698 {
2699 bitmap_set_bit (visited_blocks, e->dest->index);
2700 bbs.safe_push (e->dest);
2701 }
2702 }
2703 while (i < bbs.length ());
2704
2705 if (all_region_blocks)
2706 bitmap_ior_into (all_region_blocks, visited_blocks);
2707
2708 BITMAP_FREE (visited_blocks);
2709 return bbs;
2710 }
2711
2712 // Callback data for collect_bb2reg.
2713 struct bb2reg_stuff
2714 {
2715 vec<tm_region *> *bb2reg;
2716 bool include_uninstrumented_p;
2717 };
2718
2719 // Callback for expand_regions, collect innermost region data for each bb.
2720 static void *
2721 collect_bb2reg (struct tm_region *region, void *data)
2722 {
2723 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
2724 vec<tm_region *> *bb2reg = stuff->bb2reg;
2725 vec<basic_block> queue;
2726 unsigned int i;
2727 basic_block bb;
2728
2729 queue = get_tm_region_blocks (region->entry_block,
2730 region->exit_blocks,
2731 region->irr_blocks,
2732 NULL,
2733 /*stop_at_irr_p=*/true,
2734 stuff->include_uninstrumented_p);
2735
2736 // We expect expand_regions to visit outer regions before their inner
2737 // regions. Therefore the last region seen for any bb is the innermost.
2738 FOR_EACH_VEC_ELT (queue, i, bb)
2739 (*bb2reg)[bb->index] = region;
2740
2741 queue.release ();
2742 return NULL;
2743 }
2744
2745 // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2746 // which a basic block belongs. Note that we only consider the instrumented
2747 // code paths for the region; the uninstrumented code paths are ignored if
2748 // INCLUDE_UNINSTRUMENTED_P is false.
2749 //
2750 // ??? This data is very similar to the bb_regions array that is collected
2751 // during tm_region_init. Or, rather, this data is similar to what could
2752 // be used within tm_region_init. The actual computation in tm_region_init
2753 // begins and ends with bb_regions entirely full of NULL pointers, due to
2754 // the way in which pointers are swapped in and out of the array.
2755 //
2756 // ??? Our callers expect that blocks are not shared between transactions.
2757 // When the optimizers get too smart, and blocks are shared, then during
2758 // the tm_mark phase we'll add log entries to only one of the two transactions,
2759 // and in the tm_edge phase we'll add edges to the CFG that create invalid
2760 // cycles, the symptom being SSA defs that do not dominate their uses.
2761 // Note that the optimizers were locally correct with their transformation,
2762 // as we have no info within the program that suggests that the blocks cannot
2763 // be shared.
2764 //
2765 // ??? There is currently a hack inside tree-ssa-pre.c to work around the
2766 // only known instance of this block sharing.
2767
2768 static vec<tm_region *>
2769 get_bb_regions_instrumented (bool traverse_clones,
2770 bool include_uninstrumented_p)
2771 {
2772 unsigned n = last_basic_block_for_fn (cfun);
2773 struct bb2reg_stuff stuff;
2774 vec<tm_region *> ret;
2775
2776 ret.create (n);
2777 ret.safe_grow_cleared (n);
2778 stuff.bb2reg = &ret;
2779 stuff.include_uninstrumented_p = include_uninstrumented_p;
2780 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
2781
2782 return ret;
2783 }
2784
2785 /* Set the IN_TRANSACTION for all gimple statements that appear in a
2786 transaction. */
2787
2788 void
2789 compute_transaction_bits (void)
2790 {
2791 struct tm_region *region;
2792 vec<basic_block> queue;
2793 unsigned int i;
2794 basic_block bb;
2795
2796 /* ??? Perhaps we need to abstract gate_tm_init further, because we
2797 certainly don't need it to calculate CDI_DOMINATORS info. */
2798 gate_tm_init ();
2799
2800 FOR_EACH_BB_FN (bb, cfun)
2801 bb->flags &= ~BB_IN_TRANSACTION;
2802
2803 for (region = all_tm_regions; region; region = region->next)
2804 {
2805 queue = get_tm_region_blocks (region->entry_block,
2806 region->exit_blocks,
2807 region->irr_blocks,
2808 NULL,
2809 /*stop_at_irr_p=*/true);
2810 for (i = 0; queue.iterate (i, &bb); ++i)
2811 bb->flags |= BB_IN_TRANSACTION;
2812 queue.release ();
2813 }
2814
2815 if (all_tm_regions)
2816 bitmap_obstack_release (&tm_obstack);
2817 }
2818
2819 /* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2820 call to BUILT_IN_TM_START. */
2821
2822 static void *
2823 expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2824 {
2825 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2826 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2827 tree tm_state = region->tm_state;
2828 tree tm_state_type = TREE_TYPE (tm_state);
2829 edge abort_edge = NULL;
2830 edge inst_edge = NULL;
2831 edge uninst_edge = NULL;
2832 edge fallthru_edge = NULL;
2833
2834 // Identify the various successors of the transaction start.
2835 {
2836 edge_iterator i;
2837 edge e;
2838 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2839 {
2840 if (e->flags & EDGE_TM_ABORT)
2841 abort_edge = e;
2842 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2843 uninst_edge = e;
2844 else
2845 inst_edge = e;
2846 if (e->flags & EDGE_FALLTHRU)
2847 fallthru_edge = e;
2848 }
2849 }
2850
2851 /* ??? There are plenty of bits here we're not computing. */
2852 {
2853 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
2854 int flags = 0;
2855 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2856 flags |= PR_DOESGOIRREVOCABLE;
2857 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2858 flags |= PR_HASNOIRREVOCABLE;
2859 /* If the transaction does not have an abort in lexical scope and is not
2860 marked as an outer transaction, then it will never abort. */
2861 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2862 flags |= PR_HASNOABORT;
2863 if ((subcode & GTMA_HAVE_STORE) == 0)
2864 flags |= PR_READONLY;
2865 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
2866 flags |= PR_INSTRUMENTEDCODE;
2867 if (uninst_edge)
2868 flags |= PR_UNINSTRUMENTEDCODE;
2869 if (subcode & GTMA_IS_OUTER)
2870 region->original_transaction_was_outer = true;
2871 tree t = build_int_cst (tm_state_type, flags);
2872 gcall *call = gimple_build_call (tm_start, 1, t);
2873 gimple_call_set_lhs (call, tm_state);
2874 gimple_set_location (call, gimple_location (region->transaction_stmt));
2875
2876 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2877 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2878 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2879 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2880 gsi_remove (&gsi, true);
2881 region->transaction_stmt = call;
2882 }
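  // At this point the transaction header is, sketched with the usual
  // _ITM_ name for BUILT_IN_TM_START:
  //
  //	tm_state = __builtin__ITM_beginTransaction (pr_flags);
  //
  // where PR_FLAGS stands for the PR_* bits computed above. The code
  // below tests bits of TM_STATE to decide what to run on restart.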
2883
2884 // Generate log saves.
2885 if (!tm_log_save_addresses.is_empty ())
2886 tm_log_emit_saves (region->entry_block, transaction_bb);
2887
2888 // In the beginning, we have no tests to perform on transaction restart.
2889 // Note that after this point, transaction_bb becomes the "most recent
2890 // block containing tests for the transaction".
2891 region->restart_block = region->entry_block;
2892
2893 // Generate log restores.
2894 if (!tm_log_save_addresses.is_empty ())
2895 {
2896 basic_block test_bb = create_empty_bb (transaction_bb);
2897 basic_block code_bb = create_empty_bb (test_bb);
2898 basic_block join_bb = create_empty_bb (code_bb);
2899 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2900 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2901 add_bb_to_loop (join_bb, transaction_bb->loop_father);
2902 if (region->restart_block == region->entry_block)
2903 region->restart_block = test_bb;
2904
2905 tree t1 = create_tmp_reg (tm_state_type);
2906 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
2907 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2908 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2909 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2910
2911 t2 = build_int_cst (tm_state_type, 0);
2912 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2913 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2914
2915 tm_log_emit_restores (region->entry_block, code_bb);
2916
2917 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2918 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2919 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2920 redirect_edge_pred (fallthru_edge, join_bb);
2921
2922 join_bb->count = test_bb->count = transaction_bb->count;
2923
2924 ei->probability = profile_probability::always ();
2925 et->probability = profile_probability::likely ();
2926 ef->probability = profile_probability::unlikely ();
2927
2928 code_bb->count = et->count ();
2929
2930 transaction_bb = join_bb;
2931 }
2932
2933 // If we have an ABORT edge, create a test to perform the abort.
2934 if (abort_edge)
2935 {
2936 basic_block test_bb = create_empty_bb (transaction_bb);
2937 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2938 if (region->restart_block == region->entry_block)
2939 region->restart_block = test_bb;
2940
2941 tree t1 = create_tmp_reg (tm_state_type);
2942 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
2943 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2944 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2945 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2946
2947 t2 = build_int_cst (tm_state_type, 0);
2948 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2949 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2950
2951 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2952 test_bb->count = transaction_bb->count;
2953 ei->probability = profile_probability::always ();
2954
2955 // Not the abort edge. If both are live, choose one at random as
2956 // we'll be fixing that up below.
2957 redirect_edge_pred (fallthru_edge, test_bb);
2958 fallthru_edge->flags = EDGE_FALSE_VALUE;
2959 fallthru_edge->probability = profile_probability::very_likely ();
2960
2961 // Abort/over edge.
2962 redirect_edge_pred (abort_edge, test_bb);
2963 abort_edge->flags = EDGE_TRUE_VALUE;
2964 abort_edge->probability = profile_probability::unlikely ();
2965
2966 transaction_bb = test_bb;
2967 }
2968
2969 // If we have both instrumented and uninstrumented code paths, select one.
2970 if (inst_edge && uninst_edge)
2971 {
2972 basic_block test_bb = create_empty_bb (transaction_bb);
2973 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2974 if (region->restart_block == region->entry_block)
2975 region->restart_block = test_bb;
2976
2977 tree t1 = create_tmp_reg (tm_state_type);
2978 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2979
2980 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
2981 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2982 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2983
2984 t2 = build_int_cst (tm_state_type, 0);
2985 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2986 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2987
2988 // Create the edge into test_bb first, as we want to copy values
2989 // out of the fallthru edge.
2990 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2991 e->probability = fallthru_edge->probability;
2992 test_bb->count = fallthru_edge->count ();
2993
2994 // Now update the edges to the inst/uninst implementations.
2995 // For now assume that the paths are equally likely. When using HTM,
2996 // we'll try the uninst path first and fall back to the inst path if
2997 // HTM buffers are exceeded. Without HTM we start with the inst path
2998 // and use the uninst path when falling back to serial mode.
2999 redirect_edge_pred (inst_edge, test_bb);
3000 inst_edge->flags = EDGE_FALSE_VALUE;
3001 inst_edge->probability = profile_probability::even ();
3002
3003 redirect_edge_pred (uninst_edge, test_bb);
3004 uninst_edge->flags = EDGE_TRUE_VALUE;
3005 uninst_edge->probability = profile_probability::even ();
3006 }
3007
3008 // If we have no previous special cases, and we have PHIs at the beginning
3009 // of the atomic region, this means we have a loop at the beginning of the
3010 // atomic region that shares the first block. This can cause problems with
3011 // the transaction restart abnormal edges that will be added in the
3012 // tm_edges pass. Solve this by adding a new empty block to receive them.
3013 if (region->restart_block == region->entry_block
3014 && phi_nodes (region->entry_block))
3015 {
3016 basic_block empty_bb = create_empty_bb (transaction_bb);
3017 region->restart_block = empty_bb;
3018 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
3019
3020 redirect_edge_pred (fallthru_edge, empty_bb);
3021 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
3022 }
3023
3024 return NULL;
3025 }
3026
3027 /* Generate the temporary to be used for the return value of
3028 BUILT_IN_TM_START. */
3029
3030 static void *
3031 generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
3032 {
3033 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
3034 region->tm_state =
3035 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
3036
3037 // Reset the subcode, post optimizations. We'll fill this in
3038 // again as we process blocks.
3039 if (region->exit_blocks)
3040 {
3041 gtransaction *transaction_stmt = region->get_transaction_stmt ();
3042 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
3043
3044 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
3045 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
3046 | GTMA_MAY_ENTER_IRREVOCABLE
3047 | GTMA_HAS_NO_INSTRUMENTATION);
3048 else
3049 subcode &= GTMA_DECLARATION_MASK;
3050 gimple_transaction_set_subcode (transaction_stmt, subcode);
3051 }
3052
3053 return NULL;
3054 }
3055
3056 // Propagate flags from inner transactions outwards.
3057 static void
3058 propagate_tm_flags_out (struct tm_region *region)
3059 {
3060 if (region == NULL)
3061 return;
3062 propagate_tm_flags_out (region->inner);
3063
3064 if (region->outer && region->outer->transaction_stmt)
3065 {
3066 unsigned s
3067 = gimple_transaction_subcode (region->get_transaction_stmt ());
3068 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
3069 | GTMA_MAY_ENTER_IRREVOCABLE);
3070 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
3071 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
3072 s);
3073 }
3074
3075 propagate_tm_flags_out (region->next);
3076 }
3077
3078 /* Entry point to the MARK phase of TM expansion. Here we replace
3079 transactional memory statements with calls to builtins, and function
3080 calls with their transactional clones (if available). But we don't
3081 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
3082
3083 static unsigned int
3084 execute_tm_mark (void)
3085 {
3086 pending_edge_inserts_p = false;
3087
3088 expand_regions (all_tm_regions, generate_tm_state, NULL,
3089 /*traverse_clones=*/true);
3090
3091 tm_log_init ();
3092
3093 vec<tm_region *> bb_regions
3094 = get_bb_regions_instrumented (/*traverse_clones=*/true,
3095 /*include_uninstrumented_p=*/false);
3096 struct tm_region *r;
3097 unsigned i;
3098
3099 // Expand memory operations into calls into the runtime.
3100 // This collects log entries as well.
3101 FOR_EACH_VEC_ELT (bb_regions, i, r)
3102 {
3103 if (r != NULL)
3104 {
3105 if (r->transaction_stmt)
3106 {
3107 unsigned sub
3108 = gimple_transaction_subcode (r->get_transaction_stmt ());
3109
3110 /* If we're sure to go irrevocable, there won't be
3111 anything to expand, since the run-time will go
3112 irrevocable right away. */
3113 if (sub & GTMA_DOES_GO_IRREVOCABLE
3114 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3115 continue;
3116 }
3117 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
3118 }
3119 }
3120
3121 bb_regions.release ();
3122
3123 // Propagate flags from inner transactions outwards.
3124 propagate_tm_flags_out (all_tm_regions);
3125
3126 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
3127 expand_regions (all_tm_regions, expand_transaction, NULL,
3128 /*traverse_clones=*/false);
3129
3130 tm_log_emit ();
3131 tm_log_delete ();
3132
3133 if (pending_edge_inserts_p)
3134 gsi_commit_edge_inserts ();
3135 free_dominance_info (CDI_DOMINATORS);
3136 return 0;
3137 }
3138
3139 namespace {
3140
3141 const pass_data pass_data_tm_mark =
3142 {
3143 GIMPLE_PASS, /* type */
3144 "tmmark", /* name */
3145 OPTGROUP_NONE, /* optinfo_flags */
3146 TV_TRANS_MEM, /* tv_id */
3147 ( PROP_ssa | PROP_cfg ), /* properties_required */
3148 0, /* properties_provided */
3149 0, /* properties_destroyed */
3150 0, /* todo_flags_start */
3151 TODO_update_ssa, /* todo_flags_finish */
3152 };
3153
3154 class pass_tm_mark : public gimple_opt_pass
3155 {
3156 public:
3157 pass_tm_mark (gcc::context *ctxt)
3158 : gimple_opt_pass (pass_data_tm_mark, ctxt)
3159 {}
3160
3161 /* opt_pass methods: */
3162 virtual unsigned int execute (function *) { return execute_tm_mark (); }
3163
3164 }; // class pass_tm_mark
3165
3166 } // anon namespace
3167
3168 gimple_opt_pass *
3169 make_pass_tm_mark (gcc::context *ctxt)
3170 {
3171 return new pass_tm_mark (ctxt);
3172 }
3173 \f
3174
3175 /* Create an abnormal edge from STMT at ITER, splitting the block
3176 as necessary. Adjust *PNEXT as needed for the split block. */
3177
3178 static inline void
3179 split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
3180 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
3181 {
3182 basic_block bb = gimple_bb (stmt);
3183 if (!gsi_one_before_end_p (iter))
3184 {
3185 edge e = split_block (bb, stmt);
3186 *pnext = gsi_start_bb (e->dest);
3187 }
3188 edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL);
3189 if (e)
3190 e->probability = profile_probability::guessed_never ();
3191
3192 // Record the need for the edge for the benefit of the rtl passes.
3193 if (cfun->gimple_df->tm_restart == NULL)
3194 cfun->gimple_df->tm_restart
3195 = hash_table<tm_restart_hasher>::create_ggc (31);
3196
3197 struct tm_restart_node dummy;
3198 dummy.stmt = stmt;
3199 dummy.label_or_list = gimple_block_label (dest_bb);
3200
3201 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3202 INSERT);
3203 struct tm_restart_node *n = *slot;
3204 if (n == NULL)
3205 {
3206 n = ggc_alloc<tm_restart_node> ();
3207 *n = dummy;
3208 }
3209 else
3210 {
3211 tree old = n->label_or_list;
3212 if (TREE_CODE (old) == LABEL_DECL)
3213 old = tree_cons (NULL, old, NULL);
3214 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3215 }
3216 }
3217
3218 /* Split block BB as necessary for every builtin function we added, and
3219 wire up the abnormal back edges implied by the transaction restart. */
3220
3221 static void
3222 expand_block_edges (struct tm_region *const region, basic_block bb)
3223 {
3224 gimple_stmt_iterator gsi, next_gsi;
3225
3226 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
3227 {
3228 gimple *stmt = gsi_stmt (gsi);
3229 gcall *call_stmt;
3230
3231 next_gsi = gsi;
3232 gsi_next (&next_gsi);
3233
3234 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
3235 call_stmt = dyn_cast <gcall *> (stmt);
3236 if (!call_stmt
3237 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
3238 continue;
3239
3240 if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt))
3241 == BUILT_IN_TM_ABORT)
3242 {
3243 // If we have a ``__transaction_cancel [[outer]]'', there is only
3244 // one abnormal edge: to the transaction marked OUTER.
3245 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3246 // constant argument, which we can examine here. Users invoking
3247 // TM_ABORT directly get what they deserve.
3248 tree arg = gimple_call_arg (call_stmt, 0);
3249 if (TREE_CODE (arg) == INTEGER_CST
3250 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3251 && !decl_is_tm_clone (current_function_decl))
3252 {
3253 // Find the GTMA_IS_OUTER transaction.
3254 for (struct tm_region *o = region; o; o = o->outer)
3255 if (o->original_transaction_was_outer)
3256 {
3257 split_bb_make_tm_edge (call_stmt, o->restart_block,
3258 gsi, &next_gsi);
3259 break;
3260 }
3261
3262 // Otherwise, the front-end should have semantically checked
3263 // outer aborts, but in either case the target region is not
3264 // within this function.
3265 continue;
3266 }
3267
3268 // Non-outer TM aborts have an abnormal edge to the inner-most
3269 // transaction, the one being aborted.
3270 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3271 &next_gsi);
3272 }
3273
3274 // All TM builtins have an abnormal edge to the outer-most transaction.
3275 // We never restart inner transactions. For tm clones, we know a-priori
3276 // that the outer-most transaction is outside the function.
3277 if (decl_is_tm_clone (current_function_decl))
3278 continue;
3279
3280 if (cfun->gimple_df->tm_restart == NULL)
3281 cfun->gimple_df->tm_restart
3282 = hash_table<tm_restart_hasher>::create_ggc (31);
3283
3284 // All TM builtins have an abnormal edge to the outer-most transaction.
3285 // We never restart inner transactions.
3286 for (struct tm_region *o = region; o; o = o->outer)
3287 if (!o->outer)
3288 {
3289 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
3290 break;
3291 }
3292
3293 // Delete any tail-call annotation that may have been added.
3294 // The tail-call pass may have mis-identified the commit as being
3295 // a candidate because we had not yet added this restart edge.
3296 gimple_call_set_tail (call_stmt, false);
3297 }
3298 }
3299
3300 /* Entry point to the final expansion of transactional nodes. */
3301
3302 namespace {
3303
3304 const pass_data pass_data_tm_edges =
3305 {
3306 GIMPLE_PASS, /* type */
3307 "tmedge", /* name */
3308 OPTGROUP_NONE, /* optinfo_flags */
3309 TV_TRANS_MEM, /* tv_id */
3310 ( PROP_ssa | PROP_cfg ), /* properties_required */
3311 0, /* properties_provided */
3312 0, /* properties_destroyed */
3313 0, /* todo_flags_start */
3314 TODO_update_ssa, /* todo_flags_finish */
3315 };
3316
3317 class pass_tm_edges : public gimple_opt_pass
3318 {
3319 public:
3320 pass_tm_edges (gcc::context *ctxt)
3321 : gimple_opt_pass (pass_data_tm_edges, ctxt)
3322 {}
3323
3324 /* opt_pass methods: */
3325 virtual unsigned int execute (function *);
3326
3327 }; // class pass_tm_edges
3328
3329 unsigned int
3330 pass_tm_edges::execute (function *fun)
3331 {
3332 vec<tm_region *> bb_regions
3333 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3334 /*include_uninstrumented_p=*/true);
3335 struct tm_region *r;
3336 unsigned i;
3337
3338 FOR_EACH_VEC_ELT (bb_regions, i, r)
3339 if (r != NULL)
3340 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3341
3342 bb_regions.release ();
3343
3344 /* We've got to release the dominance info now, to indicate that it
3345 must be rebuilt completely. Otherwise we'll crash trying to update
3346 the SSA web in the TODO section following this pass. */
3347 free_dominance_info (CDI_DOMINATORS);
3348 /* We've also wrecked loops badly by inserting abnormal edges. */
3349 loops_state_set (LOOPS_NEED_FIXUP);
3350 bitmap_obstack_release (&tm_obstack);
3351 all_tm_regions = NULL;
3352
3353 return 0;
3354 }
3355
3356 } // anon namespace
3357
3358 gimple_opt_pass *
3359 make_pass_tm_edges (gcc::context *ctxt)
3360 {
3361 return new pass_tm_edges (ctxt);
3362 }
3363 \f
3364 /* Helper function for expand_regions. Expand REGION and recurse to
3365 the inner region. Call CALLBACK on each region. CALLBACK returns
3366 NULL to continue the traversal, otherwise a non-null value which
3367 this function will return as well. TRAVERSE_CLONES is true if we
3368 should traverse transactional clones. */
3369
3370 static void *
3371 expand_regions_1 (struct tm_region *region,
3372 void *(*callback)(struct tm_region *, void *),
3373 void *data,
3374 bool traverse_clones)
3375 {
3376 void *retval = NULL;
3377 if (region->exit_blocks
3378 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
3379 {
3380 retval = callback (region, data);
3381 if (retval)
3382 return retval;
3383 }
3384 if (region->inner)
3385 {
3386 retval = expand_regions (region->inner, callback, data, traverse_clones);
3387 if (retval)
3388 return retval;
3389 }
3390 return retval;
3391 }
3392
3393 /* Traverse the regions enclosed and including REGION. Execute
3394 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3395 continue the traversal, otherwise a non-null value which this
3396 function will return as well. TRAVERSE_CLONES is true if we should
3397 traverse transactional clones. */
3398
3399 static void *
3400 expand_regions (struct tm_region *region,
3401 void *(*callback)(struct tm_region *, void *),
3402 void *data,
3403 bool traverse_clones)
3404 {
3405 void *retval = NULL;
3406 while (region)
3407 {
3408 retval = expand_regions_1 (region, callback, data, traverse_clones);
3409 if (retval)
3410 return retval;
3411 region = region->next;
3412 }
3413 return retval;
3414 }
3415
3416 \f
3417 /* A unique TM memory operation. */
3418 struct tm_memop
3419 {
3420 /* Unique ID that all memory operations to the same location have. */
3421 unsigned int value_id;
3422 /* Address of load/store. */
3423 tree addr;
3424 };
3425
3426 /* TM memory operation hashtable helpers. */
3427
3428 struct tm_memop_hasher : free_ptr_hash <tm_memop>
3429 {
3430 static inline hashval_t hash (const tm_memop *);
3431 static inline bool equal (const tm_memop *, const tm_memop *);
3432 };
3433
3434 /* Htab support. Return a hash value for a `tm_memop'. */
3435 inline hashval_t
3436 tm_memop_hasher::hash (const tm_memop *mem)
3437 {
3438 tree addr = mem->addr;
3439 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3440 actually done with operand_equal_p (see tm_memop_eq). */
3441 if (TREE_CODE (addr) == ADDR_EXPR)
3442 addr = TREE_OPERAND (addr, 0);
3443 return iterative_hash_expr (addr, 0);
3444 }
3445
3446 /* Htab support. Return true if two tm_memop's are the same. */
3447 inline bool
3448 tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
3449 {
3450 return operand_equal_p (mem1->addr, mem2->addr, 0);
3451 }
3452
3453 /* Sets for solving data flow equations in the memory optimization pass. */
3454 struct tm_memopt_bitmaps
3455 {
3456 /* Stores available to this BB upon entry. Basically, stores that
3457 dominate this BB. */
3458 bitmap store_avail_in;
3459 /* Stores available at the end of this BB. */
3460 bitmap store_avail_out;
3461 bitmap store_antic_in;
3462 bitmap store_antic_out;
3463 /* Reads available to this BB upon entry. Basically, reads that
3464 dominate this BB. */
3465 bitmap read_avail_in;
3466 /* Reads available at the end of this BB. */
3467 bitmap read_avail_out;
3468 /* Reads performed in this BB. */
3469 bitmap read_local;
3470 /* Writes performed in this BB. */
3471 bitmap store_local;
3472
3473 /* Temporary storage for pass. */
3474 /* Is the current BB in the worklist? */
3475 bool avail_in_worklist_p;
3476 /* Have we visited this BB? */
3477 bool visited_p;
3478 };
3479
3480 static bitmap_obstack tm_memopt_obstack;
3481
3482 /* Unique counter for TM loads and stores. Loads and stores of the
3483 same address get the same ID. */
3484 static unsigned int tm_memopt_value_id;
3485 static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
3486
3487 #define STORE_AVAIL_IN(BB) \
3488 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3489 #define STORE_AVAIL_OUT(BB) \
3490 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3491 #define STORE_ANTIC_IN(BB) \
3492 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3493 #define STORE_ANTIC_OUT(BB) \
3494 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3495 #define READ_AVAIL_IN(BB) \
3496 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3497 #define READ_AVAIL_OUT(BB) \
3498 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3499 #define READ_LOCAL(BB) \
3500 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3501 #define STORE_LOCAL(BB) \
3502 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3503 #define AVAIL_IN_WORKLIST_P(BB) \
3504 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3505 #define BB_VISITED_P(BB) \
3506 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3507
3508 /* Given a TM load/store in STMT, return the value number for the address
3509 it accesses. */
3510
3511 static unsigned int
3512 tm_memopt_value_number (gimple *stmt, enum insert_option op)
3513 {
3514 struct tm_memop tmpmem, *mem;
3515 tm_memop **slot;
3516
3517 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3518 tmpmem.addr = gimple_call_arg (stmt, 0);
3519 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
3520 if (*slot)
3521 mem = *slot;
3522 else if (op == INSERT)
3523 {
3524 mem = XNEW (struct tm_memop);
3525 *slot = mem;
3526 mem->value_id = tm_memopt_value_id++;
3527 mem->addr = tmpmem.addr;
3528 }
3529 else
3530 gcc_unreachable ();
3531 return mem->value_id;
3532 }
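/* For example (a sketch): two transactional loads of the same
   address in different blocks, say

	x = __builtin__ITM_RU4 (&a);	// in BB 3
	y = __builtin__ITM_RU4 (&a);	// in BB 5

   get the same value number, which lets the dataflow below recognize
   the second read as available (and thus potentially redundant).  */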
3533
3534 /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3535
3536 static void
3537 tm_memopt_accumulate_memops (basic_block bb)
3538 {
3539 gimple_stmt_iterator gsi;
3540
3541 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3542 {
3543 gimple *stmt = gsi_stmt (gsi);
3544 bitmap bits;
3545 unsigned int loc;
3546
3547 if (is_tm_store (stmt))
3548 bits = STORE_LOCAL (bb);
3549 else if (is_tm_load (stmt))
3550 bits = READ_LOCAL (bb);
3551 else
3552 continue;
3553
3554 loc = tm_memopt_value_number (stmt, INSERT);
3555 bitmap_set_bit (bits, loc);
3556 if (dump_file)
3557 {
3558 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3559 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3560 gimple_bb (stmt)->index);
3561 print_generic_expr (dump_file, gimple_call_arg (stmt, 0));
3562 fprintf (dump_file, "\n");
3563 }
3564 }
3565 }
3566
3567 /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3568
3569 static void
3570 dump_tm_memopt_set (const char *set_name, bitmap bits)
3571 {
3572 unsigned i;
3573 bitmap_iterator bi;
3574 const char *comma = "";
3575
3576 fprintf (dump_file, "TM memopt: %s: [", set_name);
3577 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3578 {
3579 hash_table<tm_memop_hasher>::iterator hi;
3580 struct tm_memop *mem = NULL;
3581
3582 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
3583 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
3584 if (mem->value_id == i)
3585 break;
3586 gcc_assert (mem->value_id == i);
3587 fprintf (dump_file, "%s", comma);
3588 comma = ", ";
3589 print_generic_expr (dump_file, mem->addr);
3590 }
3591 fprintf (dump_file, "]\n");
3592 }
3593
3594 /* Prettily dump all of the memopt sets in BLOCKS. */
3595
3596 static void
3597 dump_tm_memopt_sets (vec<basic_block> blocks)
3598 {
3599 size_t i;
3600 basic_block bb;
3601
3602 for (i = 0; blocks.iterate (i, &bb); ++i)
3603 {
3604 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3605 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3606 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3607 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3608 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3609 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3610 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3611 }
3612 }
3613
3614 /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3615
3616 static void
3617 tm_memopt_compute_avin (basic_block bb)
3618 {
3619 edge e;
3620 unsigned ix;
3621
3622 /* Seed with the AVOUT of any predecessor. */
3623 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3624 {
3625 e = EDGE_PRED (bb, ix);
3626 /* Make sure we have already visited this BB, so that it is
3627 initialized.
3628
3629 If e->src->aux is NULL, this predecessor is actually on an
3630 enclosing transaction. We only care about the current
3631 transaction, so ignore it. */
3632 if (e->src->aux && BB_VISITED_P (e->src))
3633 {
3634 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3635 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3636 break;
3637 }
3638 }
3639
3640 for (; ix < EDGE_COUNT (bb->preds); ix++)
3641 {
3642 e = EDGE_PRED (bb, ix);
3643 if (e->src->aux && BB_VISITED_P (e->src))
3644 {
3645 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3646 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3647 }
3648 }
3649
3650 BB_VISITED_P (bb) = true;
3651 }
3652
3653 /* Compute the STORE_ANTIC_IN for the basic block BB. */
3654
3655 static void
3656 tm_memopt_compute_antin (basic_block bb)
3657 {
3658 edge e;
3659 unsigned ix;
3660
3661 /* Seed with the ANTIC_OUT of any successor. */
3662 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3663 {
3664 e = EDGE_SUCC (bb, ix);
3665 /* Make sure we have already visited this BB, so that it is
3666 initialized. */
3667 if (BB_VISITED_P (e->dest))
3668 {
3669 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3670 break;
3671 }
3672 }
3673
3674 for (; ix < EDGE_COUNT (bb->succs); ix++)
3675 {
3676 e = EDGE_SUCC (bb, ix);
3677 if (BB_VISITED_P (e->dest))
3678 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3679 }
3680
3681 BB_VISITED_P (bb) = true;
3682 }
3683
3684 /* Compute the AVAIL sets for every basic block in BLOCKS.
3685
3686 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3687
3688 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3689 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3690
3691 This is basically what we do in lcm's compute_available(), but here
3692 we calculate two sets of sets (one for STOREs and one for READs),
3693 and we work on a region instead of the entire CFG.
3694
3695 REGION is the TM region.
3696 BLOCKS are the basic blocks in the region. */
3697
3698 static void
3699 tm_memopt_compute_available (struct tm_region *region,
3700 vec<basic_block> blocks)
3701 {
3702 edge e;
3703 basic_block *worklist, *qin, *qout, *qend, bb;
3704 unsigned int qlen, i;
3705 edge_iterator ei;
3706 bool changed;
3707
3708 /* Allocate a worklist array/queue. Entries are only added to the
3709 list if they were not already on the list. So the size is
3710 bounded by the number of basic blocks in the region. */
3711 gcc_assert (!blocks.is_empty ());
3712 qlen = blocks.length () - 1;
3713 qin = qout = worklist = XNEWVEC (basic_block, qlen);
3714
3715 /* Put every block in the region on the worklist. */
3716 for (i = 0; blocks.iterate (i, &bb); ++i)
3717 {
3718 /* Seed AVAIL_OUT with the LOCAL set. */
3719 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3720 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3721
3722 AVAIL_IN_WORKLIST_P (bb) = true;
3723 /* No need to insert the entry block, since it has an AVIN of
3724 null, and an AVOUT that has already been seeded in. */
3725 if (bb != region->entry_block)
3726 *qin++ = bb;
3727 }
3728
3729 /* The entry block has been initialized with the local sets. */
3730 BB_VISITED_P (region->entry_block) = true;
3731
3732 qin = worklist;
3733 qend = &worklist[qlen];
3734
3735 /* Iterate until the worklist is empty. */
3736 while (qlen)
3737 {
3738 /* Take the first entry off the worklist. */
3739 bb = *qout++;
3740 qlen--;
3741
3742 if (qout >= qend)
3743 qout = worklist;
3744
3745 /* This block can be added to the worklist again if necessary. */
3746 AVAIL_IN_WORKLIST_P (bb) = false;
3747 tm_memopt_compute_avin (bb);
3748
3749 /* Note: We do not add the LOCAL sets here because we already
3750 seeded the AVAIL_OUT sets with them. */
3751 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3752 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3753 if (changed
3754 && (region->exit_blocks == NULL
3755 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3756 /* If the out state of this block changed, then we need to add
3757 its successors to the worklist if they are not already in. */
3758 FOR_EACH_EDGE (e, ei, bb->succs)
3759 if (!AVAIL_IN_WORKLIST_P (e->dest)
3760 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3761 {
3762 *qin++ = e->dest;
3763 AVAIL_IN_WORKLIST_P (e->dest) = true;
3764 qlen++;
3765
3766 if (qin >= qend)
3767 qin = worklist;
3768 }
3769 }
3770
3771 free (worklist);
3772
3773 if (dump_file)
3774 dump_tm_memopt_sets (blocks);
3775 }
3776
3777 /* Compute ANTIC sets for every basic block in BLOCKS.
3778
3779 We compute STORE_ANTIC_OUT as follows:
3780
3781 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3782 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3783
3784 REGION is the TM region.
3785 BLOCKS are the basic blocks in the region. */
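
/* For example: if every successor of BB stores to location X, then X
   ends up in STORE_ANTIC_IN (BB), and hence in STORE_ANTIC_OUT (BB),
   even though BB itself contains no store to X.  The transform phase
   below uses this to turn a load of X in BB into a read-for-write,
   since the store is bound to happen on every path leaving BB.  */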
3786
3787 static void
3788 tm_memopt_compute_antic (struct tm_region *region,
3789 vec<basic_block> blocks)
3790 {
3791 edge e;
3792 basic_block *worklist, *qin, *qout, *qend, bb;
3793 unsigned int qlen;
3794 int i;
3795 edge_iterator ei;
3796
3797 /* Allocate a worklist array/queue. Entries are only added to the
3798 list if they were not already on the list. So the size is
3799 bounded by the number of basic blocks in the region. */
3800 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
3801
3802 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
3803 {
3804 bb = blocks[i];
3805
3806 /* Seed ANTIC_OUT with the LOCAL set. */
3807 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3808
3809 /* Put every block in the region on the worklist. */
3810 AVAIL_IN_WORKLIST_P (bb) = true;
3811 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3812 and their ANTIC_OUT has already been seeded in. */
3813 if (region->exit_blocks
3814 && !bitmap_bit_p (region->exit_blocks, bb->index))
3815 {
3816 qlen++;
3817 *qin++ = bb;
3818 }
3819 }
3820
3821 /* The exit blocks have been initialized with the local sets. */
3822 if (region->exit_blocks)
3823 {
3824 unsigned int i;
3825 bitmap_iterator bi;
3826 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
3827 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
3828 }
3829
3830 qin = worklist;
3831 qend = &worklist[qlen];
3832
3833 /* Iterate until the worklist is empty. */
3834 while (qlen)
3835 {
3836 /* Take the first entry off the worklist. */
3837 bb = *qout++;
3838 qlen--;
3839
3840 if (qout >= qend)
3841 qout = worklist;
3842
3843 /* This block can be added to the worklist again if necessary. */
3844 AVAIL_IN_WORKLIST_P (bb) = false;
3845 tm_memopt_compute_antin (bb);
3846
3847 /* Note: We do not add the LOCAL sets here because we already
3848 seeded the ANTIC_OUT sets with them. */
3849 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3850 && bb != region->entry_block)
3851 /* If the out state of this block changed, then we need to add
3852 its predecessors to the worklist if they are not already in. */
3853 FOR_EACH_EDGE (e, ei, bb->preds)
3854 if (!AVAIL_IN_WORKLIST_P (e->src))
3855 {
3856 *qin++ = e->src;
3857 AVAIL_IN_WORKLIST_P (e->src) = true;
3858 qlen++;
3859
3860 if (qin >= qend)
3861 qin = worklist;
3862 }
3863 }
3864
3865 free (worklist);
3866
3867 if (dump_file)
3868 dump_tm_memopt_sets (blocks);
3869 }
3870
3871 /* Offsets of load variants from TM_LOAD. For example,
3872 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3873 See gtm-builtins.def. */
3874 #define TRANSFORM_RAR 1
3875 #define TRANSFORM_RAW 2
3876 #define TRANSFORM_RFW 3
3877 /* Offsets of store variants from TM_STORE. */
3878 #define TRANSFORM_WAR 1
3879 #define TRANSFORM_WAW 2
3880
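/* For example, given the offsets above, applying TRANSFORM_RAW to a
   4-byte transactional load selects the entry two slots below
   BUILT_IN_TM_LOAD_4 in the builtins table, i.e. the read-after-write
   variant of that load (assuming the ordering in gtm-builtins.def).  */
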
3881 /* Inform about a load/store optimization. */
3882
3883 static void
3884 dump_tm_memopt_transform (gimple *stmt)
3885 {
3886 if (dump_file)
3887 {
3888 fprintf (dump_file, "TM memopt: transforming: ");
3889 print_gimple_stmt (dump_file, stmt, 0);
3890 fprintf (dump_file, "\n");
3891 }
3892 }
3893
3894 /* Perform a read/write optimization. Replaces the TM builtin in STMT
3895 by a builtin that is OFFSET entries down in the builtins table in
3896 gtm-builtins.def. */
3897
3898 static void
3899 tm_memopt_transform_stmt (unsigned int offset,
3900 gcall *stmt,
3901 gimple_stmt_iterator *gsi)
3902 {
3903 tree fn = gimple_call_fn (stmt);
3904 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3905 TREE_OPERAND (fn, 0)
3906 = builtin_decl_explicit ((enum built_in_function)
3907 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3908 + offset));
3909 gimple_call_set_fn (stmt, fn);
3910 gsi_replace (gsi, stmt, true);
3911 dump_tm_memopt_transform (stmt);
3912 }
3913
3914 /* Perform the actual TM memory optimization transformations in the
3915 basic blocks in BLOCKS. */
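
/* For instance, within a single transaction:

     x = 1;      TM store: x enters STORE_AVAIL
     tmp = x;    TM load with x in STORE_AVAIL: becomes a RAW load
     x = 2;      TM store with x in STORE_AVAIL: becomes a WAW store

   letting the runtime skip bookkeeping it has already done for x.  */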
3916
3917 static void
3918 tm_memopt_transform_blocks (vec<basic_block> blocks)
3919 {
3920 size_t i;
3921 basic_block bb;
3922 gimple_stmt_iterator gsi;
3923
3924 for (i = 0; blocks.iterate (i, &bb); ++i)
3925 {
3926 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3927 {
3928 gimple *stmt = gsi_stmt (gsi);
3929 bitmap read_avail = READ_AVAIL_IN (bb);
3930 bitmap store_avail = STORE_AVAIL_IN (bb);
3931 bitmap store_antic = STORE_ANTIC_OUT (bb);
3932 unsigned int loc;
3933
3934 if (is_tm_simple_load (stmt))
3935 {
3936 gcall *call_stmt = as_a <gcall *> (stmt);
3937 loc = tm_memopt_value_number (stmt, NO_INSERT);
3938 if (store_avail && bitmap_bit_p (store_avail, loc))
3939 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
3940 else if (store_antic && bitmap_bit_p (store_antic, loc))
3941 {
3942 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
3943 bitmap_set_bit (store_avail, loc);
3944 }
3945 else if (read_avail && bitmap_bit_p (read_avail, loc))
3946 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
3947 else
3948 bitmap_set_bit (read_avail, loc);
3949 }
3950 else if (is_tm_simple_store (stmt))
3951 {
3952 gcall *call_stmt = as_a <gcall *> (stmt);
3953 loc = tm_memopt_value_number (stmt, NO_INSERT);
3954 if (store_avail && bitmap_bit_p (store_avail, loc))
3955 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
3956 else
3957 {
3958 if (read_avail && bitmap_bit_p (read_avail, loc))
3959 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
3960 bitmap_set_bit (store_avail, loc);
3961 }
3962 }
3963 }
3964 }
3965 }
3966
3967 /* Return a new set of bitmaps for a BB. */
3968
3969 static struct tm_memopt_bitmaps *
3970 tm_memopt_init_sets (void)
3971 {
3972 struct tm_memopt_bitmaps *b
3973 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3974 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3975 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3976 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3977 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3979 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3980 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3981 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3982 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3983 return b;
3984 }
3985
3986 /* Free sets computed for each BB. */
3987
3988 static void
3989 tm_memopt_free_sets (vec<basic_block> blocks)
3990 {
3991 size_t i;
3992 basic_block bb;
3993
3994 for (i = 0; blocks.iterate (i, &bb); ++i)
3995 bb->aux = NULL;
3996 }
3997
3998 /* Clear the visited bit for every basic block in BLOCKS. */
3999
4000 static void
4001 tm_memopt_clear_visited (vec<basic_block> blocks)
4002 {
4003 size_t i;
4004 basic_block bb;
4005
4006 for (i = 0; blocks.iterate (i, &bb); ++i)
4007 BB_VISITED_P (bb) = false;
4008 }
4009
4010 /* Replace TM load/stores with hints for the runtime. We handle
4011 things like read-after-write, write-after-read, read-after-read,
4012 read-for-write, etc. */
4013
4014 static unsigned int
4015 execute_tm_memopt (void)
4016 {
4017 struct tm_region *region;
4018 vec<basic_block> bbs;
4019
4020 tm_memopt_value_id = 0;
4021 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
4022
4023 for (region = all_tm_regions; region; region = region->next)
4024 {
4025 /* All the TM stores/loads in the current region. */
4026 size_t i;
4027 basic_block bb;
4028
4029 bitmap_obstack_initialize (&tm_memopt_obstack);
4030
4031 /* Save all BBs for the current region. */
4032 bbs = get_tm_region_blocks (region->entry_block,
4033 region->exit_blocks,
4034 region->irr_blocks,
4035 NULL,
4036 false);
4037
4038 /* Collect all the memory operations. */
4039 for (i = 0; bbs.iterate (i, &bb); ++i)
4040 {
4041 bb->aux = tm_memopt_init_sets ();
4042 tm_memopt_accumulate_memops (bb);
4043 }
4044
4045 /* Solve data flow equations and transform each block accordingly. */
4046 tm_memopt_clear_visited (bbs);
4047 tm_memopt_compute_available (region, bbs);
4048 tm_memopt_clear_visited (bbs);
4049 tm_memopt_compute_antic (region, bbs);
4050 tm_memopt_transform_blocks (bbs);
4051
4052 tm_memopt_free_sets (bbs);
4053 bbs.release ();
4054 bitmap_obstack_release (&tm_memopt_obstack);
4055 tm_memopt_value_numbers->empty ();
4056 }
4057
4058 delete tm_memopt_value_numbers;
4059 tm_memopt_value_numbers = NULL;
4060 return 0;
4061 }
4062
4063 namespace {
4064
4065 const pass_data pass_data_tm_memopt =
4066 {
4067 GIMPLE_PASS, /* type */
4068 "tmmemopt", /* name */
4069 OPTGROUP_NONE, /* optinfo_flags */
4070 TV_TRANS_MEM, /* tv_id */
4071 ( PROP_ssa | PROP_cfg ), /* properties_required */
4072 0, /* properties_provided */
4073 0, /* properties_destroyed */
4074 0, /* todo_flags_start */
4075 0, /* todo_flags_finish */
4076 };
4077
4078 class pass_tm_memopt : public gimple_opt_pass
4079 {
4080 public:
4081 pass_tm_memopt (gcc::context *ctxt)
4082 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
4083 {}
4084
4085 /* opt_pass methods: */
4086 virtual bool gate (function *) { return flag_tm && optimize > 0; }
4087 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
4088
4089 }; // class pass_tm_memopt
4090
4091 } // anon namespace
4092
4093 gimple_opt_pass *
4094 make_pass_tm_memopt (gcc::context *ctxt)
4095 {
4096 return new pass_tm_memopt (ctxt);
4097 }
4098
4099 \f
4100 /* Interprocedural analysis for the creation of transactional clones.
4101 The aim of this pass is to find which functions are referenced in
4102 a non-irrevocable transaction context, and for those over which
4103 we have control (or user directive), create a version of the
4104 function which uses only the transactional interface to reference
4105 protected memories. This analysis proceeds in several steps:
4106
4107 (1) Collect the set of all possible transactional clones:
4108
4109 (a) For all local public functions marked tm_callable, push
4110 them onto the tm_callee queue.
4111
4112 (b) For all local functions, scan for calls in transaction blocks.
4113 Push the caller and callee onto the tm_caller and tm_callee
4114 queues. Count the number of callers for each callee.
4115
4116 (c) For each local function on the callee list, assume we will
4117 create a transactional clone. Push *all* calls onto the
4118 callee queues; count the number of clone callers separately
4119 from the number of original callers.
4120
4121 (2) Propagate irrevocable status up the dominator tree:
4122
4123 (a) Any external function on the callee list that is not marked
4124 tm_callable is irrevocable. Push all callers of such onto
4125 a worklist.
4126
4127 (b) For each function on the worklist, mark each block that
4128 contains an irrevocable call. Use the AND operator to
4129 propagate that mark up the dominator tree.
4130
4131 (c) If we reach the entry block for a possible transactional
4132 clone, then the transactional clone is irrevocable, and
4133 we should not create the clone after all. Push all
4134 callers onto the worklist.
4135
4136 (d) Place tm_irrevocable calls at the beginning of the relevant
4137 blocks. Special case here is the entry block for the entire
4138 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4139 the library to begin the region in serial mode. Decrement
4140 the call count for all callees in the irrevocable region.
4141
4142 (3) Create the transactional clones:
4143
4144 Any tm_callee that still has a non-zero call count is cloned.
4145 */
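
/* For example, given a local function

     void f (void) { ... }

   called from within a transaction, step (1) queues f as a tm_callee;
   if steps (2a)-(2d) find nothing irrevocable in it, step (3) creates
   the transactional clone (named via tm_mangle below, e.g. "_ZGTt1f"
   for the C symbol "f"), and calls to f within transactions are
   redirected to that clone.  */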
4146
4147 /* This structure is stored in the AUX field of each cgraph_node. */
4148 struct tm_ipa_cg_data
4149 {
4150 /* The clone of the function that got created. */
4151 struct cgraph_node *clone;
4152
4153 /* The tm regions in the normal function. */
4154 struct tm_region *all_tm_regions;
4155
4156 /* The blocks of the normal/clone functions that contain irrevocable
4157 calls, or blocks that are post-dominated by irrevocable calls. */
4158 bitmap irrevocable_blocks_normal;
4159 bitmap irrevocable_blocks_clone;
4160
4161 /* The blocks of the normal function that are involved in transactions. */
4162 bitmap transaction_blocks_normal;
4163
4164 /* The number of callers to the transactional clone of this function
4165 from normal and transactional clones respectively. */
4166 unsigned tm_callers_normal;
4167 unsigned tm_callers_clone;
4168
4169 /* True if all calls to this function's transactional clone
4170 are irrevocable. Also automatically true if the function
4171 has no transactional clone. */
4172 bool is_irrevocable;
4173
4174 /* Flags indicating the presence of this function in various queues. */
4175 bool in_callee_queue;
4176 bool in_worklist;
4177
4178 /* Flags indicating the kind of scan desired while in the worklist. */
4179 bool want_irr_scan_normal;
4180 };
4181
4182 typedef vec<cgraph_node *> cgraph_node_queue;
4183
4184 /* Return the ipa data associated with NODE, allocating zeroed memory
4185 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4186 and set *NODE accordingly. */
4187
4188 static struct tm_ipa_cg_data *
4189 get_cg_data (struct cgraph_node **node, bool traverse_aliases)
4190 {
4191 struct tm_ipa_cg_data *d;
4192
4193 if (traverse_aliases && (*node)->alias)
4194 *node = (*node)->get_alias_target ();
4195
4196 d = (struct tm_ipa_cg_data *) (*node)->aux;
4197
4198 if (d == NULL)
4199 {
4200 d = (struct tm_ipa_cg_data *)
4201 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
4202 (*node)->aux = (void *) d;
4203 memset (d, 0, sizeof (*d));
4204 }
4205
4206 return d;
4207 }
4208
4209 /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4210 it is already present. */
4211
4212 static void
4213 maybe_push_queue (struct cgraph_node *node,
4214 cgraph_node_queue *queue_p, bool *in_queue_p)
4215 {
4216 if (!*in_queue_p)
4217 {
4218 *in_queue_p = true;
4219 queue_p->safe_push (node);
4220 }
4221 }
4222
4223 /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4224 Queue all callees within block BB. */
4225
4226 static void
4227 ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4228 basic_block bb, bool for_clone)
4229 {
4230 gimple_stmt_iterator gsi;
4231
4232 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4233 {
4234 gimple *stmt = gsi_stmt (gsi);
4235 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4236 {
4237 tree fndecl = gimple_call_fndecl (stmt);
4238 if (fndecl)
4239 {
4240 struct tm_ipa_cg_data *d;
4241 unsigned *pcallers;
4242 struct cgraph_node *node;
4243
4244 if (is_tm_ending_fndecl (fndecl))
4245 continue;
4246 if (find_tm_replacement_function (fndecl))
4247 continue;
4248
4249 node = cgraph_node::get (fndecl);
4250 gcc_assert (node != NULL);
4251 d = get_cg_data (&node, true);
4252
4253 pcallers = (for_clone ? &d->tm_callers_clone
4254 : &d->tm_callers_normal);
4255 *pcallers += 1;
4256
4257 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4258 }
4259 }
4260 }
4261 }
4262
4263 /* Scan all calls in the function described by D that lie within a
4264 transaction region, and push the resulting nodes into the callee queue. */
4265
4266 static void
4267 ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4268 cgraph_node_queue *callees_p)
4269 {
4270 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4271 d->all_tm_regions = all_tm_regions;
4272
4273 for (tm_region *r = all_tm_regions; r; r = r->next)
4274 {
4275 vec<basic_block> bbs;
4276 basic_block bb;
4277 unsigned i;
4278
4279 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
4280 d->transaction_blocks_normal, false, false);
4281
4282 FOR_EACH_VEC_ELT (bbs, i, bb)
4283 ipa_tm_scan_calls_block (callees_p, bb, false);
4284
4285 bbs.release ();
4286 }
4287 }
4288
4289 /* Scan all calls in NODE as if it were the transactional clone,
4290 and push the destinations into the callee queue. */
4291
4292 static void
4293 ipa_tm_scan_calls_clone (struct cgraph_node *node,
4294 cgraph_node_queue *callees_p)
4295 {
4296 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
4297 basic_block bb;
4298
4299 FOR_EACH_BB_FN (bb, fn)
4300 ipa_tm_scan_calls_block (callees_p, bb, true);
4301 }
4302
4303 /* The function NODE has been detected to be irrevocable. Push all
4304 of its callers onto WORKLIST for the purpose of re-scanning them. */
4305
4306 static void
4307 ipa_tm_note_irrevocable (struct cgraph_node *node,
4308 cgraph_node_queue *worklist_p)
4309 {
4310 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
4311 struct cgraph_edge *e;
4312
4313 d->is_irrevocable = true;
4314
4315 for (e = node->callers; e ; e = e->next_caller)
4316 {
4317 basic_block bb;
4318 struct cgraph_node *caller;
4319
4320 /* Don't examine recursive calls. */
4321 if (e->caller == node)
4322 continue;
4323 /* Even if we think we can go irrevocable, believe the user
4324 above all. */
4325 if (is_tm_safe_or_pure (e->caller->decl))
4326 continue;
4327
4328 caller = e->caller;
4329 d = get_cg_data (&caller, true);
4330
4331 /* Check if the call is within a transactional region of the
4332 caller. If so, schedule the caller for normal re-scan as well. */
4333 bb = gimple_bb (e->call_stmt);
4334 gcc_assert (bb != NULL);
4335 if (d->transaction_blocks_normal
4336 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4337 d->want_irr_scan_normal = true;
4338
4339 maybe_push_queue (caller, worklist_p, &d->in_worklist);
4340 }
4341 }
4342
4343 /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4344 within the block is irrevocable. */
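
/* For example, a volatile access or an asm statement, as in

     __transaction_relaxed { v = 1; }     (v declared volatile)
     __transaction_relaxed { asm (""); }

   makes the containing block irrevocable, as does a direct call to a
   function already known to be irrevocable.  */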
4345
4346 static bool
4347 ipa_tm_scan_irr_block (basic_block bb)
4348 {
4349 gimple_stmt_iterator gsi;
4350 tree fn;
4351
4352 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4353 {
4354 gimple *stmt = gsi_stmt (gsi);
4355 switch (gimple_code (stmt))
4356 {
4357 case GIMPLE_ASSIGN:
4358 if (gimple_assign_single_p (stmt))
4359 {
4360 tree lhs = gimple_assign_lhs (stmt);
4361 tree rhs = gimple_assign_rhs1 (stmt);
4362 if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
4363 return true;
4364 }
4365 break;
4366
4367 case GIMPLE_CALL:
4368 {
4369 tree lhs = gimple_call_lhs (stmt);
4370 if (lhs && volatile_lvalue_p (lhs))
4371 return true;
4372
4373 if (is_tm_pure_call (stmt))
4374 break;
4375
4376 fn = gimple_call_fn (stmt);
4377
4378 /* Functions with the attribute are by definition irrevocable. */
4379 if (is_tm_irrevocable (fn))
4380 return true;
4381
4382 /* For direct function calls, go ahead and check for replacement
4383 functions, or transitive irrevocable functions. For indirect
4384 functions, we'll ask the runtime. */
4385 if (TREE_CODE (fn) == ADDR_EXPR)
4386 {
4387 struct tm_ipa_cg_data *d;
4388 struct cgraph_node *node;
4389
4390 fn = TREE_OPERAND (fn, 0);
4391 if (is_tm_ending_fndecl (fn))
4392 break;
4393 if (find_tm_replacement_function (fn))
4394 break;
4395
4396 node = cgraph_node::get (fn);
4397 d = get_cg_data (&node, true);
4398
4399 /* Return true if irrevocable, but above all, believe
4400 the user. */
4401 if (d->is_irrevocable
4402 && !is_tm_safe_or_pure (fn))
4403 return true;
4404 }
4405 break;
4406 }
4407
4408 case GIMPLE_ASM:
4409 /* ??? The Approved Method of indicating that an inline
4410 assembly statement is not relevant to the transaction
4411 is to wrap it in a __tm_waiver block. This is not
4412 yet implemented, so we can't check for it. */
4413 if (is_tm_safe (current_function_decl))
4414 {
4415 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4416 SET_EXPR_LOCATION (t, gimple_location (stmt));
4417 error ("%K%<asm%> not allowed in %<transaction_safe%> function",
4418 t);
4419 }
4420 return true;
4421
4422 default:
4423 break;
4424 }
4425 }
4426
4427 return false;
4428 }
4429
4430 /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4431 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4432 scanning past OLD_IRR or EXIT_BLOCKS. */
4433
4434 static bool
4435 ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
4436 bitmap old_irr, bitmap exit_blocks)
4437 {
4438 bool any_new_irr = false;
4439 edge e;
4440 edge_iterator ei;
4441 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4442
4443 do
4444 {
4445 basic_block bb = pqueue->pop ();
4446
4447 /* Don't re-scan blocks we know already are irrevocable. */
4448 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4449 continue;
4450
4451 if (ipa_tm_scan_irr_block (bb))
4452 {
4453 bitmap_set_bit (new_irr, bb->index);
4454 any_new_irr = true;
4455 }
4456 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4457 {
4458 FOR_EACH_EDGE (e, ei, bb->succs)
4459 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4460 {
4461 bitmap_set_bit (visited_blocks, e->dest->index);
4462 pqueue->safe_push (e->dest);
4463 }
4464 }
4465 }
4466 while (!pqueue->is_empty ());
4467
4468 BITMAP_FREE (visited_blocks);
4469
4470 return any_new_irr;
4471 }
4472
4473 /* Propagate the irrevocable property both up and down the dominator tree.
4474 ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4475 are the blocks that exit the TM regions; OLD_IRR holds the results of a
4476 previous, fully propagated scan of the dominator tree; NEW_IRR is the set
4477 of new blocks which are gaining the irrevocable property during this scan. */
4478
4479 static void
4480 ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4481 bitmap old_irr, bitmap exit_blocks)
4482 {
4483 vec<basic_block> bbs;
4484 bitmap all_region_blocks;
4485
4486 /* If this block is in the old set, no need to rescan. */
4487 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4488 return;
4489
4490 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4491 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4492 all_region_blocks, false);
4493 do
4494 {
4495 basic_block bb = bbs.pop ();
4496 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4497 bool all_son_irr = false;
4498 edge_iterator ei;
4499 edge e;
4500
4501 /* Propagate up. If all of my children are irrevocable, then so am I,
4502 but there must be at least one such child. */
4503 if (!this_irr)
4504 {
4505 FOR_EACH_EDGE (e, ei, bb->succs)
4506 {
4507 if (!bitmap_bit_p (new_irr, e->dest->index))
4508 {
4509 all_son_irr = false;
4510 break;
4511 }
4512 else
4513 all_son_irr = true;
4514 }
4515 if (all_son_irr)
4516 {
4517 /* Add block to new_irr if it hasn't already been processed. */
4518 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4519 {
4520 bitmap_set_bit (new_irr, bb->index);
4521 this_irr = true;
4522 }
4523 }
4524 }
4525
4526 /* Propagate down to everyone we immediately dominate. */
4527 if (this_irr)
4528 {
4529 basic_block son;
4530 for (son = first_dom_son (CDI_DOMINATORS, bb);
4531 son;
4532 son = next_dom_son (CDI_DOMINATORS, son))
4533 {
4534 /* Make sure block is actually in a TM region, and it
4535 isn't already in old_irr. */
4536 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4537 && bitmap_bit_p (all_region_blocks, son->index))
4538 bitmap_set_bit (new_irr, son->index);
4539 }
4540 }
4541 }
4542 while (!bbs.is_empty ());
4543
4544 BITMAP_FREE (all_region_blocks);
4545 bbs.release ();
4546 }
4547
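/* Walk the statements in BB, decrementing the recorded number of
   callers of the transactional clone for each callee, undoing the
   counting done by ipa_tm_scan_calls_block.  FOR_CLONE selects
   whether the clone or the normal caller counts are adjusted.  */
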
4548 static void
4549 ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4550 {
4551 gimple_stmt_iterator gsi;
4552
4553 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4554 {
4555 gimple *stmt = gsi_stmt (gsi);
4556 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4557 {
4558 tree fndecl = gimple_call_fndecl (stmt);
4559 if (fndecl)
4560 {
4561 struct tm_ipa_cg_data *d;
4562 unsigned *pcallers;
4563 struct cgraph_node *tnode;
4564
4565 if (is_tm_ending_fndecl (fndecl))
4566 continue;
4567 if (find_tm_replacement_function (fndecl))
4568 continue;
4569
4570 tnode = cgraph_node::get (fndecl);
4571 d = get_cg_data (&tnode, true);
4572
4573 pcallers = (for_clone ? &d->tm_callers_clone
4574 : &d->tm_callers_normal);
4575
4576 gcc_assert (*pcallers > 0);
4577 *pcallers -= 1;
4578 }
4579 }
4580 }
4581 }
4582
4583 /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4584 as well as other irrevocable actions such as inline assembly. Mark all
4585 such blocks as irrevocable and decrement the number of calls to
4586 transactional clones. Return true if, for the transactional clone, the
4587 entire function is irrevocable. */
4588
4589 static bool
4590 ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4591 {
4592 struct tm_ipa_cg_data *d;
4593 bitmap new_irr, old_irr;
4594 bool ret = false;
4595
4596 /* Builtin operators (operator new, and such). */
4597 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4598 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
4599 return false;
4600
4601 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
4602 calculate_dominance_info (CDI_DOMINATORS);
4603
4604 d = get_cg_data (&node, true);
4605 auto_vec<basic_block, 10> queue;
4606 new_irr = BITMAP_ALLOC (&tm_obstack);
4607
4608 /* Scan each tm region, propagating irrevocable status through the tree. */
4609 if (for_clone)
4610 {
4611 old_irr = d->irrevocable_blocks_clone;
4612 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4613 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4614 {
4615 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4616 new_irr,
4617 old_irr, NULL);
4618 ret = bitmap_bit_p (new_irr,
4619 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
4620 }
4621 }
4622 else
4623 {
4624 struct tm_region *region;
4625
4626 old_irr = d->irrevocable_blocks_normal;
4627 for (region = d->all_tm_regions; region; region = region->next)
4628 {
4629 queue.quick_push (region->entry_block);
4630 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4631 region->exit_blocks))
4632 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4633 region->exit_blocks);
4634 }
4635 }
4636
4637 /* If we found any new irrevocable blocks, reduce the call count for
4638 transactional clones within the irrevocable blocks. Save the new
4639 set of irrevocable blocks for next time. */
4640 if (!bitmap_empty_p (new_irr))
4641 {
4642 bitmap_iterator bmi;
4643 unsigned i;
4644
4645 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4646 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4647 for_clone);
4648
4649 if (old_irr)
4650 {
4651 bitmap_ior_into (old_irr, new_irr);
4652 BITMAP_FREE (new_irr);
4653 }
4654 else if (for_clone)
4655 d->irrevocable_blocks_clone = new_irr;
4656 else
4657 d->irrevocable_blocks_normal = new_irr;
4658
4659 if (dump_file && new_irr)
4660 {
4661 const char *dname;
4662 bitmap_iterator bmi;
4663 unsigned i;
4664
4665 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4666 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4667 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4668 }
4669 }
4670 else
4671 BITMAP_FREE (new_irr);
4672
4673 pop_cfun ();
4674
4675 return ret;
4676 }
4677
4678 /* Return true if, for the transactional clone of NODE, any call
4679 may enter irrevocable mode. */
4680
4681 static bool
4682 ipa_tm_mayenterirr_function (struct cgraph_node *node)
4683 {
4684 struct tm_ipa_cg_data *d;
4685 tree decl;
4686 unsigned flags;
4687
4688 d = get_cg_data (&node, true);
4689 decl = node->decl;
4690 flags = flags_from_decl_or_type (decl);
4691
4692 /* Handle some TM builtins. Ordinarily these aren't actually generated
4693 at this point, but handling these functions when written in by the
4694 user makes it easier to build unit tests. */
4695 if (flags & ECF_TM_BUILTIN)
4696 return false;
4697
4698 /* Filter out all functions that are marked. */
4699 if (flags & ECF_TM_PURE)
4700 return false;
4701 if (is_tm_safe (decl))
4702 return false;
4703 if (is_tm_irrevocable (decl))
4704 return true;
4705 if (is_tm_callable (decl))
4706 return true;
4707 if (find_tm_replacement_function (decl))
4708 return true;
4709
4710 /* If we aren't seeing the final version of the function we don't
4711 know what it will contain at runtime. */
4712 if (node->get_availability () < AVAIL_AVAILABLE)
4713 return true;
4714
4715 /* If the function must go irrevocable, then of course true. */
4716 if (d->is_irrevocable)
4717 return true;
4718
4719 /* If there are any blocks marked irrevocable, then the function
4720 as a whole may enter irrevocable. */
4721 if (d->irrevocable_blocks_clone)
4722 return true;
4723
4724 /* We may have previously marked this function as tm_may_enter_irr;
4725 see pass_diagnose_tm_blocks. */
4726 if (node->local.tm_may_enter_irr)
4727 return true;
4728
4729 /* Recurse on the main body for aliases. In general, this will
4730 result in one of the bits above being set so that we will not
4731 have to recurse next time. */
4732 if (node->alias)
4733 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
4734
4735 /* What remains is unmarked local functions without items that force
4736 the function to go irrevocable. */
4737 return false;
4738 }
4739
4740 /* Diagnose calls from transaction_safe functions to unmarked
4741 functions that are determined to not be safe. */
4742
4743 static void
4744 ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4745 {
4746 struct cgraph_edge *e;
4747
4748 for (e = node->callees; e ; e = e->next_callee)
4749 if (!is_tm_callable (e->callee->decl)
4750 && e->callee->local.tm_may_enter_irr)
4751 error_at (gimple_location (e->call_stmt),
4752 "unsafe function call %qD within "
4753 "%<transaction_safe%> function", e->callee->decl);
4754 }
4755
4756 /* Diagnose calls from atomic transactions to unmarked functions
4757 that are determined to not be safe. */
4758
4759 static void
4760 ipa_tm_diagnose_transaction (struct cgraph_node *node,
4761 struct tm_region *all_tm_regions)
4762 {
4763 struct tm_region *r;
4764
4765 for (r = all_tm_regions; r ; r = r->next)
4766 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4767 & GTMA_IS_RELAXED)
4768 {
4769 /* Atomic transactions can be nested inside relaxed. */
4770 if (r->inner)
4771 ipa_tm_diagnose_transaction (node, r->inner);
4772 }
4773 else
4774 {
4775 vec<basic_block> bbs;
4776 gimple_stmt_iterator gsi;
4777 basic_block bb;
4778 size_t i;
4779
4780 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4781 r->irr_blocks, NULL, false);
4782
4783 for (i = 0; bbs.iterate (i, &bb); ++i)
4784 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4785 {
4786 gimple *stmt = gsi_stmt (gsi);
4787 tree fndecl;
4788
4789 if (gimple_code (stmt) == GIMPLE_ASM)
4790 {
4791 error_at (gimple_location (stmt),
4792 "%<asm%> not allowed in atomic transaction");
4793 continue;
4794 }
4795
4796 if (!is_gimple_call (stmt))
4797 continue;
4798 fndecl = gimple_call_fndecl (stmt);
4799
4800 /* Indirect function calls have been diagnosed already. */
4801 if (!fndecl)
4802 continue;
4803
4804 /* Stop at the end of the transaction. */
4805 if (is_tm_ending_fndecl (fndecl))
4806 {
4807 if (bitmap_bit_p (r->exit_blocks, bb->index))
4808 break;
4809 continue;
4810 }
4811
4812 /* Marked functions have been diagnosed already. */
4813 if (is_tm_pure_call (stmt))
4814 continue;
4815 if (is_tm_callable (fndecl))
4816 continue;
4817
4818 if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
4819 error_at (gimple_location (stmt),
4820 "unsafe function call %qD within "
4821 "atomic transaction", fndecl);
4822 }
4823
4824 bbs.release ();
4825 }
4826 }
4827
4828 /* Return a transactional mangling of the identifier OLD_ASM_ID (the
4829 DECL_ASSEMBLER_NAME of the function being cloned). The returned
4830 value is a GC-allocated IDENTIFIER_NODE; the caller need not free it. */
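
/* For example: the unencoded C symbol "foo" becomes "_ZGTt3foo",
   while the C++ mangled name "_Z3foov" becomes "_ZGTt3foov".  */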
4831
4832 static tree
4833 tm_mangle (tree old_asm_id)
4834 {
4835 const char *old_asm_name;
4836 char *tm_name;
4837 void *alloc = NULL;
4838 struct demangle_component *dc;
4839 tree new_asm_id;
4840
4841 /* Determine if the symbol is already a valid C++ mangled name. Do this
4842 even for C, which might be interfacing with C++ code via appropriately
4843 ugly identifiers. */
4844 /* ??? We could probably do just as well checking for "_Z" and be done. */
4845 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4846 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4847
4848 if (dc == NULL)
4849 {
4850 char length[12];
4851
4852 do_unencoded:
4853 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4854 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4855 }
4856 else
4857 {
4858 old_asm_name += 2; /* Skip _Z */
4859
4860 switch (dc->type)
4861 {
4862 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4863 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4864 /* Don't play silly games, you! */
4865 goto do_unencoded;
4866
4867 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4868 /* I'd really like to know if we can ever be passed one of
4869 these from the C++ front end. The Logical Thing would
4870 seem that hidden-alias should be outer-most, so that we
4871 get hidden-alias of a transaction-clone and not vice-versa. */
4872 old_asm_name += 2;
4873 break;
4874
4875 default:
4876 break;
4877 }
4878
4879 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4880 }
4881 free (alloc);
4882
4883 new_asm_id = get_identifier (tm_name);
4884 free (tm_name);
4885
4886 return new_asm_id;
4887 }
4888
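/* Mark NODE as needing output even if no direct references remain,
   so that the freshly created clone is not removed as unreachable.  */
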
4889 static inline void
4890 ipa_tm_mark_force_output_node (struct cgraph_node *node)
4891 {
4892 node->mark_force_output ();
4893 node->analyzed = true;
4894 }
4895
4896 static inline void
4897 ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4898 {
4899 node->forced_by_abi = true;
4900 node->analyzed = true;
4901 }
4902
4903 /* Callback data for ipa_tm_create_version_alias. */
4904 struct create_version_alias_info
4905 {
4906 struct cgraph_node *old_node;
4907 tree new_decl;
4908 };
4909
4910 /* A subroutine of ipa_tm_create_version, called via
4911 cgraph_for_node_and_aliases. Create new tm clones for each of
4912 the existing aliases. */
4913 static bool
4914 ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4915 {
4916 struct create_version_alias_info *info
4917 = (struct create_version_alias_info *)data;
4918 tree old_decl, new_decl, tm_name;
4919 struct cgraph_node *new_node;
4920
4921 if (!node->cpp_implicit_alias)
4922 return false;
4923
4924 old_decl = node->decl;
4925 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4926 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4927 TREE_CODE (old_decl), tm_name,
4928 TREE_TYPE (old_decl));
4929
4930 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4931 SET_DECL_RTL (new_decl, NULL);
4932
4933 /* Based loosely on C++'s make_alias_for(). */
4934 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
4935 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4936 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
4937 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4938 DECL_EXTERNAL (new_decl) = 0;
4939 DECL_ARTIFICIAL (new_decl) = 1;
4940 TREE_ADDRESSABLE (new_decl) = 1;
4941 TREE_USED (new_decl) = 1;
4942 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4943
4944 /* Perform the same remapping to the comdat group. */
4945 if (DECL_ONE_ONLY (new_decl))
4946 varpool_node::get (new_decl)->set_comdat_group
4947 (tm_mangle (decl_comdat_group_id (old_decl)));
4948
4949 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
4950 new_node->tm_clone = true;
4951 new_node->externally_visible = info->old_node->externally_visible;
4952 new_node->no_reorder = info->old_node->no_reorder;
4953 /* ?? Do not traverse aliases here. */
4954 get_cg_data (&node, false)->clone = new_node;
4955
4956 record_tm_clone_pair (old_decl, new_decl);
4957
4958 if (info->old_node->force_output
4959 || info->old_node->ref_list.first_referring ())
4960 ipa_tm_mark_force_output_node (new_node);
4961 if (info->old_node->forced_by_abi)
4962 ipa_tm_mark_forced_by_abi_node (new_node);
4963 return false;
4964 }
4965
4966 /* Create a copy of the function (possibly declaration only) of OLD_NODE,
4967 appropriate for the transactional clone. */
4968
4969 static void
4970 ipa_tm_create_version (struct cgraph_node *old_node)
4971 {
4972 tree new_decl, old_decl, tm_name;
4973 struct cgraph_node *new_node;
4974
4975 old_decl = old_node->decl;
4976 new_decl = copy_node (old_decl);
4977
4978 /* DECL_ASSEMBLER_NAME needs to be set before we call
4979 cgraph_copy_node_for_versioning below, because cgraph_node will
4980 fill the assembler_name_hash. */
4981 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4982 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4983 SET_DECL_RTL (new_decl, NULL);
4984 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4985
4986 /* Perform the same remapping to the comdat group. */
4987 if (DECL_ONE_ONLY (new_decl))
4988 varpool_node::get (new_decl)->set_comdat_group
4989 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
4990
4991 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
4992 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
4993 new_node->local.local = false;
4994 new_node->externally_visible = old_node->externally_visible;
4995 new_node->lowered = true;
4996 new_node->tm_clone = 1;
4997 if (!old_node->implicit_section)
4998 new_node->set_section (old_node->get_section ());
4999 get_cg_data (&old_node, true)->clone = new_node;
5000
5001 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
5002 {
5003 /* Remap extern inline to static inline. */
5004 /* ??? Is it worth trying to use make_decl_one_only? */
5005 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
5006 {
5007 DECL_EXTERNAL (new_decl) = 0;
5008 TREE_PUBLIC (new_decl) = 0;
5009 DECL_WEAK (new_decl) = 0;
5010 }
5011
5012 tree_function_versioning (old_decl, new_decl,
5013 NULL, false, NULL,
5014 false, NULL, NULL);
5015 }
5016
5017 record_tm_clone_pair (old_decl, new_decl);
5018
5019 symtab->call_cgraph_insertion_hooks (new_node);
5020 if (old_node->force_output
5021 || old_node->ref_list.first_referring ())
5022 ipa_tm_mark_force_output_node (new_node);
5023 if (old_node->forced_by_abi)
5024 ipa_tm_mark_forced_by_abi_node (new_node);
5025
5026 /* Do the same thing, but for any aliases of the original node. */
5027 {
5028 struct create_version_alias_info data;
5029 data.old_node = old_node;
5030 data.new_decl = new_decl;
5031 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
5032 &data, true);
5033 }
5034 }
5035
5036 /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
5037
5038 static void
5039 ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
5040 basic_block bb)
5041 {
5042 gimple_stmt_iterator gsi;
5043 gcall *g;
5044
5045 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5046
5047 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
5048 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
5049
5050 split_block_after_labels (bb);
5051 gsi = gsi_after_labels (bb);
5052 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5053
5054 node->create_edge (cgraph_node::get_create
5055 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
5056 g, gimple_bb (g)->count);
5057 }
5058
5059 /* Construct a call to TM_GETTMCLONE and insert it before GSI. */
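
/* Roughly, an indirect call inside a transaction,

     ret = (*fnptr) (arg);

   becomes

     tmp = _ITM_getTMCloneOrIrrevocable (fnptr);
     callfn = (typeof (fnptr)) tmp;
     ret = (*callfn) (arg);

   where the runtime either returns the registered transactional clone
   of the callee, or transitions to serial-irrevocable mode and returns
   FNPTR itself.  (The safe variant, _ITM_getTMCloneSafe, is used
   instead when the function type is transaction_safe.)  */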
5060
5061 static bool
5062 ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5063 struct tm_region *region,
5064 gimple_stmt_iterator *gsi, gcall *stmt)
5065 {
5066 tree gettm_fn, ret, old_fn, callfn;
5067 gcall *g;
5068 gassign *g2;
5069 bool safe;
5070
5071 old_fn = gimple_call_fn (stmt);
5072
5073 if (TREE_CODE (old_fn) == ADDR_EXPR)
5074 {
5075 tree fndecl = TREE_OPERAND (old_fn, 0);
5076 tree clone = get_tm_clone_pair (fndecl);
5077
5078 /* By transforming the call into a TM_GETTMCLONE, we are
5079 technically taking the address of the original function and
5080 its clone. Explain this so inlining will know this function
5081 is needed. */
5082 cgraph_node::get (fndecl)->mark_address_taken ();
5083 if (clone)
5084 cgraph_node::get (clone)->mark_address_taken ();
5085 }
5086
5087 safe = is_tm_safe (TREE_TYPE (old_fn));
5088 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5089 : BUILT_IN_TM_GETTMCLONE_IRR);
5090 ret = create_tmp_var (ptr_type_node);
5091
5092 if (!safe)
5093 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5094
5095 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5096 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5097 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5098
5099 g = gimple_build_call (gettm_fn, 1, old_fn);
5100 ret = make_ssa_name (ret, g);
5101 gimple_call_set_lhs (g, ret);
5102
5103 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5104
5105 node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count);
5106
5107 /* Cast return value from tm_gettmclone* into appropriate function
5108 pointer. */
5109 callfn = create_tmp_var (TREE_TYPE (old_fn));
5110 g2 = gimple_build_assign (callfn,
5111 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5112 callfn = make_ssa_name (callfn, g2);
5113 gimple_assign_set_lhs (g2, callfn);
5114 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5115
5116 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5117 which we would have derived from the decl. Failure to save
5118 this bit means we might have to split the basic block. */
5119 if (gimple_call_nothrow_p (stmt))
5120 gimple_call_set_nothrow (stmt, true);
5121
5122 gimple_call_set_fn (stmt, callfn);
5123
5124 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5125 for a call statement. Fix it. */
5126 {
5127 tree lhs = gimple_call_lhs (stmt);
5128 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5129 if (lhs
5130 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5131 {
5132 tree temp;
5133
5134 temp = create_tmp_reg (rettype);
5135 gimple_call_set_lhs (stmt, temp);
5136
5137 g2 = gimple_build_assign (lhs,
5138 fold_build1 (VIEW_CONVERT_EXPR,
5139 TREE_TYPE (lhs), temp));
5140 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5141 }
5142 }
5143
5144 update_stmt (stmt);
5145 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5146 if (e && e->indirect_info)
5147 e->indirect_info->polymorphic = false;
5148
5149 return true;
5150 }
5151
5152 /* Helper function for ipa_tm_transform_calls*. Given a call
5153 statement in GSI which resides inside transaction REGION, redirect
5154 the call to either its wrapper function, or its clone. */
5155
5156 static void
5157 ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5158 struct tm_region *region,
5159 gimple_stmt_iterator *gsi,
5160 bool *need_ssa_rename_p)
5161 {
5162 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
5163 struct cgraph_node *new_node;
5164 struct cgraph_edge *e = node->get_edge (stmt);
5165 tree fndecl = gimple_call_fndecl (stmt);
5166
5167 /* For indirect calls, pass the address through the runtime. */
5168 if (fndecl == NULL)
5169 {
5170 *need_ssa_rename_p |=
5171 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5172 return;
5173 }
5174
5175 /* Handle some TM builtins. Ordinarily these aren't actually generated
5176 at this point, but handling these functions when written in by the
5177 user makes it easier to build unit tests. */
5178 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5179 return;
5180
5181 /* Fixup recursive calls inside clones. */
5182 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5183 for recursion but not update the call statements themselves? */
5184 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5185 {
5186 gimple_call_set_fndecl (stmt, current_function_decl);
5187 return;
5188 }
5189
5190 /* If there is a replacement, use it. */
5191 fndecl = find_tm_replacement_function (fndecl);
5192 if (fndecl)
5193 {
5194 new_node = cgraph_node::get_create (fndecl);
5195
5196 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5197
5198 We can't do this earlier in record_tm_replacement because
5199 cgraph_remove_unreachable_nodes is called before we inject
5200 references to the node. Further, we can't do this in some
5201 nice central place in ipa_tm_execute because we don't have
5202 the exact list of wrapper functions that would be used.
5203 Marking more wrappers than necessary results in the creation
5204 of unnecessary cgraph_nodes, which can cause some of the
5205 other IPA passes to crash.
5206
5207 We do need to mark these nodes so that we get the proper
5208 result in expand_call_tm. */
5209 /* ??? This seems broken. How is it that we're marking the
5210 CALLEE as may_enter_irr? Surely we should be marking the
5211 CALLER. Also note that find_tm_replacement_function also
5212 contains mappings into the TM runtime, e.g. memcpy. These
5213 we know won't go irrevocable. */
5214 new_node->local.tm_may_enter_irr = 1;
5215 }
5216 else
5217 {
5218 struct tm_ipa_cg_data *d;
5219 struct cgraph_node *tnode = e->callee;
5220
5221 d = get_cg_data (&tnode, true);
5222 new_node = d->clone;
5223
5224 /* As we've already skipped pure calls and appropriate builtins,
5225 and we've already marked irrevocable blocks, if we can't come
5226 up with a static replacement, then ask the runtime. */
5227 if (new_node == NULL)
5228 {
5229 *need_ssa_rename_p |=
5230 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5231 return;
5232 }
5233
5234 fndecl = new_node->decl;
5235 }
5236
5237 e->redirect_callee (new_node);
5238 gimple_call_set_fndecl (stmt, fndecl);
5239 }
5240
5241 /* Helper function for ipa_tm_transform_calls. For a given BB,
5242 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5243 redirect other calls to the generated transactional clone. */
5244
5245 static bool
5246 ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5247 basic_block bb, bitmap irr_blocks)
5248 {
5249 gimple_stmt_iterator gsi;
5250 bool need_ssa_rename = false;
5251
5252 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5253 {
5254 ipa_tm_insert_irr_call (node, region, bb);
5255 return true;
5256 }
5257
5258 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5259 {
5260 gimple *stmt = gsi_stmt (gsi);
5261
5262 if (!is_gimple_call (stmt))
5263 continue;
5264 if (is_tm_pure_call (stmt))
5265 continue;
5266
5267 /* Redirect edges to the appropriate replacement or clone. */
5268 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5269 }
5270
5271 return need_ssa_rename;
5272 }
5273
5274 /* Walk the CFG for REGION, beginning at BB. Install calls to
5275 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5276 the generated transactional clone. */
5277
5278 static bool
5279 ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5280 basic_block bb, bitmap irr_blocks)
5281 {
5282 bool need_ssa_rename = false;
5283 edge e;
5284 edge_iterator ei;
5285 auto_vec<basic_block> queue;
5286 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5287
5288 queue.safe_push (bb);
5289 do
5290 {
5291 bb = queue.pop ();
5292
5293 need_ssa_rename |=
5294 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5295
5296 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5297 continue;
5298
5299 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5300 continue;
5301
5302 FOR_EACH_EDGE (e, ei, bb->succs)
5303 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5304 {
5305 bitmap_set_bit (visited_blocks, e->dest->index);
5306 queue.safe_push (e->dest);
5307 }
5308 }
5309 while (!queue.is_empty ());
5310
5311 BITMAP_FREE (visited_blocks);
5312
5313 return need_ssa_rename;
5314 }
5315
5316 /* Transform the calls within the TM regions within NODE. */
5317
5318 static void
5319 ipa_tm_transform_transaction (struct cgraph_node *node)
5320 {
5321 struct tm_ipa_cg_data *d;
5322 struct tm_region *region;
5323 bool need_ssa_rename = false;
5324
5325 d = get_cg_data (&node, true);
5326
5327 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5328 calculate_dominance_info (CDI_DOMINATORS);
5329
5330 for (region = d->all_tm_regions; region; region = region->next)
5331 {
5332 /* If we're sure to go irrevocable, don't transform anything. */
5333 if (d->irrevocable_blocks_normal
5334 && bitmap_bit_p (d->irrevocable_blocks_normal,
5335 region->entry_block->index))
5336 {
5337 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5338 | GTMA_MAY_ENTER_IRREVOCABLE
5339 | GTMA_HAS_NO_INSTRUMENTATION);
5340 continue;
5341 }
5342
5343 need_ssa_rename |=
5344 ipa_tm_transform_calls (node, region, region->entry_block,
5345 d->irrevocable_blocks_normal);
5346 }
5347
5348 if (need_ssa_rename)
5349 update_ssa (TODO_update_ssa_only_virtuals);
5350
5351 pop_cfun ();
5352 }
5353
5354 /* Transform the calls within the transactional clone of NODE. */
5355
5356 static void
5357 ipa_tm_transform_clone (struct cgraph_node *node)
5358 {
5359 struct tm_ipa_cg_data *d;
5360 bool need_ssa_rename;
5361
5362 d = get_cg_data (&node, true);
5363
5364 /* If this function makes no calls and has no irrevocable blocks,
5365 then there's nothing to do. */
5366 /* ??? Remove non-aborting top-level transactions. */
5367 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
5368 return;
5369
5370 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
5371 calculate_dominance_info (CDI_DOMINATORS);
5372
5373 need_ssa_rename =
5374 ipa_tm_transform_calls (d->clone, NULL,
5375 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
5376 d->irrevocable_blocks_clone);
5377
5378 if (need_ssa_rename)
5379 update_ssa (TODO_update_ssa_only_virtuals);
5380
5381 pop_cfun ();
5382 }
5383
5384 /* Main entry point for the transactional memory IPA pass. */
5385
5386 static unsigned int
5387 ipa_tm_execute (void)
5388 {
5389 cgraph_node_queue tm_callees = cgraph_node_queue ();
5390 /* List of functions that will go irrevocable. */
5391 cgraph_node_queue irr_worklist = cgraph_node_queue ();
5392
5393 struct cgraph_node *node;
5394 struct tm_ipa_cg_data *d;
5395 enum availability a;
5396 unsigned int i;
5397
5398 cgraph_node::checking_verify_cgraph_nodes ();
5399
5400 bitmap_obstack_initialize (&tm_obstack);
5401 initialize_original_copy_tables ();
5402
5403 /* For all local functions marked tm_callable, queue them. */
5404 FOR_EACH_DEFINED_FUNCTION (node)
5405 if (is_tm_callable (node->decl)
5406 && node->get_availability () >= AVAIL_INTERPOSABLE)
5407 {
5408 d = get_cg_data (&node, true);
5409 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5410 }
5411
5412 /* For all local reachable functions... */
5413 FOR_EACH_DEFINED_FUNCTION (node)
5414 if (node->lowered
5415 && node->get_availability () >= AVAIL_INTERPOSABLE)
5416 {
5417 /* ... marked tm_pure, record that fact for the runtime by
5418 indicating that the pure function is its own tm_callable.
5419 No need to do this if the function's address can't be taken. */
5420 if (is_tm_pure (node->decl))
5421 {
5422 if (!node->local.local)
5423 record_tm_clone_pair (node->decl, node->decl);
5424 continue;
5425 }
5426
5427 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
5428 calculate_dominance_info (CDI_DOMINATORS);
5429
5430 tm_region_init (NULL);
5431 if (all_tm_regions)
5432 {
5433 d = get_cg_data (&node, true);
5434
5435 /* Scan for calls that are in each transaction, and
5436 generate the uninstrumented code path. */
5437 ipa_tm_scan_calls_transaction (d, &tm_callees);
5438
5439 /* Put it in the worklist so we can scan the function
5440 later (ipa_tm_scan_irr_function) and mark the
5441 irrevocable blocks. */
5442 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5443 d->want_irr_scan_normal = true;
5444 }
5445
5446 pop_cfun ();
5447 }
5448
5449 /* For every local function on the callee list, scan as if we will be
5450 creating a transactional clone, queueing all new functions we find
5451 along the way. */
5452 for (i = 0; i < tm_callees.length (); ++i)
5453 {
5454 node = tm_callees[i];
5455 a = node->get_availability ();
5456 d = get_cg_data (&node, true);
5457
5458 /* Put it in the worklist so we can scan the function later
5459 (ipa_tm_scan_irr_function) and mark the irrevocable
5460 blocks. */
5461 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5462
5463 /* Some callees cannot be arbitrarily cloned. These will always be
5464 irrevocable. Mark these now, so that we need not scan them. */
5465 if (is_tm_irrevocable (node->decl))
5466 ipa_tm_note_irrevocable (node, &irr_worklist);
5467 else if (a <= AVAIL_NOT_AVAILABLE
5468 && !is_tm_safe_or_pure (node->decl))
5469 ipa_tm_note_irrevocable (node, &irr_worklist);
5470 else if (a >= AVAIL_INTERPOSABLE)
5471 {
5472 if (!tree_versionable_function_p (node->decl))
5473 ipa_tm_note_irrevocable (node, &irr_worklist);
5474 else if (!d->is_irrevocable)
5475 {
5476 /* If this is an alias, make sure its base is queued as well.
5477 We need not scan the callees now, as the base will do. */
5478 if (node->alias)
5479 {
5480 node = cgraph_node::get (node->thunk.alias);
5481 d = get_cg_data (&node, true);
5482 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5483 continue;
5484 }
5485
5486 /* Add all nodes called by this function into
5487 tm_callees as well. */
5488 ipa_tm_scan_calls_clone (node, &tm_callees);
5489 }
5490 }
5491 }
5492
5493 /* Iterate scans until no more work to be done. Prefer not to use
5494 vec::pop because the worklist tends to follow a breadth-first
5495 search of the callgraph, which should allow convergence with a
5496 minimum number of scans. But we also don't want the worklist
5497 array to grow without bound, so we shift the array up periodically. */
5498 for (i = 0; i < irr_worklist.length (); ++i)
5499 {
5500 if (i > 256 && i == irr_worklist.length () / 8)
5501 {
5502 irr_worklist.block_remove (0, i);
5503 i = 0;
5504 }
5505
5506 node = irr_worklist[i];
5507 d = get_cg_data (&node, true);
5508 d->in_worklist = false;
5509
5510 if (d->want_irr_scan_normal)
5511 {
5512 d->want_irr_scan_normal = false;
5513 ipa_tm_scan_irr_function (node, false);
5514 }
5515 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5516 ipa_tm_note_irrevocable (node, &irr_worklist);
5517 }
5518
5519 /* For every function on the callee list, collect the tm_may_enter_irr
5520 bit on the node. */
5521 irr_worklist.truncate (0);
5522 for (i = 0; i < tm_callees.length (); ++i)
5523 {
5524 node = tm_callees[i];
5525 if (ipa_tm_mayenterirr_function (node))
5526 {
5527 d = get_cg_data (&node, true);
5528 gcc_assert (d->in_worklist == false);
5529 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5530 }
5531 }
5532
5533 /* Propagate the tm_may_enter_irr bit to callers until stable. */
5534 for (i = 0; i < irr_worklist.length (); ++i)
5535 {
5536 struct cgraph_node *caller;
5537 struct cgraph_edge *e;
5538 struct ipa_ref *ref;
5539
5540 if (i > 256 && i == irr_worklist.length () / 8)
5541 {
5542 irr_worklist.block_remove (0, i);
5543 i = 0;
5544 }
5545
5546 node = irr_worklist[i];
5547 d = get_cg_data (&node, true);
5548 d->in_worklist = false;
5549 node->local.tm_may_enter_irr = true;
5550
5551 /* Propagate back to normal callers. */
5552 for (e = node->callers; e ; e = e->next_caller)
5553 {
5554 caller = e->caller;
5555 if (!is_tm_safe_or_pure (caller->decl)
5556 && !caller->local.tm_may_enter_irr)
5557 {
5558 d = get_cg_data (&caller, true);
5559 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5560 }
5561 }
5562
5563 /* Propagate back to referring aliases as well. */
5564 FOR_EACH_ALIAS (node, ref)
5565 {
5566 caller = dyn_cast<cgraph_node *> (ref->referring);
5567 if (!caller->local.tm_may_enter_irr)
5568 {
5569 /* ?? Do not traverse aliases here. */
5570 d = get_cg_data (&caller, false);
5571 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5572 }
5573 }
5574 }
5575
5576 /* Now validate all tm_safe functions, and all atomic regions in
5577 other functions. */
5578 FOR_EACH_DEFINED_FUNCTION (node)
5579 if (node->lowered
5580 && node->get_availability () >= AVAIL_INTERPOSABLE)
5581 {
5582 d = get_cg_data (&node, true);
5583 if (is_tm_safe (node->decl))
5584 ipa_tm_diagnose_tm_safe (node);
5585 else if (d->all_tm_regions)
5586 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5587 }
5588
5589 /* Create clones. Do those that are not irrevocable and have a
5590 positive call count. Do those publicly visible functions that
5591 the user directed us to clone. */
5592 for (i = 0; i < tm_callees.length (); ++i)
5593 {
5594 bool doit = false;
5595
5596 node = tm_callees[i];
5597 if (node->cpp_implicit_alias)
5598 continue;
5599
5600 a = node->get_availability ();
5601 d = get_cg_data (&node, true);
5602
5603 if (a <= AVAIL_NOT_AVAILABLE)
5604 doit = is_tm_callable (node->decl);
5605 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
5606 doit = true;
5607 else if (!d->is_irrevocable
5608 && d->tm_callers_normal + d->tm_callers_clone > 0)
5609 doit = true;
5610
5611 if (doit)
5612 ipa_tm_create_version (node);
5613 }
5614
5615 /* Redirect calls to the new clones, and insert irrevocable marks. */
5616 for (i = 0; i < tm_callees.length (); ++i)
5617 {
5618 node = tm_callees[i];
5619 if (node->analyzed)
5620 {
5621 d = get_cg_data (&node, true);
5622 if (d->clone)
5623 ipa_tm_transform_clone (node);
5624 }
5625 }
5626 FOR_EACH_DEFINED_FUNCTION (node)
5627 if (node->lowered
5628 && node->get_availability () >= AVAIL_INTERPOSABLE)
5629 {
5630 d = get_cg_data (&node, true);
5631 if (d->all_tm_regions)
5632 ipa_tm_transform_transaction (node);
5633 }
5634
5635 /* Free and clear all data structures. */
5636 tm_callees.release ();
5637 irr_worklist.release ();
5638 bitmap_obstack_release (&tm_obstack);
5639 free_original_copy_tables ();
5640
5641 FOR_EACH_FUNCTION (node)
5642 node->aux = NULL;
5643
5644 cgraph_node::checking_verify_cgraph_nodes ();
5645
5646 return 0;
5647 }
5648
5649 namespace {
5650
5651 const pass_data pass_data_ipa_tm =
5652 {
5653 SIMPLE_IPA_PASS, /* type */
5654 "tmipa", /* name */
5655 OPTGROUP_NONE, /* optinfo_flags */
5656 TV_TRANS_MEM, /* tv_id */
5657 ( PROP_ssa | PROP_cfg ), /* properties_required */
5658 0, /* properties_provided */
5659 0, /* properties_destroyed */
5660 0, /* todo_flags_start */
5661 0, /* todo_flags_finish */
5662 };
5663
5664 class pass_ipa_tm : public simple_ipa_opt_pass
5665 {
5666 public:
5667 pass_ipa_tm (gcc::context *ctxt)
5668 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
5669 {}
5670
5671 /* opt_pass methods: */
5672 virtual bool gate (function *) { return flag_tm; }
5673 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
5674
5675 }; // class pass_ipa_tm
5676
5677 } // anon namespace
5678
5679 simple_ipa_opt_pass *
5680 make_pass_ipa_tm (gcc::context *ctxt)
5681 {
5682 return new pass_ipa_tm (ctxt);
5683 }
5684
5685 #include "gt-trans-mem.h"