]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/trans-mem.c
expander: Optimize store_expr from STRING_CST [PR95052]
[thirdparty/gcc.git] / gcc / trans-mem.c
CommitLineData
0a35513e 1/* Passes for transactional memory support.
8d9254fc 2 Copyright (C) 2008-2020 Free Software Foundation, Inc.
d4c61094
AH
3 Contributed by Richard Henderson <rth@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
0a35513e
AH
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
c7131fb2 25#include "backend.h"
957060b5
AM
26#include "target.h"
27#include "rtl.h"
40e23961 28#include "tree.h"
c7131fb2 29#include "gimple.h"
957060b5
AM
30#include "cfghooks.h"
31#include "tree-pass.h"
c7131fb2 32#include "ssa.h"
957060b5
AM
33#include "cgraph.h"
34#include "gimple-pretty-print.h"
35#include "diagnostic-core.h"
40e23961 36#include "fold-const.h"
2fb9a547 37#include "tree-eh.h"
d8a2d370 38#include "calls.h"
45b0be94 39#include "gimplify.h"
5be5c238 40#include "gimple-iterator.h"
18f429e2 41#include "gimplify-me.h"
5be5c238 42#include "gimple-walk.h"
442b4905 43#include "tree-cfg.h"
442b4905 44#include "tree-into-ssa.h"
0a35513e 45#include "tree-inline.h"
0a35513e
AH
46#include "demangle.h"
47#include "output.h"
48#include "trans-mem.h"
0a35513e 49#include "langhooks.h"
7d776ee2 50#include "cfgloop.h"
4484a35a 51#include "tree-ssa-address.h"
314e6352
ML
52#include "stringpool.h"
53#include "attribs.h"
0a35513e 54
0a35513e
AH
55#define A_RUNINSTRUMENTEDCODE 0x0001
56#define A_RUNUNINSTRUMENTEDCODE 0x0002
57#define A_SAVELIVEVARIABLES 0x0004
58#define A_RESTORELIVEVARIABLES 0x0008
59#define A_ABORTTRANSACTION 0x0010
60
61#define AR_USERABORT 0x0001
62#define AR_USERRETRY 0x0002
63#define AR_TMCONFLICT 0x0004
64#define AR_EXCEPTIONBLOCKABORT 0x0008
65#define AR_OUTERABORT 0x0010
66
67#define MODE_SERIALIRREVOCABLE 0x0000
68
69
70/* The representation of a transaction changes several times during the
71 lowering process. In the beginning, in the front-end we have the
72 GENERIC tree TRANSACTION_EXPR. For example,
73
74 __transaction {
75 local++;
76 if (++global == 10)
77 __tm_abort;
78 }
79
80 During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
81 trivially replaced with a GIMPLE_TRANSACTION node.
82
83 During pass_lower_tm, we examine the body of transactions looking
84 for aborts. Transactions that do not contain an abort may be
85 merged into an outer transaction. We also add a TRY-FINALLY node
86 to arrange for the transaction to be committed on any exit.
87
88 [??? Think about how this arrangement affects throw-with-commit
89 and throw-with-abort operations. In this case we want the TRY to
90 handle gotos, but not to catch any exceptions because the transaction
91 will already be closed.]
92
93 GIMPLE_TRANSACTION [label=NULL] {
94 try {
95 local = local + 1;
96 t0 = global;
97 t1 = t0 + 1;
98 global = t1;
99 if (t1 == 10)
100 __builtin___tm_abort ();
101 } finally {
102 __builtin___tm_commit ();
103 }
104 }
105
106 During pass_lower_eh, we create EH regions for the transactions,
107 intermixed with the regular EH stuff. This gives us a nice persistent
108 mapping (all the way through rtl) from transactional memory operation
109 back to the transaction, which allows us to get the abnormal edges
110 correct to model transaction aborts and restarts:
111
112 GIMPLE_TRANSACTION [label=over]
113 local = local + 1;
114 t0 = global;
115 t1 = t0 + 1;
116 global = t1;
117 if (t1 == 10)
118 __builtin___tm_abort ();
119 __builtin___tm_commit ();
120 over:
121
122 This is the end of all_lowering_passes, and so is what is present
123 during the IPA passes, and through all of the optimization passes.
124
125 During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
126 functions and mark functions for cloning.
127
128 At the end of gimple optimization, before exiting SSA form,
129 pass_tm_edges replaces statements that perform transactional
130 memory operations with the appropriate TM builtins, and swap
131 out function calls with their transactional clones. At this
132 point we introduce the abnormal transaction restart edges and
133 complete lowering of the GIMPLE_TRANSACTION node.
134
135 x = __builtin___tm_start (MAY_ABORT);
136 eh_label:
137 if (x & abort_transaction)
138 goto over;
139 local = local + 1;
140 t0 = __builtin___tm_load (global);
141 t1 = t0 + 1;
142 __builtin___tm_store (&global, t1);
143 if (t1 == 10)
144 __builtin___tm_abort ();
145 __builtin___tm_commit ();
146 over:
147*/
148
398b1daa
AH
149static void *expand_regions (struct tm_region *,
150 void *(*callback)(struct tm_region *, void *),
b5e10eac 151 void *, bool);
398b1daa 152
0a35513e
AH
153\f
154/* Return the attributes we want to examine for X, or NULL if it's not
155 something we examine. We look at function types, but allow pointers
156 to function types and function decls and peek through. */
157
158static tree
159get_attrs_for (const_tree x)
160{
59b8926c
MP
161 if (x == NULL_TREE)
162 return NULL_TREE;
163
0a35513e
AH
164 switch (TREE_CODE (x))
165 {
166 case FUNCTION_DECL:
167 return TYPE_ATTRIBUTES (TREE_TYPE (x));
0a35513e
AH
168
169 default:
170 if (TYPE_P (x))
59b8926c 171 return NULL_TREE;
0a35513e
AH
172 x = TREE_TYPE (x);
173 if (TREE_CODE (x) != POINTER_TYPE)
59b8926c 174 return NULL_TREE;
0a35513e
AH
175 /* FALLTHRU */
176
177 case POINTER_TYPE:
178 x = TREE_TYPE (x);
179 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
59b8926c 180 return NULL_TREE;
0a35513e
AH
181 /* FALLTHRU */
182
183 case FUNCTION_TYPE:
184 case METHOD_TYPE:
185 return TYPE_ATTRIBUTES (x);
186 }
187}
188
189/* Return true if X has been marked TM_PURE. */
190
191bool
192is_tm_pure (const_tree x)
193{
194 unsigned flags;
195
196 switch (TREE_CODE (x))
197 {
198 case FUNCTION_DECL:
199 case FUNCTION_TYPE:
200 case METHOD_TYPE:
201 break;
202
203 default:
204 if (TYPE_P (x))
205 return false;
206 x = TREE_TYPE (x);
207 if (TREE_CODE (x) != POINTER_TYPE)
208 return false;
209 /* FALLTHRU */
210
211 case POINTER_TYPE:
212 x = TREE_TYPE (x);
213 if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
214 return false;
215 break;
216 }
217
218 flags = flags_from_decl_or_type (x);
219 return (flags & ECF_TM_PURE) != 0;
220}
221
222/* Return true if X has been marked TM_IRREVOCABLE. */
223
224static bool
225is_tm_irrevocable (tree x)
226{
227 tree attrs = get_attrs_for (x);
228
229 if (attrs && lookup_attribute ("transaction_unsafe", attrs))
230 return true;
231
232 /* A call to the irrevocable builtin is by definition,
233 irrevocable. */
234 if (TREE_CODE (x) == ADDR_EXPR)
235 x = TREE_OPERAND (x, 0);
236 if (TREE_CODE (x) == FUNCTION_DECL
3d78e008 237 && fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE))
0a35513e
AH
238 return true;
239
240 return false;
241}
242
243/* Return true if X has been marked TM_SAFE. */
244
245bool
246is_tm_safe (const_tree x)
247{
248 if (flag_tm)
249 {
250 tree attrs = get_attrs_for (x);
251 if (attrs)
252 {
253 if (lookup_attribute ("transaction_safe", attrs))
254 return true;
255 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
256 return true;
257 }
258 }
259 return false;
260}
261
262/* Return true if CALL is const, or tm_pure. */
263
264static bool
355fe088 265is_tm_pure_call (gimple *call)
0a35513e 266{
0e02fb26 267 return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0;
0a35513e
AH
268}
269
270/* Return true if X has been marked TM_CALLABLE. */
271
272static bool
273is_tm_callable (tree x)
274{
275 tree attrs = get_attrs_for (x);
276 if (attrs)
277 {
278 if (lookup_attribute ("transaction_callable", attrs))
279 return true;
280 if (lookup_attribute ("transaction_safe", attrs))
281 return true;
282 if (lookup_attribute ("transaction_may_cancel_outer", attrs))
283 return true;
284 }
285 return false;
286}
287
288/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
289
290bool
291is_tm_may_cancel_outer (tree x)
292{
293 tree attrs = get_attrs_for (x);
294 if (attrs)
295 return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
296 return false;
297}
298
299/* Return true for built in functions that "end" a transaction. */
300
301bool
302is_tm_ending_fndecl (tree fndecl)
303{
304 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
305 switch (DECL_FUNCTION_CODE (fndecl))
306 {
307 case BUILT_IN_TM_COMMIT:
308 case BUILT_IN_TM_COMMIT_EH:
309 case BUILT_IN_TM_ABORT:
310 case BUILT_IN_TM_IRREVOCABLE:
311 return true;
312 default:
313 break;
314 }
315
316 return false;
317}
318
d5ae1c25
TV
319/* Return true if STMT is a built in function call that "ends" a
320 transaction. */
321
322bool
355fe088 323is_tm_ending (gimple *stmt)
d5ae1c25
TV
324{
325 tree fndecl;
326
327 if (gimple_code (stmt) != GIMPLE_CALL)
328 return false;
329
330 fndecl = gimple_call_fndecl (stmt);
331 return (fndecl != NULL_TREE
332 && is_tm_ending_fndecl (fndecl));
333}
334
0a35513e
AH
335/* Return true if STMT is a TM load. */
336
337static bool
355fe088 338is_tm_load (gimple *stmt)
0a35513e
AH
339{
340 tree fndecl;
341
342 if (gimple_code (stmt) != GIMPLE_CALL)
343 return false;
344
345 fndecl = gimple_call_fndecl (stmt);
3d78e008
ML
346 return (fndecl
347 && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
0a35513e
AH
348 && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
349}
350
351/* Same as above, but for simple TM loads, that is, not the
352 after-write, after-read, etc optimized variants. */
353
354static bool
355fe088 355is_tm_simple_load (gimple *stmt)
0a35513e
AH
356{
357 tree fndecl;
358
359 if (gimple_code (stmt) != GIMPLE_CALL)
360 return false;
361
362 fndecl = gimple_call_fndecl (stmt);
3d78e008 363 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
0a35513e
AH
364 {
365 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
366 return (fcode == BUILT_IN_TM_LOAD_1
367 || fcode == BUILT_IN_TM_LOAD_2
368 || fcode == BUILT_IN_TM_LOAD_4
369 || fcode == BUILT_IN_TM_LOAD_8
370 || fcode == BUILT_IN_TM_LOAD_FLOAT
371 || fcode == BUILT_IN_TM_LOAD_DOUBLE
372 || fcode == BUILT_IN_TM_LOAD_LDOUBLE
373 || fcode == BUILT_IN_TM_LOAD_M64
374 || fcode == BUILT_IN_TM_LOAD_M128
375 || fcode == BUILT_IN_TM_LOAD_M256);
376 }
377 return false;
378}
379
380/* Return true if STMT is a TM store. */
381
382static bool
355fe088 383is_tm_store (gimple *stmt)
0a35513e
AH
384{
385 tree fndecl;
386
387 if (gimple_code (stmt) != GIMPLE_CALL)
388 return false;
389
390 fndecl = gimple_call_fndecl (stmt);
3d78e008
ML
391 return (fndecl
392 && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
0a35513e
AH
393 && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
394}
395
396/* Same as above, but for simple TM stores, that is, not the
397 after-write, after-read, etc optimized variants. */
398
399static bool
355fe088 400is_tm_simple_store (gimple *stmt)
0a35513e
AH
401{
402 tree fndecl;
403
404 if (gimple_code (stmt) != GIMPLE_CALL)
405 return false;
406
407 fndecl = gimple_call_fndecl (stmt);
3d78e008
ML
408 if (fndecl
409 && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
0a35513e
AH
410 {
411 enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
412 return (fcode == BUILT_IN_TM_STORE_1
413 || fcode == BUILT_IN_TM_STORE_2
414 || fcode == BUILT_IN_TM_STORE_4
415 || fcode == BUILT_IN_TM_STORE_8
416 || fcode == BUILT_IN_TM_STORE_FLOAT
417 || fcode == BUILT_IN_TM_STORE_DOUBLE
418 || fcode == BUILT_IN_TM_STORE_LDOUBLE
419 || fcode == BUILT_IN_TM_STORE_M64
420 || fcode == BUILT_IN_TM_STORE_M128
421 || fcode == BUILT_IN_TM_STORE_M256);
422 }
423 return false;
424}
425
426/* Return true if FNDECL is BUILT_IN_TM_ABORT. */
427
428static bool
429is_tm_abort (tree fndecl)
430{
3d78e008 431 return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT));
0a35513e
AH
432}
433
434/* Build a GENERIC tree for a user abort. This is called by front ends
435 while transforming the __tm_abort statement. */
436
437tree
438build_tm_abort_call (location_t loc, bool is_outer)
439{
440 return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
441 build_int_cst (integer_type_node,
442 AR_USERABORT
443 | (is_outer ? AR_OUTERABORT : 0)));
444}
0a35513e 445\f
5764ee3c 446/* Map for arbitrary function replacement under TM, as created
0a35513e
AH
447 by the tm_wrap attribute. */
448
6c907cff 449struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
d242408f
TS
450{
451 static inline hashval_t hash (tree_map *m) { return m->hash; }
452 static inline bool
453 equal (tree_map *a, tree_map *b)
454 {
455 return a->base.from == b->base.from;
456 }
457
08ec2754
RS
458 static int
459 keep_cache_entry (tree_map *&m)
460 {
461 return ggc_marked_p (m->base.from);
462 }
d242408f
TS
463};
464
465static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;
0a35513e
AH
466
467void
468record_tm_replacement (tree from, tree to)
469{
470 struct tree_map **slot, *h;
471
472 /* Do not inline wrapper functions that will get replaced in the TM
473 pass.
474
475 Suppose you have foo() that will get replaced into tmfoo(). Make
476 sure the inliner doesn't try to outsmart us and inline foo()
477 before we get a chance to do the TM replacement. */
478 DECL_UNINLINABLE (from) = 1;
479
480 if (tm_wrap_map == NULL)
d242408f 481 tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);
0a35513e 482
766090c2 483 h = ggc_alloc<tree_map> ();
0a35513e
AH
484 h->hash = htab_hash_pointer (from);
485 h->base.from = from;
486 h->to = to;
487
d242408f 488 slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
0a35513e
AH
489 *slot = h;
490}
491
492/* Return a TM-aware replacement function for DECL. */
493
494static tree
495find_tm_replacement_function (tree fndecl)
496{
497 if (tm_wrap_map)
498 {
499 struct tree_map *h, in;
500
501 in.base.from = fndecl;
502 in.hash = htab_hash_pointer (fndecl);
d242408f 503 h = tm_wrap_map->find_with_hash (&in, in.hash);
0a35513e
AH
504 if (h)
505 return h->to;
506 }
507
508 /* ??? We may well want TM versions of most of the common <string.h>
509 functions. For now, we've already these two defined. */
510 /* Adjust expand_call_tm() attributes as necessary for the cases
511 handled here: */
512 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
513 switch (DECL_FUNCTION_CODE (fndecl))
514 {
515 case BUILT_IN_MEMCPY:
516 return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
517 case BUILT_IN_MEMMOVE:
518 return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
519 case BUILT_IN_MEMSET:
520 return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
521 default:
522 return NULL;
523 }
524
525 return NULL;
526}
527
528/* When appropriate, record TM replacement for memory allocation functions.
529
530 FROM is the FNDECL to wrap. */
531void
532tm_malloc_replacement (tree from)
533{
534 const char *str;
535 tree to;
536
537 if (TREE_CODE (from) != FUNCTION_DECL)
538 return;
539
540 /* If we have a previous replacement, the user must be explicitly
541 wrapping malloc/calloc/free. They better know what they're
542 doing... */
543 if (find_tm_replacement_function (from))
544 return;
545
546 str = IDENTIFIER_POINTER (DECL_NAME (from));
547
548 if (!strcmp (str, "malloc"))
549 to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
550 else if (!strcmp (str, "calloc"))
551 to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
552 else if (!strcmp (str, "free"))
553 to = builtin_decl_explicit (BUILT_IN_TM_FREE);
554 else
555 return;
556
557 TREE_NOTHROW (to) = 0;
558
559 record_tm_replacement (from, to);
560}
561\f
562/* Diagnostics for tm_safe functions/regions. Called by the front end
563 once we've lowered the function to high-gimple. */
564
565/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
566 Process exactly one statement. WI->INFO is set to non-null when in
567 the context of a tm_safe function, and null for a __transaction block. */
568
569#define DIAG_TM_OUTER 1
570#define DIAG_TM_SAFE 2
571#define DIAG_TM_RELAXED 4
572
573struct diagnose_tm
574{
575 unsigned int summary_flags : 8;
576 unsigned int block_flags : 8;
577 unsigned int func_flags : 8;
0a35513e 578 unsigned int saw_volatile : 1;
355fe088 579 gimple *stmt;
0a35513e
AH
580};
581
1d4afc04 582/* Return true if T is a volatile lvalue of some kind. */
a3770d3b
AH
583
584static bool
1d4afc04 585volatile_lvalue_p (tree t)
a3770d3b 586{
1d4afc04 587 return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
a3770d3b
AH
588 && TREE_THIS_VOLATILE (TREE_TYPE (t)));
589}
590
0a35513e
AH
591/* Tree callback function for diagnose_tm pass. */
592
593static tree
1d4afc04 594diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
0a35513e
AH
595{
596 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
597 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
0a35513e 598
1d4afc04
JM
599 if (TYPE_P (*tp))
600 *walk_subtrees = false;
601 else if (volatile_lvalue_p (*tp)
602 && !d->saw_volatile)
0a35513e
AH
603 {
604 d->saw_volatile = 1;
1d4afc04
JM
605 if (d->block_flags & DIAG_TM_SAFE)
606 error_at (gimple_location (d->stmt),
607 "invalid use of volatile lvalue inside transaction");
608 else if (d->func_flags & DIAG_TM_SAFE)
609 error_at (gimple_location (d->stmt),
aa326bfb 610 "invalid use of volatile lvalue inside %<transaction_safe%> "
1d4afc04 611 "function");
0a35513e
AH
612 }
613
614 return NULL_TREE;
615}
616
862d0b35
DN
617static inline bool
618is_tm_safe_or_pure (const_tree x)
619{
620 return is_tm_safe (x) || is_tm_pure (x);
621}
622
0a35513e
AH
623static tree
624diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
625 struct walk_stmt_info *wi)
626{
355fe088 627 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
628 struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
629
630 /* Save stmt for use in leaf analysis. */
631 d->stmt = stmt;
632
633 switch (gimple_code (stmt))
634 {
635 case GIMPLE_CALL:
636 {
637 tree fn = gimple_call_fn (stmt);
638
639 if ((d->summary_flags & DIAG_TM_OUTER) == 0
640 && is_tm_may_cancel_outer (fn))
641 error_at (gimple_location (stmt),
642 "%<transaction_may_cancel_outer%> function call not within"
643 " outer transaction or %<transaction_may_cancel_outer%>");
644
645 if (d->summary_flags & DIAG_TM_SAFE)
646 {
647 bool is_safe, direct_call_p;
648 tree replacement;
649
650 if (TREE_CODE (fn) == ADDR_EXPR
651 && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
652 {
653 direct_call_p = true;
654 replacement = TREE_OPERAND (fn, 0);
655 replacement = find_tm_replacement_function (replacement);
656 if (replacement)
657 fn = replacement;
658 }
659 else
660 {
661 direct_call_p = false;
662 replacement = NULL_TREE;
663 }
664
665 if (is_tm_safe_or_pure (fn))
666 is_safe = true;
667 else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
668 {
669 /* A function explicitly marked transaction_callable as
670 opposed to transaction_safe is being defined to be
671 unsafe as part of its ABI, regardless of its contents. */
672 is_safe = false;
673 }
674 else if (direct_call_p)
675 {
17fc8d6f
AH
676 if (IS_TYPE_OR_DECL_P (fn)
677 && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
0a35513e
AH
678 is_safe = true;
679 else if (replacement)
680 {
681 /* ??? At present we've been considering replacements
682 merely transaction_callable, and therefore might
683 enter irrevocable. The tm_wrap attribute has not
684 yet made it into the new language spec. */
685 is_safe = false;
686 }
687 else
688 {
689 /* ??? Diagnostics for unmarked direct calls moved into
690 the IPA pass. Section 3.2 of the spec details how
691 functions not marked should be considered "implicitly
692 safe" based on having examined the function body. */
693 is_safe = true;
694 }
695 }
696 else
697 {
698 /* An unmarked indirect call. Consider it unsafe even
699 though optimization may yet figure out how to inline. */
700 is_safe = false;
701 }
702
703 if (!is_safe)
704 {
705 if (TREE_CODE (fn) == ADDR_EXPR)
706 fn = TREE_OPERAND (fn, 0);
707 if (d->block_flags & DIAG_TM_SAFE)
cd6baa16
TR
708 {
709 if (direct_call_p)
710 error_at (gimple_location (stmt),
711 "unsafe function call %qD within "
712 "atomic transaction", fn);
713 else
3a54c456 714 {
381cdae4
RB
715 if ((!DECL_P (fn) || DECL_NAME (fn))
716 && TREE_CODE (fn) != SSA_NAME)
3a54c456
AH
717 error_at (gimple_location (stmt),
718 "unsafe function call %qE within "
719 "atomic transaction", fn);
720 else
721 error_at (gimple_location (stmt),
722 "unsafe indirect function call within "
723 "atomic transaction");
724 }
cd6baa16 725 }
0a35513e 726 else
cd6baa16
TR
727 {
728 if (direct_call_p)
729 error_at (gimple_location (stmt),
730 "unsafe function call %qD within "
731 "%<transaction_safe%> function", fn);
732 else
3a54c456 733 {
381cdae4
RB
734 if ((!DECL_P (fn) || DECL_NAME (fn))
735 && TREE_CODE (fn) != SSA_NAME)
3a54c456
AH
736 error_at (gimple_location (stmt),
737 "unsafe function call %qE within "
738 "%<transaction_safe%> function", fn);
739 else
740 error_at (gimple_location (stmt),
741 "unsafe indirect function call within "
742 "%<transaction_safe%> function");
743 }
cd6baa16 744 }
0a35513e
AH
745 }
746 }
747 }
748 break;
749
750 case GIMPLE_ASM:
751 /* ??? We ought to come up with a way to add attributes to
752 asm statements, and then add "transaction_safe" to it.
753 Either that or get the language spec to resurrect __tm_waiver. */
754 if (d->block_flags & DIAG_TM_SAFE)
755 error_at (gimple_location (stmt),
a9c697b8 756 "%<asm%> not allowed in atomic transaction");
0a35513e
AH
757 else if (d->func_flags & DIAG_TM_SAFE)
758 error_at (gimple_location (stmt),
a9c697b8 759 "%<asm%> not allowed in %<transaction_safe%> function");
0a35513e
AH
760 break;
761
762 case GIMPLE_TRANSACTION:
763 {
538dd0b7 764 gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
0a35513e
AH
765 unsigned char inner_flags = DIAG_TM_SAFE;
766
538dd0b7 767 if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
0a35513e
AH
768 {
769 if (d->block_flags & DIAG_TM_SAFE)
770 error_at (gimple_location (stmt),
771 "relaxed transaction in atomic transaction");
772 else if (d->func_flags & DIAG_TM_SAFE)
773 error_at (gimple_location (stmt),
774 "relaxed transaction in %<transaction_safe%> function");
0a35513e
AH
775 inner_flags = DIAG_TM_RELAXED;
776 }
538dd0b7 777 else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
0a35513e
AH
778 {
779 if (d->block_flags)
780 error_at (gimple_location (stmt),
781 "outer transaction in transaction");
782 else if (d->func_flags & DIAG_TM_OUTER)
783 error_at (gimple_location (stmt),
784 "outer transaction in "
785 "%<transaction_may_cancel_outer%> function");
786 else if (d->func_flags & DIAG_TM_SAFE)
787 error_at (gimple_location (stmt),
788 "outer transaction in %<transaction_safe%> function");
0a35513e
AH
789 inner_flags |= DIAG_TM_OUTER;
790 }
791
792 *handled_ops_p = true;
538dd0b7 793 if (gimple_transaction_body (trans_stmt))
0a35513e
AH
794 {
795 struct walk_stmt_info wi_inner;
796 struct diagnose_tm d_inner;
797
798 memset (&d_inner, 0, sizeof (d_inner));
799 d_inner.func_flags = d->func_flags;
800 d_inner.block_flags = d->block_flags | inner_flags;
801 d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
802
803 memset (&wi_inner, 0, sizeof (wi_inner));
804 wi_inner.info = &d_inner;
805
538dd0b7 806 walk_gimple_seq (gimple_transaction_body (trans_stmt),
0a35513e 807 diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
0a35513e
AH
808 }
809 }
810 break;
811
812 default:
813 break;
814 }
815
816 return NULL_TREE;
817}
818
819static unsigned int
820diagnose_tm_blocks (void)
821{
822 struct walk_stmt_info wi;
823 struct diagnose_tm d;
824
825 memset (&d, 0, sizeof (d));
826 if (is_tm_may_cancel_outer (current_function_decl))
827 d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
828 else if (is_tm_safe (current_function_decl))
829 d.func_flags = DIAG_TM_SAFE;
830 d.summary_flags = d.func_flags;
831
832 memset (&wi, 0, sizeof (wi));
833 wi.info = &d;
834
835 walk_gimple_seq (gimple_body (current_function_decl),
836 diagnose_tm_1, diagnose_tm_1_op, &wi);
837
0a35513e
AH
838 return 0;
839}
840
17795822
TS
841namespace {
842
843const pass_data pass_data_diagnose_tm_blocks =
27a4cd48
DM
844{
845 GIMPLE_PASS, /* type */
846 "*diagnose_tm_blocks", /* name */
847 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
848 TV_TRANS_MEM, /* tv_id */
849 PROP_gimple_any, /* properties_required */
850 0, /* properties_provided */
851 0, /* properties_destroyed */
852 0, /* todo_flags_start */
853 0, /* todo_flags_finish */
0a35513e 854};
27a4cd48 855
17795822 856class pass_diagnose_tm_blocks : public gimple_opt_pass
27a4cd48
DM
857{
858public:
c3284718
RS
859 pass_diagnose_tm_blocks (gcc::context *ctxt)
860 : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
27a4cd48
DM
861 {}
862
863 /* opt_pass methods: */
1a3d085c 864 virtual bool gate (function *) { return flag_tm; }
be55bfe6 865 virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }
27a4cd48
DM
866
867}; // class pass_diagnose_tm_blocks
868
17795822
TS
869} // anon namespace
870
27a4cd48
DM
871gimple_opt_pass *
872make_pass_diagnose_tm_blocks (gcc::context *ctxt)
873{
874 return new pass_diagnose_tm_blocks (ctxt);
875}
0a35513e
AH
876\f
877/* Instead of instrumenting thread private memory, we save the
878 addresses in a log which we later use to save/restore the addresses
879 upon transaction start/restart.
880
881 The log is keyed by address, where each element contains individual
882 statements among different code paths that perform the store.
883
884 This log is later used to generate either plain save/restore of the
885 addresses upon transaction start/restart, or calls to the ITM_L*
886 logging functions.
887
888 So for something like:
889
890 struct large { int x[1000]; };
891 struct large lala = { 0 };
892 __transaction {
893 lala.x[i] = 123;
894 ...
895 }
896
897 We can either save/restore:
898
899 lala = { 0 };
900 trxn = _ITM_startTransaction ();
901 if (trxn & a_saveLiveVariables)
902 tmp_lala1 = lala.x[i];
903 else if (a & a_restoreLiveVariables)
904 lala.x[i] = tmp_lala1;
905
906 or use the logging functions:
907
908 lala = { 0 };
909 trxn = _ITM_startTransaction ();
910 _ITM_LU4 (&lala.x[i]);
911
912 Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
913 far up the dominator tree to shadow all of the writes to a given
914 location (thus reducing the total number of logging calls), but not
915 so high as to be called on a path that does not perform a
916 write. */
917
918/* One individual log entry. We may have multiple statements for the
919 same location if neither dominate each other (on different
920 execution paths). */
526ceb68 921struct tm_log_entry
0a35513e
AH
922{
923 /* Address to save. */
924 tree addr;
925 /* Entry block for the transaction this address occurs in. */
926 basic_block entry_block;
927 /* Dominating statements the store occurs in. */
355fe088 928 vec<gimple *> stmts;
0a35513e
AH
929 /* Initially, while we are building the log, we place a nonzero
930 value here to mean that this address *will* be saved with a
931 save/restore sequence. Later, when generating the save sequence
932 we place the SSA temp generated here. */
933 tree save_var;
526ceb68 934};
0a35513e 935
0a35513e 936
4a8fb1a1 937/* Log entry hashtable helpers. */
0a35513e 938
7edd9b15 939struct log_entry_hasher : pointer_hash <tm_log_entry>
0a35513e 940{
67f58944
TS
941 static inline hashval_t hash (const tm_log_entry *);
942 static inline bool equal (const tm_log_entry *, const tm_log_entry *);
943 static inline void remove (tm_log_entry *);
4a8fb1a1 944};
0a35513e
AH
945
946/* Htab support. Return hash value for a `tm_log_entry'. */
4a8fb1a1 947inline hashval_t
67f58944 948log_entry_hasher::hash (const tm_log_entry *log)
0a35513e 949{
0a35513e
AH
950 return iterative_hash_expr (log->addr, 0);
951}
952
953/* Htab support. Return true if two log entries are the same. */
4a8fb1a1 954inline bool
67f58944 955log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
0a35513e 956{
0a35513e
AH
957 /* FIXME:
958
959 rth: I suggest that we get rid of the component refs etc.
960 I.e. resolve the reference to base + offset.
961
962 We may need to actually finish a merge with mainline for this,
963 since we'd like to be presented with Richi's MEM_REF_EXPRs more
964 often than not. But in the meantime your tm_log_entry could save
965 the results of get_inner_reference.
966
967 See: g++.dg/tm/pr46653.C
968 */
969
970 /* Special case plain equality because operand_equal_p() below will
971 return FALSE if the addresses are equal but they have
972 side-effects (e.g. a volatile address). */
973 if (log1->addr == log2->addr)
974 return true;
975
976 return operand_equal_p (log1->addr, log2->addr, 0);
977}
978
979/* Htab support. Free one tm_log_entry. */
4a8fb1a1 980inline void
67f58944 981log_entry_hasher::remove (tm_log_entry *lp)
0a35513e 982{
9771b263 983 lp->stmts.release ();
0a35513e
AH
984 free (lp);
985}
986
4a8fb1a1
LC
987
988/* The actual log. */
c203e8a7 989static hash_table<log_entry_hasher> *tm_log;
4a8fb1a1
LC
990
991/* Addresses to log with a save/restore sequence. These should be in
992 dominator order. */
993static vec<tree> tm_log_save_addresses;
994
995enum thread_memory_type
996 {
997 mem_non_local = 0,
998 mem_thread_local,
999 mem_transaction_local,
1000 mem_max
1001 };
1002
526ceb68 1003struct tm_new_mem_map
4a8fb1a1
LC
1004{
1005 /* SSA_NAME being dereferenced. */
1006 tree val;
1007 enum thread_memory_type local_new_memory;
526ceb68 1008};
4a8fb1a1
LC
1009
1010/* Hashtable helpers. */
1011
526ceb68 1012struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
4a8fb1a1 1013{
526ceb68
TS
1014 static inline hashval_t hash (const tm_new_mem_map *);
1015 static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
4a8fb1a1
LC
1016};
1017
1018inline hashval_t
526ceb68 1019tm_mem_map_hasher::hash (const tm_new_mem_map *v)
4a8fb1a1
LC
1020{
1021 return (intptr_t)v->val >> 4;
1022}
1023
1024inline bool
526ceb68 1025tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
4a8fb1a1
LC
1026{
1027 return v->val == c->val;
1028}
1029
1030/* Map for an SSA_NAME originally pointing to a non aliased new piece
1031 of memory (malloc, alloc, etc). */
c203e8a7 1032static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
4a8fb1a1 1033
0a35513e
AH
1034/* Initialize logging data structures. */
1035static void
1036tm_log_init (void)
1037{
c203e8a7
TS
1038 tm_log = new hash_table<log_entry_hasher> (10);
1039 tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
9771b263 1040 tm_log_save_addresses.create (5);
0a35513e
AH
1041}
1042
1043/* Free logging data structures. */
1044static void
1045tm_log_delete (void)
1046{
c203e8a7
TS
1047 delete tm_log;
1048 tm_log = NULL;
1049 delete tm_new_mem_hash;
1050 tm_new_mem_hash = NULL;
9771b263 1051 tm_log_save_addresses.release ();
0a35513e
AH
1052}
1053
1054/* Return true if MEM is a transaction invariant memory for the TM
1055 region starting at REGION_ENTRY_BLOCK. */
1056static bool
1057transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
1058{
1059 if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
1060 && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
1061 {
1062 basic_block def_bb;
1063
1064 def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
1065 return def_bb != region_entry_block
1066 && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
1067 }
1068
1069 mem = strip_invariant_refs (mem);
1070 return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
1071}
1072
1073/* Given an address ADDR in STMT, find it in the memory log or add it,
1074 making sure to keep only the addresses highest in the dominator
1075 tree.
1076
1077 ENTRY_BLOCK is the entry_block for the transaction.
1078
1079 If we find the address in the log, make sure it's either the same
1080 address, or an equivalent one that dominates ADDR.
1081
1082 If we find the address, but neither ADDR dominates the found
1083 address, nor the found one dominates ADDR, we're on different
1084 execution paths. Add it.
1085
1086 If known, ENTRY_BLOCK is the entry block for the region, otherwise
1087 NULL. */
1088static void
355fe088 1089tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
0a35513e 1090{
4a8fb1a1 1091 tm_log_entry **slot;
0a35513e
AH
1092 struct tm_log_entry l, *lp;
1093
1094 l.addr = addr;
c203e8a7 1095 slot = tm_log->find_slot (&l, INSERT);
0a35513e
AH
1096 if (!*slot)
1097 {
1098 tree type = TREE_TYPE (addr);
1099
1100 lp = XNEW (struct tm_log_entry);
1101 lp->addr = addr;
1102 *slot = lp;
1103
1104 /* Small invariant addresses can be handled as save/restores. */
1105 if (entry_block
1106 && transaction_invariant_address_p (lp->addr, entry_block)
1107 && TYPE_SIZE_UNIT (type) != NULL
cc269bb6 1108 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
7d362f6c 1109 && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
028d4092 1110 < param_tm_max_aggregate_size)
0a35513e
AH
1111 /* We must be able to copy this type normally. I.e., no
1112 special constructors and the like. */
1113 && !TREE_ADDRESSABLE (type))
1114 {
2091795a 1115 lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
9771b263 1116 lp->stmts.create (0);
0a35513e
AH
1117 lp->entry_block = entry_block;
1118 /* Save addresses separately in dominator order so we don't
1119 get confused by overlapping addresses in the save/restore
1120 sequence. */
9771b263 1121 tm_log_save_addresses.safe_push (lp->addr);
0a35513e
AH
1122 }
1123 else
1124 {
1125 /* Use the logging functions. */
9771b263
DN
1126 lp->stmts.create (5);
1127 lp->stmts.quick_push (stmt);
0a35513e
AH
1128 lp->save_var = NULL;
1129 }
1130 }
1131 else
1132 {
1133 size_t i;
355fe088 1134 gimple *oldstmt;
0a35513e 1135
4a8fb1a1 1136 lp = *slot;
0a35513e
AH
1137
1138 /* If we're generating a save/restore sequence, we don't care
1139 about statements. */
1140 if (lp->save_var)
1141 return;
1142
9771b263 1143 for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
0a35513e
AH
1144 {
1145 if (stmt == oldstmt)
1146 return;
1147 /* We already have a store to the same address, higher up the
1148 dominator tree. Nothing to do. */
1149 if (dominated_by_p (CDI_DOMINATORS,
1150 gimple_bb (stmt), gimple_bb (oldstmt)))
1151 return;
1152 /* We should be processing blocks in dominator tree order. */
1153 gcc_assert (!dominated_by_p (CDI_DOMINATORS,
1154 gimple_bb (oldstmt), gimple_bb (stmt)));
1155 }
1156 /* Store is on a different code path. */
9771b263 1157 lp->stmts.safe_push (stmt);
0a35513e
AH
1158 }
1159}
1160
1161/* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
1162 result, insert the new statements before GSI. */
1163
1164static tree
1165gimplify_addr (gimple_stmt_iterator *gsi, tree x)
1166{
1167 if (TREE_CODE (x) == TARGET_MEM_REF)
1168 x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
1169 else
1170 x = build_fold_addr_expr (x);
1171 return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
1172}
1173
1174/* Instrument one address with the logging functions.
1175 ADDR is the address to save.
1176 STMT is the statement before which to place it. */
1177static void
355fe088 1178tm_log_emit_stmt (tree addr, gimple *stmt)
0a35513e
AH
1179{
1180 tree type = TREE_TYPE (addr);
0a35513e 1181 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
355fe088 1182 gimple *log;
0a35513e
AH
1183 enum built_in_function code = BUILT_IN_TM_LOG;
1184
1185 if (type == float_type_node)
1186 code = BUILT_IN_TM_LOG_FLOAT;
1187 else if (type == double_type_node)
1188 code = BUILT_IN_TM_LOG_DOUBLE;
1189 else if (type == long_double_type_node)
1190 code = BUILT_IN_TM_LOG_LDOUBLE;
4c868789
RH
1191 else if (TYPE_SIZE (type) != NULL
1192 && tree_fits_uhwi_p (TYPE_SIZE (type)))
0a35513e 1193 {
4c868789
RH
1194 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
1195
1196 if (TREE_CODE (type) == VECTOR_TYPE)
0a35513e 1197 {
4c868789 1198 switch (type_size)
0a35513e 1199 {
4c868789
RH
1200 case 64:
1201 code = BUILT_IN_TM_LOG_M64;
1202 break;
1203 case 128:
1204 code = BUILT_IN_TM_LOG_M128;
1205 break;
1206 case 256:
1207 code = BUILT_IN_TM_LOG_M256;
1208 break;
1209 default:
1210 goto unhandled_vec;
1211 }
1212 if (!builtin_decl_explicit_p (code))
1213 goto unhandled_vec;
1214 }
1215 else
1216 {
1217 unhandled_vec:
1218 switch (type_size)
1219 {
47c147b0 1220 case 8:
4c868789
RH
1221 code = BUILT_IN_TM_LOG_1;
1222 break;
47c147b0 1223 case 16:
4c868789
RH
1224 code = BUILT_IN_TM_LOG_2;
1225 break;
47c147b0 1226 case 32:
4c868789
RH
1227 code = BUILT_IN_TM_LOG_4;
1228 break;
47c147b0 1229 case 64:
4c868789
RH
1230 code = BUILT_IN_TM_LOG_8;
1231 break;
0a35513e 1232 }
0a35513e
AH
1233 }
1234 }
1235
4c868789
RH
1236 if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code))
1237 code = BUILT_IN_TM_LOG;
1238 tree decl = builtin_decl_explicit (code);
1239
0a35513e
AH
1240 addr = gimplify_addr (&gsi, addr);
1241 if (code == BUILT_IN_TM_LOG)
4c868789 1242 log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type));
0a35513e 1243 else
4c868789 1244 log = gimple_build_call (decl, 1, addr);
0a35513e
AH
1245 gsi_insert_before (&gsi, log, GSI_SAME_STMT);
1246}
1247
1248/* Go through the log and instrument address that must be instrumented
1249 with the logging functions. Leave the save/restore addresses for
1250 later. */
1251static void
1252tm_log_emit (void)
1253{
c203e8a7 1254 hash_table<log_entry_hasher>::iterator hi;
0a35513e
AH
1255 struct tm_log_entry *lp;
1256
c203e8a7 1257 FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
0a35513e
AH
1258 {
1259 size_t i;
355fe088 1260 gimple *stmt;
0a35513e
AH
1261
1262 if (dump_file)
1263 {
1264 fprintf (dump_file, "TM thread private mem logging: ");
ef6cb4c7 1265 print_generic_expr (dump_file, lp->addr);
0a35513e
AH
1266 fprintf (dump_file, "\n");
1267 }
1268
1269 if (lp->save_var)
1270 {
1271 if (dump_file)
1272 fprintf (dump_file, "DUMPING to variable\n");
1273 continue;
1274 }
1275 else
1276 {
1277 if (dump_file)
1278 fprintf (dump_file, "DUMPING with logging functions\n");
9771b263 1279 for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
0a35513e
AH
1280 tm_log_emit_stmt (lp->addr, stmt);
1281 }
1282 }
1283}
1284
1285/* Emit the save sequence for the corresponding addresses in the log.
1286 ENTRY_BLOCK is the entry block for the transaction.
1287 BB is the basic block to insert the code in. */
1288static void
1289tm_log_emit_saves (basic_block entry_block, basic_block bb)
1290{
1291 size_t i;
1292 gimple_stmt_iterator gsi = gsi_last_bb (bb);
355fe088 1293 gimple *stmt;
0a35513e
AH
1294 struct tm_log_entry l, *lp;
1295
9771b263 1296 for (i = 0; i < tm_log_save_addresses.length (); ++i)
0a35513e 1297 {
9771b263 1298 l.addr = tm_log_save_addresses[i];
c203e8a7 1299 lp = *(tm_log->find_slot (&l, NO_INSERT));
0a35513e
AH
1300 gcc_assert (lp->save_var != NULL);
1301
1302 /* We only care about variables in the current transaction. */
1303 if (lp->entry_block != entry_block)
1304 continue;
1305
1306 stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
1307
1308 /* Make sure we can create an SSA_NAME for this type. For
1309 instance, aggregates aren't allowed, in which case the system
1310 will create a VOP for us and everything will just work. */
1311 if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
1312 {
1313 lp->save_var = make_ssa_name (lp->save_var, stmt);
1314 gimple_assign_set_lhs (stmt, lp->save_var);
1315 }
1316
1317 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1318 }
1319}
1320
1321/* Emit the restore sequence for the corresponding addresses in the log.
1322 ENTRY_BLOCK is the entry block for the transaction.
1323 BB is the basic block to insert the code in. */
1324static void
1325tm_log_emit_restores (basic_block entry_block, basic_block bb)
1326{
1327 int i;
1328 struct tm_log_entry l, *lp;
1329 gimple_stmt_iterator gsi;
355fe088 1330 gimple *stmt;
0a35513e 1331
9771b263 1332 for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
0a35513e 1333 {
9771b263 1334 l.addr = tm_log_save_addresses[i];
c203e8a7 1335 lp = *(tm_log->find_slot (&l, NO_INSERT));
0a35513e
AH
1336 gcc_assert (lp->save_var != NULL);
1337
1338 /* We only care about variables in the current transaction. */
1339 if (lp->entry_block != entry_block)
1340 continue;
1341
1342 /* Restores are in LIFO order from the saves in case we have
1343 overlaps. */
1344 gsi = gsi_start_bb (bb);
1345
1346 stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
1347 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1348 }
1349}
1350
0a35513e
AH
1351\f
1352static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
1353 struct walk_stmt_info *);
1354static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
1355 struct walk_stmt_info *);
1356
1357/* Evaluate an address X being dereferenced and determine if it
1358 originally points to a non aliased new chunk of memory (malloc,
1359 alloca, etc).
1360
1361 Return MEM_THREAD_LOCAL if it points to a thread-local address.
1362 Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
1363 Return MEM_NON_LOCAL otherwise.
1364
1365 ENTRY_BLOCK is the entry block to the transaction containing the
1366 dereference of X. */
1367static enum thread_memory_type
1368thread_private_new_memory (basic_block entry_block, tree x)
1369{
355fe088 1370 gimple *stmt = NULL;
0a35513e 1371 enum tree_code code;
526ceb68
TS
1372 tm_new_mem_map **slot;
1373 tm_new_mem_map elt, *elt_p;
0a35513e
AH
1374 tree val = x;
1375 enum thread_memory_type retval = mem_transaction_local;
1376
1377 if (!entry_block
1378 || TREE_CODE (x) != SSA_NAME
1379 /* Possible uninitialized use, or a function argument. In
1380 either case, we don't care. */
1381 || SSA_NAME_IS_DEFAULT_DEF (x))
1382 return mem_non_local;
1383
1384 /* Look in cache first. */
1385 elt.val = x;
c203e8a7 1386 slot = tm_new_mem_hash->find_slot (&elt, INSERT);
4a8fb1a1 1387 elt_p = *slot;
0a35513e
AH
1388 if (elt_p)
1389 return elt_p->local_new_memory;
1390
1391 /* Optimistically assume the memory is transaction local during
1392 processing. This catches recursion into this variable. */
526ceb68 1393 *slot = elt_p = XNEW (tm_new_mem_map);
0a35513e
AH
1394 elt_p->val = val;
1395 elt_p->local_new_memory = mem_transaction_local;
1396
1397 /* Search DEF chain to find the original definition of this address. */
1398 do
1399 {
1400 if (ptr_deref_may_alias_global_p (x))
1401 {
1402 /* Address escapes. This is not thread-private. */
1403 retval = mem_non_local;
1404 goto new_memory_ret;
1405 }
1406
1407 stmt = SSA_NAME_DEF_STMT (x);
1408
1409 /* If the malloc call is outside the transaction, this is
1410 thread-local. */
1411 if (retval != mem_thread_local
1412 && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
1413 retval = mem_thread_local;
1414
1415 if (is_gimple_assign (stmt))
1416 {
1417 code = gimple_assign_rhs_code (stmt);
1418 /* x = foo ==> foo */
1419 if (code == SSA_NAME)
1420 x = gimple_assign_rhs1 (stmt);
1421 /* x = foo + n ==> foo */
1422 else if (code == POINTER_PLUS_EXPR)
1423 x = gimple_assign_rhs1 (stmt);
1424 /* x = (cast*) foo ==> foo */
625a9766 1425 else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
0a35513e 1426 x = gimple_assign_rhs1 (stmt);
01a723fa
AP
1427 /* x = c ? op1 : op2 == > op1 or op2 just like a PHI */
1428 else if (code == COND_EXPR)
1429 {
1430 tree op1 = gimple_assign_rhs2 (stmt);
1431 tree op2 = gimple_assign_rhs3 (stmt);
1432 enum thread_memory_type mem;
1433 retval = thread_private_new_memory (entry_block, op1);
1434 if (retval == mem_non_local)
1435 goto new_memory_ret;
1436 mem = thread_private_new_memory (entry_block, op2);
1437 retval = MIN (retval, mem);
1438 goto new_memory_ret;
1439 }
0a35513e
AH
1440 else
1441 {
1442 retval = mem_non_local;
1443 goto new_memory_ret;
1444 }
1445 }
1446 else
1447 {
1448 if (gimple_code (stmt) == GIMPLE_PHI)
1449 {
1450 unsigned int i;
1451 enum thread_memory_type mem;
1452 tree phi_result = gimple_phi_result (stmt);
1453
1454 /* If any of the ancestors are non-local, we are sure to
1455 be non-local. Otherwise we can avoid doing anything
1456 and inherit what has already been generated. */
1457 retval = mem_max;
1458 for (i = 0; i < gimple_phi_num_args (stmt); ++i)
1459 {
1460 tree op = PHI_ARG_DEF (stmt, i);
1461
1462 /* Exclude self-assignment. */
1463 if (phi_result == op)
1464 continue;
1465
1466 mem = thread_private_new_memory (entry_block, op);
1467 if (mem == mem_non_local)
1468 {
1469 retval = mem;
1470 goto new_memory_ret;
1471 }
1472 retval = MIN (retval, mem);
1473 }
1474 goto new_memory_ret;
1475 }
1476 break;
1477 }
1478 }
1479 while (TREE_CODE (x) == SSA_NAME);
1480
1481 if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
1482 /* Thread-local or transaction-local. */
1483 ;
1484 else
1485 retval = mem_non_local;
1486
1487 new_memory_ret:
1488 elt_p->local_new_memory = retval;
1489 return retval;
1490}
1491
1492/* Determine whether X has to be instrumented using a read
1493 or write barrier.
1494
1495 ENTRY_BLOCK is the entry block for the region where stmt resides
1496 in. NULL if unknown.
1497
1498 STMT is the statement in which X occurs in. It is used for thread
1499 private memory instrumentation. If no TPM instrumentation is
1500 desired, STMT should be null. */
1501static bool
355fe088 1502requires_barrier (basic_block entry_block, tree x, gimple *stmt)
0a35513e
AH
1503{
1504 tree orig = x;
1505 while (handled_component_p (x))
1506 x = TREE_OPERAND (x, 0);
1507
1508 switch (TREE_CODE (x))
1509 {
1510 case INDIRECT_REF:
1511 case MEM_REF:
1512 {
1513 enum thread_memory_type ret;
1514
1515 ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
1516 if (ret == mem_non_local)
1517 return true;
1518 if (stmt && ret == mem_thread_local)
1519 /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
1520 tm_log_add (entry_block, orig, stmt);
1521
1522 /* Transaction-locals require nothing at all. For malloc, a
1523 transaction restart frees the memory and we reallocate.
1524 For alloca, the stack pointer gets reset by the retry and
1525 we reallocate. */
1526 return false;
1527 }
1528
1529 case TARGET_MEM_REF:
1530 if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
1531 return true;
1532 x = TREE_OPERAND (TMR_BASE (x), 0);
1533 if (TREE_CODE (x) == PARM_DECL)
1534 return false;
8813a647 1535 gcc_assert (VAR_P (x));
0a35513e
AH
1536 /* FALLTHRU */
1537
1538 case PARM_DECL:
1539 case RESULT_DECL:
1540 case VAR_DECL:
1541 if (DECL_BY_REFERENCE (x))
1542 {
1543 /* ??? This value is a pointer, but aggregate_value_p has been
1544 jigged to return true which confuses needs_to_live_in_memory.
1545 This ought to be cleaned up generically.
1546
1547 FIXME: Verify this still happens after the next mainline
1548 merge. Testcase ie g++.dg/tm/pr47554.C.
1549 */
1550 return false;
1551 }
1552
1553 if (is_global_var (x))
3be18e47 1554 return !TREE_READONLY (x);
0a35513e
AH
1555 if (/* FIXME: This condition should actually go below in the
1556 tm_log_add() call, however is_call_clobbered() depends on
1557 aliasing info which is not available during
1558 gimplification. Since requires_barrier() gets called
1559 during lower_sequence_tm/gimplification, leave the call
1560 to needs_to_live_in_memory until we eliminate
1561 lower_sequence_tm altogether. */
63c0efdb 1562 needs_to_live_in_memory (x))
0a35513e 1563 return true;
3be18e47
RH
1564 else
1565 {
1566 /* For local memory that doesn't escape (aka thread private
1567 memory), we can either save the value at the beginning of
1568 the transaction and restore on restart, or call a tm
1569 function to dynamically save and restore on restart
1570 (ITM_L*). */
1571 if (stmt)
1572 tm_log_add (entry_block, orig, stmt);
1573 return false;
1574 }
0a35513e
AH
1575
1576 default:
1577 return false;
1578 }
1579}
1580
1581/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
1582 a transaction region. */
1583
1584static void
1585examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
1586{
355fe088 1587 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
1588
1589 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
1590 *state |= GTMA_HAVE_LOAD;
1591 if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
1592 *state |= GTMA_HAVE_STORE;
1593}
1594
1595/* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
1596
1597static void
1598examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
1599{
355fe088 1600 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
1601 tree fn;
1602
1603 if (is_tm_pure_call (stmt))
1604 return;
1605
1606 /* Check if this call is a transaction abort. */
1607 fn = gimple_call_fndecl (stmt);
1608 if (is_tm_abort (fn))
1609 *state |= GTMA_HAVE_ABORT;
1610
1611 /* Note that something may happen. */
1612 *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
1613}
1614
7c11b0fe
RH
1615/* Iterate through the statements in the sequence, moving labels
1616 (and thus edges) of transactions from "label_norm" to "label_uninst". */
1617
1618static tree
1619make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1620 struct walk_stmt_info *)
1621{
1622 gimple *stmt = gsi_stmt (*gsi);
1623
1624 if (gtransaction *txn = dyn_cast <gtransaction *> (stmt))
1625 {
1626 *handled_ops_p = true;
1627 txn->label_uninst = txn->label_norm;
1628 txn->label_norm = NULL;
1629 }
1630 else
1631 *handled_ops_p = !gimple_has_substatements (stmt);
1632
1633 return NULL_TREE;
1634}
1635
0a35513e
AH
1636/* Lower a GIMPLE_TRANSACTION statement. */
1637
1638static void
1639lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
1640{
355fe088 1641 gimple *g;
538dd0b7 1642 gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi));
0a35513e
AH
1643 unsigned int *outer_state = (unsigned int *) wi->info;
1644 unsigned int this_state = 0;
1645 struct walk_stmt_info this_wi;
1646
1647 /* First, lower the body. The scanning that we do inside gives
1648 us some idea of what we're dealing with. */
1649 memset (&this_wi, 0, sizeof (this_wi));
1650 this_wi.info = (void *) &this_state;
355a7673
MM
1651 walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
1652 lower_sequence_tm, NULL, &this_wi);
0a35513e
AH
1653
1654 /* If there was absolutely nothing transaction related inside the
1655 transaction, we may elide it. Likewise if this is a nested
1656 transaction and does not contain an abort. */
1657 if (this_state == 0
1658 || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
1659 {
1660 if (outer_state)
1661 *outer_state |= this_state;
1662
1663 gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
1664 GSI_SAME_STMT);
1665 gimple_transaction_set_body (stmt, NULL);
1666
1667 gsi_remove (gsi, true);
1668 wi->removed_stmt = true;
1669 return;
1670 }
1671
1672 /* Wrap the body of the transaction in a try-finally node so that
1673 the commit call is always properly called. */
1674 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
1675 if (flag_exceptions)
1676 {
1677 tree ptr;
1678 gimple_seq n_seq, e_seq;
1679
1680 n_seq = gimple_seq_alloc_with_stmt (g);
355a7673 1681 e_seq = NULL;
0a35513e
AH
1682
1683 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
1684 1, integer_zero_node);
b731b390 1685 ptr = create_tmp_var (ptr_type_node);
0a35513e
AH
1686 gimple_call_set_lhs (g, ptr);
1687 gimple_seq_add_stmt (&e_seq, g);
1688
1689 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
1690 1, ptr);
1691 gimple_seq_add_stmt (&e_seq, g);
1692
1693 g = gimple_build_eh_else (n_seq, e_seq);
1694 }
1695
1696 g = gimple_build_try (gimple_transaction_body (stmt),
1697 gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
0a35513e 1698
7c11b0fe
RH
1699 /* For a (potentially) outer transaction, create two paths. */
1700 gimple_seq uninst = NULL;
1701 if (outer_state == NULL)
1702 {
1703 uninst = copy_gimple_seq_and_replace_locals (g);
1704 /* In the uninstrumented copy, reset inner transactions to have only
1705 an uninstrumented code path. */
1706 memset (&this_wi, 0, sizeof (this_wi));
1707 walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi);
1708 }
1709
1710 tree label1 = create_artificial_label (UNKNOWN_LOCATION);
1711 gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING);
1712 gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
1713 gimple_transaction_set_label_norm (stmt, label1);
0a35513e
AH
1714
1715 /* If the transaction calls abort or if this is an outer transaction,
1716 add an "over" label afterwards. */
7c11b0fe
RH
1717 tree label3 = NULL;
1718 if ((this_state & GTMA_HAVE_ABORT)
1719 || outer_state == NULL
c3284718 1720 || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER))
0a35513e 1721 {
7c11b0fe
RH
1722 label3 = create_artificial_label (UNKNOWN_LOCATION);
1723 gimple_transaction_set_label_over (stmt, label3);
0a35513e
AH
1724 }
1725
7c11b0fe
RH
1726 if (uninst != NULL)
1727 {
1728 gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING);
1729
1730 tree label2 = create_artificial_label (UNKNOWN_LOCATION);
1731 gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING);
1732 gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING);
1733 gimple_transaction_set_label_uninst (stmt, label2);
1734 }
1735
1736 if (label3 != NULL)
1737 gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING);
1738
1739 gimple_transaction_set_body (stmt, NULL);
1740
0a35513e
AH
1741 /* Record the set of operations found for use later. */
1742 this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
1743 gimple_transaction_set_subcode (stmt, this_state);
1744}
1745
1746/* Iterate through the statements in the sequence, lowering them all
1747 as appropriate for being in a transaction. */
1748
1749static tree
1750lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1751 struct walk_stmt_info *wi)
1752{
1753 unsigned int *state = (unsigned int *) wi->info;
355fe088 1754 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
1755
1756 *handled_ops_p = true;
1757 switch (gimple_code (stmt))
1758 {
1759 case GIMPLE_ASSIGN:
1760 /* Only memory reads/writes need to be instrumented. */
1761 if (gimple_assign_single_p (stmt))
1762 examine_assign_tm (state, gsi);
1763 break;
1764
1765 case GIMPLE_CALL:
1766 examine_call_tm (state, gsi);
1767 break;
1768
1769 case GIMPLE_ASM:
1770 *state |= GTMA_MAY_ENTER_IRREVOCABLE;
1771 break;
1772
1773 case GIMPLE_TRANSACTION:
1774 lower_transaction (gsi, wi);
1775 break;
1776
1777 default:
1778 *handled_ops_p = !gimple_has_substatements (stmt);
1779 break;
1780 }
1781
1782 return NULL_TREE;
1783}
1784
1785/* Iterate through the statements in the sequence, lowering them all
1786 as appropriate for being outside of a transaction. */
1787
1788static tree
1789lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1790 struct walk_stmt_info * wi)
1791{
355fe088 1792 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
1793
1794 if (gimple_code (stmt) == GIMPLE_TRANSACTION)
1795 {
1796 *handled_ops_p = true;
1797 lower_transaction (gsi, wi);
1798 }
1799 else
1800 *handled_ops_p = !gimple_has_substatements (stmt);
1801
1802 return NULL_TREE;
1803}
1804
1805/* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
1806 this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
1807 been moved out, and all the data required for constructing a proper
1808 CFG has been recorded. */
1809
1810static unsigned int
1811execute_lower_tm (void)
1812{
1813 struct walk_stmt_info wi;
355a7673 1814 gimple_seq body;
0a35513e
AH
1815
1816 /* Transactional clones aren't created until a later pass. */
1817 gcc_assert (!decl_is_tm_clone (current_function_decl));
1818
355a7673 1819 body = gimple_body (current_function_decl);
0a35513e 1820 memset (&wi, 0, sizeof (wi));
355a7673
MM
1821 walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi);
1822 gimple_set_body (current_function_decl, body);
0a35513e
AH
1823
1824 return 0;
1825}
1826
17795822
TS
1827namespace {
1828
1829const pass_data pass_data_lower_tm =
27a4cd48
DM
1830{
1831 GIMPLE_PASS, /* type */
1832 "tmlower", /* name */
1833 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
1834 TV_TRANS_MEM, /* tv_id */
1835 PROP_gimple_lcf, /* properties_required */
1836 0, /* properties_provided */
1837 0, /* properties_destroyed */
1838 0, /* todo_flags_start */
1839 0, /* todo_flags_finish */
0a35513e 1840};
27a4cd48 1841
17795822 1842class pass_lower_tm : public gimple_opt_pass
27a4cd48
DM
1843{
1844public:
c3284718
RS
1845 pass_lower_tm (gcc::context *ctxt)
1846 : gimple_opt_pass (pass_data_lower_tm, ctxt)
27a4cd48
DM
1847 {}
1848
1849 /* opt_pass methods: */
1a3d085c 1850 virtual bool gate (function *) { return flag_tm; }
be55bfe6 1851 virtual unsigned int execute (function *) { return execute_lower_tm (); }
27a4cd48
DM
1852
1853}; // class pass_lower_tm
1854
17795822
TS
1855} // anon namespace
1856
27a4cd48
DM
1857gimple_opt_pass *
1858make_pass_lower_tm (gcc::context *ctxt)
1859{
1860 return new pass_lower_tm (ctxt);
1861}
0a35513e
AH
1862\f
1863/* Collect region information for each transaction. */
1864
1865struct tm_region
1866{
538dd0b7
DM
1867public:
1868
1869 /* The field "transaction_stmt" is initially a gtransaction *,
1870 but eventually gets lowered to a gcall *(to BUILT_IN_TM_START).
1871
1872 Helper method to get it as a gtransaction *, with code-checking
1873 in a checked-build. */
1874
1875 gtransaction *
1876 get_transaction_stmt () const
1877 {
1878 return as_a <gtransaction *> (transaction_stmt);
1879 }
1880
1881public:
1882
0a35513e
AH
1883 /* Link to the next unnested transaction. */
1884 struct tm_region *next;
1885
1886 /* Link to the next inner transaction. */
1887 struct tm_region *inner;
1888
1889 /* Link to the next outer transaction. */
1890 struct tm_region *outer;
1891
398b1daa
AH
1892 /* The GIMPLE_TRANSACTION statement beginning this transaction.
1893 After TM_MARK, this gets replaced by a call to
538dd0b7
DM
1894 BUILT_IN_TM_START.
1895 Hence this will be either a gtransaction *or a gcall *. */
355fe088 1896 gimple *transaction_stmt;
0a35513e 1897
398b1daa
AH
1898 /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to
1899 BUILT_IN_TM_START, this field is true if the transaction is an
1900 outer transaction. */
1901 bool original_transaction_was_outer;
1902
1903 /* Return value from BUILT_IN_TM_START. */
1904 tree tm_state;
1905
1906 /* The entry block to this region. This will always be the first
1907 block of the body of the transaction. */
0a35513e
AH
1908 basic_block entry_block;
1909
398b1daa
AH
1910 /* The first block after an expanded call to _ITM_beginTransaction. */
1911 basic_block restart_block;
1912
0a35513e
AH
1913 /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
1914 These blocks are still a part of the region (i.e., the border is
1915 inclusive). Note that this set is only complete for paths in the CFG
1916 starting at ENTRY_BLOCK, and that there is no exit block recorded for
1917 the edge to the "over" label. */
1918 bitmap exit_blocks;
1919
1920 /* The set of all blocks that have an TM_IRREVOCABLE call. */
1921 bitmap irr_blocks;
1922};
1923
1924/* True if there are pending edge statements to be committed for the
1925 current function being scanned in the tmmark pass. */
1926bool pending_edge_inserts_p;
1927
1928static struct tm_region *all_tm_regions;
1929static bitmap_obstack tm_obstack;
1930
1931
073a8998 1932/* A subroutine of tm_region_init. Record the existence of the
0a35513e
AH
1933 GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
1934
1935static struct tm_region *
538dd0b7
DM
1936tm_region_init_0 (struct tm_region *outer, basic_block bb,
1937 gtransaction *stmt)
0a35513e
AH
1938{
1939 struct tm_region *region;
1940
1941 region = (struct tm_region *)
1942 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
1943
1944 if (outer)
1945 {
1946 region->next = outer->inner;
1947 outer->inner = region;
1948 }
1949 else
1950 {
1951 region->next = all_tm_regions;
1952 all_tm_regions = region;
1953 }
1954 region->inner = NULL;
1955 region->outer = outer;
1956
1957 region->transaction_stmt = stmt;
398b1daa
AH
1958 region->original_transaction_was_outer = false;
1959 region->tm_state = NULL;
0a35513e
AH
1960
1961 /* There are either one or two edges out of the block containing
1962 the GIMPLE_TRANSACTION, one to the actual region and one to the
1963 "over" label if the region contains an abort. The former will
1964 always be the one marked FALLTHRU. */
1965 region->entry_block = FALLTHRU_EDGE (bb)->dest;
1966
1967 region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
1968 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
1969
1970 return region;
1971}
1972
1973/* A subroutine of tm_region_init. Record all the exit and
1974 irrevocable blocks in BB into the region's exit_blocks and
1975 irr_blocks bitmaps. Returns the new region being scanned. */
1976
1977static struct tm_region *
1978tm_region_init_1 (struct tm_region *region, basic_block bb)
1979{
1980 gimple_stmt_iterator gsi;
355fe088 1981 gimple *g;
0a35513e
AH
1982
1983 if (!region
1984 || (!region->irr_blocks && !region->exit_blocks))
1985 return region;
1986
1987 /* Check to see if this is the end of a region by seeing if it
1988 contains a call to __builtin_tm_commit{,_eh}. Note that the
1989 outermost region for DECL_IS_TM_CLONE need not collect this. */
1990 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1991 {
1992 g = gsi_stmt (gsi);
1993 if (gimple_code (g) == GIMPLE_CALL)
1994 {
1995 tree fn = gimple_call_fndecl (g);
3d78e008 1996 if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL))
0a35513e
AH
1997 {
1998 if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
1999 || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
2000 && region->exit_blocks)
2001 {
2002 bitmap_set_bit (region->exit_blocks, bb->index);
2003 region = region->outer;
2004 break;
2005 }
2006 if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
2007 bitmap_set_bit (region->irr_blocks, bb->index);
2008 }
2009 }
2010 }
2011 return region;
2012}
2013
2014/* Collect all of the transaction regions within the current function
2015 and record them in ALL_TM_REGIONS. The REGION parameter may specify
2016 an "outermost" region for use by tm clones. */
2017
2018static void
2019tm_region_init (struct tm_region *region)
2020{
355fe088 2021 gimple *g;
0a35513e
AH
2022 edge_iterator ei;
2023 edge e;
2024 basic_block bb;
ef062b13 2025 auto_vec<basic_block> queue;
0a35513e
AH
2026 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2027 struct tm_region *old_region;
526ceb68 2028 auto_vec<tm_region *> bb_regions;
0a35513e 2029
19c0d7df
AH
2030 /* We could store this information in bb->aux, but we may get called
2031 through get_all_tm_blocks() from another pass that may be already
2032 using bb->aux. */
8b1c6fd7 2033 bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
19c0d7df 2034
ffc445f4
RH
2035 all_tm_regions = region;
2036 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
9771b263 2037 queue.safe_push (bb);
ffc445f4 2038 bitmap_set_bit (visited_blocks, bb->index);
9771b263 2039 bb_regions[bb->index] = region;
ffc445f4 2040
0a35513e
AH
2041 do
2042 {
9771b263
DN
2043 bb = queue.pop ();
2044 region = bb_regions[bb->index];
2045 bb_regions[bb->index] = NULL;
0a35513e
AH
2046
2047 /* Record exit and irrevocable blocks. */
2048 region = tm_region_init_1 (region, bb);
2049
2050 /* Check for the last statement in the block beginning a new region. */
2051 g = last_stmt (bb);
2052 old_region = region;
538dd0b7
DM
2053 if (g)
2054 if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g))
2055 region = tm_region_init_0 (region, bb, trans_stmt);
0a35513e
AH
2056
2057 /* Process subsequent blocks. */
2058 FOR_EACH_EDGE (e, ei, bb->succs)
2059 if (!bitmap_bit_p (visited_blocks, e->dest->index))
2060 {
2061 bitmap_set_bit (visited_blocks, e->dest->index);
9771b263 2062 queue.safe_push (e->dest);
0a35513e
AH
2063
2064 /* If the current block started a new region, make sure that only
2065 the entry block of the new region is associated with this region.
2066 Other successors are still part of the old region. */
2067 if (old_region != region && e->dest != region->entry_block)
9771b263 2068 bb_regions[e->dest->index] = old_region;
0a35513e 2069 else
9771b263 2070 bb_regions[e->dest->index] = region;
0a35513e
AH
2071 }
2072 }
9771b263 2073 while (!queue.is_empty ());
0a35513e
AH
2074 BITMAP_FREE (visited_blocks);
2075}
2076
2077/* The "gate" function for all transactional memory expansion and optimization
2078 passes. We collect region information for each top-level transaction, and
2079 if we don't find any, we skip all of the TM passes. Each region will have
2080 all of the exit blocks recorded, and the originating statement. */
2081
2082static bool
2083gate_tm_init (void)
2084{
2085 if (!flag_tm)
2086 return false;
2087
2088 calculate_dominance_info (CDI_DOMINATORS);
2089 bitmap_obstack_initialize (&tm_obstack);
2090
2091 /* If the function is a TM_CLONE, then the entire function is the region. */
2092 if (decl_is_tm_clone (current_function_decl))
2093 {
2094 struct tm_region *region = (struct tm_region *)
2095 obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
2096 memset (region, 0, sizeof (*region));
fefa31b5 2097 region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
0a35513e
AH
2098 /* For a clone, the entire function is the region. But even if
2099 we don't need to record any exit blocks, we may need to
2100 record irrevocable blocks. */
2101 region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
2102
2103 tm_region_init (region);
2104 }
2105 else
2106 {
2107 tm_region_init (NULL);
2108
2109 /* If we didn't find any regions, cleanup and skip the whole tree
2110 of tm-related optimizations. */
2111 if (all_tm_regions == NULL)
2112 {
2113 bitmap_obstack_release (&tm_obstack);
2114 return false;
2115 }
2116 }
2117
2118 return true;
2119}
2120
17795822
TS
2121namespace {
2122
2123const pass_data pass_data_tm_init =
27a4cd48
DM
2124{
2125 GIMPLE_PASS, /* type */
2126 "*tminit", /* name */
2127 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
2128 TV_TRANS_MEM, /* tv_id */
2129 ( PROP_ssa | PROP_cfg ), /* properties_required */
2130 0, /* properties_provided */
2131 0, /* properties_destroyed */
2132 0, /* todo_flags_start */
2133 0, /* todo_flags_finish */
0a35513e 2134};
27a4cd48 2135
17795822 2136class pass_tm_init : public gimple_opt_pass
27a4cd48
DM
2137{
2138public:
c3284718
RS
2139 pass_tm_init (gcc::context *ctxt)
2140 : gimple_opt_pass (pass_data_tm_init, ctxt)
27a4cd48
DM
2141 {}
2142
2143 /* opt_pass methods: */
1a3d085c 2144 virtual bool gate (function *) { return gate_tm_init (); }
27a4cd48
DM
2145
2146}; // class pass_tm_init
2147
17795822
TS
2148} // anon namespace
2149
27a4cd48
DM
2150gimple_opt_pass *
2151make_pass_tm_init (gcc::context *ctxt)
2152{
2153 return new pass_tm_init (ctxt);
2154}
0a35513e
AH
2155\f
2156/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2157 represented by STATE. */
2158
2159static inline void
2160transaction_subcode_ior (struct tm_region *region, unsigned flags)
2161{
2162 if (region && region->transaction_stmt)
2163 {
538dd0b7
DM
2164 gtransaction *transaction_stmt = region->get_transaction_stmt ();
2165 flags |= gimple_transaction_subcode (transaction_stmt);
2166 gimple_transaction_set_subcode (transaction_stmt, flags);
0a35513e
AH
2167 }
2168}
2169
2170/* Construct a memory load in a transactional context. Return the
2171 gimple statement performing the load, or NULL if there is no
2172 TM_LOAD builtin of the appropriate size to do the load.
2173
2174 LOC is the location to use for the new statement(s). */
2175
538dd0b7 2176static gcall *
0a35513e
AH
2177build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2178{
4c868789 2179 tree t, type = TREE_TYPE (rhs);
538dd0b7 2180 gcall *gcall;
0a35513e 2181
4c868789 2182 built_in_function code;
0a35513e
AH
2183 if (type == float_type_node)
2184 code = BUILT_IN_TM_LOAD_FLOAT;
2185 else if (type == double_type_node)
2186 code = BUILT_IN_TM_LOAD_DOUBLE;
2187 else if (type == long_double_type_node)
2188 code = BUILT_IN_TM_LOAD_LDOUBLE;
4c868789 2189 else
0a35513e 2190 {
4c868789
RH
2191 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2192 return NULL;
2193 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2194
2195 if (TREE_CODE (type) == VECTOR_TYPE)
0a35513e 2196 {
4c868789
RH
2197 switch (type_size)
2198 {
2199 case 64:
2200 code = BUILT_IN_TM_LOAD_M64;
2201 break;
2202 case 128:
2203 code = BUILT_IN_TM_LOAD_M128;
2204 break;
2205 case 256:
2206 code = BUILT_IN_TM_LOAD_M256;
2207 break;
2208 default:
2209 goto unhandled_vec;
2210 }
2211 if (!builtin_decl_explicit_p (code))
2212 goto unhandled_vec;
2213 }
2214 else
2215 {
2216 unhandled_vec:
2217 switch (type_size)
2218 {
2219 case 8:
2220 code = BUILT_IN_TM_LOAD_1;
2221 break;
2222 case 16:
2223 code = BUILT_IN_TM_LOAD_2;
2224 break;
2225 case 32:
2226 code = BUILT_IN_TM_LOAD_4;
2227 break;
2228 case 64:
2229 code = BUILT_IN_TM_LOAD_8;
2230 break;
2231 default:
2232 return NULL;
2233 }
0a35513e
AH
2234 }
2235 }
2236
4c868789
RH
2237 tree decl = builtin_decl_explicit (code);
2238 gcc_assert (decl);
0a35513e
AH
2239
2240 t = gimplify_addr (gsi, rhs);
2241 gcall = gimple_build_call (decl, 1, t);
2242 gimple_set_location (gcall, loc);
2243
2244 t = TREE_TYPE (TREE_TYPE (decl));
2245 if (useless_type_conversion_p (type, t))
2246 {
2247 gimple_call_set_lhs (gcall, lhs);
2248 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2249 }
2250 else
2251 {
355fe088 2252 gimple *g;
0a35513e
AH
2253 tree temp;
2254
b731b390 2255 temp = create_tmp_reg (t);
0a35513e
AH
2256 gimple_call_set_lhs (gcall, temp);
2257 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2258
2259 t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
2260 g = gimple_build_assign (lhs, t);
2261 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2262 }
2263
2264 return gcall;
2265}
2266
2267
2268/* Similarly for storing TYPE in a transactional context. */
2269
538dd0b7 2270static gcall *
0a35513e
AH
2271build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
2272{
0a35513e 2273 tree t, fn, type = TREE_TYPE (rhs), simple_type;
538dd0b7 2274 gcall *gcall;
0a35513e 2275
4c868789 2276 built_in_function code;
0a35513e
AH
2277 if (type == float_type_node)
2278 code = BUILT_IN_TM_STORE_FLOAT;
2279 else if (type == double_type_node)
2280 code = BUILT_IN_TM_STORE_DOUBLE;
2281 else if (type == long_double_type_node)
2282 code = BUILT_IN_TM_STORE_LDOUBLE;
4c868789 2283 else
0a35513e 2284 {
4c868789
RH
2285 if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type)))
2286 return NULL;
2287 unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type));
2288
2289 if (TREE_CODE (type) == VECTOR_TYPE)
0a35513e 2290 {
4c868789
RH
2291 switch (type_size)
2292 {
2293 case 64:
2294 code = BUILT_IN_TM_STORE_M64;
2295 break;
2296 case 128:
2297 code = BUILT_IN_TM_STORE_M128;
2298 break;
2299 case 256:
2300 code = BUILT_IN_TM_STORE_M256;
2301 break;
2302 default:
2303 goto unhandled_vec;
2304 }
2305 if (!builtin_decl_explicit_p (code))
2306 goto unhandled_vec;
2307 }
2308 else
2309 {
2310 unhandled_vec:
2311 switch (type_size)
2312 {
2313 case 8:
2314 code = BUILT_IN_TM_STORE_1;
2315 break;
2316 case 16:
2317 code = BUILT_IN_TM_STORE_2;
2318 break;
2319 case 32:
2320 code = BUILT_IN_TM_STORE_4;
2321 break;
2322 case 64:
2323 code = BUILT_IN_TM_STORE_8;
2324 break;
2325 default:
2326 return NULL;
2327 }
0a35513e
AH
2328 }
2329 }
2330
4c868789
RH
2331 fn = builtin_decl_explicit (code);
2332 gcc_assert (fn);
0a35513e
AH
2333
2334 simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
2335
2336 if (TREE_CODE (rhs) == CONSTRUCTOR)
2337 {
2338 /* Handle the easy initialization to zero. */
9771b263 2339 if (!CONSTRUCTOR_ELTS (rhs))
0a35513e
AH
2340 rhs = build_int_cst (simple_type, 0);
2341 else
2342 {
2343 /* ...otherwise punt to the caller and probably use
2344 BUILT_IN_TM_MEMMOVE, because we can't wrap a
2345 VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
2346 valid gimple. */
2347 return NULL;
2348 }
2349 }
2350 else if (!useless_type_conversion_p (simple_type, type))
2351 {
355fe088 2352 gimple *g;
0a35513e
AH
2353 tree temp;
2354
b731b390 2355 temp = create_tmp_reg (simple_type);
0a35513e
AH
2356 t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
2357 g = gimple_build_assign (temp, t);
2358 gimple_set_location (g, loc);
2359 gsi_insert_before (gsi, g, GSI_SAME_STMT);
2360
2361 rhs = temp;
2362 }
2363
2364 t = gimplify_addr (gsi, lhs);
2365 gcall = gimple_build_call (fn, 2, t, rhs);
2366 gimple_set_location (gcall, loc);
2367 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2368
2369 return gcall;
2370}
2371
2372
2373/* Expand an assignment statement into transactional builtins. */
2374
2375static void
2376expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
2377{
355fe088 2378 gimple *stmt = gsi_stmt (*gsi);
0a35513e
AH
2379 location_t loc = gimple_location (stmt);
2380 tree lhs = gimple_assign_lhs (stmt);
2381 tree rhs = gimple_assign_rhs1 (stmt);
2382 bool store_p = requires_barrier (region->entry_block, lhs, NULL);
2383 bool load_p = requires_barrier (region->entry_block, rhs, NULL);
355fe088 2384 gimple *gcall = NULL;
0a35513e
AH
2385
2386 if (!load_p && !store_p)
2387 {
2388 /* Add thread private addresses to log if applicable. */
2389 requires_barrier (region->entry_block, lhs, stmt);
2390 gsi_next (gsi);
2391 return;
2392 }
2393
4c868789
RH
2394 if (load_p)
2395 transaction_subcode_ior (region, GTMA_HAVE_LOAD);
2396 if (store_p)
2397 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2398
398b1daa 2399 // Remove original load/store statement.
0a35513e
AH
2400 gsi_remove (gsi, true);
2401
4c868789 2402 // Attempt to use a simple load/store helper function.
0a35513e 2403 if (load_p && !store_p)
4c868789 2404 gcall = build_tm_load (loc, lhs, rhs, gsi);
0a35513e 2405 else if (store_p && !load_p)
4c868789
RH
2406 gcall = build_tm_store (loc, lhs, rhs, gsi);
2407
2408 // If gcall has not been set, then we do not have a simple helper
2409 // function available for the type. This may be true of larger
2410 // structures, vectors, and non-standard float types.
0a35513e
AH
2411 if (!gcall)
2412 {
4c868789 2413 tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn;
0a35513e 2414
4c868789
RH
2415 // If this is a type that we couldn't handle above, but it's
2416 // in a register, we must spill it to memory for the copy.
2417 if (is_gimple_reg (lhs))
2418 {
2419 ltmp = create_tmp_var (TREE_TYPE (lhs));
2420 lhs_addr = build_fold_addr_expr (ltmp);
2421 }
2422 else
2423 lhs_addr = gimplify_addr (gsi, lhs);
2424 if (is_gimple_reg (rhs))
2425 {
2426 tree rtmp = create_tmp_var (TREE_TYPE (rhs));
2427 rhs_addr = build_fold_addr_expr (rtmp);
2428 gcall = gimple_build_assign (rtmp, rhs);
2429 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2430 }
2431 else
2432 rhs_addr = gimplify_addr (gsi, rhs);
713b8dfb 2433
4c868789
RH
2434 // Choose the appropriate memory transfer function.
2435 if (load_p && store_p)
713b8dfb 2436 {
4c868789
RH
2437 // ??? Figure out if there's any possible overlap between
2438 // the LHS and the RHS and if not, use MEMCPY.
2439 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
2440 }
2441 else if (load_p)
2442 {
2443 // Note that the store is non-transactional and cannot overlap.
2444 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN);
713b8dfb
AH
2445 }
2446 else
2447 {
4c868789
RH
2448 // Note that the load is non-transactional and cannot overlap.
2449 copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT);
713b8dfb 2450 }
4c868789
RH
2451
2452 gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr,
0a35513e
AH
2453 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
2454 gimple_set_location (gcall, loc);
2455 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
713b8dfb 2456
4c868789 2457 if (ltmp)
713b8dfb 2458 {
4c868789 2459 gcall = gimple_build_assign (lhs, ltmp);
713b8dfb
AH
2460 gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
2461 }
0a35513e
AH
2462 }
2463
4c868789
RH
2464 // Now that we have the load/store in its instrumented form, add
2465 // thread private addresses to the log if applicable.
0a35513e
AH
2466 if (!store_p)
2467 requires_barrier (region->entry_block, lhs, gcall);
0a35513e
AH
2468}
2469
2470
2471/* Expand a call statement as appropriate for a transaction. That is,
2472 either verify that the call does not affect the transaction, or
2473 redirect the call to a clone that handles transactions, or change
2474 the transaction state to IRREVOCABLE. Return true if the call is
2475 one of the builtins that end a transaction. */
2476
2477static bool
2478expand_call_tm (struct tm_region *region,
2479 gimple_stmt_iterator *gsi)
2480{
538dd0b7 2481 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
0a35513e
AH
2482 tree lhs = gimple_call_lhs (stmt);
2483 tree fn_decl;
2484 struct cgraph_node *node;
2485 bool retval = false;
2486
2487 fn_decl = gimple_call_fndecl (stmt);
2488
2489 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
2490 || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
2491 transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
2492 if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
2493 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2494
2495 if (is_tm_pure_call (stmt))
2496 return false;
2497
2498 if (fn_decl)
2499 retval = is_tm_ending_fndecl (fn_decl);
2500 if (!retval)
2501 {
2502 /* Assume all non-const/pure calls write to memory, except
2503 transaction ending builtins. */
2504 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2505 }
2506
2507 /* For indirect calls, we already generated a call into the runtime. */
2508 if (!fn_decl)
2509 {
2510 tree fn = gimple_call_fn (stmt);
2511
2512 /* We are guaranteed never to go irrevocable on a safe or pure
2513 call, and the pure call was handled above. */
2514 if (is_tm_safe (fn))
2515 return false;
2516 else
2517 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2518
2519 return false;
2520 }
2521
d52f5295 2522 node = cgraph_node::get (fn_decl);
91cad09b
AH
2523 /* All calls should have cgraph here. */
2524 if (!node)
2525 {
2526 /* We can have a nodeless call here if some pass after IPA-tm
2527 added uninstrumented calls. For example, loop distribution
2528 can transform certain loop constructs into __builtin_mem*
2529 calls. In this case, see if we have a suitable TM
2530 replacement and fill in the gaps. */
2531 gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL);
2532 enum built_in_function code = DECL_FUNCTION_CODE (fn_decl);
2533 gcc_assert (code == BUILT_IN_MEMCPY
2534 || code == BUILT_IN_MEMMOVE
2535 || code == BUILT_IN_MEMSET);
2536
2537 tree repl = find_tm_replacement_function (fn_decl);
2538 if (repl)
2539 {
2540 gimple_call_set_fndecl (stmt, repl);
2541 update_stmt (stmt);
d52f5295 2542 node = cgraph_node::create (repl);
87f94429 2543 node->tm_may_enter_irr = false;
91cad09b
AH
2544 return expand_call_tm (region, gsi);
2545 }
2546 gcc_unreachable ();
2547 }
87f94429 2548 if (node->tm_may_enter_irr)
0a35513e
AH
2549 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
2550
2551 if (is_tm_abort (fn_decl))
2552 {
2553 transaction_subcode_ior (region, GTMA_HAVE_ABORT);
2554 return true;
2555 }
2556
2557 /* Instrument the store if needed.
2558
2559 If the assignment happens inside the function call (return slot
2560 optimization), there is no instrumentation to be done, since
2561 the callee should have done the right thing. */
2562 if (lhs && requires_barrier (region->entry_block, lhs, stmt)
2563 && !gimple_call_return_slot_opt_p (stmt))
2564 {
b731b390 2565 tree tmp = create_tmp_reg (TREE_TYPE (lhs));
0a35513e
AH
2566 location_t loc = gimple_location (stmt);
2567 edge fallthru_edge = NULL;
538dd0b7 2568 gassign *assign_stmt;
0a35513e
AH
2569
2570 /* Remember if the call was going to throw. */
36bbc05d 2571 if (stmt_can_throw_internal (cfun, stmt))
0a35513e
AH
2572 {
2573 edge_iterator ei;
2574 edge e;
2575 basic_block bb = gimple_bb (stmt);
2576
2577 FOR_EACH_EDGE (e, ei, bb->succs)
2578 if (e->flags & EDGE_FALLTHRU)
2579 {
2580 fallthru_edge = e;
2581 break;
2582 }
2583 }
2584
2585 gimple_call_set_lhs (stmt, tmp);
2586 update_stmt (stmt);
538dd0b7
DM
2587 assign_stmt = gimple_build_assign (lhs, tmp);
2588 gimple_set_location (assign_stmt, loc);
0a35513e
AH
2589
2590 /* We cannot throw in the middle of a BB. If the call was going
2591 to throw, place the instrumentation on the fallthru edge, so
2592 the call remains the last statement in the block. */
2593 if (fallthru_edge)
2594 {
538dd0b7 2595 gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt);
0a35513e
AH
2596 gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
2597 expand_assign_tm (region, &fallthru_gsi);
2598 gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
2599 pending_edge_inserts_p = true;
2600 }
2601 else
2602 {
538dd0b7 2603 gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING);
0a35513e
AH
2604 expand_assign_tm (region, gsi);
2605 }
2606
2607 transaction_subcode_ior (region, GTMA_HAVE_STORE);
2608 }
2609
2610 return retval;
2611}
2612
2613
2614/* Expand all statements in BB as appropriate for being inside
2615 a transaction. */
2616
2617static void
2618expand_block_tm (struct tm_region *region, basic_block bb)
2619{
2620 gimple_stmt_iterator gsi;
2621
2622 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2623 {
355fe088 2624 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
2625 switch (gimple_code (stmt))
2626 {
2627 case GIMPLE_ASSIGN:
2628 /* Only memory reads/writes need to be instrumented. */
56cb44d4
MM
2629 if (gimple_assign_single_p (stmt)
2630 && !gimple_clobber_p (stmt))
0a35513e
AH
2631 {
2632 expand_assign_tm (region, &gsi);
2633 continue;
2634 }
2635 break;
2636
2637 case GIMPLE_CALL:
2638 if (expand_call_tm (region, &gsi))
2639 return;
2640 break;
2641
2642 case GIMPLE_ASM:
2643 gcc_unreachable ();
2644
2645 default:
2646 break;
2647 }
2648 if (!gsi_end_p (gsi))
2649 gsi_next (&gsi);
2650 }
2651}
2652
2653/* Return the list of basic-blocks in REGION.
2654
2655 STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
0ed3d24e
AH
2656 following a TM_IRREVOCABLE call.
2657
2658 INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the
2659 uninstrumented code path blocks in the list of basic blocks
2660 returned, false otherwise. */
0a35513e 2661
9771b263 2662static vec<basic_block>
0a35513e
AH
2663get_tm_region_blocks (basic_block entry_block,
2664 bitmap exit_blocks,
2665 bitmap irr_blocks,
2666 bitmap all_region_blocks,
0ed3d24e
AH
2667 bool stop_at_irrevocable_p,
2668 bool include_uninstrumented_p = true)
0a35513e 2669{
6e1aa848 2670 vec<basic_block> bbs = vNULL;
0a35513e
AH
2671 unsigned i;
2672 edge e;
2673 edge_iterator ei;
2674 bitmap visited_blocks = BITMAP_ALLOC (NULL);
2675
2676 i = 0;
9771b263 2677 bbs.safe_push (entry_block);
0a35513e
AH
2678 bitmap_set_bit (visited_blocks, entry_block->index);
2679
2680 do
2681 {
9771b263 2682 basic_block bb = bbs[i++];
0a35513e
AH
2683
2684 if (exit_blocks &&
2685 bitmap_bit_p (exit_blocks, bb->index))
2686 continue;
2687
2688 if (stop_at_irrevocable_p
2689 && irr_blocks
2690 && bitmap_bit_p (irr_blocks, bb->index))
2691 continue;
2692
2693 FOR_EACH_EDGE (e, ei, bb->succs)
0ed3d24e
AH
2694 if ((include_uninstrumented_p
2695 || !(e->flags & EDGE_TM_UNINSTRUMENTED))
2696 && !bitmap_bit_p (visited_blocks, e->dest->index))
0a35513e
AH
2697 {
2698 bitmap_set_bit (visited_blocks, e->dest->index);
9771b263 2699 bbs.safe_push (e->dest);
0a35513e
AH
2700 }
2701 }
9771b263 2702 while (i < bbs.length ());
0a35513e
AH
2703
2704 if (all_region_blocks)
2705 bitmap_ior_into (all_region_blocks, visited_blocks);
2706
2707 BITMAP_FREE (visited_blocks);
2708 return bbs;
2709}
2710
0ed3d24e
AH
2711// Callback data for collect_bb2reg.
2712struct bb2reg_stuff
2713{
526ceb68 2714 vec<tm_region *> *bb2reg;
0ed3d24e
AH
2715 bool include_uninstrumented_p;
2716};
2717
398b1daa
AH
2718// Callback for expand_regions, collect innermost region data for each bb.
2719static void *
2720collect_bb2reg (struct tm_region *region, void *data)
2721{
0ed3d24e 2722 struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
526ceb68 2723 vec<tm_region *> *bb2reg = stuff->bb2reg;
9771b263 2724 vec<basic_block> queue;
398b1daa
AH
2725 unsigned int i;
2726 basic_block bb;
2727
2728 queue = get_tm_region_blocks (region->entry_block,
2729 region->exit_blocks,
2730 region->irr_blocks,
2731 NULL,
0ed3d24e
AH
2732 /*stop_at_irr_p=*/true,
2733 stuff->include_uninstrumented_p);
398b1daa
AH
2734
2735 // We expect expand_region to perform a post-order traversal of the region
2736 // tree. Therefore the last region seen for any bb is the innermost.
9771b263
DN
2737 FOR_EACH_VEC_ELT (queue, i, bb)
2738 (*bb2reg)[bb->index] = region;
398b1daa 2739
9771b263 2740 queue.release ();
398b1daa
AH
2741 return NULL;
2742}
2743
2744// Returns a vector, indexed by BB->INDEX, of the innermost tm_region to
2745// which a basic block belongs. Note that we only consider the instrumented
0ed3d24e
AH
2746// code paths for the region; the uninstrumented code paths are ignored if
2747// INCLUDE_UNINSTRUMENTED_P is false.
398b1daa
AH
2748//
2749// ??? This data is very similar to the bb_regions array that is collected
2750// during tm_region_init. Or, rather, this data is similar to what could
2751// be used within tm_region_init. The actual computation in tm_region_init
2752// begins and ends with bb_regions entirely full of NULL pointers, due to
2753// the way in which pointers are swapped in and out of the array.
2754//
2755// ??? Our callers expect that blocks are not shared between transactions.
2756// When the optimizers get too smart, and blocks are shared, then during
2757// the tm_mark phase we'll add log entries to only one of the two transactions,
2758// and in the tm_edge phase we'll add edges to the CFG that create invalid
2759// cycles. The symptom being SSA defs that do not dominate their uses.
2760// Note that the optimizers were locally correct with their transformation,
2761// as we have no info within the program that suggests that the blocks cannot
2762// be shared.
2763//
2764// ??? There is currently a hack inside tree-ssa-pre.c to work around the
2765// only known instance of this block sharing.
2766
526ceb68 2767static vec<tm_region *>
0ed3d24e
AH
2768get_bb_regions_instrumented (bool traverse_clones,
2769 bool include_uninstrumented_p)
398b1daa 2770{
8b1c6fd7 2771 unsigned n = last_basic_block_for_fn (cfun);
0ed3d24e 2772 struct bb2reg_stuff stuff;
526ceb68 2773 vec<tm_region *> ret;
398b1daa 2774
9771b263
DN
2775 ret.create (n);
2776 ret.safe_grow_cleared (n);
0ed3d24e
AH
2777 stuff.bb2reg = &ret;
2778 stuff.include_uninstrumented_p = include_uninstrumented_p;
2779 expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
398b1daa
AH
2780
2781 return ret;
2782}
2783
19c0d7df
AH
2784/* Set the IN_TRANSACTION for all gimple statements that appear in a
2785 transaction. */
2786
2787void
2788compute_transaction_bits (void)
2789{
2790 struct tm_region *region;
9771b263 2791 vec<basic_block> queue;
19c0d7df 2792 unsigned int i;
19c0d7df
AH
2793 basic_block bb;
2794
2795 /* ?? Perhaps we need to abstract gate_tm_init further, because we
2796 certainly don't need it to calculate CDI_DOMINATOR info. */
2797 gate_tm_init ();
2798
11cd3bed 2799 FOR_EACH_BB_FN (bb, cfun)
83e1a7f0
AH
2800 bb->flags &= ~BB_IN_TRANSACTION;
2801
19c0d7df
AH
2802 for (region = all_tm_regions; region; region = region->next)
2803 {
2804 queue = get_tm_region_blocks (region->entry_block,
2805 region->exit_blocks,
2806 region->irr_blocks,
2807 NULL,
2808 /*stop_at_irr_p=*/true);
9771b263 2809 for (i = 0; queue.iterate (i, &bb); ++i)
83e1a7f0 2810 bb->flags |= BB_IN_TRANSACTION;
9771b263 2811 queue.release ();
19c0d7df
AH
2812 }
2813
2814 if (all_tm_regions)
2815 bitmap_obstack_release (&tm_obstack);
2816}
2817
398b1daa
AH
2818/* Replace the GIMPLE_TRANSACTION in this region with the corresponding
2819 call to BUILT_IN_TM_START. */
2820
2821static void *
2822expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
2823{
2824 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
2825 basic_block transaction_bb = gimple_bb (region->transaction_stmt);
2826 tree tm_state = region->tm_state;
2827 tree tm_state_type = TREE_TYPE (tm_state);
2828 edge abort_edge = NULL;
2829 edge inst_edge = NULL;
2830 edge uninst_edge = NULL;
2831 edge fallthru_edge = NULL;
2832
2833 // Identify the various successors of the transaction start.
2834 {
2835 edge_iterator i;
2836 edge e;
2837 FOR_EACH_EDGE (e, i, transaction_bb->succs)
2838 {
2839 if (e->flags & EDGE_TM_ABORT)
2840 abort_edge = e;
2841 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
2842 uninst_edge = e;
2843 else
2844 inst_edge = e;
2845 if (e->flags & EDGE_FALLTHRU)
2846 fallthru_edge = e;
2847 }
2848 }
2849
2850 /* ??? There are plenty of bits here we're not computing. */
2851 {
538dd0b7 2852 int subcode = gimple_transaction_subcode (region->get_transaction_stmt ());
398b1daa
AH
2853 int flags = 0;
2854 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
2855 flags |= PR_DOESGOIRREVOCABLE;
2856 if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
2857 flags |= PR_HASNOIRREVOCABLE;
2858 /* If the transaction does not have an abort in lexical scope and is not
2859 marked as an outer transaction, then it will never abort. */
2860 if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0)
2861 flags |= PR_HASNOABORT;
2862 if ((subcode & GTMA_HAVE_STORE) == 0)
2863 flags |= PR_READONLY;
b7a78683 2864 if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION))
398b1daa
AH
2865 flags |= PR_INSTRUMENTEDCODE;
2866 if (uninst_edge)
2867 flags |= PR_UNINSTRUMENTEDCODE;
2868 if (subcode & GTMA_IS_OUTER)
2869 region->original_transaction_was_outer = true;
2870 tree t = build_int_cst (tm_state_type, flags);
538dd0b7 2871 gcall *call = gimple_build_call (tm_start, 1, t);
398b1daa
AH
2872 gimple_call_set_lhs (call, tm_state);
2873 gimple_set_location (call, gimple_location (region->transaction_stmt));
2874
2875 // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START.
2876 gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb);
2877 gcc_assert (gsi_stmt (gsi) == region->transaction_stmt);
2878 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
2879 gsi_remove (&gsi, true);
2880 region->transaction_stmt = call;
2881 }
2882
2883 // Generate log saves.
9771b263 2884 if (!tm_log_save_addresses.is_empty ())
398b1daa
AH
2885 tm_log_emit_saves (region->entry_block, transaction_bb);
2886
2887 // In the beginning, we've no tests to perform on transaction restart.
2888 // Note that after this point, transaction_bb becomes the "most recent
2889 // block containing tests for the transaction".
2890 region->restart_block = region->entry_block;
2891
2892 // Generate log restores.
9771b263 2893 if (!tm_log_save_addresses.is_empty ())
398b1daa
AH
2894 {
2895 basic_block test_bb = create_empty_bb (transaction_bb);
2896 basic_block code_bb = create_empty_bb (test_bb);
2897 basic_block join_bb = create_empty_bb (code_bb);
726338f4
RB
2898 add_bb_to_loop (test_bb, transaction_bb->loop_father);
2899 add_bb_to_loop (code_bb, transaction_bb->loop_father);
2900 add_bb_to_loop (join_bb, transaction_bb->loop_father);
398b1daa
AH
2901 if (region->restart_block == region->entry_block)
2902 region->restart_block = test_bb;
2903
b731b390 2904 tree t1 = create_tmp_reg (tm_state_type);
398b1daa 2905 tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES);
355fe088 2906 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
398b1daa
AH
2907 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2908 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2909
2910 t2 = build_int_cst (tm_state_type, 0);
2911 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2912 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2913
2914 tm_log_emit_restores (region->entry_block, code_bb);
2915
2916 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
2917 edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE);
2918 edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE);
2919 redirect_edge_pred (fallthru_edge, join_bb);
2920
398b1daa
AH
2921 join_bb->count = test_bb->count = transaction_bb->count;
2922
357067f2
JH
2923 ei->probability = profile_probability::always ();
2924 et->probability = profile_probability::likely ();
2925 ef->probability = profile_probability::unlikely ();
398b1daa 2926
ef30ab83 2927 code_bb->count = et->count ();
398b1daa
AH
2928
2929 transaction_bb = join_bb;
2930 }
2931
2932 // If we have an ABORT edge, create a test to perform the abort.
2933 if (abort_edge)
2934 {
2935 basic_block test_bb = create_empty_bb (transaction_bb);
726338f4 2936 add_bb_to_loop (test_bb, transaction_bb->loop_father);
398b1daa
AH
2937 if (region->restart_block == region->entry_block)
2938 region->restart_block = test_bb;
2939
b731b390 2940 tree t1 = create_tmp_reg (tm_state_type);
398b1daa 2941 tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION);
355fe088 2942 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
398b1daa
AH
2943 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2944 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2945
2946 t2 = build_int_cst (tm_state_type, 0);
2947 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2948 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2949
2950 edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU);
398b1daa 2951 test_bb->count = transaction_bb->count;
357067f2 2952 ei->probability = profile_probability::always ();
398b1daa
AH
2953
2954 // Not abort edge. If both are live, chose one at random as we'll
2955 // we'll be fixing that up below.
2956 redirect_edge_pred (fallthru_edge, test_bb);
2957 fallthru_edge->flags = EDGE_FALSE_VALUE;
357067f2 2958 fallthru_edge->probability = profile_probability::very_likely ();
398b1daa
AH
2959
2960 // Abort/over edge.
2961 redirect_edge_pred (abort_edge, test_bb);
2962 abort_edge->flags = EDGE_TRUE_VALUE;
357067f2 2963 abort_edge->probability = profile_probability::unlikely ();
398b1daa
AH
2964
2965 transaction_bb = test_bb;
2966 }
2967
2968 // If we have both instrumented and uninstrumented code paths, select one.
2969 if (inst_edge && uninst_edge)
2970 {
2971 basic_block test_bb = create_empty_bb (transaction_bb);
726338f4 2972 add_bb_to_loop (test_bb, transaction_bb->loop_father);
398b1daa
AH
2973 if (region->restart_block == region->entry_block)
2974 region->restart_block = test_bb;
2975
b731b390 2976 tree t1 = create_tmp_reg (tm_state_type);
398b1daa
AH
2977 tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE);
2978
355fe088 2979 gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2);
398b1daa
AH
2980 gimple_stmt_iterator gsi = gsi_last_bb (test_bb);
2981 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2982
2983 t2 = build_int_cst (tm_state_type, 0);
2984 stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
2985 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2986
2987 // Create the edge into test_bb first, as we want to copy values
2988 // out of the fallthru edge.
2989 edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags);
2990 e->probability = fallthru_edge->probability;
ef30ab83 2991 test_bb->count = fallthru_edge->count ();
398b1daa
AH
2992
2993 // Now update the edges to the inst/uninist implementations.
2994 // For now assume that the paths are equally likely. When using HTM,
2995 // we'll try the uninst path first and fallback to inst path if htm
2996 // buffers are exceeded. Without HTM we start with the inst path and
2997 // use the uninst path when falling back to serial mode.
2998 redirect_edge_pred (inst_edge, test_bb);
2999 inst_edge->flags = EDGE_FALSE_VALUE;
357067f2 3000 inst_edge->probability = profile_probability::even ();
398b1daa
AH
3001
3002 redirect_edge_pred (uninst_edge, test_bb);
3003 uninst_edge->flags = EDGE_TRUE_VALUE;
357067f2 3004 uninst_edge->probability = profile_probability::even ();
398b1daa
AH
3005 }
3006
3007 // If we have no previous special cases, and we have PHIs at the beginning
3008 // of the atomic region, this means we have a loop at the beginning of the
3009 // atomic region that shares the first block. This can cause problems with
3010 // the transaction restart abnormal edges to be added in the tm_edges pass.
3011 // Solve this by adding a new empty block to receive the abnormal edges.
3012 if (region->restart_block == region->entry_block
3013 && phi_nodes (region->entry_block))
3014 {
3015 basic_block empty_bb = create_empty_bb (transaction_bb);
3016 region->restart_block = empty_bb;
726338f4 3017 add_bb_to_loop (empty_bb, transaction_bb->loop_father);
398b1daa
AH
3018
3019 redirect_edge_pred (fallthru_edge, empty_bb);
3020 make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU);
3021 }
3022
3023 return NULL;
3024}
3025
3026/* Generate the temporary to be used for the return value of
3027 BUILT_IN_TM_START. */
3028
3029static void *
3030generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
3031{
3032 tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
3033 region->tm_state =
3034 create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
3035
3036 // Reset the subcode, post optimizations. We'll fill this in
3037 // again as we process blocks.
3038 if (region->exit_blocks)
3039 {
538dd0b7
DM
3040 gtransaction *transaction_stmt = region->get_transaction_stmt ();
3041 unsigned int subcode = gimple_transaction_subcode (transaction_stmt);
398b1daa
AH
3042
3043 if (subcode & GTMA_DOES_GO_IRREVOCABLE)
3044 subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
b7a78683
AH
3045 | GTMA_MAY_ENTER_IRREVOCABLE
3046 | GTMA_HAS_NO_INSTRUMENTATION);
398b1daa
AH
3047 else
3048 subcode &= GTMA_DECLARATION_MASK;
538dd0b7 3049 gimple_transaction_set_subcode (transaction_stmt, subcode);
398b1daa
AH
3050 }
3051
3052 return NULL;
3053}
3054
3055// Propagate flags from inner transactions outwards.
3056static void
3057propagate_tm_flags_out (struct tm_region *region)
3058{
3059 if (region == NULL)
3060 return;
3061 propagate_tm_flags_out (region->inner);
3062
3063 if (region->outer && region->outer->transaction_stmt)
3064 {
538dd0b7
DM
3065 unsigned s
3066 = gimple_transaction_subcode (region->get_transaction_stmt ());
398b1daa
AH
3067 s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE
3068 | GTMA_MAY_ENTER_IRREVOCABLE);
538dd0b7
DM
3069 s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ());
3070 gimple_transaction_set_subcode (region->outer->get_transaction_stmt (),
3071 s);
398b1daa
AH
3072 }
3073
3074 propagate_tm_flags_out (region->next);
3075}
3076
0a35513e
AH
3077/* Entry point to the MARK phase of TM expansion. Here we replace
3078 transactional memory statements with calls to builtins, and function
3079 calls with their transactional clones (if available). But we don't
3080 yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
3081
3082static unsigned int
3083execute_tm_mark (void)
3084{
0a35513e
AH
3085 pending_edge_inserts_p = false;
3086
b5e10eac
AH
3087 expand_regions (all_tm_regions, generate_tm_state, NULL,
3088 /*traverse_clones=*/true);
0a35513e 3089
398b1daa 3090 tm_log_init ();
0a35513e 3091
526ceb68 3092 vec<tm_region *> bb_regions
0ed3d24e
AH
3093 = get_bb_regions_instrumented (/*traverse_clones=*/true,
3094 /*include_uninstrumented_p=*/false);
398b1daa
AH
3095 struct tm_region *r;
3096 unsigned i;
0a35513e 3097
398b1daa
AH
3098 // Expand memory operations into calls into the runtime.
3099 // This collects log entries as well.
9771b263 3100 FOR_EACH_VEC_ELT (bb_regions, i, r)
6aad4455
AH
3101 {
3102 if (r != NULL)
3103 {
3104 if (r->transaction_stmt)
3105 {
538dd0b7
DM
3106 unsigned sub
3107 = gimple_transaction_subcode (r->get_transaction_stmt ());
6aad4455
AH
3108
3109 /* If we're sure to go irrevocable, there won't be
3110 anything to expand, since the run-time will go
3111 irrevocable right away. */
3112 if (sub & GTMA_DOES_GO_IRREVOCABLE
3113 && sub & GTMA_MAY_ENTER_IRREVOCABLE)
3114 continue;
3115 }
06e28de2 3116 expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i));
6aad4455
AH
3117 }
3118 }
398b1daa 3119
639498a8
AH
3120 bb_regions.release ();
3121
398b1daa
AH
3122 // Propagate flags from inner transactions outwards.
3123 propagate_tm_flags_out (all_tm_regions);
3124
3125 // Expand GIMPLE_TRANSACTIONs into calls into the runtime.
b5e10eac
AH
3126 expand_regions (all_tm_regions, expand_transaction, NULL,
3127 /*traverse_clones=*/false);
398b1daa
AH
3128
3129 tm_log_emit ();
3130 tm_log_delete ();
0a35513e
AH
3131
3132 if (pending_edge_inserts_p)
3133 gsi_commit_edge_inserts ();
398b1daa 3134 free_dominance_info (CDI_DOMINATORS);
0a35513e
AH
3135 return 0;
3136}
3137
17795822
TS
3138namespace {
3139
3140const pass_data pass_data_tm_mark =
27a4cd48
DM
3141{
3142 GIMPLE_PASS, /* type */
3143 "tmmark", /* name */
3144 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
3145 TV_TRANS_MEM, /* tv_id */
3146 ( PROP_ssa | PROP_cfg ), /* properties_required */
3147 0, /* properties_provided */
3148 0, /* properties_destroyed */
3149 0, /* todo_flags_start */
3bea341f 3150 TODO_update_ssa, /* todo_flags_finish */
0a35513e 3151};
27a4cd48 3152
17795822 3153class pass_tm_mark : public gimple_opt_pass
27a4cd48
DM
3154{
3155public:
c3284718
RS
3156 pass_tm_mark (gcc::context *ctxt)
3157 : gimple_opt_pass (pass_data_tm_mark, ctxt)
27a4cd48
DM
3158 {}
3159
3160 /* opt_pass methods: */
be55bfe6 3161 virtual unsigned int execute (function *) { return execute_tm_mark (); }
27a4cd48
DM
3162
3163}; // class pass_tm_mark
3164
17795822
TS
3165} // anon namespace
3166
27a4cd48
DM
3167gimple_opt_pass *
3168make_pass_tm_mark (gcc::context *ctxt)
3169{
3170 return new pass_tm_mark (ctxt);
3171}
0a35513e 3172\f
398b1daa
AH
3173
3174/* Create an abnormal edge from STMT at iter, splitting the block
3175 as necessary. Adjust *PNEXT as needed for the split block. */
0a35513e
AH
3176
3177static inline void
355fe088 3178split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb,
398b1daa 3179 gimple_stmt_iterator iter, gimple_stmt_iterator *pnext)
0a35513e 3180{
398b1daa
AH
3181 basic_block bb = gimple_bb (stmt);
3182 if (!gsi_one_before_end_p (iter))
3183 {
3184 edge e = split_block (bb, stmt);
3185 *pnext = gsi_start_bb (e->dest);
3186 }
e39df546
JH
3187 edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL);
3188 if (e)
ef30ab83 3189 e->probability = profile_probability::guessed_never ();
0a35513e 3190
398b1daa 3191 // Record the need for the edge for the benefit of the rtl passes.
0a35513e 3192 if (cfun->gimple_df->tm_restart == NULL)
50979347
TS
3193 cfun->gimple_df->tm_restart
3194 = hash_table<tm_restart_hasher>::create_ggc (31);
0a35513e 3195
398b1daa 3196 struct tm_restart_node dummy;
0a35513e 3197 dummy.stmt = stmt;
398b1daa
AH
3198 dummy.label_or_list = gimple_block_label (dest_bb);
3199
50979347
TS
3200 tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy,
3201 INSERT);
3202 struct tm_restart_node *n = *slot;
0a35513e
AH
3203 if (n == NULL)
3204 {
766090c2 3205 n = ggc_alloc<tm_restart_node> ();
0a35513e
AH
3206 *n = dummy;
3207 }
3208 else
3209 {
3210 tree old = n->label_or_list;
3211 if (TREE_CODE (old) == LABEL_DECL)
398b1daa 3212 old = tree_cons (NULL, old, NULL);
0a35513e
AH
3213 n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
3214 }
0a35513e
AH
3215}
3216
0a35513e
AH
3217/* Split block BB as necessary for every builtin function we added, and
3218 wire up the abnormal back edges implied by the transaction restart. */
3219
3220static void
398b1daa 3221expand_block_edges (struct tm_region *const region, basic_block bb)
0a35513e 3222{
398b1daa 3223 gimple_stmt_iterator gsi, next_gsi;
0a35513e 3224
398b1daa 3225 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi)
0a35513e 3226 {
355fe088 3227 gimple *stmt = gsi_stmt (gsi);
538dd0b7 3228 gcall *call_stmt;
0a35513e 3229
398b1daa
AH
3230 next_gsi = gsi;
3231 gsi_next (&next_gsi);
3232
3233 // ??? Shouldn't we split for any non-pure, non-irrevocable function?
538dd0b7
DM
3234 call_stmt = dyn_cast <gcall *> (stmt);
3235 if ((!call_stmt)
3236 || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0)
398b1daa
AH
3237 continue;
3238
cb1180d5 3239 if (gimple_call_builtin_p (call_stmt, BUILT_IN_TM_ABORT))
0a35513e 3240 {
398b1daa
AH
3241 // If we have a ``_transaction_cancel [[outer]]'', there is only
3242 // one abnormal edge: to the transaction marked OUTER.
3243 // All compiler-generated instances of BUILT_IN_TM_ABORT have a
3244 // constant argument, which we can examine here. Users invoking
3245 // TM_ABORT directly get what they deserve.
538dd0b7 3246 tree arg = gimple_call_arg (call_stmt, 0);
398b1daa
AH
3247 if (TREE_CODE (arg) == INTEGER_CST
3248 && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0
3249 && !decl_is_tm_clone (current_function_decl))
0a35513e 3250 {
398b1daa
AH
3251 // Find the GTMA_IS_OUTER transaction.
3252 for (struct tm_region *o = region; o; o = o->outer)
3253 if (o->original_transaction_was_outer)
3254 {
538dd0b7 3255 split_bb_make_tm_edge (call_stmt, o->restart_block,
398b1daa
AH
3256 gsi, &next_gsi);
3257 break;
3258 }
3259
3260 // Otherwise, the front-end should have semantically checked
3261 // outer aborts, but in either case the target region is not
3262 // within this function.
3263 continue;
0a35513e
AH
3264 }
3265
398b1daa
AH
3266 // Non-outer, TM aborts have an abnormal edge to the inner-most
3267 // transaction, the one being aborted;
538dd0b7
DM
3268 split_bb_make_tm_edge (call_stmt, region->restart_block, gsi,
3269 &next_gsi);
0a35513e
AH
3270 }
3271
398b1daa
AH
3272 // All TM builtins have an abnormal edge to the outer-most transaction.
3273 // We never restart inner transactions. For tm clones, we know a-priori
3274 // that the outer-most transaction is outside the function.
3275 if (decl_is_tm_clone (current_function_decl))
3276 continue;
0a35513e 3277
398b1daa
AH
3278 if (cfun->gimple_df->tm_restart == NULL)
3279 cfun->gimple_df->tm_restart
50979347 3280 = hash_table<tm_restart_hasher>::create_ggc (31);
0a35513e 3281
398b1daa
AH
3282 // All TM builtins have an abnormal edge to the outer-most transaction.
3283 // We never restart inner transactions.
3284 for (struct tm_region *o = region; o; o = o->outer)
3285 if (!o->outer)
3286 {
538dd0b7 3287 split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi);
398b1daa
AH
3288 break;
3289 }
0a35513e 3290
398b1daa
AH
3291 // Delete any tail-call annotation that may have been added.
3292 // The tail-call pass may have mis-identified the commit as being
3293 // a candidate because we had not yet added this restart edge.
538dd0b7 3294 gimple_call_set_tail (call_stmt, false);
0a35513e
AH
3295 }
3296}
3297
3298/* Entry point to the final expansion of transactional nodes. */
3299
17795822
TS
3300namespace {
3301
3302const pass_data pass_data_tm_edges =
27a4cd48
DM
3303{
3304 GIMPLE_PASS, /* type */
3305 "tmedge", /* name */
3306 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
3307 TV_TRANS_MEM, /* tv_id */
3308 ( PROP_ssa | PROP_cfg ), /* properties_required */
3309 0, /* properties_provided */
3310 0, /* properties_destroyed */
3311 0, /* todo_flags_start */
3bea341f 3312 TODO_update_ssa, /* todo_flags_finish */
0a35513e 3313};
27a4cd48 3314
17795822 3315class pass_tm_edges : public gimple_opt_pass
27a4cd48
DM
3316{
3317public:
c3284718
RS
3318 pass_tm_edges (gcc::context *ctxt)
3319 : gimple_opt_pass (pass_data_tm_edges, ctxt)
27a4cd48
DM
3320 {}
3321
3322 /* opt_pass methods: */
be55bfe6 3323 virtual unsigned int execute (function *);
27a4cd48
DM
3324
3325}; // class pass_tm_edges
3326
be55bfe6
TS
3327unsigned int
3328pass_tm_edges::execute (function *fun)
3329{
526ceb68 3330 vec<tm_region *> bb_regions
be55bfe6
TS
3331 = get_bb_regions_instrumented (/*traverse_clones=*/false,
3332 /*include_uninstrumented_p=*/true);
3333 struct tm_region *r;
3334 unsigned i;
3335
3336 FOR_EACH_VEC_ELT (bb_regions, i, r)
3337 if (r != NULL)
3338 expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i));
3339
3340 bb_regions.release ();
3341
3342 /* We've got to release the dominance info now, to indicate that it
3343 must be rebuilt completely. Otherwise we'll crash trying to update
3344 the SSA web in the TODO section following this pass. */
3345 free_dominance_info (CDI_DOMINATORS);
687aed9c
RB
3346 /* We'ge also wrecked loops badly with inserting of abnormal edges. */
3347 loops_state_set (LOOPS_NEED_FIXUP);
be55bfe6
TS
3348 bitmap_obstack_release (&tm_obstack);
3349 all_tm_regions = NULL;
3350
3351 return 0;
3352}
3353
17795822
TS
3354} // anon namespace
3355
27a4cd48
DM
3356gimple_opt_pass *
3357make_pass_tm_edges (gcc::context *ctxt)
3358{
3359 return new pass_tm_edges (ctxt);
3360}
398b1daa
AH
3361\f
3362/* Helper function for expand_regions. Expand REGION and recurse to
3363 the inner region. Call CALLBACK on each region. CALLBACK returns
3364 NULL to continue the traversal, otherwise a non-null value which
b5e10eac
AH
3365 this function will return as well. TRAVERSE_CLONES is true if we
3366 should traverse transactional clones. */
398b1daa
AH
3367
3368static void *
3369expand_regions_1 (struct tm_region *region,
3370 void *(*callback)(struct tm_region *, void *),
b5e10eac
AH
3371 void *data,
3372 bool traverse_clones)
398b1daa
AH
3373{
3374 void *retval = NULL;
b5e10eac
AH
3375 if (region->exit_blocks
3376 || (traverse_clones && decl_is_tm_clone (current_function_decl)))
398b1daa
AH
3377 {
3378 retval = callback (region, data);
3379 if (retval)
3380 return retval;
3381 }
3382 if (region->inner)
3383 {
b5e10eac 3384 retval = expand_regions (region->inner, callback, data, traverse_clones);
398b1daa
AH
3385 if (retval)
3386 return retval;
3387 }
3388 return retval;
3389}
3390
3391/* Traverse the regions enclosed and including REGION. Execute
3392 CALLBACK for each region, passing DATA. CALLBACK returns NULL to
3393 continue the traversal, otherwise a non-null value which this
b5e10eac
AH
3394 function will return as well. TRAVERSE_CLONES is true if we should
3395 traverse transactional clones. */
398b1daa
AH
3396
3397static void *
3398expand_regions (struct tm_region *region,
3399 void *(*callback)(struct tm_region *, void *),
b5e10eac
AH
3400 void *data,
3401 bool traverse_clones)
398b1daa
AH
3402{
3403 void *retval = NULL;
3404 while (region)
3405 {
b5e10eac 3406 retval = expand_regions_1 (region, callback, data, traverse_clones);
398b1daa
AH
3407 if (retval)
3408 return retval;
3409 region = region->next;
3410 }
3411 return retval;
3412}
3413
0a35513e
AH
3414\f
3415/* A unique TM memory operation. */
526ceb68 3416struct tm_memop
0a35513e
AH
3417{
3418 /* Unique ID that all memory operations to the same location have. */
3419 unsigned int value_id;
3420 /* Address of load/store. */
3421 tree addr;
526ceb68 3422};
0a35513e 3423
4a8fb1a1
LC
3424/* TM memory operation hashtable helpers. */
3425
95fbe13e 3426struct tm_memop_hasher : free_ptr_hash <tm_memop>
4a8fb1a1 3427{
67f58944
TS
3428 static inline hashval_t hash (const tm_memop *);
3429 static inline bool equal (const tm_memop *, const tm_memop *);
4a8fb1a1
LC
3430};
3431
3432/* Htab support. Return a hash value for a `tm_memop'. */
3433inline hashval_t
67f58944 3434tm_memop_hasher::hash (const tm_memop *mem)
4a8fb1a1
LC
3435{
3436 tree addr = mem->addr;
3437 /* We drill down to the SSA_NAME/DECL for the hash, but equality is
3438 actually done with operand_equal_p (see tm_memop_eq). */
3439 if (TREE_CODE (addr) == ADDR_EXPR)
3440 addr = TREE_OPERAND (addr, 0);
3441 return iterative_hash_expr (addr, 0);
3442}
3443
3444/* Htab support. Return true if two tm_memop's are the same. */
3445inline bool
67f58944 3446tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2)
4a8fb1a1
LC
3447{
3448 return operand_equal_p (mem1->addr, mem2->addr, 0);
3449}
3450
0a35513e
AH
3451/* Sets for solving data flow equations in the memory optimization pass. */
3452struct tm_memopt_bitmaps
3453{
3454 /* Stores available to this BB upon entry. Basically, stores that
3455 dominate this BB. */
3456 bitmap store_avail_in;
3457 /* Stores available at the end of this BB. */
3458 bitmap store_avail_out;
3459 bitmap store_antic_in;
3460 bitmap store_antic_out;
3461 /* Reads available to this BB upon entry. Basically, reads that
3462 dominate this BB. */
3463 bitmap read_avail_in;
3464 /* Reads available at the end of this BB. */
3465 bitmap read_avail_out;
3466 /* Reads performed in this BB. */
3467 bitmap read_local;
3468 /* Writes performed in this BB. */
3469 bitmap store_local;
3470
3471 /* Temporary storage for pass. */
3472 /* Is the current BB in the worklist? */
3473 bool avail_in_worklist_p;
3474 /* Have we visited this BB? */
3475 bool visited_p;
3476};
3477
3478static bitmap_obstack tm_memopt_obstack;
3479
3480/* Unique counter for TM loads and stores. Loads and stores of the
3481 same address get the same ID. */
3482static unsigned int tm_memopt_value_id;
c203e8a7 3483static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
0a35513e
AH
3484
3485#define STORE_AVAIL_IN(BB) \
3486 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
3487#define STORE_AVAIL_OUT(BB) \
3488 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
3489#define STORE_ANTIC_IN(BB) \
3490 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
3491#define STORE_ANTIC_OUT(BB) \
3492 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
3493#define READ_AVAIL_IN(BB) \
3494 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
3495#define READ_AVAIL_OUT(BB) \
3496 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
3497#define READ_LOCAL(BB) \
3498 ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
3499#define STORE_LOCAL(BB) \
3500 ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
3501#define AVAIL_IN_WORKLIST_P(BB) \
3502 ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
3503#define BB_VISITED_P(BB) \
3504 ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
3505
0a35513e
AH
3506/* Given a TM load/store in STMT, return the value number for the address
3507 it accesses. */
3508
3509static unsigned int
355fe088 3510tm_memopt_value_number (gimple *stmt, enum insert_option op)
0a35513e
AH
3511{
3512 struct tm_memop tmpmem, *mem;
4a8fb1a1 3513 tm_memop **slot;
0a35513e
AH
3514
3515 gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
3516 tmpmem.addr = gimple_call_arg (stmt, 0);
c203e8a7 3517 slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
0a35513e 3518 if (*slot)
4a8fb1a1 3519 mem = *slot;
0a35513e
AH
3520 else if (op == INSERT)
3521 {
3522 mem = XNEW (struct tm_memop);
3523 *slot = mem;
3524 mem->value_id = tm_memopt_value_id++;
3525 mem->addr = tmpmem.addr;
3526 }
3527 else
3528 gcc_unreachable ();
3529 return mem->value_id;
3530}
3531
3532/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
3533
3534static void
3535tm_memopt_accumulate_memops (basic_block bb)
3536{
3537 gimple_stmt_iterator gsi;
3538
3539 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3540 {
355fe088 3541 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
3542 bitmap bits;
3543 unsigned int loc;
3544
3545 if (is_tm_store (stmt))
3546 bits = STORE_LOCAL (bb);
3547 else if (is_tm_load (stmt))
3548 bits = READ_LOCAL (bb);
3549 else
3550 continue;
3551
3552 loc = tm_memopt_value_number (stmt, INSERT);
3553 bitmap_set_bit (bits, loc);
3554 if (dump_file)
3555 {
3556 fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
3557 is_tm_load (stmt) ? "LOAD" : "STORE", loc,
3558 gimple_bb (stmt)->index);
ef6cb4c7 3559 print_generic_expr (dump_file, gimple_call_arg (stmt, 0));
0a35513e
AH
3560 fprintf (dump_file, "\n");
3561 }
3562 }
3563}
3564
3565/* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
3566
3567static void
3568dump_tm_memopt_set (const char *set_name, bitmap bits)
3569{
3570 unsigned i;
3571 bitmap_iterator bi;
3572 const char *comma = "";
3573
3574 fprintf (dump_file, "TM memopt: %s: [", set_name);
3575 EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
3576 {
c203e8a7 3577 hash_table<tm_memop_hasher>::iterator hi;
4a8fb1a1 3578 struct tm_memop *mem = NULL;
0a35513e
AH
3579
3580 /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
c203e8a7 3581 FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
0a35513e
AH
3582 if (mem->value_id == i)
3583 break;
3584 gcc_assert (mem->value_id == i);
3585 fprintf (dump_file, "%s", comma);
3586 comma = ", ";
ef6cb4c7 3587 print_generic_expr (dump_file, mem->addr);
0a35513e
AH
3588 }
3589 fprintf (dump_file, "]\n");
3590}
3591
3592/* Prettily dump all of the memopt sets in BLOCKS. */
3593
3594static void
9771b263 3595dump_tm_memopt_sets (vec<basic_block> blocks)
0a35513e
AH
3596{
3597 size_t i;
3598 basic_block bb;
3599
9771b263 3600 for (i = 0; blocks.iterate (i, &bb); ++i)
0a35513e
AH
3601 {
3602 fprintf (dump_file, "------------BB %d---------\n", bb->index);
3603 dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
3604 dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
3605 dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
3606 dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
3607 dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
3608 dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
3609 }
3610}
3611
3612/* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
3613
3614static void
3615tm_memopt_compute_avin (basic_block bb)
3616{
3617 edge e;
3618 unsigned ix;
3619
3620 /* Seed with the AVOUT of any predecessor. */
3621 for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
3622 {
3623 e = EDGE_PRED (bb, ix);
3624 /* Make sure we have already visited this BB, and is thus
3625 initialized.
3626
3627 If e->src->aux is NULL, this predecessor is actually on an
3628 enclosing transaction. We only care about the current
3629 transaction, so ignore it. */
3630 if (e->src->aux && BB_VISITED_P (e->src))
3631 {
3632 bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3633 bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3634 break;
3635 }
3636 }
3637
3638 for (; ix < EDGE_COUNT (bb->preds); ix++)
3639 {
3640 e = EDGE_PRED (bb, ix);
3641 if (e->src->aux && BB_VISITED_P (e->src))
3642 {
3643 bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
3644 bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
3645 }
3646 }
3647
3648 BB_VISITED_P (bb) = true;
3649}
3650
3651/* Compute the STORE_ANTIC_IN for the basic block BB. */
3652
3653static void
3654tm_memopt_compute_antin (basic_block bb)
3655{
3656 edge e;
3657 unsigned ix;
3658
3659 /* Seed with the ANTIC_OUT of any successor. */
3660 for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
3661 {
3662 e = EDGE_SUCC (bb, ix);
3663 /* Make sure we have already visited this BB, and is thus
3664 initialized. */
3665 if (BB_VISITED_P (e->dest))
3666 {
3667 bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3668 break;
3669 }
3670 }
3671
3672 for (; ix < EDGE_COUNT (bb->succs); ix++)
3673 {
3674 e = EDGE_SUCC (bb, ix);
3675 if (BB_VISITED_P (e->dest))
3676 bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
3677 }
3678
3679 BB_VISITED_P (bb) = true;
3680}
3681
3682/* Compute the AVAIL sets for every basic block in BLOCKS.
3683
3684 We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3685
3686 AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3687 AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
3688
3689 This is basically what we do in lcm's compute_available(), but here
3690 we calculate two sets of sets (one for STOREs and one for READs),
3691 and we work on a region instead of the entire CFG.
3692
3693 REGION is the TM region.
3694 BLOCKS are the basic blocks in the region. */
3695
3696static void
3697tm_memopt_compute_available (struct tm_region *region,
9771b263 3698 vec<basic_block> blocks)
0a35513e
AH
3699{
3700 edge e;
3701 basic_block *worklist, *qin, *qout, *qend, bb;
3702 unsigned int qlen, i;
3703 edge_iterator ei;
3704 bool changed;
3705
3706 /* Allocate a worklist array/queue. Entries are only added to the
3707 list if they were not already on the list. So the size is
3708 bounded by the number of basic blocks in the region. */
9f95c526 3709 gcc_assert (!blocks.is_empty ());
9771b263 3710 qlen = blocks.length () - 1;
9f95c526 3711 qin = qout = worklist = XNEWVEC (basic_block, qlen);
0a35513e
AH
3712
3713 /* Put every block in the region on the worklist. */
9771b263 3714 for (i = 0; blocks.iterate (i, &bb); ++i)
0a35513e
AH
3715 {
3716 /* Seed AVAIL_OUT with the LOCAL set. */
3717 bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
3718 bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
3719
3720 AVAIL_IN_WORKLIST_P (bb) = true;
3721 /* No need to insert the entry block, since it has an AVIN of
3722 null, and an AVOUT that has already been seeded in. */
3723 if (bb != region->entry_block)
3724 *qin++ = bb;
3725 }
3726
3727 /* The entry block has been initialized with the local sets. */
3728 BB_VISITED_P (region->entry_block) = true;
3729
3730 qin = worklist;
3731 qend = &worklist[qlen];
3732
3733 /* Iterate until the worklist is empty. */
3734 while (qlen)
3735 {
3736 /* Take the first entry off the worklist. */
3737 bb = *qout++;
3738 qlen--;
3739
3740 if (qout >= qend)
3741 qout = worklist;
3742
3743 /* This block can be added to the worklist again if necessary. */
3744 AVAIL_IN_WORKLIST_P (bb) = false;
3745 tm_memopt_compute_avin (bb);
3746
3747 /* Note: We do not add the LOCAL sets here because we already
3748 seeded the AVAIL_OUT sets with them. */
3749 changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
3750 changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
3751 if (changed
3752 && (region->exit_blocks == NULL
3753 || !bitmap_bit_p (region->exit_blocks, bb->index)))
3754 /* If the out state of this block changed, then we need to add
3755 its successors to the worklist if they are not already in. */
3756 FOR_EACH_EDGE (e, ei, bb->succs)
fefa31b5
DM
3757 if (!AVAIL_IN_WORKLIST_P (e->dest)
3758 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
0a35513e
AH
3759 {
3760 *qin++ = e->dest;
3761 AVAIL_IN_WORKLIST_P (e->dest) = true;
3762 qlen++;
3763
3764 if (qin >= qend)
3765 qin = worklist;
3766 }
3767 }
3768
3769 free (worklist);
3770
3771 if (dump_file)
3772 dump_tm_memopt_sets (blocks);
3773}
3774
3775/* Compute ANTIC sets for every basic block in BLOCKS.
3776
3777 We compute STORE_ANTIC_OUT as follows:
3778
3779 STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
3780 STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
3781
3782 REGION is the TM region.
3783 BLOCKS are the basic blocks in the region. */
3784
3785static void
3786tm_memopt_compute_antic (struct tm_region *region,
9771b263 3787 vec<basic_block> blocks)
0a35513e
AH
3788{
3789 edge e;
3790 basic_block *worklist, *qin, *qout, *qend, bb;
3791 unsigned int qlen;
3792 int i;
3793 edge_iterator ei;
3794
3795 /* Allocate a worklist array/queue. Entries are only added to the
3796 list if they were not already on the list. So the size is
3797 bounded by the number of basic blocks in the region. */
9771b263 3798 qin = qout = worklist = XNEWVEC (basic_block, blocks.length ());
0a35513e 3799
9771b263 3800 for (qlen = 0, i = blocks.length () - 1; i >= 0; --i)
0a35513e 3801 {
9771b263 3802 bb = blocks[i];
0a35513e
AH
3803
3804 /* Seed ANTIC_OUT with the LOCAL set. */
3805 bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
3806
3807 /* Put every block in the region on the worklist. */
3808 AVAIL_IN_WORKLIST_P (bb) = true;
3809 /* No need to insert exit blocks, since their ANTIC_IN is NULL,
3810 and their ANTIC_OUT has already been seeded in. */
3811 if (region->exit_blocks
3812 && !bitmap_bit_p (region->exit_blocks, bb->index))
3813 {
3814 qlen++;
3815 *qin++ = bb;
3816 }
3817 }
3818
3819 /* The exit blocks have been initialized with the local sets. */
3820 if (region->exit_blocks)
3821 {
3822 unsigned int i;
3823 bitmap_iterator bi;
3824 EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
06e28de2 3825 BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true;
0a35513e
AH
3826 }
3827
3828 qin = worklist;
3829 qend = &worklist[qlen];
3830
3831 /* Iterate until the worklist is empty. */
3832 while (qlen)
3833 {
3834 /* Take the first entry off the worklist. */
3835 bb = *qout++;
3836 qlen--;
3837
3838 if (qout >= qend)
3839 qout = worklist;
3840
3841 /* This block can be added to the worklist again if necessary. */
3842 AVAIL_IN_WORKLIST_P (bb) = false;
3843 tm_memopt_compute_antin (bb);
3844
3845 /* Note: We do not add the LOCAL sets here because we already
3846 seeded the ANTIC_OUT sets with them. */
3847 if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
3848 && bb != region->entry_block)
3849 /* If the out state of this block changed, then we need to add
3850 its predecessors to the worklist if they are not already in. */
3851 FOR_EACH_EDGE (e, ei, bb->preds)
3852 if (!AVAIL_IN_WORKLIST_P (e->src))
3853 {
3854 *qin++ = e->src;
3855 AVAIL_IN_WORKLIST_P (e->src) = true;
3856 qlen++;
3857
3858 if (qin >= qend)
3859 qin = worklist;
3860 }
3861 }
3862
3863 free (worklist);
3864
3865 if (dump_file)
3866 dump_tm_memopt_sets (blocks);
3867}
3868
3869/* Offsets of load variants from TM_LOAD. For example,
3870 BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
3871 See gtm-builtins.def. */
3872#define TRANSFORM_RAR 1
3873#define TRANSFORM_RAW 2
3874#define TRANSFORM_RFW 3
3875/* Offsets of store variants from TM_STORE. */
3876#define TRANSFORM_WAR 1
3877#define TRANSFORM_WAW 2
3878
3879/* Inform about a load/store optimization. */
3880
3881static void
355fe088 3882dump_tm_memopt_transform (gimple *stmt)
0a35513e
AH
3883{
3884 if (dump_file)
3885 {
3886 fprintf (dump_file, "TM memopt: transforming: ");
ef6cb4c7 3887 print_gimple_stmt (dump_file, stmt, 0);
0a35513e
AH
3888 fprintf (dump_file, "\n");
3889 }
3890}
3891
3892/* Perform a read/write optimization. Replaces the TM builtin in STMT
3893 by a builtin that is OFFSET entries down in the builtins table in
3894 gtm-builtins.def. */
3895
3896static void
3897tm_memopt_transform_stmt (unsigned int offset,
538dd0b7 3898 gcall *stmt,
0a35513e
AH
3899 gimple_stmt_iterator *gsi)
3900{
3901 tree fn = gimple_call_fn (stmt);
3902 gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
3903 TREE_OPERAND (fn, 0)
3904 = builtin_decl_explicit ((enum built_in_function)
3905 (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
3906 + offset));
3907 gimple_call_set_fn (stmt, fn);
3908 gsi_replace (gsi, stmt, true);
3909 dump_tm_memopt_transform (stmt);
3910}
3911
3912/* Perform the actual TM memory optimization transformations in the
3913 basic blocks in BLOCKS. */
3914
3915static void
9771b263 3916tm_memopt_transform_blocks (vec<basic_block> blocks)
0a35513e
AH
3917{
3918 size_t i;
3919 basic_block bb;
3920 gimple_stmt_iterator gsi;
3921
9771b263 3922 for (i = 0; blocks.iterate (i, &bb); ++i)
0a35513e
AH
3923 {
3924 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3925 {
355fe088 3926 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
3927 bitmap read_avail = READ_AVAIL_IN (bb);
3928 bitmap store_avail = STORE_AVAIL_IN (bb);
3929 bitmap store_antic = STORE_ANTIC_OUT (bb);
3930 unsigned int loc;
3931
3932 if (is_tm_simple_load (stmt))
3933 {
538dd0b7 3934 gcall *call_stmt = as_a <gcall *> (stmt);
0a35513e
AH
3935 loc = tm_memopt_value_number (stmt, NO_INSERT);
3936 if (store_avail && bitmap_bit_p (store_avail, loc))
538dd0b7 3937 tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi);
0a35513e
AH
3938 else if (store_antic && bitmap_bit_p (store_antic, loc))
3939 {
538dd0b7 3940 tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi);
0a35513e
AH
3941 bitmap_set_bit (store_avail, loc);
3942 }
3943 else if (read_avail && bitmap_bit_p (read_avail, loc))
538dd0b7 3944 tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi);
0a35513e
AH
3945 else
3946 bitmap_set_bit (read_avail, loc);
3947 }
3948 else if (is_tm_simple_store (stmt))
3949 {
538dd0b7 3950 gcall *call_stmt = as_a <gcall *> (stmt);
0a35513e
AH
3951 loc = tm_memopt_value_number (stmt, NO_INSERT);
3952 if (store_avail && bitmap_bit_p (store_avail, loc))
538dd0b7 3953 tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi);
0a35513e
AH
3954 else
3955 {
3956 if (read_avail && bitmap_bit_p (read_avail, loc))
538dd0b7 3957 tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi);
0a35513e
AH
3958 bitmap_set_bit (store_avail, loc);
3959 }
3960 }
3961 }
3962 }
3963}
3964
3965/* Return a new set of bitmaps for a BB. */
3966
3967static struct tm_memopt_bitmaps *
3968tm_memopt_init_sets (void)
3969{
3970 struct tm_memopt_bitmaps *b
3971 = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
3972 b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3973 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3974 b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
3975 b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
3976 b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3977 b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
3978 b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
3979 b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
3980 b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
3981 return b;
3982}
3983
3984/* Free sets computed for each BB. */
3985
3986static void
9771b263 3987tm_memopt_free_sets (vec<basic_block> blocks)
0a35513e
AH
3988{
3989 size_t i;
3990 basic_block bb;
3991
9771b263 3992 for (i = 0; blocks.iterate (i, &bb); ++i)
0a35513e
AH
3993 bb->aux = NULL;
3994}
3995
3996/* Clear the visited bit for every basic block in BLOCKS. */
3997
3998static void
9771b263 3999tm_memopt_clear_visited (vec<basic_block> blocks)
0a35513e
AH
4000{
4001 size_t i;
4002 basic_block bb;
4003
9771b263 4004 for (i = 0; blocks.iterate (i, &bb); ++i)
0a35513e
AH
4005 BB_VISITED_P (bb) = false;
4006}
4007
4008/* Replace TM load/stores with hints for the runtime. We handle
4009 things like read-after-write, write-after-read, read-after-read,
4010 read-for-write, etc. */
4011
4012static unsigned int
4013execute_tm_memopt (void)
4014{
4015 struct tm_region *region;
9771b263 4016 vec<basic_block> bbs;
0a35513e
AH
4017
4018 tm_memopt_value_id = 0;
c203e8a7 4019 tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
0a35513e
AH
4020
4021 for (region = all_tm_regions; region; region = region->next)
4022 {
4023 /* All the TM stores/loads in the current region. */
4024 size_t i;
4025 basic_block bb;
4026
4027 bitmap_obstack_initialize (&tm_memopt_obstack);
4028
4029 /* Save all BBs for the current region. */
4030 bbs = get_tm_region_blocks (region->entry_block,
4031 region->exit_blocks,
4032 region->irr_blocks,
4033 NULL,
4034 false);
4035
4036 /* Collect all the memory operations. */
9771b263 4037 for (i = 0; bbs.iterate (i, &bb); ++i)
0a35513e
AH
4038 {
4039 bb->aux = tm_memopt_init_sets ();
4040 tm_memopt_accumulate_memops (bb);
4041 }
4042
4043 /* Solve data flow equations and transform each block accordingly. */
4044 tm_memopt_clear_visited (bbs);
4045 tm_memopt_compute_available (region, bbs);
4046 tm_memopt_clear_visited (bbs);
4047 tm_memopt_compute_antic (region, bbs);
4048 tm_memopt_transform_blocks (bbs);
4049
4050 tm_memopt_free_sets (bbs);
9771b263 4051 bbs.release ();
0a35513e 4052 bitmap_obstack_release (&tm_memopt_obstack);
c203e8a7 4053 tm_memopt_value_numbers->empty ();
0a35513e
AH
4054 }
4055
c203e8a7
TS
4056 delete tm_memopt_value_numbers;
4057 tm_memopt_value_numbers = NULL;
0a35513e
AH
4058 return 0;
4059}
4060
17795822
TS
4061namespace {
4062
4063const pass_data pass_data_tm_memopt =
27a4cd48
DM
4064{
4065 GIMPLE_PASS, /* type */
4066 "tmmemopt", /* name */
4067 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
4068 TV_TRANS_MEM, /* tv_id */
4069 ( PROP_ssa | PROP_cfg ), /* properties_required */
4070 0, /* properties_provided */
4071 0, /* properties_destroyed */
4072 0, /* todo_flags_start */
4073 0, /* todo_flags_finish */
0a35513e
AH
4074};
4075
17795822 4076class pass_tm_memopt : public gimple_opt_pass
27a4cd48
DM
4077{
4078public:
c3284718
RS
4079 pass_tm_memopt (gcc::context *ctxt)
4080 : gimple_opt_pass (pass_data_tm_memopt, ctxt)
27a4cd48
DM
4081 {}
4082
4083 /* opt_pass methods: */
1a3d085c 4084 virtual bool gate (function *) { return flag_tm && optimize > 0; }
be55bfe6 4085 virtual unsigned int execute (function *) { return execute_tm_memopt (); }
27a4cd48
DM
4086
4087}; // class pass_tm_memopt
4088
17795822
TS
4089} // anon namespace
4090
27a4cd48
DM
4091gimple_opt_pass *
4092make_pass_tm_memopt (gcc::context *ctxt)
4093{
4094 return new pass_tm_memopt (ctxt);
4095}
4096
0a35513e
AH
4097\f
4098/* Interprocedual analysis for the creation of transactional clones.
4099 The aim of this pass is to find which functions are referenced in
4100 a non-irrevocable transaction context, and for those over which
4101 we have control (or user directive), create a version of the
4102 function which uses only the transactional interface to reference
4103 protected memories. This analysis proceeds in several steps:
4104
4105 (1) Collect the set of all possible transactional clones:
4106
4107 (a) For all local public functions marked tm_callable, push
4108 it onto the tm_callee queue.
4109
4110 (b) For all local functions, scan for calls in transaction blocks.
4111 Push the caller and callee onto the tm_caller and tm_callee
4112 queues. Count the number of callers for each callee.
4113
4114 (c) For each local function on the callee list, assume we will
4115 create a transactional clone. Push *all* calls onto the
4116 callee queues; count the number of clone callers separately
4117 to the number of original callers.
4118
4119 (2) Propagate irrevocable status up the dominator tree:
4120
4121 (a) Any external function on the callee list that is not marked
4122 tm_callable is irrevocable. Push all callers of such onto
4123 a worklist.
4124
4125 (b) For each function on the worklist, mark each block that
4126 contains an irrevocable call. Use the AND operator to
4127 propagate that mark up the dominator tree.
4128
4129 (c) If we reach the entry block for a possible transactional
4130 clone, then the transactional clone is irrevocable, and
4131 we should not create the clone after all. Push all
4132 callers onto the worklist.
4133
4134 (d) Place tm_irrevocable calls at the beginning of the relevant
4135 blocks. Special case here is the entry block for the entire
4136 transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
4137 the library to begin the region in serial mode. Decrement
4138 the call count for all callees in the irrevocable region.
4139
4140 (3) Create the transactional clones:
4141
4142 Any tm_callee that still has a non-zero call count is cloned.
4143*/
4144
4145/* This structure is stored in the AUX field of each cgraph_node. */
4146struct tm_ipa_cg_data
4147{
4148 /* The clone of the function that got created. */
4149 struct cgraph_node *clone;
4150
4151 /* The tm regions in the normal function. */
4152 struct tm_region *all_tm_regions;
4153
4154 /* The blocks of the normal/clone functions that contain irrevocable
4155 calls, or blocks that are post-dominated by irrevocable calls. */
4156 bitmap irrevocable_blocks_normal;
4157 bitmap irrevocable_blocks_clone;
4158
4159 /* The blocks of the normal function that are involved in transactions. */
4160 bitmap transaction_blocks_normal;
4161
4162 /* The number of callers to the transactional clone of this function
4163 from normal and transactional clones respectively. */
4164 unsigned tm_callers_normal;
4165 unsigned tm_callers_clone;
4166
4167 /* True if all calls to this function's transactional clone
4168 are irrevocable. Also automatically true if the function
4169 has no transactional clone. */
4170 bool is_irrevocable;
4171
4172 /* Flags indicating the presence of this function in various queues. */
4173 bool in_callee_queue;
4174 bool in_worklist;
4175
4176 /* Flags indicating the kind of scan desired while in the worklist. */
4177 bool want_irr_scan_normal;
4178};
4179
d52f5295 4180typedef vec<cgraph_node *> cgraph_node_queue;
0a35513e
AH
4181
4182/* Return the ipa data associated with NODE, allocating zeroed memory
594ec92f
AH
4183 if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
4184 and set *NODE accordingly. */
0a35513e
AH
4185
4186static struct tm_ipa_cg_data *
594ec92f 4187get_cg_data (struct cgraph_node **node, bool traverse_aliases)
0a35513e 4188{
594ec92f
AH
4189 struct tm_ipa_cg_data *d;
4190
67348ccc 4191 if (traverse_aliases && (*node)->alias)
d52f5295 4192 *node = (*node)->get_alias_target ();
594ec92f 4193
67348ccc 4194 d = (struct tm_ipa_cg_data *) (*node)->aux;
0a35513e
AH
4195
4196 if (d == NULL)
4197 {
4198 d = (struct tm_ipa_cg_data *)
4199 obstack_alloc (&tm_obstack.obstack, sizeof (*d));
67348ccc 4200 (*node)->aux = (void *) d;
0a35513e
AH
4201 memset (d, 0, sizeof (*d));
4202 }
4203
4204 return d;
4205}
4206
4207/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
4208 it is already present. */
4209
4210static void
4211maybe_push_queue (struct cgraph_node *node,
4212 cgraph_node_queue *queue_p, bool *in_queue_p)
4213{
4214 if (!*in_queue_p)
4215 {
4216 *in_queue_p = true;
9771b263 4217 queue_p->safe_push (node);
0a35513e
AH
4218 }
4219}
4220
4221/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
4222 Queue all callees within block BB. */
4223
4224static void
4225ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
4226 basic_block bb, bool for_clone)
4227{
4228 gimple_stmt_iterator gsi;
4229
4230 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4231 {
355fe088 4232 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
4233 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4234 {
4235 tree fndecl = gimple_call_fndecl (stmt);
4236 if (fndecl)
4237 {
4238 struct tm_ipa_cg_data *d;
4239 unsigned *pcallers;
4240 struct cgraph_node *node;
4241
4242 if (is_tm_ending_fndecl (fndecl))
4243 continue;
4244 if (find_tm_replacement_function (fndecl))
4245 continue;
4246
d52f5295 4247 node = cgraph_node::get (fndecl);
0a35513e 4248 gcc_assert (node != NULL);
594ec92f 4249 d = get_cg_data (&node, true);
0a35513e
AH
4250
4251 pcallers = (for_clone ? &d->tm_callers_clone
4252 : &d->tm_callers_normal);
4253 *pcallers += 1;
4254
4255 maybe_push_queue (node, callees_p, &d->in_callee_queue);
4256 }
4257 }
4258 }
4259}
4260
4261/* Scan all calls in NODE that are within a transaction region,
4262 and push the resulting nodes into the callee queue. */
4263
4264static void
4265ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
4266 cgraph_node_queue *callees_p)
4267{
0a35513e
AH
4268 d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
4269 d->all_tm_regions = all_tm_regions;
4270
7c11b0fe 4271 for (tm_region *r = all_tm_regions; r; r = r->next)
0a35513e 4272 {
9771b263 4273 vec<basic_block> bbs;
0a35513e
AH
4274 basic_block bb;
4275 unsigned i;
4276
4277 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
7c11b0fe 4278 d->transaction_blocks_normal, false, false);
398b1daa 4279
9771b263 4280 FOR_EACH_VEC_ELT (bbs, i, bb)
0a35513e
AH
4281 ipa_tm_scan_calls_block (callees_p, bb, false);
4282
9771b263 4283 bbs.release ();
0a35513e
AH
4284 }
4285}
4286
4287/* Scan all calls in NODE as if this is the transactional clone,
4288 and push the destinations into the callee queue. */
4289
4290static void
4291ipa_tm_scan_calls_clone (struct cgraph_node *node,
4292 cgraph_node_queue *callees_p)
4293{
67348ccc 4294 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
0a35513e
AH
4295 basic_block bb;
4296
4297 FOR_EACH_BB_FN (bb, fn)
4298 ipa_tm_scan_calls_block (callees_p, bb, true);
4299}
4300
4301/* The function NODE has been detected to be irrevocable. Push all
4302 of its callers onto WORKLIST for the purpose of re-scanning them. */
4303
4304static void
4305ipa_tm_note_irrevocable (struct cgraph_node *node,
4306 cgraph_node_queue *worklist_p)
4307{
594ec92f 4308 struct tm_ipa_cg_data *d = get_cg_data (&node, true);
0a35513e
AH
4309 struct cgraph_edge *e;
4310
4311 d->is_irrevocable = true;
4312
4313 for (e = node->callers; e ; e = e->next_caller)
4314 {
4315 basic_block bb;
594ec92f 4316 struct cgraph_node *caller;
0a35513e
AH
4317
4318 /* Don't examine recursive calls. */
4319 if (e->caller == node)
4320 continue;
4321 /* Even if we think we can go irrevocable, believe the user
4322 above all. */
67348ccc 4323 if (is_tm_safe_or_pure (e->caller->decl))
0a35513e
AH
4324 continue;
4325
594ec92f
AH
4326 caller = e->caller;
4327 d = get_cg_data (&caller, true);
0a35513e
AH
4328
4329 /* Check if the callee is in a transactional region. If so,
4330 schedule the function for normal re-scan as well. */
4331 bb = gimple_bb (e->call_stmt);
4332 gcc_assert (bb != NULL);
4333 if (d->transaction_blocks_normal
4334 && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
4335 d->want_irr_scan_normal = true;
4336
594ec92f 4337 maybe_push_queue (caller, worklist_p, &d->in_worklist);
0a35513e
AH
4338 }
4339}
4340
4341/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
4342 within the block is irrevocable. */
4343
4344static bool
4345ipa_tm_scan_irr_block (basic_block bb)
4346{
4347 gimple_stmt_iterator gsi;
4348 tree fn;
4349
4350 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4351 {
355fe088 4352 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
4353 switch (gimple_code (stmt))
4354 {
a3770d3b
AH
4355 case GIMPLE_ASSIGN:
4356 if (gimple_assign_single_p (stmt))
4357 {
4358 tree lhs = gimple_assign_lhs (stmt);
4359 tree rhs = gimple_assign_rhs1 (stmt);
1d4afc04 4360 if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs))
a3770d3b
AH
4361 return true;
4362 }
4363 break;
4364
0a35513e 4365 case GIMPLE_CALL:
a3770d3b
AH
4366 {
4367 tree lhs = gimple_call_lhs (stmt);
1d4afc04 4368 if (lhs && volatile_lvalue_p (lhs))
a3770d3b 4369 return true;
0a35513e 4370
a3770d3b
AH
4371 if (is_tm_pure_call (stmt))
4372 break;
0a35513e 4373
a3770d3b 4374 fn = gimple_call_fn (stmt);
0a35513e 4375
a3770d3b
AH
4376 /* Functions with the attribute are by definition irrevocable. */
4377 if (is_tm_irrevocable (fn))
4378 return true;
0a35513e 4379
a3770d3b
AH
4380 /* For direct function calls, go ahead and check for replacement
4381 functions, or transitive irrevocable functions. For indirect
4382 functions, we'll ask the runtime. */
4383 if (TREE_CODE (fn) == ADDR_EXPR)
4384 {
4385 struct tm_ipa_cg_data *d;
4386 struct cgraph_node *node;
0a35513e 4387
a3770d3b
AH
4388 fn = TREE_OPERAND (fn, 0);
4389 if (is_tm_ending_fndecl (fn))
4390 break;
4391 if (find_tm_replacement_function (fn))
4392 break;
80fd8eba 4393
d52f5295 4394 node = cgraph_node::get (fn);
a3770d3b
AH
4395 d = get_cg_data (&node, true);
4396
4397 /* Return true if irrevocable, but above all, believe
4398 the user. */
4399 if (d->is_irrevocable
4400 && !is_tm_safe_or_pure (fn))
4401 return true;
4402 }
4403 break;
4404 }
0a35513e
AH
4405
4406 case GIMPLE_ASM:
4407 /* ??? The Approved Method of indicating that an inline
4408 assembly statement is not relevant to the transaction
4409 is to wrap it in a __tm_waiver block. This is not
4410 yet implemented, so we can't check for it. */
a4d031c7
AH
4411 if (is_tm_safe (current_function_decl))
4412 {
4413 tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
4414 SET_EXPR_LOCATION (t, gimple_location (stmt));
a9c697b8
MS
4415 error ("%K%<asm%> not allowed in %<transaction_safe%> function",
4416 t);
a4d031c7 4417 }
0a35513e
AH
4418 return true;
4419
4420 default:
4421 break;
4422 }
4423 }
4424
4425 return false;
4426}
4427
4428/* For each of the blocks seeded witin PQUEUE, walk the CFG looking
4429 for new irrevocable blocks, marking them in NEW_IRR. Don't bother
4430 scanning past OLD_IRR or EXIT_BLOCKS. */
4431
4432static bool
9771b263 4433ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr,
0a35513e
AH
4434 bitmap old_irr, bitmap exit_blocks)
4435{
4436 bool any_new_irr = false;
4437 edge e;
4438 edge_iterator ei;
4439 bitmap visited_blocks = BITMAP_ALLOC (NULL);
4440
4441 do
4442 {
9771b263 4443 basic_block bb = pqueue->pop ();
0a35513e
AH
4444
4445 /* Don't re-scan blocks we know already are irrevocable. */
4446 if (old_irr && bitmap_bit_p (old_irr, bb->index))
4447 continue;
4448
4449 if (ipa_tm_scan_irr_block (bb))
4450 {
4451 bitmap_set_bit (new_irr, bb->index);
4452 any_new_irr = true;
4453 }
4454 else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
4455 {
4456 FOR_EACH_EDGE (e, ei, bb->succs)
4457 if (!bitmap_bit_p (visited_blocks, e->dest->index))
4458 {
4459 bitmap_set_bit (visited_blocks, e->dest->index);
9771b263 4460 pqueue->safe_push (e->dest);
0a35513e
AH
4461 }
4462 }
4463 }
9771b263 4464 while (!pqueue->is_empty ());
0a35513e
AH
4465
4466 BITMAP_FREE (visited_blocks);
4467
4468 return any_new_irr;
4469}
4470
4471/* Propagate the irrevocable property both up and down the dominator tree.
4472 BB is the current block being scanned; EXIT_BLOCKS are the edges of the
4473 TM regions; OLD_IRR are the results of a previous scan of the dominator
4474 tree which has been fully propagated; NEW_IRR is the set of new blocks
4475 which are gaining the irrevocable property during the current scan. */
4476
4477static void
4478ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
4479 bitmap old_irr, bitmap exit_blocks)
4480{
9771b263 4481 vec<basic_block> bbs;
0a35513e
AH
4482 bitmap all_region_blocks;
4483
4484 /* If this block is in the old set, no need to rescan. */
4485 if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
4486 return;
4487
4488 all_region_blocks = BITMAP_ALLOC (&tm_obstack);
4489 bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
4490 all_region_blocks, false);
4491 do
4492 {
9771b263 4493 basic_block bb = bbs.pop ();
0a35513e
AH
4494 bool this_irr = bitmap_bit_p (new_irr, bb->index);
4495 bool all_son_irr = false;
4496 edge_iterator ei;
4497 edge e;
4498
4499 /* Propagate up. If my children are, I am too, but we must have
4500 at least one child that is. */
4501 if (!this_irr)
4502 {
4503 FOR_EACH_EDGE (e, ei, bb->succs)
4504 {
4505 if (!bitmap_bit_p (new_irr, e->dest->index))
4506 {
4507 all_son_irr = false;
4508 break;
4509 }
4510 else
4511 all_son_irr = true;
4512 }
4513 if (all_son_irr)
4514 {
4515 /* Add block to new_irr if it hasn't already been processed. */
4516 if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
4517 {
4518 bitmap_set_bit (new_irr, bb->index);
4519 this_irr = true;
4520 }
4521 }
4522 }
4523
4524 /* Propagate down to everyone we immediately dominate. */
4525 if (this_irr)
4526 {
4527 basic_block son;
4528 for (son = first_dom_son (CDI_DOMINATORS, bb);
4529 son;
4530 son = next_dom_son (CDI_DOMINATORS, son))
4531 {
4532 /* Make sure block is actually in a TM region, and it
4533 isn't already in old_irr. */
4534 if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
4535 && bitmap_bit_p (all_region_blocks, son->index))
4536 bitmap_set_bit (new_irr, son->index);
4537 }
4538 }
4539 }
9771b263 4540 while (!bbs.is_empty ());
0a35513e
AH
4541
4542 BITMAP_FREE (all_region_blocks);
9771b263 4543 bbs.release ();
0a35513e
AH
4544}
4545
4546static void
4547ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
4548{
4549 gimple_stmt_iterator gsi;
4550
4551 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4552 {
355fe088 4553 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
4554 if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
4555 {
4556 tree fndecl = gimple_call_fndecl (stmt);
4557 if (fndecl)
4558 {
4559 struct tm_ipa_cg_data *d;
4560 unsigned *pcallers;
594ec92f 4561 struct cgraph_node *tnode;
0a35513e
AH
4562
4563 if (is_tm_ending_fndecl (fndecl))
4564 continue;
4565 if (find_tm_replacement_function (fndecl))
4566 continue;
4567
d52f5295 4568 tnode = cgraph_node::get (fndecl);
594ec92f
AH
4569 d = get_cg_data (&tnode, true);
4570
0a35513e
AH
4571 pcallers = (for_clone ? &d->tm_callers_clone
4572 : &d->tm_callers_normal);
4573
4574 gcc_assert (*pcallers > 0);
4575 *pcallers -= 1;
4576 }
4577 }
4578 }
4579}
4580
4581/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
4582 as well as other irrevocable actions such as inline assembly. Mark all
4583 such blocks as irrevocable and decrement the number of calls to
4584 transactional clones. Return true if, for the transactional clone, the
4585 entire function is irrevocable. */
4586
4587static bool
4588ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
4589{
4590 struct tm_ipa_cg_data *d;
4591 bitmap new_irr, old_irr;
0a35513e
AH
4592 bool ret = false;
4593
80fd8eba 4594 /* Builtin operators (operator new, and such). */
67348ccc
DM
4595 if (DECL_STRUCT_FUNCTION (node->decl) == NULL
4596 || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
80fd8eba
AH
4597 return false;
4598
67348ccc 4599 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
0a35513e
AH
4600 calculate_dominance_info (CDI_DOMINATORS);
4601
594ec92f 4602 d = get_cg_data (&node, true);
00f96dc9 4603 auto_vec<basic_block, 10> queue;
0a35513e
AH
4604 new_irr = BITMAP_ALLOC (&tm_obstack);
4605
4606 /* Scan each tm region, propagating irrevocable status through the tree. */
4607 if (for_clone)
4608 {
4609 old_irr = d->irrevocable_blocks_clone;
fefa31b5 4610 queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
0a35513e
AH
4611 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
4612 {
fefa31b5
DM
4613 ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
4614 new_irr,
0a35513e 4615 old_irr, NULL);
fefa31b5
DM
4616 ret = bitmap_bit_p (new_irr,
4617 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
0a35513e
AH
4618 }
4619 }
4620 else
4621 {
4622 struct tm_region *region;
4623
4624 old_irr = d->irrevocable_blocks_normal;
4625 for (region = d->all_tm_regions; region; region = region->next)
4626 {
9771b263 4627 queue.quick_push (region->entry_block);
0a35513e
AH
4628 if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
4629 region->exit_blocks))
4630 ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
4631 region->exit_blocks);
4632 }
4633 }
4634
4635 /* If we found any new irrevocable blocks, reduce the call count for
4636 transactional clones within the irrevocable blocks. Save the new
4637 set of irrevocable blocks for next time. */
4638 if (!bitmap_empty_p (new_irr))
4639 {
4640 bitmap_iterator bmi;
4641 unsigned i;
4642
4643 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
06e28de2
DM
4644 ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
4645 for_clone);
0a35513e
AH
4646
4647 if (old_irr)
4648 {
4649 bitmap_ior_into (old_irr, new_irr);
4650 BITMAP_FREE (new_irr);
4651 }
4652 else if (for_clone)
4653 d->irrevocable_blocks_clone = new_irr;
4654 else
4655 d->irrevocable_blocks_normal = new_irr;
4656
4657 if (dump_file && new_irr)
4658 {
4659 const char *dname;
4660 bitmap_iterator bmi;
4661 unsigned i;
4662
4663 dname = lang_hooks.decl_printable_name (current_function_decl, 2);
4664 EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
4665 fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
4666 }
4667 }
4668 else
4669 BITMAP_FREE (new_irr);
4670
0a35513e 4671 pop_cfun ();
0a35513e
AH
4672
4673 return ret;
4674}
4675
4676/* Return true if, for the transactional clone of NODE, any call
4677 may enter irrevocable mode. */
4678
4679static bool
4680ipa_tm_mayenterirr_function (struct cgraph_node *node)
4681{
594ec92f
AH
4682 struct tm_ipa_cg_data *d;
4683 tree decl;
4684 unsigned flags;
4685
4686 d = get_cg_data (&node, true);
67348ccc 4687 decl = node->decl;
594ec92f 4688 flags = flags_from_decl_or_type (decl);
0a35513e
AH
4689
4690 /* Handle some TM builtins. Ordinarily these aren't actually generated
4691 at this point, but handling these functions when written in by the
4692 user makes it easier to build unit tests. */
4693 if (flags & ECF_TM_BUILTIN)
4694 return false;
4695
4696 /* Filter out all functions that are marked. */
4697 if (flags & ECF_TM_PURE)
4698 return false;
4699 if (is_tm_safe (decl))
4700 return false;
4701 if (is_tm_irrevocable (decl))
4702 return true;
4703 if (is_tm_callable (decl))
4704 return true;
4705 if (find_tm_replacement_function (decl))
4706 return true;
4707
4708 /* If we aren't seeing the final version of the function we don't
4709 know what it will contain at runtime. */
d52f5295 4710 if (node->get_availability () < AVAIL_AVAILABLE)
0a35513e
AH
4711 return true;
4712
4713 /* If the function must go irrevocable, then of course true. */
4714 if (d->is_irrevocable)
4715 return true;
4716
4717 /* If there are any blocks marked irrevocable, then the function
4718 as a whole may enter irrevocable. */
4719 if (d->irrevocable_blocks_clone)
4720 return true;
4721
4722 /* We may have previously marked this function as tm_may_enter_irr;
4723 see pass_diagnose_tm_blocks. */
87f94429 4724 if (node->tm_may_enter_irr)
0a35513e
AH
4725 return true;
4726
4727 /* Recurse on the main body for aliases. In general, this will
4728 result in one of the bits above being set so that we will not
4729 have to recurse next time. */
67348ccc 4730 if (node->alias)
d52f5295 4731 return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));
0a35513e
AH
4732
4733 /* What remains is unmarked local functions without items that force
4734 the function to go irrevocable. */
4735 return false;
4736}
4737
4738/* Diagnose calls from transaction_safe functions to unmarked
4739 functions that are determined to not be safe. */
4740
4741static void
4742ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
4743{
4744 struct cgraph_edge *e;
4745
4746 for (e = node->callees; e ; e = e->next_callee)
67348ccc 4747 if (!is_tm_callable (e->callee->decl)
87f94429 4748 && e->callee->tm_may_enter_irr)
0a35513e
AH
4749 error_at (gimple_location (e->call_stmt),
4750 "unsafe function call %qD within "
67348ccc 4751 "%<transaction_safe%> function", e->callee->decl);
0a35513e
AH
4752}
4753
4754/* Diagnose call from atomic transactions to unmarked functions
4755 that are determined to not be safe. */
4756
4757static void
4758ipa_tm_diagnose_transaction (struct cgraph_node *node,
4759 struct tm_region *all_tm_regions)
4760{
4761 struct tm_region *r;
4762
4763 for (r = all_tm_regions; r ; r = r->next)
538dd0b7
DM
4764 if (gimple_transaction_subcode (r->get_transaction_stmt ())
4765 & GTMA_IS_RELAXED)
0a35513e
AH
4766 {
4767 /* Atomic transactions can be nested inside relaxed. */
4768 if (r->inner)
4769 ipa_tm_diagnose_transaction (node, r->inner);
4770 }
4771 else
4772 {
9771b263 4773 vec<basic_block> bbs;
0a35513e
AH
4774 gimple_stmt_iterator gsi;
4775 basic_block bb;
4776 size_t i;
4777
4778 bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
4779 r->irr_blocks, NULL, false);
4780
9771b263 4781 for (i = 0; bbs.iterate (i, &bb); ++i)
0a35513e
AH
4782 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4783 {
355fe088 4784 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
4785 tree fndecl;
4786
4787 if (gimple_code (stmt) == GIMPLE_ASM)
4788 {
4789 error_at (gimple_location (stmt),
a9c697b8 4790 "%<asm%> not allowed in atomic transaction");
0a35513e
AH
4791 continue;
4792 }
4793
4794 if (!is_gimple_call (stmt))
4795 continue;
4796 fndecl = gimple_call_fndecl (stmt);
4797
4798 /* Indirect function calls have been diagnosed already. */
4799 if (!fndecl)
4800 continue;
4801
4802 /* Stop at the end of the transaction. */
4803 if (is_tm_ending_fndecl (fndecl))
4804 {
4805 if (bitmap_bit_p (r->exit_blocks, bb->index))
4806 break;
4807 continue;
4808 }
4809
4810 /* Marked functions have been diagnosed already. */
4811 if (is_tm_pure_call (stmt))
4812 continue;
4813 if (is_tm_callable (fndecl))
4814 continue;
4815
87f94429 4816 if (cgraph_node::local_info_node (fndecl)->tm_may_enter_irr)
0a35513e
AH
4817 error_at (gimple_location (stmt),
4818 "unsafe function call %qD within "
4819 "atomic transaction", fndecl);
4820 }
4821
9771b263 4822 bbs.release ();
0a35513e
AH
4823 }
4824}
4825
4826/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
4827 OLD_DECL. The returned value is a freshly malloced pointer that
4828 should be freed by the caller. */
4829
4830static tree
4831tm_mangle (tree old_asm_id)
4832{
4833 const char *old_asm_name;
4834 char *tm_name;
4835 void *alloc = NULL;
4836 struct demangle_component *dc;
4837 tree new_asm_id;
4838
4839 /* Determine if the symbol is already a valid C++ mangled name. Do this
4840 even for C, which might be interfacing with C++ code via appropriately
4841 ugly identifiers. */
4842 /* ??? We could probably do just as well checking for "_Z" and be done. */
4843 old_asm_name = IDENTIFIER_POINTER (old_asm_id);
4844 dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
4845
4846 if (dc == NULL)
4847 {
40ce7fa6 4848 char length[12];
0a35513e
AH
4849
4850 do_unencoded:
4851 sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
4852 tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
4853 }
4854 else
4855 {
4856 old_asm_name += 2; /* Skip _Z */
4857
4858 switch (dc->type)
4859 {
4860 case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
4861 case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
4862 /* Don't play silly games, you! */
4863 goto do_unencoded;
4864
4865 case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
4866 /* I'd really like to know if we can ever be passed one of
4867 these from the C++ front end. The Logical Thing would
4868 seem that hidden-alias should be outer-most, so that we
4869 get hidden-alias of a transaction-clone and not vice-versa. */
4870 old_asm_name += 2;
4871 break;
4872
4873 default:
4874 break;
4875 }
4876
4877 tm_name = concat ("_ZGTt", old_asm_name, NULL);
4878 }
4879 free (alloc);
4880
4881 new_asm_id = get_identifier (tm_name);
4882 free (tm_name);
4883
4884 return new_asm_id;
4885}
4886
4887static inline void
ead84f73 4888ipa_tm_mark_force_output_node (struct cgraph_node *node)
0a35513e 4889{
d52f5295 4890 node->mark_force_output ();
67348ccc 4891 node->analyzed = true;
0a35513e
AH
4892}
4893
edb983b2
JH
4894static inline void
4895ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
4896{
67348ccc
DM
4897 node->forced_by_abi = true;
4898 node->analyzed = true;
edb983b2
JH
4899}
4900
0a35513e
AH
4901/* Callback data for ipa_tm_create_version_alias. */
4902struct create_version_alias_info
4903{
4904 struct cgraph_node *old_node;
4905 tree new_decl;
4906};
4907
e617b554 4908/* A subroutine of ipa_tm_create_version, called via
0a35513e
AH
4909 cgraph_for_node_and_aliases. Create new tm clones for each of
4910 the existing aliases. */
4911static bool
4912ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
4913{
4914 struct create_version_alias_info *info
4915 = (struct create_version_alias_info *)data;
4916 tree old_decl, new_decl, tm_name;
4917 struct cgraph_node *new_node;
4918
67348ccc 4919 if (!node->cpp_implicit_alias)
0a35513e
AH
4920 return false;
4921
67348ccc 4922 old_decl = node->decl;
0a35513e
AH
4923 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4924 new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
4925 TREE_CODE (old_decl), tm_name,
4926 TREE_TYPE (old_decl));
4927
4928 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4929 SET_DECL_RTL (new_decl, NULL);
4930
4931 /* Based loosely on C++'s make_alias_for(). */
4932 TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
54c39478
AH
4933 DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
4934 DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
0a35513e
AH
4935 TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
4936 DECL_EXTERNAL (new_decl) = 0;
4937 DECL_ARTIFICIAL (new_decl) = 1;
4938 TREE_ADDRESSABLE (new_decl) = 1;
4939 TREE_USED (new_decl) = 1;
4940 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4941
4942 /* Perform the same remapping to the comdat group. */
43042ea7 4943 if (DECL_ONE_ONLY (new_decl))
9041d2e6
ML
4944 varpool_node::get (new_decl)->set_comdat_group
4945 (tm_mangle (decl_comdat_group_id (old_decl)));
0a35513e 4946
d52f5295 4947 new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
0a35513e 4948 new_node->tm_clone = true;
67348ccc 4949 new_node->externally_visible = info->old_node->externally_visible;
7861b648 4950 new_node->no_reorder = info->old_node->no_reorder;
594ec92f
AH
4951 /* ?? Do not traverse aliases here. */
4952 get_cg_data (&node, false)->clone = new_node;
0a35513e
AH
4953
4954 record_tm_clone_pair (old_decl, new_decl);
4955
67348ccc 4956 if (info->old_node->force_output
d122681a 4957 || info->old_node->ref_list.first_referring ())
ead84f73 4958 ipa_tm_mark_force_output_node (new_node);
67348ccc 4959 if (info->old_node->forced_by_abi)
edb983b2 4960 ipa_tm_mark_forced_by_abi_node (new_node);
0a35513e
AH
4961 return false;
4962}
4963
4964/* Create a copy of the function (possibly declaration only) of OLD_NODE,
4965 appropriate for the transactional clone. */
4966
4967static void
4968ipa_tm_create_version (struct cgraph_node *old_node)
4969{
4970 tree new_decl, old_decl, tm_name;
4971 struct cgraph_node *new_node;
4972
67348ccc 4973 old_decl = old_node->decl;
0a35513e
AH
4974 new_decl = copy_node (old_decl);
4975
4976 /* DECL_ASSEMBLER_NAME needs to be set before we call
4977 cgraph_copy_node_for_versioning below, because cgraph_node will
4978 fill the assembler_name_hash. */
4979 tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
4980 SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
4981 SET_DECL_RTL (new_decl, NULL);
4982 TREE_SYMBOL_REFERENCED (tm_name) = 1;
4983
4984 /* Perform the same remapping to the comdat group. */
43042ea7 4985 if (DECL_ONE_ONLY (new_decl))
9041d2e6
ML
4986 varpool_node::get (new_decl)->set_comdat_group
4987 (tm_mangle (DECL_COMDAT_GROUP (old_decl)));
0a35513e 4988
ca860d03 4989 gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
d52f5295 4990 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
87f94429 4991 new_node->local = false;
67348ccc 4992 new_node->externally_visible = old_node->externally_visible;
0a35513e
AH
4993 new_node->lowered = true;
4994 new_node->tm_clone = 1;
4ab26ee0
JJ
4995 if (!old_node->implicit_section)
4996 new_node->set_section (old_node->get_section ());
594ec92f 4997 get_cg_data (&old_node, true)->clone = new_node;
0a35513e 4998
d52f5295 4999 if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
0a35513e
AH
5000 {
5001 /* Remap extern inline to static inline. */
5002 /* ??? Is it worth trying to use make_decl_one_only? */
5003 if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
5004 {
5005 DECL_EXTERNAL (new_decl) = 0;
5006 TREE_PUBLIC (new_decl) = 0;
e0b22991 5007 DECL_WEAK (new_decl) = 0;
0a35513e
AH
5008 }
5009
9771b263 5010 tree_function_versioning (old_decl, new_decl,
ff6686d2 5011 NULL, NULL, false, NULL, NULL);
0a35513e
AH
5012 }
5013
5014 record_tm_clone_pair (old_decl, new_decl);
5015
3dafb85c 5016 symtab->call_cgraph_insertion_hooks (new_node);
67348ccc 5017 if (old_node->force_output
d122681a 5018 || old_node->ref_list.first_referring ())
ead84f73 5019 ipa_tm_mark_force_output_node (new_node);
67348ccc 5020 if (old_node->forced_by_abi)
edb983b2 5021 ipa_tm_mark_forced_by_abi_node (new_node);
0a35513e
AH
5022
5023 /* Do the same thing, but for any aliases of the original node. */
5024 {
5025 struct create_version_alias_info data;
5026 data.old_node = old_node;
5027 data.new_decl = new_decl;
d52f5295
ML
5028 old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
5029 &data, true);
0a35513e
AH
5030 }
5031}
5032
5033/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
5034
5035static void
5036ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
5037 basic_block bb)
5038{
5039 gimple_stmt_iterator gsi;
538dd0b7 5040 gcall *g;
0a35513e
AH
5041
5042 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5043
5044 g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
5045 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
5046
5047 split_block_after_labels (bb);
5048 gsi = gsi_after_labels (bb);
5049 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5050
d52f5295
ML
5051 node->create_edge (cgraph_node::get_create
5052 (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
1bad9c18 5053 g, gimple_bb (g)->count);
0a35513e
AH
5054}
5055
5056/* Construct a call to TM_GETTMCLONE and insert it before GSI. */
5057
5058static bool
5059ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
5060 struct tm_region *region,
538dd0b7 5061 gimple_stmt_iterator *gsi, gcall *stmt)
0a35513e
AH
5062{
5063 tree gettm_fn, ret, old_fn, callfn;
538dd0b7
DM
5064 gcall *g;
5065 gassign *g2;
0a35513e
AH
5066 bool safe;
5067
5068 old_fn = gimple_call_fn (stmt);
5069
5070 if (TREE_CODE (old_fn) == ADDR_EXPR)
5071 {
5072 tree fndecl = TREE_OPERAND (old_fn, 0);
5073 tree clone = get_tm_clone_pair (fndecl);
5074
5075 /* By transforming the call into a TM_GETTMCLONE, we are
5076 technically taking the address of the original function and
5077 its clone. Explain this so inlining will know this function
5078 is needed. */
d52f5295 5079 cgraph_node::get (fndecl)->mark_address_taken () ;
0a35513e 5080 if (clone)
d52f5295 5081 cgraph_node::get (clone)->mark_address_taken ();
0a35513e
AH
5082 }
5083
5084 safe = is_tm_safe (TREE_TYPE (old_fn));
5085 gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
5086 : BUILT_IN_TM_GETTMCLONE_IRR);
b731b390 5087 ret = create_tmp_var (ptr_type_node);
0a35513e
AH
5088
5089 if (!safe)
5090 transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
5091
5092 /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
5093 if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
5094 old_fn = OBJ_TYPE_REF_EXPR (old_fn);
5095
5096 g = gimple_build_call (gettm_fn, 1, old_fn);
5097 ret = make_ssa_name (ret, g);
5098 gimple_call_set_lhs (g, ret);
5099
5100 gsi_insert_before (gsi, g, GSI_SAME_STMT);
5101
1bad9c18 5102 node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count);
0a35513e
AH
5103
5104 /* Cast return value from tm_gettmclone* into appropriate function
5105 pointer. */
b731b390 5106 callfn = create_tmp_var (TREE_TYPE (old_fn));
0a35513e
AH
5107 g2 = gimple_build_assign (callfn,
5108 fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
5109 callfn = make_ssa_name (callfn, g2);
5110 gimple_assign_set_lhs (g2, callfn);
5111 gsi_insert_before (gsi, g2, GSI_SAME_STMT);
5112
5113 /* ??? This is a hack to preserve the NOTHROW bit on the call,
5114 which we would have derived from the decl. Failure to save
5115 this bit means we might have to split the basic block. */
5116 if (gimple_call_nothrow_p (stmt))
5117 gimple_call_set_nothrow (stmt, true);
5118
5119 gimple_call_set_fn (stmt, callfn);
5120
5121 /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
5122 for a call statement. Fix it. */
5123 {
5124 tree lhs = gimple_call_lhs (stmt);
5125 tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
5126 if (lhs
5127 && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
5128 {
5129 tree temp;
5130
b731b390 5131 temp = create_tmp_reg (rettype);
0a35513e
AH
5132 gimple_call_set_lhs (stmt, temp);
5133
5134 g2 = gimple_build_assign (lhs,
5135 fold_build1 (VIEW_CONVERT_EXPR,
5136 TREE_TYPE (lhs), temp));
5137 gsi_insert_after (gsi, g2, GSI_SAME_STMT);
5138 }
5139 }
5140
5141 update_stmt (stmt);
7d0aa05b
JH
5142 cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
5143 if (e && e->indirect_info)
5144 e->indirect_info->polymorphic = false;
0a35513e
AH
5145
5146 return true;
5147}
5148
5149/* Helper function for ipa_tm_transform_calls*. Given a call
5150 statement in GSI which resides inside transaction REGION, redirect
5151 the call to either its wrapper function, or its clone. */
5152
5153static void
5154ipa_tm_transform_calls_redirect (struct cgraph_node *node,
5155 struct tm_region *region,
5156 gimple_stmt_iterator *gsi,
5157 bool *need_ssa_rename_p)
5158{
538dd0b7 5159 gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
0a35513e 5160 struct cgraph_node *new_node;
d52f5295 5161 struct cgraph_edge *e = node->get_edge (stmt);
0a35513e
AH
5162 tree fndecl = gimple_call_fndecl (stmt);
5163
5164 /* For indirect calls, pass the address through the runtime. */
5165 if (fndecl == NULL)
5166 {
5167 *need_ssa_rename_p |=
5168 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
5169 return;
5170 }
5171
5172 /* Handle some TM builtins. Ordinarily these aren't actually generated
5173 at this point, but handling these functions when written in by the
5174 user makes it easier to build unit tests. */
5175 if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
5176 return;
5177
5178 /* Fixup recursive calls inside clones. */
5179 /* ??? Why did cgraph_copy_node_for_versioning update the call edges
5180 for recursion but not update the call statements themselves? */
5181 if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
5182 {
5183 gimple_call_set_fndecl (stmt, current_function_decl);
5184 return;
5185 }
5186
5187 /* If there is a replacement, use it. */
5188 fndecl = find_tm_replacement_function (fndecl);
5189 if (fndecl)
5190 {
d52f5295 5191 new_node = cgraph_node::get_create (fndecl);
0a35513e
AH
5192
5193 /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
5194
5195 We can't do this earlier in record_tm_replacement because
5196 cgraph_remove_unreachable_nodes is called before we inject
5197 references to the node. Further, we can't do this in some
5198 nice central place in ipa_tm_execute because we don't have
5199 the exact list of wrapper functions that would be used.
5200 Marking more wrappers than necessary results in the creation
5201 of unnecessary cgraph_nodes, which can cause some of the
5202 other IPA passes to crash.
5203
5204 We do need to mark these nodes so that we get the proper
5205 result in expand_call_tm. */
5206 /* ??? This seems broken. How is it that we're marking the
5207 CALLEE as may_enter_irr? Surely we should be marking the
5208 CALLER. Also note that find_tm_replacement_function also
5209 contains mappings into the TM runtime, e.g. memcpy. These
5210 we know won't go irrevocable. */
87f94429 5211 new_node->tm_may_enter_irr = 1;
0a35513e
AH
5212 }
5213 else
5214 {
594ec92f
AH
5215 struct tm_ipa_cg_data *d;
5216 struct cgraph_node *tnode = e->callee;
5217
5218 d = get_cg_data (&tnode, true);
0a35513e
AH
5219 new_node = d->clone;
5220
5221 /* As we've already skipped pure calls and appropriate builtins,
5222 and we've already marked irrevocable blocks, if we can't come
5223 up with a static replacement, then ask the runtime. */
5224 if (new_node == NULL)
5225 {
5226 *need_ssa_rename_p |=
5227 ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
0a35513e
AH
5228 return;
5229 }
5230
67348ccc 5231 fndecl = new_node->decl;
0a35513e
AH
5232 }
5233
3dafb85c 5234 e->redirect_callee (new_node);
0a35513e
AH
5235 gimple_call_set_fndecl (stmt, fndecl);
5236}
5237
5238/* Helper function for ipa_tm_transform_calls. For a given BB,
5239 install calls to tm_irrevocable when IRR_BLOCKS are reached,
5240 redirect other calls to the generated transactional clone. */
5241
5242static bool
5243ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
5244 basic_block bb, bitmap irr_blocks)
5245{
5246 gimple_stmt_iterator gsi;
5247 bool need_ssa_rename = false;
5248
5249 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5250 {
5251 ipa_tm_insert_irr_call (node, region, bb);
5252 return true;
5253 }
5254
5255 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5256 {
355fe088 5257 gimple *stmt = gsi_stmt (gsi);
0a35513e
AH
5258
5259 if (!is_gimple_call (stmt))
5260 continue;
5261 if (is_tm_pure_call (stmt))
5262 continue;
5263
5264 /* Redirect edges to the appropriate replacement or clone. */
5265 ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
5266 }
5267
5268 return need_ssa_rename;
5269}
5270
5271/* Walk the CFG for REGION, beginning at BB. Install calls to
5272 tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
5273 the generated transactional clone. */
5274
5275static bool
5276ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
5277 basic_block bb, bitmap irr_blocks)
5278{
5279 bool need_ssa_rename = false;
5280 edge e;
5281 edge_iterator ei;
ef062b13 5282 auto_vec<basic_block> queue;
0a35513e
AH
5283 bitmap visited_blocks = BITMAP_ALLOC (NULL);
5284
9771b263 5285 queue.safe_push (bb);
0a35513e
AH
5286 do
5287 {
9771b263 5288 bb = queue.pop ();
0a35513e
AH
5289
5290 need_ssa_rename |=
5291 ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
5292
5293 if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
5294 continue;
5295
5296 if (region && bitmap_bit_p (region->exit_blocks, bb->index))
5297 continue;
5298
5299 FOR_EACH_EDGE (e, ei, bb->succs)
5300 if (!bitmap_bit_p (visited_blocks, e->dest->index))
5301 {
5302 bitmap_set_bit (visited_blocks, e->dest->index);
9771b263 5303 queue.safe_push (e->dest);
0a35513e
AH
5304 }
5305 }
9771b263 5306 while (!queue.is_empty ());
0a35513e 5307
0a35513e
AH
5308 BITMAP_FREE (visited_blocks);
5309
5310 return need_ssa_rename;
5311}
5312
5313/* Transform the calls within the TM regions within NODE. */
5314
5315static void
5316ipa_tm_transform_transaction (struct cgraph_node *node)
5317{
594ec92f 5318 struct tm_ipa_cg_data *d;
0a35513e
AH
5319 struct tm_region *region;
5320 bool need_ssa_rename = false;
5321
594ec92f
AH
5322 d = get_cg_data (&node, true);
5323
67348ccc 5324 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
0a35513e
AH
5325 calculate_dominance_info (CDI_DOMINATORS);
5326
5327 for (region = d->all_tm_regions; region; region = region->next)
5328 {
5329 /* If we're sure to go irrevocable, don't transform anything. */
5330 if (d->irrevocable_blocks_normal
5331 && bitmap_bit_p (d->irrevocable_blocks_normal,
5332 region->entry_block->index))
5333 {
b7a78683
AH
5334 transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
5335 | GTMA_MAY_ENTER_IRREVOCABLE
5336 | GTMA_HAS_NO_INSTRUMENTATION);
0a35513e
AH
5337 continue;
5338 }
5339
5340 need_ssa_rename |=
5341 ipa_tm_transform_calls (node, region, region->entry_block,
5342 d->irrevocable_blocks_normal);
5343 }
5344
5345 if (need_ssa_rename)
5346 update_ssa (TODO_update_ssa_only_virtuals);
5347
5348 pop_cfun ();
0a35513e
AH
5349}
5350
5351/* Transform the calls within the transactional clone of NODE. */
5352
5353static void
5354ipa_tm_transform_clone (struct cgraph_node *node)
5355{
594ec92f 5356 struct tm_ipa_cg_data *d;
0a35513e
AH
5357 bool need_ssa_rename;
5358
594ec92f
AH
5359 d = get_cg_data (&node, true);
5360
0a35513e
AH
5361 /* If this function makes no calls and has no irrevocable blocks,
5362 then there's nothing to do. */
5363 /* ??? Remove non-aborting top-level transactions. */
8730965e 5364 if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
0a35513e
AH
5365 return;
5366
67348ccc 5367 push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
0a35513e
AH
5368 calculate_dominance_info (CDI_DOMINATORS);
5369
5370 need_ssa_rename =
fefa31b5
DM
5371 ipa_tm_transform_calls (d->clone, NULL,
5372 single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
0a35513e
AH
5373 d->irrevocable_blocks_clone);
5374
5375 if (need_ssa_rename)
5376 update_ssa (TODO_update_ssa_only_virtuals);
5377
5378 pop_cfun ();
0a35513e
AH
5379}
5380
5381/* Main entry point for the transactional memory IPA pass. */
5382
5383static unsigned int
5384ipa_tm_execute (void)
5385{
c3284718 5386 cgraph_node_queue tm_callees = cgraph_node_queue ();
0a35513e 5387 /* List of functions that will go irrevocable. */
c3284718 5388 cgraph_node_queue irr_worklist = cgraph_node_queue ();
0a35513e
AH
5389
5390 struct cgraph_node *node;
5391 struct tm_ipa_cg_data *d;
5392 enum availability a;
5393 unsigned int i;
5394
b2b29377 5395 cgraph_node::checking_verify_cgraph_nodes ();
0a35513e
AH
5396
5397 bitmap_obstack_initialize (&tm_obstack);
398b1daa 5398 initialize_original_copy_tables ();
0a35513e
AH
5399
5400 /* For all local functions marked tm_callable, queue them. */
65c70e6b 5401 FOR_EACH_DEFINED_FUNCTION (node)
67348ccc 5402 if (is_tm_callable (node->decl)
d52f5295 5403 && node->get_availability () >= AVAIL_INTERPOSABLE)
0a35513e 5404 {
594ec92f 5405 d = get_cg_data (&node, true);
0a35513e
AH
5406 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5407 }
5408
5409 /* For all local reachable functions... */
65c70e6b 5410 FOR_EACH_DEFINED_FUNCTION (node)
93a18a70 5411 if (node->lowered
d52f5295 5412 && node->get_availability () >= AVAIL_INTERPOSABLE)
0a35513e
AH
5413 {
5414 /* ... marked tm_pure, record that fact for the runtime by
5415 indicating that the pure function is its own tm_callable.
5416 No need to do this if the function's address can't be taken. */
67348ccc 5417 if (is_tm_pure (node->decl))
0a35513e 5418 {
87f94429 5419 if (!node->local)
67348ccc 5420 record_tm_clone_pair (node->decl, node->decl);
0a35513e
AH
5421 continue;
5422 }
5423
67348ccc 5424 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
0a35513e
AH
5425 calculate_dominance_info (CDI_DOMINATORS);
5426
5427 tm_region_init (NULL);
5428 if (all_tm_regions)
5429 {
594ec92f 5430 d = get_cg_data (&node, true);
0a35513e 5431
398b1daa
AH
5432 /* Scan for calls that are in each transaction, and
5433 generate the uninstrumented code path. */
0a35513e
AH
5434 ipa_tm_scan_calls_transaction (d, &tm_callees);
5435
80fd8eba
AH
5436 /* Put it in the worklist so we can scan the function
5437 later (ipa_tm_scan_irr_function) and mark the
5438 irrevocable blocks. */
5439 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5440 d->want_irr_scan_normal = true;
0a35513e
AH
5441 }
5442
5443 pop_cfun ();
0a35513e
AH
5444 }
5445
5446 /* For every local function on the callee list, scan as if we will be
5447 creating a transactional clone, queueing all new functions we find
5448 along the way. */
9771b263 5449 for (i = 0; i < tm_callees.length (); ++i)
0a35513e 5450 {
9771b263 5451 node = tm_callees[i];
d52f5295 5452 a = node->get_availability ();
594ec92f 5453 d = get_cg_data (&node, true);
0a35513e 5454
80fd8eba
AH
5455 /* Put it in the worklist so we can scan the function later
5456 (ipa_tm_scan_irr_function) and mark the irrevocable
5457 blocks. */
5458 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
0a35513e
AH
5459
5460 /* Some callees cannot be arbitrarily cloned. These will always be
5461 irrevocable. Mark these now, so that we need not scan them. */
67348ccc 5462 if (is_tm_irrevocable (node->decl))
0a35513e
AH
5463 ipa_tm_note_irrevocable (node, &irr_worklist);
5464 else if (a <= AVAIL_NOT_AVAILABLE
67348ccc 5465 && !is_tm_safe_or_pure (node->decl))
0a35513e 5466 ipa_tm_note_irrevocable (node, &irr_worklist);
d52f5295 5467 else if (a >= AVAIL_INTERPOSABLE)
0a35513e 5468 {
67348ccc 5469 if (!tree_versionable_function_p (node->decl))
0a35513e
AH
5470 ipa_tm_note_irrevocable (node, &irr_worklist);
5471 else if (!d->is_irrevocable)
5472 {
5473 /* If this is an alias, make sure its base is queued as well.
5474 we need not scan the callees now, as the base will do. */
67348ccc 5475 if (node->alias)
0a35513e 5476 {
d52f5295 5477 node = cgraph_node::get (node->thunk.alias);
594ec92f 5478 d = get_cg_data (&node, true);
0a35513e
AH
5479 maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
5480 continue;
5481 }
5482
5483 /* Add all nodes called by this function into
5484 tm_callees as well. */
5485 ipa_tm_scan_calls_clone (node, &tm_callees);
5486 }
5487 }
5488 }
5489
5490 /* Iterate scans until no more work to be done. Prefer not to use
9771b263 5491 vec::pop because the worklist tends to follow a breadth-first
0a35513e
AH
5492 search of the callgraph, which should allow convergance with a
5493 minimum number of scans. But we also don't want the worklist
5494 array to grow without bound, so we shift the array up periodically. */
9771b263 5495 for (i = 0; i < irr_worklist.length (); ++i)
0a35513e 5496 {
9771b263 5497 if (i > 256 && i == irr_worklist.length () / 8)
0a35513e 5498 {
9771b263 5499 irr_worklist.block_remove (0, i);
0a35513e
AH
5500 i = 0;
5501 }
5502
9771b263 5503 node = irr_worklist[i];
594ec92f 5504 d = get_cg_data (&node, true);
0a35513e
AH
5505 d->in_worklist = false;
5506
5507 if (d->want_irr_scan_normal)
5508 {
5509 d->want_irr_scan_normal = false;
5510 ipa_tm_scan_irr_function (node, false);
5511 }
5512 if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
5513 ipa_tm_note_irrevocable (node, &irr_worklist);
5514 }
5515
5516 /* For every function on the callee list, collect the tm_may_enter_irr
5517 bit on the node. */
9771b263
DN
5518 irr_worklist.truncate (0);
5519 for (i = 0; i < tm_callees.length (); ++i)
0a35513e 5520 {
9771b263 5521 node = tm_callees[i];
0a35513e
AH
5522 if (ipa_tm_mayenterirr_function (node))
5523 {
594ec92f 5524 d = get_cg_data (&node, true);
0a35513e
AH
5525 gcc_assert (d->in_worklist == false);
5526 maybe_push_queue (node, &irr_worklist, &d->in_worklist);
5527 }
5528 }
5529
5530 /* Propagate the tm_may_enter_irr bit to callers until stable. */
9771b263 5531 for (i = 0; i < irr_worklist.length (); ++i)
0a35513e
AH
5532 {
5533 struct cgraph_node *caller;
5534 struct cgraph_edge *e;
e55637b7 5535 struct ipa_ref *ref;
0a35513e 5536
9771b263 5537 if (i > 256 && i == irr_worklist.length () / 8)
0a35513e 5538 {
9771b263 5539 irr_worklist.block_remove (0, i);
0a35513e
AH
5540 i = 0;
5541 }
5542
9771b263 5543 node = irr_worklist[i];
594ec92f 5544 d = get_cg_data (&node, true);
0a35513e 5545 d->in_worklist = false;
87f94429 5546 node->tm_may_enter_irr = true;
0a35513e
AH
5547
5548 /* Propagate back to normal callers. */
5549 for (e = node->callers; e ; e = e->next_caller)
5550 {
5551 caller = e->caller;
67348ccc 5552 if (!is_tm_safe_or_pure (caller->decl)
87f94429 5553 && !caller->tm_may_enter_irr)
0a35513e 5554 {
594ec92f 5555 d = get_cg_data (&caller, true);
0a35513e
AH
5556 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5557 }
5558 }
5559
5560 /* Propagate back to referring aliases as well. */
e55637b7 5561 FOR_EACH_ALIAS (node, ref)
0a35513e 5562 {
d52f5295 5563 caller = dyn_cast<cgraph_node *> (ref->referring);
87f94429 5564 if (!caller->tm_may_enter_irr)
0a35513e 5565 {
594ec92f
AH
5566 /* ?? Do not traverse aliases here. */
5567 d = get_cg_data (&caller, false);
0a35513e
AH
5568 maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
5569 }
5570 }
5571 }
5572
5573 /* Now validate all tm_safe functions, and all atomic regions in
5574 other functions. */
65c70e6b 5575 FOR_EACH_DEFINED_FUNCTION (node)
93a18a70 5576 if (node->lowered
d52f5295 5577 && node->get_availability () >= AVAIL_INTERPOSABLE)
0a35513e 5578 {
594ec92f 5579 d = get_cg_data (&node, true);
67348ccc 5580 if (is_tm_safe (node->decl))
0a35513e
AH
5581 ipa_tm_diagnose_tm_safe (node);
5582 else if (d->all_tm_regions)
5583 ipa_tm_diagnose_transaction (node, d->all_tm_regions);
5584 }
5585
5586 /* Create clones. Do those that are not irrevocable and have a
5587 positive call count. Do those publicly visible functions that
5588 the user directed us to clone. */
9771b263 5589 for (i = 0; i < tm_callees.length (); ++i)
0a35513e
AH
5590 {
5591 bool doit = false;
5592
9771b263 5593 node = tm_callees[i];
67348ccc 5594 if (node->cpp_implicit_alias)
0a35513e
AH
5595 continue;
5596
d52f5295 5597 a = node->get_availability ();
594ec92f 5598 d = get_cg_data (&node, true);
0a35513e
AH
5599
5600 if (a <= AVAIL_NOT_AVAILABLE)
67348ccc
DM
5601 doit = is_tm_callable (node->decl);
5602 else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
0a35513e
AH
5603 doit = true;
5604 else if (!d->is_irrevocable
5605 && d->tm_callers_normal + d->tm_callers_clone > 0)
5606 doit = true;
5607
5608 if (doit)
5609 ipa_tm_create_version (node);
5610 }
5611
5612 /* Redirect calls to the new clones, and insert irrevocable marks. */
9771b263 5613 for (i = 0; i < tm_callees.length (); ++i)
0a35513e 5614 {
9771b263 5615 node = tm_callees[i];
67348ccc 5616 if (node->analyzed)
0a35513e 5617 {
594ec92f 5618 d = get_cg_data (&node, true);
0a35513e
AH
5619 if (d->clone)
5620 ipa_tm_transform_clone (node);
5621 }
5622 }
65c70e6b 5623 FOR_EACH_DEFINED_FUNCTION (node)
93a18a70 5624 if (node->lowered
d52f5295 5625 && node->get_availability () >= AVAIL_INTERPOSABLE)
0a35513e 5626 {
594ec92f 5627 d = get_cg_data (&node, true);
0a35513e
AH
5628 if (d->all_tm_regions)
5629 ipa_tm_transform_transaction (node);
5630 }
5631
5632 /* Free and clear all data structures. */
9771b263
DN
5633 tm_callees.release ();
5634 irr_worklist.release ();
0a35513e 5635 bitmap_obstack_release (&tm_obstack);
398b1daa 5636 free_original_copy_tables ();
0a35513e 5637
65c70e6b 5638 FOR_EACH_FUNCTION (node)
67348ccc 5639 node->aux = NULL;
0a35513e 5640
b2b29377 5641 cgraph_node::checking_verify_cgraph_nodes ();
0a35513e
AH
5642
5643 return 0;
5644}
5645
17795822
TS
5646namespace {
5647
5648const pass_data pass_data_ipa_tm =
27a4cd48
DM
5649{
5650 SIMPLE_IPA_PASS, /* type */
5651 "tmipa", /* name */
5652 OPTGROUP_NONE, /* optinfo_flags */
27a4cd48
DM
5653 TV_TRANS_MEM, /* tv_id */
5654 ( PROP_ssa | PROP_cfg ), /* properties_required */
5655 0, /* properties_provided */
5656 0, /* properties_destroyed */
5657 0, /* todo_flags_start */
5658 0, /* todo_flags_finish */
0a35513e
AH
5659};
5660
17795822 5661class pass_ipa_tm : public simple_ipa_opt_pass
27a4cd48
DM
5662{
5663public:
c3284718
RS
5664 pass_ipa_tm (gcc::context *ctxt)
5665 : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
27a4cd48
DM
5666 {}
5667
5668 /* opt_pass methods: */
1a3d085c 5669 virtual bool gate (function *) { return flag_tm; }
be55bfe6 5670 virtual unsigned int execute (function *) { return ipa_tm_execute (); }
27a4cd48
DM
5671
5672}; // class pass_ipa_tm
5673
17795822
TS
5674} // anon namespace
5675
27a4cd48
DM
5676simple_ipa_opt_pass *
5677make_pass_ipa_tm (gcc::context *ctxt)
5678{
5679 return new pass_ipa_tm (ctxt);
5680}
5681
0a35513e 5682#include "gt-trans-mem.h"