/* Passes for transactional memory support.
   Copyright (C) 2008-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "calls.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-inline.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "tree-ssa-address.h"


#define A_RUNINSTRUMENTEDCODE	0x0001
#define A_RUNUNINSTRUMENTEDCODE	0x0002
#define A_SAVELIVEVARIABLES	0x0004
#define A_RESTORELIVEVARIABLES	0x0008
#define A_ABORTTRANSACTION	0x0010

#define AR_USERABORT		0x0001
#define AR_USERRETRY		0x0002
#define AR_TMCONFLICT		0x0004
#define AR_EXCEPTIONBLOCKABORT	0x0008
#define AR_OUTERABORT		0x0010

#define MODE_SERIALIRREVOCABLE	0x0000

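
/* These bits mirror the transaction properties and abort-reason codes
   of the Intel TM ABI used by libitm (a_runInstrumentedCode,
   a_saveLiveVariables, ..., and the abortReason values).  As a rough
   sketch of how they are consumed (see also the lowering example
   below), the expanded transaction prologue tests the bits returned
   by the start routine:

     x = __builtin___tm_start (...);
     if (x & A_ABORTTRANSACTION)
       goto over;
     if (x & A_RESTORELIVEVARIABLES)
       ... copy saved values back into the live variables ...

   The exact spelling of the property names on the libitm side may
   differ; the numeric values are what matter to the generated code.  */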

/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

	__transaction {
	  local++;
	  if (++global == 10)
	    __tm_abort;
	}

   During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
   trivially replaced with a GIMPLE_TRANSACTION node.

   During pass_lower_tm, we examine the body of transactions looking
   for aborts.  Transactions that do not contain an abort may be
   merged into an outer transaction.  We also add a TRY-FINALLY node
   to arrange for the transaction to be committed on any exit.

   [??? Think about how this arrangement affects throw-with-commit
   and throw-with-abort operations.  In this case we want the TRY to
   handle gotos, but not to catch any exceptions because the transaction
   will already be closed.]

	GIMPLE_TRANSACTION [label=NULL] {
	  try {
	    local = local + 1;
	    t0 = global;
	    t1 = t0 + 1;
	    global = t1;
	    if (t1 == 10)
	      __builtin___tm_abort ();
	  } finally {
	    __builtin___tm_commit ();
	  }
	}

   During pass_lower_eh, we create EH regions for the transactions,
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
   correct to model transaction aborts and restarts:

	GIMPLE_TRANSACTION [label=over]
	local = local + 1;
	t0 = global;
	t1 = t0 + 1;
	global = t1;
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:

   This is the end of all_lowering_passes, and so is what is present
   during the IPA passes, and through all of the optimization passes.

   During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
   functions and mark functions for cloning.

   At the end of gimple optimization, before exiting SSA form,
   pass_tm_edges replaces statements that perform transactional
   memory operations with the appropriate TM builtins, and swaps
   out function calls with their transactional clones.  At this
   point we introduce the abnormal transaction restart edges and
   complete lowering of the GIMPLE_TRANSACTION node.

	x = __builtin___tm_start (MAY_ABORT);
	eh_label:
	if (x & abort_transaction)
	  goto over;
	local = local + 1;
	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);
	if (t1 == 10)
	  __builtin___tm_abort ();
	__builtin___tm_commit ();
	over:
*/

static void *expand_regions (struct tm_region *,
			     void *(*callback)(struct tm_region *, void *),
			     void *, bool);

\f
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  if (x == NULL_TREE)
    return NULL_TREE;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
	return NULL_TREE;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return NULL_TREE;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
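
/* For illustration, all of the following resolve to the same attribute
   list (hypothetical user declarations, not part of this file):

     void f (void) __attribute__((transaction_safe));
     typeof (f) *fp = f;

   get_attrs_for peeks through the FUNCTION_DECL for f, and through the
   pointer type of fp, down to the FUNCTION_TYPE, and returns that
   type's TYPE_ATTRIBUTES in each case.  */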

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
	return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
	return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
	return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}
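
/* For illustration, TM_PURE typically comes from the transaction_pure
   attribute (hypothetical user code):

     int hash (int) __attribute__((transaction_pure));
     __transaction_atomic { x = hash (x); }  // call needs no instrumentation

   Note that is_tm_pure answers via ECF_TM_PURE rather than looking up
   the attribute directly, so other sources of the flag are honored
   as well.  */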

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
	{
	  if (lookup_attribute ("transaction_safe", attrs))
	    return true;
	  if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	    return true;
	}
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple *call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
	return true;
      if (lookup_attribute ("transaction_safe", attrs))
	return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
	return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
	return true;
      default:
	break;
      }

  return false;
}

/* Return true if STMT is a built-in function call that "ends" a
   transaction.  */

bool
is_tm_ending (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl != NULL_TREE
	  && is_tm_ending_fndecl (fndecl));
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_load (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
	      || fcode == BUILT_IN_TM_LOAD_2
	      || fcode == BUILT_IN_TM_LOAD_4
	      || fcode == BUILT_IN_TM_LOAD_8
	      || fcode == BUILT_IN_TM_LOAD_FLOAT
	      || fcode == BUILT_IN_TM_LOAD_DOUBLE
	      || fcode == BUILT_IN_TM_LOAD_LDOUBLE
	      || fcode == BUILT_IN_TM_LOAD_M64
	      || fcode == BUILT_IN_TM_LOAD_M128
	      || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */

static bool
is_tm_simple_store (gimple *stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
	      || fcode == BUILT_IN_TM_STORE_2
	      || fcode == BUILT_IN_TM_STORE_4
	      || fcode == BUILT_IN_TM_STORE_8
	      || fcode == BUILT_IN_TM_STORE_FLOAT
	      || fcode == BUILT_IN_TM_STORE_DOUBLE
	      || fcode == BUILT_IN_TM_STORE_LDOUBLE
	      || fcode == BUILT_IN_TM_STORE_M64
	      || fcode == BUILT_IN_TM_STORE_M128
	      || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
	  && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
			      build_int_cst (integer_type_node,
					     AR_USERABORT
					     | (is_outer ? AR_OUTERABORT : 0)));
}
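
/* For example, a front end lowering the user-visible cancel statement
   (hypothetical user code):

     __transaction_atomic { if (bad) __transaction_cancel; }

   would call build_tm_abort_call (loc, false), producing a call to
   __builtin___tm_abort (AR_USERABORT); the outer-cancel form passes
   is_outer = true and ORs in AR_OUTERABORT.  */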
\f
/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map>
{
  static inline hashval_t hash (tree_map *m) { return m->hash; }
  static inline bool
  equal (tree_map *a, tree_map *b)
  {
    return a->base.from == b->base.from;
  }

  static int
  keep_cache_entry (tree_map *&m)
  {
    return ggc_marked_p (m->base.from);
  }
};

static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32);

  h = ggc_alloc<tree_map> ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT);
  *slot = h;
}
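
/* A sketch of how entries get here: the tm_wrap attribute on a wrapper
   declaration names the function it replaces (hypothetical user code):

     void foo (void);
     void tmfoo (void) __attribute__((tm_wrap (foo)));

   The attribute handler then arranges for
   record_tm_replacement (foo, tmfoo), so transactional code calls
   tmfoo in place of foo.  */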

/* Return a TM-aware replacement function for DECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = tm_wrap_map->find_with_hash (&in, in.hash);
      if (h)
	return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we only have these few defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
	return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
	return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
	return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
	return NULL;
      }

  return NULL;
}

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing...  */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}
\f
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER		1
#define DIAG_TM_SAFE		2
#define DIAG_TM_RELAXED		4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple *stmt;
};

/* Return true if T is a volatile lvalue of some kind.  */

static bool
volatile_lvalue_p (tree t)
{
  return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t))
	  && TREE_THIS_VOLATILE (TREE_TYPE (t)));
}

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = false;
  else if (volatile_lvalue_p (*tp)
	   && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (d->stmt),
		  "invalid use of volatile lvalue inside "
		  "%<transaction_safe%> function");
    }

  return NULL_TREE;
}
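
/* For example (hypothetical user code), either of these trips the
   diagnostic above:

     volatile int v;
     __transaction_atomic { v = 1; }    // volatile in atomic transaction

     __attribute__((transaction_safe))
     void f (void) { v = 1; }           // volatile in tm_safe function
*/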

static inline bool
is_tm_safe_or_pure (const_tree x)
{
  return is_tm_safe (x) || is_tm_pure (x);
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
	tree fn = gimple_call_fn (stmt);

	if ((d->summary_flags & DIAG_TM_OUTER) == 0
	    && is_tm_may_cancel_outer (fn))
	  error_at (gimple_location (stmt),
		    "%<transaction_may_cancel_outer%> function call not within"
		    " outer transaction or %<transaction_may_cancel_outer%>");

	if (d->summary_flags & DIAG_TM_SAFE)
	  {
	    bool is_safe, direct_call_p;
	    tree replacement;

	    if (TREE_CODE (fn) == ADDR_EXPR
		&& TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
	      {
		direct_call_p = true;
		replacement = TREE_OPERAND (fn, 0);
		replacement = find_tm_replacement_function (replacement);
		if (replacement)
		  fn = replacement;
	      }
	    else
	      {
		direct_call_p = false;
		replacement = NULL_TREE;
	      }

	    if (is_tm_safe_or_pure (fn))
	      is_safe = true;
	    else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
	      {
		/* A function explicitly marked transaction_callable as
		   opposed to transaction_safe is being defined to be
		   unsafe as part of its ABI, regardless of its contents.  */
		is_safe = false;
	      }
	    else if (direct_call_p)
	      {
		if (IS_TYPE_OR_DECL_P (fn)
		    && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
		  is_safe = true;
		else if (replacement)
		  {
		    /* ??? At present we've been considering replacements
		       merely transaction_callable, and therefore might
		       enter irrevocable.  The tm_wrap attribute has not
		       yet made it into the new language spec.  */
		    is_safe = false;
		  }
		else
		  {
		    /* ??? Diagnostics for unmarked direct calls moved into
		       the IPA pass.  Section 3.2 of the spec details how
		       functions not marked should be considered "implicitly
		       safe" based on having examined the function body.  */
		    is_safe = true;
		  }
	      }
	    else
	      {
		/* An unmarked indirect call.  Consider it unsafe even
		   though optimization may yet figure out how to inline.  */
		is_safe = false;
	      }

	    if (!is_safe)
	      {
		if (TREE_CODE (fn) == ADDR_EXPR)
		  fn = TREE_OPERAND (fn, 0);
		if (d->block_flags & DIAG_TM_SAFE)
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"atomic transaction", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "atomic transaction", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "atomic transaction");
		      }
		  }
		else
		  {
		    if (direct_call_p)
		      error_at (gimple_location (stmt),
				"unsafe function call %qD within "
				"%<transaction_safe%> function", fn);
		    else
		      {
			if (!DECL_P (fn) || DECL_NAME (fn))
			  error_at (gimple_location (stmt),
				    "unsafe function call %qE within "
				    "%<transaction_safe%> function", fn);
			else
			  error_at (gimple_location (stmt),
				    "unsafe indirect function call within "
				    "%<transaction_safe%> function");
		      }
		  }
	      }
	  }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
	 asm statements, and then add "transaction_safe" to it.
	 Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
	error_at (gimple_location (stmt),
		  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *trans_stmt = as_a <gtransaction *> (stmt);
	unsigned char inner_flags = DIAG_TM_SAFE;

	if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED)
	  {
	    if (d->block_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in atomic transaction");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"relaxed transaction in %<transaction_safe%> function");
	    inner_flags = DIAG_TM_RELAXED;
	  }
	else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER)
	  {
	    if (d->block_flags)
	      error_at (gimple_location (stmt),
			"outer transaction in transaction");
	    else if (d->func_flags & DIAG_TM_OUTER)
	      error_at (gimple_location (stmt),
			"outer transaction in "
			"%<transaction_may_cancel_outer%> function");
	    else if (d->func_flags & DIAG_TM_SAFE)
	      error_at (gimple_location (stmt),
			"outer transaction in %<transaction_safe%> function");
	    inner_flags |= DIAG_TM_OUTER;
	  }

	*handled_ops_p = true;
	if (gimple_transaction_body (trans_stmt))
	  {
	    struct walk_stmt_info wi_inner;
	    struct diagnose_tm d_inner;

	    memset (&d_inner, 0, sizeof (d_inner));
	    d_inner.func_flags = d->func_flags;
	    d_inner.block_flags = d->block_flags | inner_flags;
	    d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

	    memset (&wi_inner, 0, sizeof (wi_inner));
	    wi_inner.info = &d_inner;

	    walk_gimple_seq (gimple_transaction_body (trans_stmt),
			     diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
	  }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
		   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

namespace {

const pass_data pass_data_diagnose_tm_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_tm_blocks", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_tm_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_tm_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return diagnose_tm_blocks (); }

}; // class pass_diagnose_tm_blocks

} // anon namespace

gimple_opt_pass *
make_pass_diagnose_tm_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_tm_blocks (ctxt);
}
\f
/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
	 lala.x[i] = 123;
	 ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
	 tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
	 lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  vec<gimple *> stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
};


/* Log entry hashtable helpers.  */

struct log_entry_hasher : pointer_hash <tm_log_entry>
{
  static inline hashval_t hash (const tm_log_entry *);
  static inline bool equal (const tm_log_entry *, const tm_log_entry *);
  static inline void remove (tm_log_entry *);
};

/* Htab support.  Return hash value for a `tm_log_entry'.  */
inline hashval_t
log_entry_hasher::hash (const tm_log_entry *log)
{
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
inline bool
log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2)
{
  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
inline void
log_entry_hasher::remove (tm_log_entry *lp)
{
  lp->stmts.release ();
  free (lp);
}


/* The actual log.  */
static hash_table<log_entry_hasher> *tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static vec<tree> tm_log_save_addresses;

enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
};

/* Hashtable helpers.  */

struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
  static inline hashval_t hash (const tm_new_mem_map *);
  static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};

inline hashval_t
tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
  return (intptr_t)v->val >> 4;
}

inline bool
tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
  return v->val == c->val;
}

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc).  */
static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = new hash_table<log_entry_hasher> (10);
  tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
  tm_log_save_addresses.create (5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  delete tm_log;
  tm_log = NULL;
  delete tm_new_mem_hash;
  tm_new_mem_hash = NULL;
  tm_log_save_addresses.release ();
}

/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
	     && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction, or NULL if
   unknown.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple *stmt)
{
  tm_log_entry **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = tm_log->find_slot (&l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
	  && transaction_invariant_address_p (lp->addr, entry_block)
	  && TYPE_SIZE_UNIT (type) != NULL
	  && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
	  && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type))
	      < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
	  /* We must be able to copy this type normally.  I.e., no
	     special constructors and the like.  */
	  && !TREE_ADDRESSABLE (type))
	{
	  lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
	  lp->stmts.create (0);
	  lp->entry_block = entry_block;
	  /* Save addresses separately in dominator order so we don't
	     get confused by overlapping addresses in the save/restore
	     sequence.  */
	  tm_log_save_addresses.safe_push (lp->addr);
	}
      else
	{
	  /* Use the logging functions.  */
	  lp->stmts.create (5);
	  lp->stmts.quick_push (stmt);
	  lp->save_var = NULL;
	}
    }
  else
    {
      size_t i;
      gimple *oldstmt;

      lp = *slot;

      /* If we're generating a save/restore sequence, we don't care
	 about statements.  */
      if (lp->save_var)
	return;

      for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i)
	{
	  if (stmt == oldstmt)
	    return;
	  /* We already have a store to the same address, higher up the
	     dominator tree.  Nothing to do.  */
	  if (dominated_by_p (CDI_DOMINATORS,
			      gimple_bb (stmt), gimple_bb (oldstmt)))
	    return;
	  /* We should be processing blocks in dominator tree order.  */
	  gcc_assert (!dominated_by_p (CDI_DOMINATORS,
				       gimple_bb (oldstmt), gimple_bb (stmt)));
	}
      /* Store is on a different code path.  */
      lp->stmts.safe_push (stmt);
    }
}
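
/* To illustrate the two shapes an entry can take (a sketch; the exact
   size threshold comes from PARAM_TM_MAX_AGGREGATE_SIZE):

     __transaction_atomic {
       small = 1;      // invariant and small: save_var -> save/restore
       big.x[i] = 2;   // too large or variant: stmts -> TM_LOG_* calls
     }
*/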

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple *stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple *log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (tree_fits_uhwi_p (size))
    {
      unsigned int n = tree_to_uhwi (size);
      switch (n)
	{
	case 1:
	  code = BUILT_IN_TM_LOG_1;
	  break;
	case 2:
	  code = BUILT_IN_TM_LOG_2;
	  break;
	case 4:
	  code = BUILT_IN_TM_LOG_4;
	  break;
	case 8:
	  code = BUILT_IN_TM_LOG_8;
	  break;
	default:
	  code = BUILT_IN_TM_LOG;
	  if (TREE_CODE (type) == VECTOR_TYPE)
	    {
	      if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
		code = BUILT_IN_TM_LOG_M64;
	      else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
		code = BUILT_IN_TM_LOG_M128;
	      else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
		code = BUILT_IN_TM_LOG_M256;
	    }
	  break;
	}
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
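
/* For example, logging a 4-byte int X before the store "x = 123" emits
   a call to __builtin___tm_log_4 (&x), which corresponds to the sized
   logging entry point in libitm (_ITM_LU4 in the examples above);
   types with no sized variant fall back to the generic
   __builtin___tm_log (&obj, size).  */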

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  hash_table<log_entry_hasher>::iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple *stmt;

      if (dump_file)
	{
	  fprintf (dump_file, "TM thread private mem logging: ");
	  print_generic_expr (dump_file, lp->addr, 0);
	  fprintf (dump_file, "\n");
	}

      if (lp->save_var)
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING to variable\n");
	  continue;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file, "DUMPING with logging functions\n");
	  for (i = 0; lp->stmts.iterate (i, &stmt); ++i)
	    tm_log_emit_stmt (lp->addr, stmt);
	}
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < tm_log_save_addresses.length (); ++i)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
	 instance, aggregates aren't allowed, in which case the system
	 will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
	{
	  lp->save_var = make_ssa_name (lp->save_var, stmt);
	  gimple_assign_set_lhs (stmt, lp->save_var);
	}

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
    {
      l.addr = tm_log_save_addresses[i];
      lp = *(tm_log->find_slot (&l, NO_INSERT));
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
	continue;

      /* Restores are in LIFO order from the saves in case we have
	 overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

\f
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
			       struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
				  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple *stmt = NULL;
  enum tree_code code;
  tm_new_mem_map **slot;
  tm_new_mem_map elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
	 either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = tm_new_mem_hash->find_slot (&elt, INSERT);
  elt_p = *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
	{
	  /* Address escapes.  This is not thread-private.  */
	  retval = mem_non_local;
	  goto new_memory_ret;
	}

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
	 thread-local.  */
      if (retval != mem_thread_local
	  && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
	retval = mem_thread_local;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  /* x = foo ==> foo */
	  if (code == SSA_NAME)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = foo + n ==> foo */
	  else if (code == POINTER_PLUS_EXPR)
	    x = gimple_assign_rhs1 (stmt);
	  /* x = (cast*) foo ==> foo */
	  else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code))
	    x = gimple_assign_rhs1 (stmt);
	  /* x = c ? op1 : op2 ==> op1 or op2, just like a PHI */
	  else if (code == COND_EXPR)
	    {
	      tree op1 = gimple_assign_rhs2 (stmt);
	      tree op2 = gimple_assign_rhs3 (stmt);
	      enum thread_memory_type mem;
	      retval = thread_private_new_memory (entry_block, op1);
	      if (retval == mem_non_local)
		goto new_memory_ret;
	      mem = thread_private_new_memory (entry_block, op2);
	      retval = MIN (retval, mem);
	      goto new_memory_ret;
	    }
	  else
	    {
	      retval = mem_non_local;
	      goto new_memory_ret;
	    }
	}
      else
	{
	  if (gimple_code (stmt) == GIMPLE_PHI)
	    {
	      unsigned int i;
	      enum thread_memory_type mem;
	      tree phi_result = gimple_phi_result (stmt);

	      /* If any of the ancestors are non-local, we are sure to
		 be non-local.  Otherwise we can avoid doing anything
		 and inherit what has already been generated.  */
	      retval = mem_max;
	      for (i = 0; i < gimple_phi_num_args (stmt); ++i)
		{
		  tree op = PHI_ARG_DEF (stmt, i);

		  /* Exclude self-assignment.  */
		  if (phi_result == op)
		    continue;

		  mem = thread_private_new_memory (entry_block, op);
		  if (mem == mem_non_local)
		    {
		      retval = mem;
		      goto new_memory_ret;
		    }
		  retval = MIN (retval, mem);
		}
	      goto new_memory_ret;
	    }
	  break;
	}
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
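
/* A sketch of the distinction this computes (hypothetical user code):

     char *p = malloc (64);      // allocated before the transaction
     __transaction_atomic {
       char *q = malloc (64);    // allocated inside the transaction
       *p = 1;                   // mem_thread_local: logged for restart
       *q = 2;                   // mem_transaction_local: nothing needed
     }
*/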
1487 | ||
1488 | /* Determine whether X has to be instrumented using a read | |
1489 | or write barrier. | |
1490 | ||
1491 | ENTRY_BLOCK is the entry block for the region where stmt resides | |
1492 | in. NULL if unknown. | |
1493 | ||
1494 | STMT is the statement in which X occurs in. It is used for thread | |
1495 | private memory instrumentation. If no TPM instrumentation is | |
1496 | desired, STMT should be null. */ | |
1497 | static bool | |
42acab1c | 1498 | requires_barrier (basic_block entry_block, tree x, gimple *stmt) |
4c0315d0 | 1499 | { |
1500 | tree orig = x; | |
1501 | while (handled_component_p (x)) | |
1502 | x = TREE_OPERAND (x, 0); | |
1503 | ||
1504 | switch (TREE_CODE (x)) | |
1505 | { | |
1506 | case INDIRECT_REF: | |
1507 | case MEM_REF: | |
1508 | { | |
1509 | enum thread_memory_type ret; | |
1510 | ||
1511 | ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0)); | |
1512 | if (ret == mem_non_local) | |
1513 | return true; | |
1514 | if (stmt && ret == mem_thread_local) | |
1515 | /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */ | |
1516 | tm_log_add (entry_block, orig, stmt); | |
1517 | ||
1518 | /* Transaction-locals require nothing at all. For malloc, a | |
1519 | transaction restart frees the memory and we reallocate. | |
1520 | For alloca, the stack pointer gets reset by the retry and | |
1521 | we reallocate. */ | |
1522 | return false; | |
1523 | } | |
1524 | ||
1525 | case TARGET_MEM_REF: | |
1526 | if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR) | |
1527 | return true; | |
1528 | x = TREE_OPERAND (TMR_BASE (x), 0); | |
1529 | if (TREE_CODE (x) == PARM_DECL) | |
1530 | return false; | |
1531 | gcc_assert (TREE_CODE (x) == VAR_DECL); | |
1532 | /* FALLTHRU */ | |
1533 | ||
1534 | case PARM_DECL: | |
1535 | case RESULT_DECL: | |
1536 | case VAR_DECL: | |
1537 | if (DECL_BY_REFERENCE (x)) | |
1538 | { | |
1539 | /* ??? This value is a pointer, but aggregate_value_p has been | |
1540 | jigged to return true which confuses needs_to_live_in_memory. | |
1541 | This ought to be cleaned up generically. | |
1542 | ||
1543 | FIXME: Verify this still happens after the next mainline | |
1544 | merge. Testcase ie g++.dg/tm/pr47554.C. | |
1545 | */ | |
1546 | return false; | |
1547 | } | |
1548 | ||
1549 | if (is_global_var (x)) | |
e0f3ea3e | 1550 | return !TREE_READONLY (x); |
4c0315d0 | 1551 | if (/* FIXME: This condition should actually go below in the |
1552 | tm_log_add() call, however is_call_clobbered() depends on | |
1553 | aliasing info which is not available during | |
1554 | gimplification. Since requires_barrier() gets called | |
1555 | during lower_sequence_tm/gimplification, leave the call | |
1556 | to needs_to_live_in_memory until we eliminate | |
1557 | lower_sequence_tm altogether. */ | |
3c8b8a41 | 1558 | needs_to_live_in_memory (x)) |
4c0315d0 | 1559 | return true; |
e0f3ea3e | 1560 | else |
1561 | { | |
1562 | /* For local memory that doesn't escape (aka thread private | |
1563 | memory), we can either save the value at the beginning of | |
1564 | the transaction and restore on restart, or call a tm | |
1565 | function to dynamically save and restore on restart | |
1566 | (ITM_L*). */ | |
1567 | if (stmt) | |
1568 | tm_log_add (entry_block, orig, stmt); | |
1569 | return false; | |
1570 | } | |
4c0315d0 | 1571 | |
1572 | default: | |
1573 | return false; | |
1574 | } | |
1575 | } | |
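/* Schematically, the cases above sort out as follows (all variable
   names are illustrative):

     __transaction_atomic {
       rw_global = 1;     // is_global_var && !TREE_READONLY: barrier
       escaped_local = 2; // needs_to_live_in_memory: barrier
       plain_local = 3;   // non-escaping local: logged via tm_log_add
                          //   and saved/restored, no barrier
     }
*/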
1576 | ||
1577 | /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside | |
1578 | a transaction region. */ | |
1579 | ||
1580 | static void | |
1581 | examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi) | |
1582 | { | |
42acab1c | 1583 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1584 | |
1585 | if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL)) | |
1586 | *state |= GTMA_HAVE_LOAD; | |
1587 | if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL)) | |
1588 | *state |= GTMA_HAVE_STORE; | |
1589 | } | |
1590 | ||
1591 | /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */ | |
1592 | ||
1593 | static void | |
1594 | examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi) | |
1595 | { | |
42acab1c | 1596 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1597 | tree fn; |
1598 | ||
1599 | if (is_tm_pure_call (stmt)) | |
1600 | return; | |
1601 | ||
1602 | /* Check if this call is a transaction abort. */ | |
1603 | fn = gimple_call_fndecl (stmt); | |
1604 | if (is_tm_abort (fn)) | |
1605 | *state |= GTMA_HAVE_ABORT; | |
1606 | ||
1607 | /* Assume the call may both read and write memory. */ | |
1608 | *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE; | |
1609 | } | |
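/* For instance, scanning a body like

     __transaction_atomic {
       x = foo ();            // GTMA_HAVE_LOAD | GTMA_HAVE_STORE
       __transaction_cancel;  // GTMA_HAVE_ABORT as well
     }

   would leave all three bits set in *STATE.  (FOO is a hypothetical
   unsafe function; a pure call would be skipped entirely.)  */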
1610 | ||
1611 | /* Lower a GIMPLE_TRANSACTION statement. */ | |
1612 | ||
1613 | static void | |
1614 | lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi) | |
1615 | { | |
42acab1c | 1616 | gimple *g; |
1a91d914 | 1617 | gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi)); |
4c0315d0 | 1618 | unsigned int *outer_state = (unsigned int *) wi->info; |
1619 | unsigned int this_state = 0; | |
1620 | struct walk_stmt_info this_wi; | |
1621 | ||
1622 | /* First, lower the body. The scanning that we do inside gives | |
1623 | us some idea of what we're dealing with. */ | |
1624 | memset (&this_wi, 0, sizeof (this_wi)); | |
1625 | this_wi.info = (void *) &this_state; | |
e3a19533 | 1626 | walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt), |
1627 | lower_sequence_tm, NULL, &this_wi); | |
4c0315d0 | 1628 | |
1629 | /* If there was absolutely nothing transaction related inside the | |
1630 | transaction, we may elide it. Likewise if this is a nested | |
1631 | transaction and does not contain an abort. */ | |
1632 | if (this_state == 0 | |
1633 | || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL)) | |
1634 | { | |
1635 | if (outer_state) | |
1636 | *outer_state |= this_state; | |
1637 | ||
1638 | gsi_insert_seq_before (gsi, gimple_transaction_body (stmt), | |
1639 | GSI_SAME_STMT); | |
1640 | gimple_transaction_set_body (stmt, NULL); | |
1641 | ||
1642 | gsi_remove (gsi, true); | |
1643 | wi->removed_stmt = true; | |
1644 | return; | |
1645 | } | |
1646 | ||
1647 | /* Wrap the body of the transaction in a try-finally node so that | |
1648 | the commit is always performed on every exit. */ | |
1649 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0); | |
1650 | if (flag_exceptions) | |
1651 | { | |
1652 | tree ptr; | |
1653 | gimple_seq n_seq, e_seq; | |
1654 | ||
1655 | n_seq = gimple_seq_alloc_with_stmt (g); | |
e3a19533 | 1656 | e_seq = NULL; |
4c0315d0 | 1657 | |
1658 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER), | |
1659 | 1, integer_zero_node); | |
f9e245b2 | 1660 | ptr = create_tmp_var (ptr_type_node); |
4c0315d0 | 1661 | gimple_call_set_lhs (g, ptr); |
1662 | gimple_seq_add_stmt (&e_seq, g); | |
1663 | ||
1664 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH), | |
1665 | 1, ptr); | |
1666 | gimple_seq_add_stmt (&e_seq, g); | |
1667 | ||
1668 | g = gimple_build_eh_else (n_seq, e_seq); | |
1669 | } | |
1670 | ||
1671 | g = gimple_build_try (gimple_transaction_body (stmt), | |
1672 | gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY); | |
1673 | gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); | |
1674 | ||
1675 | gimple_transaction_set_body (stmt, NULL); | |
1676 | ||
1677 | /* If the transaction calls abort or if this is an outer transaction, | |
1678 | add an "over" label afterwards. */ | |
1679 | if ((this_state & (GTMA_HAVE_ABORT)) | |
9af5ce0c | 1680 | || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)) |
4c0315d0 | 1681 | { |
1682 | tree label = create_artificial_label (UNKNOWN_LOCATION); | |
1683 | gimple_transaction_set_label (stmt, label); | |
1684 | gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING); | |
1685 | } | |
1686 | ||
1687 | /* Record the set of operations found for use later. */ | |
1688 | this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK; | |
1689 | gimple_transaction_set_subcode (stmt, this_state); | |
1690 | } | |
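/* Schematically, a transaction that survives the elision check above
   is lowered to roughly (assuming flag_exceptions):

     GIMPLE_TRANSACTION [label=over]
       try {
         <body>
       } finally {
         if (<normal exit>)   // stands for the GIMPLE_EH_ELSE node
           __builtin__ITM_commitTransaction ();
         else {
           ptr = __builtin_eh_pointer (0);
           __builtin__ITM_commitTransactionEH (ptr);
         }
       }
     over:   // only emitted for aborting or outer transactions

   Without -fexceptions the finally clause is just the plain commit
   call.  This is a sketch of the shape, not exact GIMPLE output.  */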
1691 | ||
1692 | /* Iterate through the statements in the sequence, lowering them all | |
1693 | as appropriate for being in a transaction. */ | |
1694 | ||
1695 | static tree | |
1696 | lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1697 | struct walk_stmt_info *wi) | |
1698 | { | |
1699 | unsigned int *state = (unsigned int *) wi->info; | |
42acab1c | 1700 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1701 | |
1702 | *handled_ops_p = true; | |
1703 | switch (gimple_code (stmt)) | |
1704 | { | |
1705 | case GIMPLE_ASSIGN: | |
1706 | /* Only memory reads/writes need to be instrumented. */ | |
1707 | if (gimple_assign_single_p (stmt)) | |
1708 | examine_assign_tm (state, gsi); | |
1709 | break; | |
1710 | ||
1711 | case GIMPLE_CALL: | |
1712 | examine_call_tm (state, gsi); | |
1713 | break; | |
1714 | ||
1715 | case GIMPLE_ASM: | |
1716 | *state |= GTMA_MAY_ENTER_IRREVOCABLE; | |
1717 | break; | |
1718 | ||
1719 | case GIMPLE_TRANSACTION: | |
1720 | lower_transaction (gsi, wi); | |
1721 | break; | |
1722 | ||
1723 | default: | |
1724 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1725 | break; | |
1726 | } | |
1727 | ||
1728 | return NULL_TREE; | |
1729 | } | |
1730 | ||
1731 | /* Iterate through the statements in the sequence, lowering them all | |
1732 | as appropriate for being outside of a transaction. */ | |
1733 | ||
1734 | static tree | |
1735 | lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1736 | struct walk_stmt_info * wi) | |
1737 | { | |
42acab1c | 1738 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1739 | |
1740 | if (gimple_code (stmt) == GIMPLE_TRANSACTION) | |
1741 | { | |
1742 | *handled_ops_p = true; | |
1743 | lower_transaction (gsi, wi); | |
1744 | } | |
1745 | else | |
1746 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1747 | ||
1748 | return NULL_TREE; | |
1749 | } | |
1750 | ||
1751 | /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After | |
1752 | this, GIMPLE_TRANSACTION nodes still exist, but the nested body has | |
1753 | been moved out, and all the data required for constructing a proper | |
1754 | CFG has been recorded. */ | |
1755 | ||
1756 | static unsigned int | |
1757 | execute_lower_tm (void) | |
1758 | { | |
1759 | struct walk_stmt_info wi; | |
e3a19533 | 1760 | gimple_seq body; |
4c0315d0 | 1761 | |
1762 | /* Transactional clones aren't created until a later pass. */ | |
1763 | gcc_assert (!decl_is_tm_clone (current_function_decl)); | |
1764 | ||
e3a19533 | 1765 | body = gimple_body (current_function_decl); |
4c0315d0 | 1766 | memset (&wi, 0, sizeof (wi)); |
e3a19533 | 1767 | walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi); |
1768 | gimple_set_body (current_function_decl, body); | |
4c0315d0 | 1769 | |
1770 | return 0; | |
1771 | } | |
1772 | ||
7620bc82 | 1773 | namespace { |
1774 | ||
1775 | const pass_data pass_data_lower_tm = | |
cbe8bda8 | 1776 | { |
1777 | GIMPLE_PASS, /* type */ | |
1778 | "tmlower", /* name */ | |
1779 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 1780 | TV_TRANS_MEM, /* tv_id */ |
1781 | PROP_gimple_lcf, /* properties_required */ | |
1782 | 0, /* properties_provided */ | |
1783 | 0, /* properties_destroyed */ | |
1784 | 0, /* todo_flags_start */ | |
1785 | 0, /* todo_flags_finish */ | |
4c0315d0 | 1786 | }; |
cbe8bda8 | 1787 | |
7620bc82 | 1788 | class pass_lower_tm : public gimple_opt_pass |
cbe8bda8 | 1789 | { |
1790 | public: | |
9af5ce0c | 1791 | pass_lower_tm (gcc::context *ctxt) |
1792 | : gimple_opt_pass (pass_data_lower_tm, ctxt) | |
cbe8bda8 | 1793 | {} |
1794 | ||
1795 | /* opt_pass methods: */ | |
31315c24 | 1796 | virtual bool gate (function *) { return flag_tm; } |
65b0537f | 1797 | virtual unsigned int execute (function *) { return execute_lower_tm (); } |
cbe8bda8 | 1798 | |
1799 | }; // class pass_lower_tm | |
1800 | ||
7620bc82 | 1801 | } // anon namespace |
1802 | ||
cbe8bda8 | 1803 | gimple_opt_pass * |
1804 | make_pass_lower_tm (gcc::context *ctxt) | |
1805 | { | |
1806 | return new pass_lower_tm (ctxt); | |
1807 | } | |
4c0315d0 | 1808 | \f |
1809 | /* Collect region information for each transaction. */ | |
1810 | ||
1811 | struct tm_region | |
1812 | { | |
1a91d914 | 1813 | public: |
1814 | ||
1815 | /* The field "transaction_stmt" is initially a gtransaction *, | |
1816 | but eventually gets lowered to a gcall * (to BUILT_IN_TM_START). | |
1817 |  | |
1818 | Helper method to get it as a gtransaction *, with code checking | |
1819 | in a checked build. */ | |
1820 | ||
1821 | gtransaction * | |
1822 | get_transaction_stmt () const | |
1823 | { | |
1824 | return as_a <gtransaction *> (transaction_stmt); | |
1825 | } | |
1826 | ||
1827 | public: | |
1828 | ||
4c0315d0 | 1829 | /* Link to the next unnested transaction. */ |
1830 | struct tm_region *next; | |
1831 | ||
1832 | /* Link to the next inner transaction. */ | |
1833 | struct tm_region *inner; | |
1834 | ||
1835 | /* Link to the next outer transaction. */ | |
1836 | struct tm_region *outer; | |
1837 | ||
0cd02a19 | 1838 | /* The GIMPLE_TRANSACTION statement beginning this transaction. |
1839 | After TM_MARK, this gets replaced by a call to | |
1a91d914 | 1840 | BUILT_IN_TM_START. |
1841 | Hence this will be either a gtransaction * or a gcall *. */ | |
42acab1c | 1842 | gimple *transaction_stmt; |
4c0315d0 | 1843 | |
0cd02a19 | 1844 | /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to |
1845 | BUILT_IN_TM_START, this field is true if the transaction is an | |
1846 | outer transaction. */ | |
1847 | bool original_transaction_was_outer; | |
1848 | ||
1849 | /* Return value from BUILT_IN_TM_START. */ | |
1850 | tree tm_state; | |
1851 | ||
1852 | /* The entry block to this region. This will always be the first | |
1853 | block of the body of the transaction. */ | |
4c0315d0 | 1854 | basic_block entry_block; |
1855 | ||
0cd02a19 | 1856 | /* The first block after an expanded call to _ITM_beginTransaction. */ |
1857 | basic_block restart_block; | |
1858 | ||
4c0315d0 | 1859 | /* The set of all blocks that end the region; NULL if only EXIT_BLOCK. |
1860 | These blocks are still a part of the region (i.e., the border is | |
1861 | inclusive). Note that this set is only complete for paths in the CFG | |
1862 | starting at ENTRY_BLOCK, and that there is no exit block recorded for | |
1863 | the edge to the "over" label. */ | |
1864 | bitmap exit_blocks; | |
1865 | ||
1866 | /* The set of all blocks that have a TM_IRREVOCABLE call. */ | |
1867 | bitmap irr_blocks; | |
1868 | }; | |
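/* For a nest such as (illustrative)

     __transaction_atomic {        // R1
       __transaction_atomic { }    // R2
       __transaction_atomic { }    // R3
     }

   these links form a tree: R1 hangs off ALL_TM_REGIONS via NEXT,
   R1->inner points at the most recently recorded child (here R3),
   whose NEXT field chains to its sibling R2, and both children's
   OUTER fields point back at R1.  */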
1869 | ||
1870 | /* True if there are pending edge statements to be committed for the | |
1871 | current function being scanned in the tmmark pass. */ | |
1872 | bool pending_edge_inserts_p; | |
1873 | ||
1874 | static struct tm_region *all_tm_regions; | |
1875 | static bitmap_obstack tm_obstack; | |
1876 | ||
1877 | ||
9d75589a | 1878 | /* A subroutine of tm_region_init. Record the existence of the |
4c0315d0 | 1879 | GIMPLE_TRANSACTION statement in a tree of tm_region elements. */ |
1880 | ||
1881 | static struct tm_region * | |
1a91d914 | 1882 | tm_region_init_0 (struct tm_region *outer, basic_block bb, |
1883 | gtransaction *stmt) | |
4c0315d0 | 1884 | { |
1885 | struct tm_region *region; | |
1886 | ||
1887 | region = (struct tm_region *) | |
1888 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
1889 | ||
1890 | if (outer) | |
1891 | { | |
1892 | region->next = outer->inner; | |
1893 | outer->inner = region; | |
1894 | } | |
1895 | else | |
1896 | { | |
1897 | region->next = all_tm_regions; | |
1898 | all_tm_regions = region; | |
1899 | } | |
1900 | region->inner = NULL; | |
1901 | region->outer = outer; | |
1902 | ||
1903 | region->transaction_stmt = stmt; | |
0cd02a19 | 1904 | region->original_transaction_was_outer = false; |
1905 | region->tm_state = NULL; | |
4c0315d0 | 1906 | |
1907 | /* There are either one or two edges out of the block containing | |
1908 | the GIMPLE_TRANSACTION, one to the actual region and one to the | |
1909 | "over" label if the region contains an abort. The former will | |
1910 | always be the one marked FALLTHRU. */ | |
1911 | region->entry_block = FALLTHRU_EDGE (bb)->dest; | |
1912 | ||
1913 | region->exit_blocks = BITMAP_ALLOC (&tm_obstack); | |
1914 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
1915 | ||
1916 | return region; | |
1917 | } | |
1918 | ||
1919 | /* A subroutine of tm_region_init. Record all the exit and | |
1920 | irrevocable blocks in BB into the region's exit_blocks and | |
1921 | irr_blocks bitmaps. Returns the new region being scanned. */ | |
1922 | ||
1923 | static struct tm_region * | |
1924 | tm_region_init_1 (struct tm_region *region, basic_block bb) | |
1925 | { | |
1926 | gimple_stmt_iterator gsi; | |
42acab1c | 1927 | gimple *g; |
4c0315d0 | 1928 | |
1929 | if (!region | |
1930 | || (!region->irr_blocks && !region->exit_blocks)) | |
1931 | return region; | |
1932 | ||
1933 | /* Check to see if this is the end of a region by seeing if it | |
1934 | contains a call to __builtin_tm_commit{,_eh}. Note that the | |
1935 | outermost region for DECL_IS_TM_CLONE need not collect this. */ | |
1936 | for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
1937 | { | |
1938 | g = gsi_stmt (gsi); | |
1939 | if (gimple_code (g) == GIMPLE_CALL) | |
1940 | { | |
1941 | tree fn = gimple_call_fndecl (g); | |
1942 | if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL) | |
1943 | { | |
1944 | if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT | |
1945 | || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH) | |
1946 | && region->exit_blocks) | |
1947 | { | |
1948 | bitmap_set_bit (region->exit_blocks, bb->index); | |
1949 | region = region->outer; | |
1950 | break; | |
1951 | } | |
1952 | if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE) | |
1953 | bitmap_set_bit (region->irr_blocks, bb->index); | |
1954 | } | |
1955 | } | |
1956 | } | |
1957 | return region; | |
1958 | } | |
1959 | ||
1960 | /* Collect all of the transaction regions within the current function | |
1961 | and record them in ALL_TM_REGIONS. The REGION parameter may specify | |
1962 | an "outermost" region for use by tm clones. */ | |
1963 | ||
1964 | static void | |
1965 | tm_region_init (struct tm_region *region) | |
1966 | { | |
42acab1c | 1967 | gimple *g; |
4c0315d0 | 1968 | edge_iterator ei; |
1969 | edge e; | |
1970 | basic_block bb; | |
c2078b80 | 1971 | auto_vec<basic_block> queue; |
4c0315d0 | 1972 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
1973 | struct tm_region *old_region; | |
04009ada | 1974 | auto_vec<tm_region *> bb_regions; |
4c0315d0 | 1975 | |
1976 | all_tm_regions = region; | |
34154e27 | 1977 | bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 1978 | |
de60f90c | 1979 | /* We could store this information in bb->aux, but we may get called |
1980 | through get_all_tm_blocks() from another pass that may already | |
1981 | be using bb->aux. */ |
fe672ac0 | 1982 | bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
de60f90c | 1983 | |
f1f41a6c | 1984 | queue.safe_push (bb); |
1985 | bb_regions[bb->index] = region; | |
4c0315d0 | 1986 | do |
1987 | { | |
f1f41a6c | 1988 | bb = queue.pop (); |
1989 | region = bb_regions[bb->index]; | |
1990 | bb_regions[bb->index] = NULL; | |
4c0315d0 | 1991 | |
1992 | /* Record exit and irrevocable blocks. */ | |
1993 | region = tm_region_init_1 (region, bb); | |
1994 | ||
1995 | /* Check for the last statement in the block beginning a new region. */ | |
1996 | g = last_stmt (bb); | |
1997 | old_region = region; | |
1a91d914 | 1998 | if (g) |
1999 | if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g)) | |
2000 | region = tm_region_init_0 (region, bb, trans_stmt); | |
4c0315d0 | 2001 | |
2002 | /* Process subsequent blocks. */ | |
2003 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2004 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
2005 | { | |
2006 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2007 | queue.safe_push (e->dest); |
4c0315d0 | 2008 | |
2009 | /* If the current block started a new region, make sure that only | |
2010 | the entry block of the new region is associated with this region. | |
2011 | Other successors are still part of the old region. */ | |
2012 | if (old_region != region && e->dest != region->entry_block) | |
f1f41a6c | 2013 | bb_regions[e->dest->index] = old_region; |
4c0315d0 | 2014 | else |
f1f41a6c | 2015 | bb_regions[e->dest->index] = region; |
4c0315d0 | 2016 | } |
2017 | } | |
f1f41a6c | 2018 | while (!queue.is_empty ()); |
4c0315d0 | 2019 | BITMAP_FREE (visited_blocks); |
2020 | } | |
2021 | ||
2022 | /* The "gate" function for all transactional memory expansion and optimization | |
2023 | passes. We collect region information for each top-level transaction, and | |
2024 | if we don't find any, we skip all of the TM passes. Each region will have | |
2025 | all of the exit blocks recorded, and the originating statement. */ | |
2026 | ||
2027 | static bool | |
2028 | gate_tm_init (void) | |
2029 | { | |
2030 | if (!flag_tm) | |
2031 | return false; | |
2032 | ||
2033 | calculate_dominance_info (CDI_DOMINATORS); | |
2034 | bitmap_obstack_initialize (&tm_obstack); | |
2035 | ||
2036 | /* If the function is a TM_CLONE, then the entire function is the region. */ | |
2037 | if (decl_is_tm_clone (current_function_decl)) | |
2038 | { | |
2039 | struct tm_region *region = (struct tm_region *) | |
2040 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
2041 | memset (region, 0, sizeof (*region)); | |
34154e27 | 2042 | region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 2043 | /* For a clone, the entire function is the region. But even if |
2044 | we don't need to record any exit blocks, we may need to | |
2045 | record irrevocable blocks. */ | |
2046 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
2047 | ||
2048 | tm_region_init (region); | |
2049 | } | |
2050 | else | |
2051 | { | |
2052 | tm_region_init (NULL); | |
2053 | ||
2054 | /* If we didn't find any regions, cleanup and skip the whole tree | |
2055 | of tm-related optimizations. */ | |
2056 | if (all_tm_regions == NULL) | |
2057 | { | |
2058 | bitmap_obstack_release (&tm_obstack); | |
2059 | return false; | |
2060 | } | |
2061 | } | |
2062 | ||
2063 | return true; | |
2064 | } | |
2065 | ||
7620bc82 | 2066 | namespace { |
2067 | ||
2068 | const pass_data pass_data_tm_init = | |
cbe8bda8 | 2069 | { |
2070 | GIMPLE_PASS, /* type */ | |
2071 | "*tminit", /* name */ | |
2072 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 2073 | TV_TRANS_MEM, /* tv_id */ |
2074 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
2075 | 0, /* properties_provided */ | |
2076 | 0, /* properties_destroyed */ | |
2077 | 0, /* todo_flags_start */ | |
2078 | 0, /* todo_flags_finish */ | |
4c0315d0 | 2079 | }; |
cbe8bda8 | 2080 | |
7620bc82 | 2081 | class pass_tm_init : public gimple_opt_pass |
cbe8bda8 | 2082 | { |
2083 | public: | |
9af5ce0c | 2084 | pass_tm_init (gcc::context *ctxt) |
2085 | : gimple_opt_pass (pass_data_tm_init, ctxt) | |
cbe8bda8 | 2086 | {} |
2087 | ||
2088 | /* opt_pass methods: */ | |
31315c24 | 2089 | virtual bool gate (function *) { return gate_tm_init (); } |
cbe8bda8 | 2090 | |
2091 | }; // class pass_tm_init | |
2092 | ||
7620bc82 | 2093 | } // anon namespace |
2094 | ||
cbe8bda8 | 2095 | gimple_opt_pass * |
2096 | make_pass_tm_init (gcc::context *ctxt) | |
2097 | { | |
2098 | return new pass_tm_init (ctxt); | |
2099 | } | |
4c0315d0 | 2100 | \f |
2101 | /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region | |
2102 | represented by REGION. */ | |
2103 | ||
2104 | static inline void | |
2105 | transaction_subcode_ior (struct tm_region *region, unsigned flags) | |
2106 | { | |
2107 | if (region && region->transaction_stmt) | |
2108 | { | |
1a91d914 | 2109 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
2110 | flags |= gimple_transaction_subcode (transaction_stmt); | |
2111 | gimple_transaction_set_subcode (transaction_stmt, flags); | |
4c0315d0 | 2112 | } |
2113 | } | |
2114 | ||
2115 | /* Construct a memory load in a transactional context. Return the | |
2116 | gimple statement performing the load, or NULL if there is no | |
2117 | TM_LOAD builtin of the appropriate size to do the load. | |
2118 | ||
2119 | LOC is the location to use for the new statement(s). */ | |
2120 | ||
1a91d914 | 2121 | static gcall * |
4c0315d0 | 2122 | build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2123 | { | |
2124 | enum built_in_function code = END_BUILTINS; | |
2125 | tree t, type = TREE_TYPE (rhs), decl; | |
1a91d914 | 2126 | gcall *gcall; |
4c0315d0 | 2127 | |
2128 | if (type == float_type_node) | |
2129 | code = BUILT_IN_TM_LOAD_FLOAT; | |
2130 | else if (type == double_type_node) | |
2131 | code = BUILT_IN_TM_LOAD_DOUBLE; | |
2132 | else if (type == long_double_type_node) | |
2133 | code = BUILT_IN_TM_LOAD_LDOUBLE; | |
2134 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2135 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2136 | { |
6a0712d4 | 2137 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2138 | { |
2139 | case 1: | |
2140 | code = BUILT_IN_TM_LOAD_1; | |
2141 | break; | |
2142 | case 2: | |
2143 | code = BUILT_IN_TM_LOAD_2; | |
2144 | break; | |
2145 | case 4: | |
2146 | code = BUILT_IN_TM_LOAD_4; | |
2147 | break; | |
2148 | case 8: | |
2149 | code = BUILT_IN_TM_LOAD_8; | |
2150 | break; | |
2151 | } | |
2152 | } | |
2153 | ||
2154 | if (code == END_BUILTINS) | |
2155 | { | |
2156 | decl = targetm.vectorize.builtin_tm_load (type); | |
2157 | if (!decl) | |
2158 | return NULL; | |
2159 | } | |
2160 | else | |
2161 | decl = builtin_decl_explicit (code); | |
2162 | ||
2163 | t = gimplify_addr (gsi, rhs); | |
2164 | gcall = gimple_build_call (decl, 1, t); | |
2165 | gimple_set_location (gcall, loc); | |
2166 | ||
2167 | t = TREE_TYPE (TREE_TYPE (decl)); | |
2168 | if (useless_type_conversion_p (type, t)) | |
2169 | { | |
2170 | gimple_call_set_lhs (gcall, lhs); | |
2171 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2172 | } | |
2173 | else | |
2174 | { | |
42acab1c | 2175 | gimple *g; |
4c0315d0 | 2176 | tree temp; |
2177 | ||
f9e245b2 | 2178 | temp = create_tmp_reg (t); |
4c0315d0 | 2179 | gimple_call_set_lhs (gcall, temp); |
2180 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2181 | ||
2182 | t = fold_build1 (VIEW_CONVERT_EXPR, type, temp); | |
2183 | g = gimple_build_assign (lhs, t); | |
2184 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2185 | } | |
2186 | ||
2187 | return gcall; | |
2188 | } | |
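/* For example, assuming a 4-byte integer load

     x = *p;

   the code above produces, roughly,

     x = __builtin__ITM_RU4 (p);

   inserting a VIEW_CONVERT_EXPR through a temporary when the
   builtin's return type does not match the loaded type exactly.
   (The builtin shown is the one selected for a 4-byte size; other
   sizes and the float/double/vector cases pick different entry
   points, as computed above.)  */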
2189 | ||
2190 | ||
2191 | /* Similarly for storing TYPE in a transactional context. */ | |
2192 | ||
1a91d914 | 2193 | static gcall * |
4c0315d0 | 2194 | build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2195 | { | |
2196 | enum built_in_function code = END_BUILTINS; | |
2197 | tree t, fn, type = TREE_TYPE (rhs), simple_type; | |
1a91d914 | 2198 | gcall *gcall; |
4c0315d0 | 2199 | |
2200 | if (type == float_type_node) | |
2201 | code = BUILT_IN_TM_STORE_FLOAT; | |
2202 | else if (type == double_type_node) | |
2203 | code = BUILT_IN_TM_STORE_DOUBLE; | |
2204 | else if (type == long_double_type_node) | |
2205 | code = BUILT_IN_TM_STORE_LDOUBLE; | |
2206 | else if (TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 2207 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2208 | { |
6a0712d4 | 2209 | switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) |
4c0315d0 | 2210 | { |
2211 | case 1: | |
2212 | code = BUILT_IN_TM_STORE_1; | |
2213 | break; | |
2214 | case 2: | |
2215 | code = BUILT_IN_TM_STORE_2; | |
2216 | break; | |
2217 | case 4: | |
2218 | code = BUILT_IN_TM_STORE_4; | |
2219 | break; | |
2220 | case 8: | |
2221 | code = BUILT_IN_TM_STORE_8; | |
2222 | break; | |
2223 | } | |
2224 | } | |
2225 | ||
2226 | if (code == END_BUILTINS) | |
2227 | { | |
2228 | fn = targetm.vectorize.builtin_tm_store (type); | |
2229 | if (!fn) | |
2230 | return NULL; | |
2231 | } | |
2232 | else | |
2233 | fn = builtin_decl_explicit (code); | |
2234 | ||
2235 | simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)))); | |
2236 | ||
2237 | if (TREE_CODE (rhs) == CONSTRUCTOR) | |
2238 | { | |
2239 | /* Handle the easy initialization to zero. */ | |
f1f41a6c | 2240 | if (!CONSTRUCTOR_ELTS (rhs)) |
4c0315d0 | 2241 | rhs = build_int_cst (simple_type, 0); |
2242 | else | |
2243 | { | |
2244 | /* ...otherwise punt to the caller and probably use | |
2245 | BUILT_IN_TM_MEMMOVE, because we can't wrap a | |
2246 | VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce | |
2247 | valid gimple. */ | |
2248 | return NULL; | |
2249 | } | |
2250 | } | |
2251 | else if (!useless_type_conversion_p (simple_type, type)) | |
2252 | { | |
42acab1c | 2253 | gimple *g; |
4c0315d0 | 2254 | tree temp; |
2255 | ||
f9e245b2 | 2256 | temp = create_tmp_reg (simple_type); |
4c0315d0 | 2257 | t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs); |
2258 | g = gimple_build_assign (temp, t); | |
2259 | gimple_set_location (g, loc); | |
2260 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2261 | ||
2262 | rhs = temp; | |
2263 | } | |
2264 | ||
2265 | t = gimplify_addr (gsi, lhs); | |
2266 | gcall = gimple_build_call (fn, 2, t, rhs); | |
2267 | gimple_set_location (gcall, loc); | |
2268 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2269 | ||
2270 | return gcall; | |
2271 | } | |
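/* Likewise for a 4-byte integer store: assuming

     *p = x;

   the result is, roughly,

     __builtin__ITM_WU4 (p, x);

   with X first copied through a VIEW_CONVERT_EXPR temporary if its
   type is not directly compatible with the builtin's argument.  */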
2272 | ||
2273 | ||
2274 | /* Expand an assignment statement into transactional builtins. */ | |
2275 | ||
2276 | static void | |
2277 | expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi) | |
2278 | { | |
42acab1c | 2279 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 2280 | location_t loc = gimple_location (stmt); |
2281 | tree lhs = gimple_assign_lhs (stmt); | |
2282 | tree rhs = gimple_assign_rhs1 (stmt); | |
2283 | bool store_p = requires_barrier (region->entry_block, lhs, NULL); | |
2284 | bool load_p = requires_barrier (region->entry_block, rhs, NULL); | |
42acab1c | 2285 | gimple *gcall = NULL; |
4c0315d0 | 2286 | |
2287 | if (!load_p && !store_p) | |
2288 | { | |
2289 | /* Add thread private addresses to log if applicable. */ | |
2290 | requires_barrier (region->entry_block, lhs, stmt); | |
2291 | gsi_next (gsi); | |
2292 | return; | |
2293 | } | |
2294 | ||
0cd02a19 | 2295 | // Remove original load/store statement. |
4c0315d0 | 2296 | gsi_remove (gsi, true); |
2297 | ||
2298 | if (load_p && !store_p) | |
2299 | { | |
2300 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2301 | gcall = build_tm_load (loc, lhs, rhs, gsi); | |
2302 | } | |
2303 | else if (store_p && !load_p) | |
2304 | { | |
2305 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2306 | gcall = build_tm_store (loc, lhs, rhs, gsi); | |
2307 | } | |
2308 | if (!gcall) | |
2309 | { | |
ea580cf7 | 2310 | tree lhs_addr, rhs_addr, tmp; |
4c0315d0 | 2311 | |
2312 | if (load_p) | |
2313 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2314 | if (store_p) | |
2315 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2316 | ||
2317 | /* ??? Figure out if there's any possible overlap between the LHS | |
2318 | and the RHS and if not, use MEMCPY. */ | |
ea580cf7 | 2319 | |
28098b5b | 2320 | if (load_p && is_gimple_reg (lhs)) |
ea580cf7 | 2321 | { |
f9e245b2 | 2322 | tmp = create_tmp_var (TREE_TYPE (lhs)); |
ea580cf7 | 2323 | lhs_addr = build_fold_addr_expr (tmp); |
2324 | } | |
2325 | else | |
2326 | { | |
2327 | tmp = NULL_TREE; | |
2328 | lhs_addr = gimplify_addr (gsi, lhs); | |
2329 | } | |
4c0315d0 | 2330 | rhs_addr = gimplify_addr (gsi, rhs); |
2331 | gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE), | |
2332 | 3, lhs_addr, rhs_addr, | |
2333 | TYPE_SIZE_UNIT (TREE_TYPE (lhs))); | |
2334 | gimple_set_location (gcall, loc); | |
2335 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
ea580cf7 | 2336 | |
2337 | if (tmp) | |
2338 | { | |
2339 | gcall = gimple_build_assign (lhs, tmp); | |
2340 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2341 | } | |
4c0315d0 | 2342 | } |
2343 | ||
2344 | /* Now that we have the load/store in its instrumented form, add | |
2345 | thread private addresses to the log if applicable. */ | |
2346 | if (!store_p) | |
2347 | requires_barrier (region->entry_block, lhs, gcall); | |
2348 | ||
0cd02a19 | 2349 | // The calls to build_tm_{store,load} above inserted the instrumented |
2350 | // call into the stream. | |
2351 | // gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
4c0315d0 | 2352 | } |
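/* When no sized builtin applies, e.g. for a hypothetical aggregate
   copy inside a transaction,

     struct S a, b;
     ...
     a = b;

   the fallback above emits, schematically,

     __builtin__ITM_memmoveRtWt (&a, &b, sizeof (struct S));

   copying through an extra temporary when the LHS is a gimple
   register and so has no address of its own.  */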
2353 | ||
2354 | ||
2355 | /* Expand a call statement as appropriate for a transaction. That is, | |
2356 | either verify that the call does not affect the transaction, or | |
2357 | redirect the call to a clone that handles transactions, or change | |
2358 | the transaction state to IRREVOCABLE. Return true if the call is | |
2359 | one of the builtins that end a transaction. */ | |
2360 | ||
2361 | static bool | |
2362 | expand_call_tm (struct tm_region *region, | |
2363 | gimple_stmt_iterator *gsi) | |
2364 | { | |
1a91d914 | 2365 | gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi)); |
4c0315d0 | 2366 | tree lhs = gimple_call_lhs (stmt); |
2367 | tree fn_decl; | |
2368 | struct cgraph_node *node; | |
2369 | bool retval = false; | |
2370 | ||
2371 | fn_decl = gimple_call_fndecl (stmt); | |
2372 | ||
2373 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY) | |
2374 | || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE)) | |
2375 | transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD); | |
2376 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET)) | |
2377 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2378 | ||
2379 | if (is_tm_pure_call (stmt)) | |
2380 | return false; | |
2381 | ||
2382 | if (fn_decl) | |
2383 | retval = is_tm_ending_fndecl (fn_decl); | |
2384 | if (!retval) | |
2385 | { | |
2386 | /* Assume all non-const/pure calls write to memory, except | |
2387 | transaction ending builtins. */ | |
2388 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2389 | } | |
2390 | ||
2391 | /* For indirect calls, we already generated a call into the runtime. */ | |
2392 | if (!fn_decl) | |
2393 | { | |
2394 | tree fn = gimple_call_fn (stmt); | |
2395 | ||
2396 | /* We are guaranteed never to go irrevocable on a safe or pure | |
2397 | call, and the pure call was handled above. */ | |
2398 | if (is_tm_safe (fn)) | |
2399 | return false; | |
2400 | else | |
2401 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2402 | ||
2403 | return false; | |
2404 | } | |
2405 | ||
415d1b9a | 2406 | node = cgraph_node::get (fn_decl); |
fce5ce8e | 2407 | /* All calls should have cgraph here. */ |
2408 | if (!node) | |
2409 | { | |
2410 | /* We can have a nodeless call here if some pass after IPA-tm | |
2411 | added uninstrumented calls. For example, loop distribution | |
2412 | can transform certain loop constructs into __builtin_mem* | |
2413 | calls. In this case, see if we have a suitable TM | |
2414 | replacement and fill in the gaps. */ | |
2415 | gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL); | |
2416 | enum built_in_function code = DECL_FUNCTION_CODE (fn_decl); | |
2417 | gcc_assert (code == BUILT_IN_MEMCPY | |
2418 | || code == BUILT_IN_MEMMOVE | |
2419 | || code == BUILT_IN_MEMSET); | |
2420 | ||
2421 | tree repl = find_tm_replacement_function (fn_decl); | |
2422 | if (repl) | |
2423 | { | |
2424 | gimple_call_set_fndecl (stmt, repl); | |
2425 | update_stmt (stmt); | |
415d1b9a | 2426 | node = cgraph_node::create (repl); |
fce5ce8e | 2427 | node->local.tm_may_enter_irr = false; |
2428 | return expand_call_tm (region, gsi); | |
2429 | } | |
2430 | gcc_unreachable (); | |
2431 | } | |
4c0315d0 | 2432 | if (node->local.tm_may_enter_irr) |
2433 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2434 | ||
2435 | if (is_tm_abort (fn_decl)) | |
2436 | { | |
2437 | transaction_subcode_ior (region, GTMA_HAVE_ABORT); | |
2438 | return true; | |
2439 | } | |
2440 | ||
2441 | /* Instrument the store if needed. | |
2442 | ||
2443 | If the assignment happens inside the function call (return slot | |
2444 | optimization), there is no instrumentation to be done, since | |
2445 | the callee should have done the right thing. */ | |
2446 | if (lhs && requires_barrier (region->entry_block, lhs, stmt) | |
2447 | && !gimple_call_return_slot_opt_p (stmt)) | |
2448 | { | |
f9e245b2 | 2449 | tree tmp = create_tmp_reg (TREE_TYPE (lhs)); |
4c0315d0 | 2450 | location_t loc = gimple_location (stmt); |
2451 | edge fallthru_edge = NULL; | |
1a91d914 | 2452 | gassign *assign_stmt; |
4c0315d0 | 2453 | |
2454 | /* Remember if the call was going to throw. */ | |
2455 | if (stmt_can_throw_internal (stmt)) | |
2456 | { | |
2457 | edge_iterator ei; | |
2458 | edge e; | |
2459 | basic_block bb = gimple_bb (stmt); | |
2460 | ||
2461 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2462 | if (e->flags & EDGE_FALLTHRU) | |
2463 | { | |
2464 | fallthru_edge = e; | |
2465 | break; | |
2466 | } | |
2467 | } | |
2468 | ||
2469 | gimple_call_set_lhs (stmt, tmp); | |
2470 | update_stmt (stmt); | |
1a91d914 | 2471 | assign_stmt = gimple_build_assign (lhs, tmp); |
2472 | gimple_set_location (assign_stmt, loc); | |
4c0315d0 | 2473 | |
2474 | /* We cannot throw in the middle of a BB. If the call was going | |
2475 | to throw, place the instrumentation on the fallthru edge, so | |
2476 | the call remains the last statement in the block. */ | |
2477 | if (fallthru_edge) | |
2478 | { | |
1a91d914 | 2479 | gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt); |
4c0315d0 | 2480 | gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq); |
2481 | expand_assign_tm (region, &fallthru_gsi); | |
2482 | gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq); | |
2483 | pending_edge_inserts_p = true; | |
2484 | } | |
2485 | else | |
2486 | { | |
1a91d914 | 2487 | gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING); |
4c0315d0 | 2488 | expand_assign_tm (region, gsi); |
2489 | } | |
2490 | ||
2491 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2492 | } | |
2493 | ||
2494 | return retval; | |
2495 | } | |
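/* For example, assuming GLOBAL requires a store barrier, a call

     global = foo ();

   is rewritten above into, roughly,

     tmp = foo ();
     global = tmp;   // then instrumented via expand_assign_tm

   where the assignment is placed on the fallthru edge instead if FOO
   can throw, so that the call stays last in its block.  */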
2496 | ||
2497 | ||
2498 | /* Expand all statements in BB as appropriate for being inside | |
2499 | a transaction. */ | |
2500 | ||
2501 | static void | |
2502 | expand_block_tm (struct tm_region *region, basic_block bb) | |
2503 | { | |
2504 | gimple_stmt_iterator gsi; | |
2505 | ||
2506 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) | |
2507 | { | |
42acab1c | 2508 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 2509 | switch (gimple_code (stmt)) |
2510 | { | |
2511 | case GIMPLE_ASSIGN: | |
2512 | /* Only memory reads/writes need to be instrumented. */ | |
73fd1e9a | 2513 | if (gimple_assign_single_p (stmt) |
2514 | && !gimple_clobber_p (stmt)) | |
4c0315d0 | 2515 | { |
2516 | expand_assign_tm (region, &gsi); | |
2517 | continue; | |
2518 | } | |
2519 | break; | |
2520 | ||
2521 | case GIMPLE_CALL: | |
2522 | if (expand_call_tm (region, &gsi)) | |
2523 | return; | |
2524 | break; | |
2525 | ||
2526 | case GIMPLE_ASM: | |
2527 | gcc_unreachable (); | |
2528 | ||
2529 | default: | |
2530 | break; | |
2531 | } | |
2532 | if (!gsi_end_p (gsi)) | |
2533 | gsi_next (&gsi); | |
2534 | } | |
2535 | } | |
2536 | ||
2537 | /* Return the list of basic-blocks in REGION. | |
2538 | ||
2539 | STOP_AT_IRREVOCABLE_P is true if the caller is uninterested in blocks | |
79f4a793 | 2540 | following a TM_IRREVOCABLE call. |
2541 | ||
2542 | INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the | |
2543 | uninstrumented code path blocks in the list of basic blocks | |
2544 | returned, false otherwise. */ | |
4c0315d0 | 2545 | |
f1f41a6c | 2546 | static vec<basic_block> |
4c0315d0 | 2547 | get_tm_region_blocks (basic_block entry_block, |
2548 | bitmap exit_blocks, | |
2549 | bitmap irr_blocks, | |
2550 | bitmap all_region_blocks, | |
79f4a793 | 2551 | bool stop_at_irrevocable_p, |
2552 | bool include_uninstrumented_p = true) | |
4c0315d0 | 2553 | { |
1e094109 | 2554 | vec<basic_block> bbs = vNULL; |
4c0315d0 | 2555 | unsigned i; |
2556 | edge e; | |
2557 | edge_iterator ei; | |
2558 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
2559 | ||
2560 | i = 0; | |
f1f41a6c | 2561 | bbs.safe_push (entry_block); |
4c0315d0 | 2562 | bitmap_set_bit (visited_blocks, entry_block->index); |
2563 | ||
2564 | do | |
2565 | { | |
f1f41a6c | 2566 | basic_block bb = bbs[i++]; |
4c0315d0 | 2567 | |
2568 | if (exit_blocks && | |
2569 | bitmap_bit_p (exit_blocks, bb->index)) | |
2570 | continue; | |
2571 | ||
2572 | if (stop_at_irrevocable_p | |
2573 | && irr_blocks | |
2574 | && bitmap_bit_p (irr_blocks, bb->index)) | |
2575 | continue; | |
2576 | ||
2577 | FOR_EACH_EDGE (e, ei, bb->succs) | |
79f4a793 | 2578 | if ((include_uninstrumented_p |
2579 | || !(e->flags & EDGE_TM_UNINSTRUMENTED)) | |
2580 | && !bitmap_bit_p (visited_blocks, e->dest->index)) | |
4c0315d0 | 2581 | { |
2582 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2583 | bbs.safe_push (e->dest); |
4c0315d0 | 2584 | } |
2585 | } | |
f1f41a6c | 2586 | while (i < bbs.length ()); |
4c0315d0 | 2587 | |
2588 | if (all_region_blocks) | |
2589 | bitmap_ior_into (all_region_blocks, visited_blocks); | |
2590 | ||
2591 | BITMAP_FREE (visited_blocks); | |
2592 | return bbs; | |
2593 | } | |
2594 | ||
79f4a793 | 2595 | // Callback data for collect_bb2reg. |
2596 | struct bb2reg_stuff | |
2597 | { | |
04009ada | 2598 | vec<tm_region *> *bb2reg; |
79f4a793 | 2599 | bool include_uninstrumented_p; |
2600 | }; | |
2601 | ||
0cd02a19 | 2602 | // Callback for expand_regions, collect innermost region data for each bb. |
2603 | static void * | |
2604 | collect_bb2reg (struct tm_region *region, void *data) | |
2605 | { | |
79f4a793 | 2606 | struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data; |
04009ada | 2607 | vec<tm_region *> *bb2reg = stuff->bb2reg; |
f1f41a6c | 2608 | vec<basic_block> queue; |
0cd02a19 | 2609 | unsigned int i; |
2610 | basic_block bb; | |
2611 | ||
2612 | queue = get_tm_region_blocks (region->entry_block, | |
2613 | region->exit_blocks, | |
2614 | region->irr_blocks, | |
2615 | NULL, | |
79f4a793 | 2616 | /*stop_at_irr_p=*/true, |
2617 | stuff->include_uninstrumented_p); | |
0cd02a19 | 2618 | |
2619 | // We expect expand_region to perform a post-order traversal of the region | |
2620 | // tree. Therefore the last region seen for any bb is the innermost. | |
f1f41a6c | 2621 | FOR_EACH_VEC_ELT (queue, i, bb) |
2622 | (*bb2reg)[bb->index] = region; | |
0cd02a19 | 2623 | |
f1f41a6c | 2624 | queue.release (); |
0cd02a19 | 2625 | return NULL; |
2626 | } | |
2627 | ||
2628 | // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to | |
2629 | // which a basic block belongs. Note that the uninstrumented code | |
2630 | // paths of a region are ignored unless INCLUDE_UNINSTRUMENTED_P |
2631 | // is true. |
0cd02a19 | 2632 | // |
2633 | // ??? This data is very similar to the bb_regions array that is collected | |
2634 | // during tm_region_init. Or, rather, this data is similar to what could | |
2635 | // be used within tm_region_init. The actual computation in tm_region_init | |
2636 | // begins and ends with bb_regions entirely full of NULL pointers, due to | |
2637 | // the way in which pointers are swapped in and out of the array. | |
2638 | // | |
2639 | // ??? Our callers expect that blocks are not shared between transactions. | |
2640 | // When the optimizers get too smart, and blocks are shared, then during | |
2641 | // the tm_mark phase we'll add log entries to only one of the two transactions, | |
2642 | // and in the tm_edge phase we'll add edges to the CFG that create invalid | |
2643 | // cycles. The symptom being SSA defs that do not dominate their uses. | |
2644 | // Note that the optimizers were locally correct with their transformation, | |
2645 | // as we have no info within the program that suggests that the blocks cannot | |
2646 | // be shared. | |
2647 | // | |
2648 | // ??? There is currently a hack inside tree-ssa-pre.c to work around the | |
2649 | // only known instance of this block sharing. | |
2650 | ||
04009ada | 2651 | static vec<tm_region *> |
79f4a793 | 2652 | get_bb_regions_instrumented (bool traverse_clones, |
2653 | bool include_uninstrumented_p) | |
0cd02a19 | 2654 | { |
fe672ac0 | 2655 | unsigned n = last_basic_block_for_fn (cfun); |
79f4a793 | 2656 | struct bb2reg_stuff stuff; |
04009ada | 2657 | vec<tm_region *> ret; |
0cd02a19 | 2658 | |
f1f41a6c | 2659 | ret.create (n); |
2660 | ret.safe_grow_cleared (n); | |
79f4a793 | 2661 | stuff.bb2reg = &ret; |
2662 | stuff.include_uninstrumented_p = include_uninstrumented_p; | |
2663 | expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones); | |
0cd02a19 | 2664 | |
2665 | return ret; | |
2666 | } | |
2667 | ||
de60f90c | 2668 | /* Set the IN_TRANSACTION for all gimple statements that appear in a |
2669 | transaction. */ | |
2670 | ||
2671 | void | |
2672 | compute_transaction_bits (void) | |
2673 | { | |
2674 | struct tm_region *region; | |
f1f41a6c | 2675 | vec<basic_block> queue; |
de60f90c | 2676 | unsigned int i; |
de60f90c | 2677 | basic_block bb; |
2678 | ||
2679 | /* ?? Perhaps we need to abstract gate_tm_init further, because we | |
2680 | certainly don't need it to calculate CDI_DOMINATOR info. */ | |
2681 | gate_tm_init (); | |
2682 | ||
fc00614f | 2683 | FOR_EACH_BB_FN (bb, cfun) |
6ad451f8 | 2684 | bb->flags &= ~BB_IN_TRANSACTION; |
2685 | ||
de60f90c | 2686 | for (region = all_tm_regions; region; region = region->next) |
2687 | { | |
2688 | queue = get_tm_region_blocks (region->entry_block, | |
2689 | region->exit_blocks, | |
2690 | region->irr_blocks, | |
2691 | NULL, | |
2692 | /*stop_at_irr_p=*/true); | |
f1f41a6c | 2693 | for (i = 0; queue.iterate (i, &bb); ++i) |
6ad451f8 | 2694 | bb->flags |= BB_IN_TRANSACTION; |
f1f41a6c | 2695 | queue.release (); |
de60f90c | 2696 | } |
2697 | ||
2698 | if (all_tm_regions) | |
2699 | bitmap_obstack_release (&tm_obstack); | |
2700 | } | |
2701 | ||
0cd02a19 | 2702 | /* Replace the GIMPLE_TRANSACTION in this region with the corresponding |
2703 | call to BUILT_IN_TM_START. */ | |
2704 | ||
2705 | static void * | |
2706 | expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2707 | { | |
2708 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2709 | basic_block transaction_bb = gimple_bb (region->transaction_stmt); | |
2710 | tree tm_state = region->tm_state; | |
2711 | tree tm_state_type = TREE_TYPE (tm_state); | |
2712 | edge abort_edge = NULL; | |
2713 | edge inst_edge = NULL; | |
2714 | edge uninst_edge = NULL; | |
2715 | edge fallthru_edge = NULL; | |
2716 | ||
2717 | // Identify the various successors of the transaction start. | |
2718 | { | |
2719 | edge_iterator i; | |
2720 | edge e; | |
2721 | FOR_EACH_EDGE (e, i, transaction_bb->succs) | |
2722 | { | |
2723 | if (e->flags & EDGE_TM_ABORT) | |
2724 | abort_edge = e; | |
2725 | else if (e->flags & EDGE_TM_UNINSTRUMENTED) | |
2726 | uninst_edge = e; | |
2727 | else | |
2728 | inst_edge = e; | |
2729 | if (e->flags & EDGE_FALLTHRU) | |
2730 | fallthru_edge = e; | |
2731 | } | |
2732 | } | |
2733 | ||
2734 | /* ??? There are plenty of bits here we're not computing. */ | |
2735 | { | |
1a91d914 | 2736 | int subcode = gimple_transaction_subcode (region->get_transaction_stmt ()); |
0cd02a19 | 2737 | int flags = 0; |
2738 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2739 | flags |= PR_DOESGOIRREVOCABLE; | |
2740 | if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0) | |
2741 | flags |= PR_HASNOIRREVOCABLE; | |
2742 | /* If the transaction does not have an abort in lexical scope and is not | |
2743 | marked as an outer transaction, then it will never abort. */ | |
2744 | if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0) | |
2745 | flags |= PR_HASNOABORT; | |
2746 | if ((subcode & GTMA_HAVE_STORE) == 0) | |
2747 | flags |= PR_READONLY; | |
1910089e | 2748 | if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION)) |
0cd02a19 | 2749 | flags |= PR_INSTRUMENTEDCODE; |
2750 | if (uninst_edge) | |
2751 | flags |= PR_UNINSTRUMENTEDCODE; | |
2752 | if (subcode & GTMA_IS_OUTER) | |
2753 | region->original_transaction_was_outer = true; | |
2754 | tree t = build_int_cst (tm_state_type, flags); | |
1a91d914 | 2755 | gcall *call = gimple_build_call (tm_start, 1, t); |
0cd02a19 | 2756 | gimple_call_set_lhs (call, tm_state); |
2757 | gimple_set_location (call, gimple_location (region->transaction_stmt)); | |
2758 | ||
2759 | // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START. | |
2760 | gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb); | |
2761 | gcc_assert (gsi_stmt (gsi) == region->transaction_stmt); | |
2762 | gsi_insert_before (&gsi, call, GSI_SAME_STMT); | |
2763 | gsi_remove (&gsi, true); | |
2764 | region->transaction_stmt = call; | |
2765 | } | |
2766 | ||
2767 | // Generate log saves. | |
f1f41a6c | 2768 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2769 | tm_log_emit_saves (region->entry_block, transaction_bb); |
2770 | ||
2771 | // In the beginning, we've no tests to perform on transaction restart. | |
2772 | // Note that after this point, transaction_bb becomes the "most recent | |
2773 | // block containing tests for the transaction". | |
2774 | region->restart_block = region->entry_block; | |
2775 | ||
2776 | // Generate log restores. | |
f1f41a6c | 2777 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2778 | { |
2779 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2780 | basic_block code_bb = create_empty_bb (test_bb); | |
2781 | basic_block join_bb = create_empty_bb (code_bb); | |
b3083327 | 2782 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
2783 | add_bb_to_loop (code_bb, transaction_bb->loop_father); | |
2784 | add_bb_to_loop (join_bb, transaction_bb->loop_father); | |
0cd02a19 | 2785 | if (region->restart_block == region->entry_block) |
2786 | region->restart_block = test_bb; | |
2787 | ||
f9e245b2 | 2788 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2789 | tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES); |
42acab1c | 2790 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2791 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2792 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2793 | ||
2794 | t2 = build_int_cst (tm_state_type, 0); | |
2795 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2796 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2797 | ||
2798 | tm_log_emit_restores (region->entry_block, code_bb); | |
2799 | ||
2800 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2801 | edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE); | |
2802 | edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE); | |
2803 | redirect_edge_pred (fallthru_edge, join_bb); | |
2804 | ||
2805 | join_bb->frequency = test_bb->frequency = transaction_bb->frequency; | |
2806 | join_bb->count = test_bb->count = transaction_bb->count; | |
2807 | ||
2808 | ei->probability = PROB_ALWAYS; | |
2809 | et->probability = PROB_LIKELY; | |
2810 | ef->probability = PROB_UNLIKELY; | |
9af5ce0c | 2811 | et->count = apply_probability (test_bb->count, et->probability); |
2812 | ef->count = apply_probability (test_bb->count, ef->probability); | |
0cd02a19 | 2813 | |
2814 | code_bb->count = et->count; | |
2815 | code_bb->frequency = EDGE_FREQUENCY (et); | |
2816 | ||
2817 | transaction_bb = join_bb; | |
2818 | } | |
2819 | ||
2820 | // If we have an ABORT edge, create a test to perform the abort. | |
2821 | if (abort_edge) | |
2822 | { | |
2823 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2824 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2825 | if (region->restart_block == region->entry_block) |
2826 | region->restart_block = test_bb; | |
2827 | ||
f9e245b2 | 2828 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2829 | tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION); |
42acab1c | 2830 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2831 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2832 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2833 | ||
2834 | t2 = build_int_cst (tm_state_type, 0); | |
2835 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2836 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2837 | ||
2838 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2839 | test_bb->frequency = transaction_bb->frequency; | |
2840 | test_bb->count = transaction_bb->count; | |
2841 | ei->probability = PROB_ALWAYS; | |
2842 | ||
2843 | // Not the abort edge. If both are live, choose one at random, as | |
2844 | // we'll be fixing that up below. | |
2845 | redirect_edge_pred (fallthru_edge, test_bb); | |
2846 | fallthru_edge->flags = EDGE_FALSE_VALUE; | |
2847 | fallthru_edge->probability = PROB_VERY_LIKELY; | |
2848 | fallthru_edge->count | |
9af5ce0c | 2849 | = apply_probability (test_bb->count, fallthru_edge->probability); |
0cd02a19 | 2850 | |
2851 | // Abort/over edge. | |
2852 | redirect_edge_pred (abort_edge, test_bb); | |
2853 | abort_edge->flags = EDGE_TRUE_VALUE; | |
2854 | abort_edge->probability = PROB_VERY_UNLIKELY; | |
2855 | abort_edge->count | |
9af5ce0c | 2856 | = apply_probability (test_bb->count, abort_edge->probability); |
0cd02a19 | 2857 | |
2858 | transaction_bb = test_bb; | |
2859 | } | |
2860 | ||
2861 | // If we have both instrumented and uninstrumented code paths, select one. | |
2862 | if (inst_edge && uninst_edge) | |
2863 | { | |
2864 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2865 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2866 | if (region->restart_block == region->entry_block) |
2867 | region->restart_block = test_bb; | |
2868 | ||
f9e245b2 | 2869 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2870 | tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE); |
2871 | ||
42acab1c | 2872 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2873 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2874 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2875 | ||
2876 | t2 = build_int_cst (tm_state_type, 0); | |
2877 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2878 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2879 | ||
2880 | // Create the edge into test_bb first, as we want to copy values | |
2881 | // out of the fallthru edge. | |
2882 | edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags); | |
2883 | e->probability = fallthru_edge->probability; | |
2884 | test_bb->count = e->count = fallthru_edge->count; | |
2885 | test_bb->frequency = EDGE_FREQUENCY (e); | |
2886 | ||
2887 | // Now update the edges to the inst/uninst implementations. | |
2888 | // For now assume that the paths are equally likely. When using HTM, | |
2889 | // we'll try the uninst path first and fall back to the inst path if | |
2890 | // HTM buffers are exceeded. Without HTM we start with the inst path | |
2891 | // and use the uninst path when falling back to serial mode. | |
2892 | redirect_edge_pred (inst_edge, test_bb); | |
2893 | inst_edge->flags = EDGE_FALSE_VALUE; | |
2894 | inst_edge->probability = REG_BR_PROB_BASE / 2; | |
2895 | inst_edge->count | |
9af5ce0c | 2896 | = apply_probability (test_bb->count, inst_edge->probability); |
0cd02a19 | 2897 | |
2898 | redirect_edge_pred (uninst_edge, test_bb); | |
2899 | uninst_edge->flags = EDGE_TRUE_VALUE; | |
2900 | uninst_edge->probability = REG_BR_PROB_BASE / 2; | |
2901 | uninst_edge->count | |
9af5ce0c | 2902 | = apply_probability (test_bb->count, uninst_edge->probability); |
0cd02a19 | 2903 | } |
2904 | ||
2905 | // If we have no previous special cases, and we have PHIs at the beginning | |
2906 | // of the atomic region, this means we have a loop at the beginning of the | |
2907 | // atomic region that shares the first block. This can cause problems | |
2908 | // for the abnormal transaction-restart edges added in the tm_edges pass. | |
2909 | // Solve this by adding a new empty block to receive the abnormal edges. | |
2910 | if (region->restart_block == region->entry_block | |
2911 | && phi_nodes (region->entry_block)) | |
2912 | { | |
2913 | basic_block empty_bb = create_empty_bb (transaction_bb); | |
2914 | region->restart_block = empty_bb; | |
b3083327 | 2915 | add_bb_to_loop (empty_bb, transaction_bb->loop_father); |
0cd02a19 | 2916 | |
2917 | redirect_edge_pred (fallthru_edge, empty_bb); | |
2918 | make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU); | |
2919 | } | |
2920 | ||
2921 | return NULL; | |
2922 | } | |
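/* Altogether, the transaction entry ends up looking roughly like

     tm_state = __builtin__ITM_beginTransaction (<PR_* properties>);
     if (tm_state & A_RESTORELIVEVARIABLES)
       <emit log restores>;
     if (tm_state & A_ABORTTRANSACTION)
       goto over;
     if (tm_state & A_RUNUNINSTRUMENTEDCODE)
       goto <uninstrumented path>;
     <instrumented path>

   where each test is emitted only when needed, as decided above.
   This sketch elides the block/edge bookkeeping and probabilities.  */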
2923 | ||
2924 | /* Generate the temporary to be used for the return value of | |
2925 | BUILT_IN_TM_START. */ | |
2926 | ||
2927 | static void * | |
2928 | generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2929 | { | |
2930 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2931 | region->tm_state = | |
2932 | create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state"); | |
2933 | ||
2934 | // Reset the subcode, post optimizations. We'll fill this in | |
2935 | // again as we process blocks. | |
2936 | if (region->exit_blocks) | |
2937 | { | |
1a91d914 | 2938 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
2939 | unsigned int subcode = gimple_transaction_subcode (transaction_stmt); | |
0cd02a19 | 2940 | |
2941 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2942 | subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE | |
1910089e | 2943 | | GTMA_MAY_ENTER_IRREVOCABLE |
2944 | | GTMA_HAS_NO_INSTRUMENTATION); | |
0cd02a19 | 2945 | else |
2946 | subcode &= GTMA_DECLARATION_MASK; | |
1a91d914 | 2947 | gimple_transaction_set_subcode (transaction_stmt, subcode); |
0cd02a19 | 2948 | } |
2949 | ||
2950 | return NULL; | |
2951 | } | |
2952 | ||
2953 | // Propagate flags from inner transactions outwards. | |
2954 | static void | |
2955 | propagate_tm_flags_out (struct tm_region *region) | |
2956 | { | |
2957 | if (region == NULL) | |
2958 | return; | |
2959 | propagate_tm_flags_out (region->inner); | |
2960 | ||
2961 | if (region->outer && region->outer->transaction_stmt) | |
2962 | { | |
1a91d914 | 2963 | unsigned s |
2964 | = gimple_transaction_subcode (region->get_transaction_stmt ()); | |
0cd02a19 | 2965 | s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE |
2966 | | GTMA_MAY_ENTER_IRREVOCABLE); | |
1a91d914 | 2967 | s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ()); |
2968 | gimple_transaction_set_subcode (region->outer->get_transaction_stmt (), | |
2969 | s); | |
0cd02a19 | 2970 | } |
2971 | ||
2972 | propagate_tm_flags_out (region->next); | |
2973 | } | |
2974 | ||
4c0315d0 | 2975 | /* Entry point to the MARK phase of TM expansion. Here we replace |
2976 | transactional memory statements with calls to builtins, and function | |
2977 | calls with their transactional clones (if available). But we don't | |
2978 | yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ | |
2979 | ||
2980 | static unsigned int | |
2981 | execute_tm_mark (void) | |
2982 | { | |
4c0315d0 | 2983 | pending_edge_inserts_p = false; |
2984 | ||
00d83cc8 | 2985 | expand_regions (all_tm_regions, generate_tm_state, NULL, |
2986 | /*traverse_clones=*/true); | |
4c0315d0 | 2987 | |
0cd02a19 | 2988 | tm_log_init (); |
4c0315d0 | 2989 | |
04009ada | 2990 | vec<tm_region *> bb_regions |
79f4a793 | 2991 | = get_bb_regions_instrumented (/*traverse_clones=*/true, |
2992 | /*include_uninstrumented_p=*/false); | |
0cd02a19 | 2993 | struct tm_region *r; |
2994 | unsigned i; | |
4c0315d0 | 2995 | |
0cd02a19 | 2996 | // Expand memory operations into calls into the runtime. |
2997 | // This collects log entries as well. | |
f1f41a6c | 2998 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
ded1a556 | 2999 | { |
3000 | if (r != NULL) | |
3001 | { | |
3002 | if (r->transaction_stmt) | |
3003 | { | |
1a91d914 | 3004 | unsigned sub |
3005 | = gimple_transaction_subcode (r->get_transaction_stmt ()); | |
ded1a556 | 3006 | |
3007 | /* If we're sure to go irrevocable, there won't be | |
3008 | anything to expand, since the run-time will go | |
3009 | irrevocable right away. */ | |
3010 | if (sub & GTMA_DOES_GO_IRREVOCABLE | |
3011 | && sub & GTMA_MAY_ENTER_IRREVOCABLE) | |
3012 | continue; | |
3013 | } | |
f5a6b05f | 3014 | expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
ded1a556 | 3015 | } |
3016 | } | |
0cd02a19 | 3017 | |
4aac6cf8 | 3018 | bb_regions.release (); |
3019 | ||
0cd02a19 | 3020 | // Propagate flags from inner transactions outwards. |
3021 | propagate_tm_flags_out (all_tm_regions); | |
3022 | ||
3023 | // Expand GIMPLE_TRANSACTIONs into calls into the runtime. | |
00d83cc8 | 3024 | expand_regions (all_tm_regions, expand_transaction, NULL, |
3025 | /*traverse_clones=*/false); | |
0cd02a19 | 3026 | |
3027 | tm_log_emit (); | |
3028 | tm_log_delete (); | |
4c0315d0 | 3029 | |
3030 | if (pending_edge_inserts_p) | |
3031 | gsi_commit_edge_inserts (); | |
0cd02a19 | 3032 | free_dominance_info (CDI_DOMINATORS); |
4c0315d0 | 3033 | return 0; |
3034 | } | |
3035 | ||
7620bc82 | 3036 | namespace { |
3037 | ||
3038 | const pass_data pass_data_tm_mark = | |
cbe8bda8 | 3039 | { |
3040 | GIMPLE_PASS, /* type */ | |
3041 | "tmmark", /* name */ | |
3042 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3043 | TV_TRANS_MEM, /* tv_id */ |
3044 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3045 | 0, /* properties_provided */ | |
3046 | 0, /* properties_destroyed */ | |
3047 | 0, /* todo_flags_start */ | |
8b88439e | 3048 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3049 | }; |
cbe8bda8 | 3050 | |
7620bc82 | 3051 | class pass_tm_mark : public gimple_opt_pass |
cbe8bda8 | 3052 | { |
3053 | public: | |
9af5ce0c | 3054 | pass_tm_mark (gcc::context *ctxt) |
3055 | : gimple_opt_pass (pass_data_tm_mark, ctxt) | |
cbe8bda8 | 3056 | {} |
3057 | ||
3058 | /* opt_pass methods: */ | |
65b0537f | 3059 | virtual unsigned int execute (function *) { return execute_tm_mark (); } |
cbe8bda8 | 3060 | |
3061 | }; // class pass_tm_mark | |
3062 | ||
7620bc82 | 3063 | } // anon namespace |
3064 | ||
cbe8bda8 | 3065 | gimple_opt_pass * |
3066 | make_pass_tm_mark (gcc::context *ctxt) | |
3067 | { | |
3068 | return new pass_tm_mark (ctxt); | |
3069 | } | |
4c0315d0 | 3070 | \f |
0cd02a19 | 3071 | |
3072 | /* Create an abnormal edge from STMT (at ITER) to DEST_BB, splitting
3073 | the block as necessary.  Adjust *PNEXT as needed for the split block. */
4c0315d0 | 3074 | |
3075 | static inline void | |
42acab1c | 3076 | split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb, |
0cd02a19 | 3077 | gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) |
4c0315d0 | 3078 | { |
0cd02a19 | 3079 | basic_block bb = gimple_bb (stmt); |
3080 | if (!gsi_one_before_end_p (iter)) | |
3081 | { | |
3082 | edge e = split_block (bb, stmt); | |
3083 | *pnext = gsi_start_bb (e->dest); | |
3084 | } | |
3085 | make_edge (bb, dest_bb, EDGE_ABNORMAL); | |
4c0315d0 | 3086 | |
0cd02a19 | 3087 | // Record the need for the edge for the benefit of the rtl passes. |
4c0315d0 | 3088 | if (cfun->gimple_df->tm_restart == NULL) |
b7aa58e4 | 3089 | cfun->gimple_df->tm_restart |
3090 | = hash_table<tm_restart_hasher>::create_ggc (31); | |
4c0315d0 | 3091 | |
0cd02a19 | 3092 | struct tm_restart_node dummy; |
4c0315d0 | 3093 | dummy.stmt = stmt; |
0cd02a19 | 3094 | dummy.label_or_list = gimple_block_label (dest_bb); |
3095 | ||
b7aa58e4 | 3096 | tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy, |
3097 | INSERT); | |
3098 | struct tm_restart_node *n = *slot; | |
4c0315d0 | 3099 | if (n == NULL) |
3100 | { | |
25a27413 | 3101 | n = ggc_alloc<tm_restart_node> (); |
4c0315d0 | 3102 | *n = dummy; |
3103 | } | |
3104 | else | |
3105 | { | |
3106 | tree old = n->label_or_list; | |
3107 | if (TREE_CODE (old) == LABEL_DECL) | |
0cd02a19 | 3108 | old = tree_cons (NULL, old, NULL); |
4c0315d0 | 3109 | n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); |
3110 | } | |
4c0315d0 | 3111 | } |
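/* Example of the label_or_list bookkeeping above: the first restart
   edge recorded for STMT stores a bare LABEL_DECL; when a second
   edge is recorded, the old label is first wrapped in a TREE_LIST
   and the new label consed onto the front, so the consumers in the
   RTL passes see either a single label or a list of them.  */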
3112 | ||
4c0315d0 | 3113 | /* Split block BB as necessary for every builtin function we added, and |
3114 | wire up the abnormal back edges implied by the transaction restart. */ | |
3115 | ||
3116 | static void | |
0cd02a19 | 3117 | expand_block_edges (struct tm_region *const region, basic_block bb) |
4c0315d0 | 3118 | { |
0cd02a19 | 3119 | gimple_stmt_iterator gsi, next_gsi; |
4c0315d0 | 3120 | |
0cd02a19 | 3121 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) |
4c0315d0 | 3122 | { |
42acab1c | 3123 | gimple *stmt = gsi_stmt (gsi); |
1a91d914 | 3124 | gcall *call_stmt; |
4c0315d0 | 3125 | |
0cd02a19 | 3126 | next_gsi = gsi; |
3127 | gsi_next (&next_gsi); | |
3128 | ||
3129 | // ??? Shouldn't we split for any non-pure, non-irrevocable function? | |
1a91d914 | 3130 | call_stmt = dyn_cast <gcall *> (stmt); |
3131 | if ((!call_stmt) | |
3132 | || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0) | |
0cd02a19 | 3133 | continue; |
3134 | ||
1a91d914 | 3135 | if (DECL_FUNCTION_CODE (gimple_call_fndecl (call_stmt)) |
3136 | == BUILT_IN_TM_ABORT) | |
4c0315d0 | 3137 | { |
0cd02a19 | 3138 | // If we have a ``__transaction_cancel [[outer]]'', there is only
3139 | // one abnormal edge: to the transaction marked OUTER. | |
3140 | // All compiler-generated instances of BUILT_IN_TM_ABORT have a | |
3141 | // constant argument, which we can examine here. Users invoking | |
3142 | // TM_ABORT directly get what they deserve. | |
1a91d914 | 3143 | tree arg = gimple_call_arg (call_stmt, 0); |
0cd02a19 | 3144 | if (TREE_CODE (arg) == INTEGER_CST |
3145 | && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 | |
3146 | && !decl_is_tm_clone (current_function_decl)) | |
4c0315d0 | 3147 | { |
0cd02a19 | 3148 | // Find the GTMA_IS_OUTER transaction. |
3149 | for (struct tm_region *o = region; o; o = o->outer) | |
3150 | if (o->original_transaction_was_outer) | |
3151 | { | |
1a91d914 | 3152 | split_bb_make_tm_edge (call_stmt, o->restart_block, |
0cd02a19 | 3153 | gsi, &next_gsi); |
3154 | break; | |
3155 | } | |
3156 | ||
3157 | // Otherwise, the front-end should have semantically checked | |
3158 | // outer aborts, but in either case the target region is not | |
3159 | // within this function. | |
3160 | continue; | |
4c0315d0 | 3161 | } |
3162 | ||
0cd02a19 | 3163 | // Non-outer TM aborts have an abnormal edge to the inner-most
3164 | // transaction, the one being aborted.
1a91d914 | 3165 | split_bb_make_tm_edge (call_stmt, region->restart_block, gsi, |
3166 | &next_gsi); | |
4c0315d0 | 3167 | } |
3168 | ||
0cd02a19 | 3169 | // All TM builtins have an abnormal edge to the outer-most transaction.
3170 | // We never restart inner transactions.  For tm clones, we know a priori
3171 | // that the outer-most transaction is outside the function.
3172 | if (decl_is_tm_clone (current_function_decl)) | |
3173 | continue; | |
4c0315d0 | 3174 | |
0cd02a19 | 3175 | if (cfun->gimple_df->tm_restart == NULL) |
3176 | cfun->gimple_df->tm_restart | |
b7aa58e4 | 3177 | = hash_table<tm_restart_hasher>::create_ggc (31); |
4c0315d0 | 3178 | |
3181 | for (struct tm_region *o = region; o; o = o->outer) | |
3182 | if (!o->outer) | |
3183 | { | |
1a91d914 | 3184 | split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi); |
0cd02a19 | 3185 | break; |
3186 | } | |
4c0315d0 | 3187 | |
0cd02a19 | 3188 | // Delete any tail-call annotation that may have been added. |
3189 | // The tail-call pass may have mis-identified the commit as being | |
3190 | // a candidate because we had not yet added this restart edge. | |
1a91d914 | 3191 | gimple_call_set_tail (call_stmt, false); |
4c0315d0 | 3192 | } |
3193 | } | |
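/* Worked example (illustrative only): for

     __transaction_atomic { if (x) __transaction_cancel; y++; }

   the BUILT_IN_TM_ABORT generated for the cancel receives an
   abnormal edge to the restart block of the aborted transaction,
   while the BUILT_IN_TM_COMMIT at the exit receives an abnormal
   edge to the restart block of the outer-most region, since a
   conflict detected at commit time restarts the whole transaction.  */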
3194 | ||
3195 | /* Entry point to the final expansion of transactional nodes. */ | |
3196 | ||
7620bc82 | 3197 | namespace { |
3198 | ||
3199 | const pass_data pass_data_tm_edges = | |
cbe8bda8 | 3200 | { |
3201 | GIMPLE_PASS, /* type */ | |
3202 | "tmedge", /* name */ | |
3203 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3204 | TV_TRANS_MEM, /* tv_id */ |
3205 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3206 | 0, /* properties_provided */ | |
3207 | 0, /* properties_destroyed */ | |
3208 | 0, /* todo_flags_start */ | |
8b88439e | 3209 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3210 | }; |
cbe8bda8 | 3211 | |
7620bc82 | 3212 | class pass_tm_edges : public gimple_opt_pass |
cbe8bda8 | 3213 | { |
3214 | public: | |
9af5ce0c | 3215 | pass_tm_edges (gcc::context *ctxt) |
3216 | : gimple_opt_pass (pass_data_tm_edges, ctxt) | |
cbe8bda8 | 3217 | {} |
3218 | ||
3219 | /* opt_pass methods: */ | |
65b0537f | 3220 | virtual unsigned int execute (function *); |
cbe8bda8 | 3221 | |
3222 | }; // class pass_tm_edges | |
3223 | ||
65b0537f | 3224 | unsigned int |
3225 | pass_tm_edges::execute (function *fun) | |
3226 | { | |
04009ada | 3227 | vec<tm_region *> bb_regions |
65b0537f | 3228 | = get_bb_regions_instrumented (/*traverse_clones=*/false, |
3229 | /*include_uninstrumented_p=*/true); | |
3230 | struct tm_region *r; | |
3231 | unsigned i; | |
3232 | ||
3233 | FOR_EACH_VEC_ELT (bb_regions, i, r) | |
3234 | if (r != NULL) | |
3235 | expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i)); | |
3236 | ||
3237 | bb_regions.release (); | |
3238 | ||
3239 | /* We've got to release the dominance info now, to indicate that it | |
3240 | must be rebuilt completely. Otherwise we'll crash trying to update | |
3241 | the SSA web in the TODO section following this pass. */ | |
3242 | free_dominance_info (CDI_DOMINATORS); | |
3243 | bitmap_obstack_release (&tm_obstack); | |
3244 | all_tm_regions = NULL; | |
3245 | ||
3246 | return 0; | |
3247 | } | |
3248 | ||
7620bc82 | 3249 | } // anon namespace |
3250 | ||
cbe8bda8 | 3251 | gimple_opt_pass * |
3252 | make_pass_tm_edges (gcc::context *ctxt) | |
3253 | { | |
3254 | return new pass_tm_edges (ctxt); | |
3255 | } | |
0cd02a19 | 3256 | \f |
3257 | /* Helper function for expand_regions.  Expand REGION and recurse to
3258 | the inner region.  Call CALLBACK on each region, passing DATA; a
3259 | NULL return value continues the traversal, while a non-null value
00d83cc8 | 3260 | stops it and is returned by this function as well.  TRAVERSE_CLONES
3261 | is true if we should traverse transactional clones. */
0cd02a19 | 3262 | |
3263 | static void * | |
3264 | expand_regions_1 (struct tm_region *region, | |
3265 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3266 | void *data, |
3267 | bool traverse_clones) | |
0cd02a19 | 3268 | { |
3269 | void *retval = NULL; | |
00d83cc8 | 3270 | if (region->exit_blocks |
3271 | || (traverse_clones && decl_is_tm_clone (current_function_decl))) | |
0cd02a19 | 3272 | { |
3273 | retval = callback (region, data); | |
3274 | if (retval) | |
3275 | return retval; | |
3276 | } | |
3277 | if (region->inner) | |
3278 | { | |
00d83cc8 | 3279 | retval = expand_regions (region->inner, callback, data, traverse_clones); |
0cd02a19 | 3280 | if (retval) |
3281 | return retval; | |
3282 | } | |
3283 | return retval; | |
3284 | } | |
3285 | ||
3286 | /* Traverse the regions enclosed by and including REGION.  Execute
3287 | CALLBACK for each region, passing DATA.  A NULL return from
3288 | CALLBACK continues the traversal, while a non-null value stops it
00d83cc8 | 3289 | and is returned by this function as well.  TRAVERSE_CLONES is true
3290 | if we should traverse transactional clones. */
0cd02a19 | 3291 | |
3292 | static void * | |
3293 | expand_regions (struct tm_region *region, | |
3294 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3295 | void *data, |
3296 | bool traverse_clones) | |
0cd02a19 | 3297 | { |
3298 | void *retval = NULL; | |
3299 | while (region) | |
3300 | { | |
00d83cc8 | 3301 | retval = expand_regions_1 (region, callback, data, traverse_clones); |
0cd02a19 | 3302 | if (retval) |
3303 | return retval; | |
3304 | region = region->next; | |
3305 | } | |
3306 | return retval; | |
3307 | } | |
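/* Usage sketch with a hypothetical callback (not part of the pass):
   count the regions a traversal would visit.  Returning NULL keeps
   the traversal going.  */
#if 0
static void *
count_region (struct tm_region *region ATTRIBUTE_UNUSED, void *data)
{
  ++*(unsigned int *) data;
  return NULL;
}

static unsigned int
count_regions (void)
{
  unsigned int n = 0;
  expand_regions (all_tm_regions, count_region, &n, /*traverse_clones=*/true);
  return n;
}
#endif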
3308 | ||
4c0315d0 | 3309 | \f |
3310 | /* A unique TM memory operation. */ | |
04009ada | 3311 | struct tm_memop |
4c0315d0 | 3312 | { |
3313 | /* Unique ID that all memory operations to the same location have. */ | |
3314 | unsigned int value_id; | |
3315 | /* Address of load/store. */ | |
3316 | tree addr; | |
04009ada | 3317 | }; |
4c0315d0 | 3318 | |
d9dd21a8 | 3319 | /* TM memory operation hashtable helpers. */ |
3320 | ||
298e7f9a | 3321 | struct tm_memop_hasher : free_ptr_hash <tm_memop> |
d9dd21a8 | 3322 | { |
9969c043 | 3323 | static inline hashval_t hash (const tm_memop *); |
3324 | static inline bool equal (const tm_memop *, const tm_memop *); | |
d9dd21a8 | 3325 | }; |
3326 | ||
3327 | /* Htab support. Return a hash value for a `tm_memop'. */ | |
3328 | inline hashval_t | |
9969c043 | 3329 | tm_memop_hasher::hash (const tm_memop *mem) |
d9dd21a8 | 3330 | { |
3331 | tree addr = mem->addr; | |
3332 | /* We drill down to the SSA_NAME/DECL for the hash, but equality is | |
3333 | actually done with operand_equal_p (see tm_memop_eq). */ | |
3334 | if (TREE_CODE (addr) == ADDR_EXPR) | |
3335 | addr = TREE_OPERAND (addr, 0); | |
3336 | return iterative_hash_expr (addr, 0); | |
3337 | } | |
3338 | ||
3339 | /* Htab support. Return true if two tm_memop's are the same. */ | |
3340 | inline bool | |
9969c043 | 3341 | tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2) |
d9dd21a8 | 3342 | { |
3343 | return operand_equal_p (mem1->addr, mem2->addr, 0); | |
3344 | } | |
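/* Example: two distinct ADDR_EXPR trees for `&x' hash alike because
   both drill down to the DECL `x', and they compare equal via
   operand_equal_p, so every load and store of `&x' shares one value
   id.  Hash and equality must agree in exactly this way for the
   table to be well-formed.  */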
3345 | ||
4c0315d0 | 3346 | /* Sets for solving data flow equations in the memory optimization pass. */ |
3347 | struct tm_memopt_bitmaps | |
3348 | { | |
3349 | /* Stores available to this BB upon entry. Basically, stores that | |
3350 | dominate this BB. */ | |
3351 | bitmap store_avail_in; | |
3352 | /* Stores available at the end of this BB. */ | |
3353 | bitmap store_avail_out; | |
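  /* Stores anticipated on entry to and exit from this BB: stores
     that are performed on every path from that point to a region
     exit (see tm_memopt_compute_antic).  */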
3354 | bitmap store_antic_in; | |
3355 | bitmap store_antic_out; | |
3356 | /* Reads available to this BB upon entry. Basically, reads that | |
3357 | dominate this BB. */ | |
3358 | bitmap read_avail_in; | |
3359 | /* Reads available at the end of this BB. */ | |
3360 | bitmap read_avail_out; | |
3361 | /* Reads performed in this BB. */ | |
3362 | bitmap read_local; | |
3363 | /* Writes performed in this BB. */ | |
3364 | bitmap store_local; | |
3365 | ||
3366 | /* Temporary storage for pass. */ | |
3367 | /* Is the current BB in the worklist? */ | |
3368 | bool avail_in_worklist_p; | |
3369 | /* Have we visited this BB? */ | |
3370 | bool visited_p; | |
3371 | }; | |
3372 | ||
3373 | static bitmap_obstack tm_memopt_obstack; | |
3374 | ||
3375 | /* Unique counter for TM loads and stores. Loads and stores of the | |
3376 | same address get the same ID. */ | |
3377 | static unsigned int tm_memopt_value_id; | |
c1f445d2 | 3378 | static hash_table<tm_memop_hasher> *tm_memopt_value_numbers; |
4c0315d0 | 3379 | |
3380 | #define STORE_AVAIL_IN(BB) \ | |
3381 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in | |
3382 | #define STORE_AVAIL_OUT(BB) \ | |
3383 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out | |
3384 | #define STORE_ANTIC_IN(BB) \ | |
3385 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in | |
3386 | #define STORE_ANTIC_OUT(BB) \ | |
3387 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out | |
3388 | #define READ_AVAIL_IN(BB) \ | |
3389 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in | |
3390 | #define READ_AVAIL_OUT(BB) \ | |
3391 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out | |
3392 | #define READ_LOCAL(BB) \ | |
3393 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local | |
3394 | #define STORE_LOCAL(BB) \ | |
3395 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local | |
3396 | #define AVAIL_IN_WORKLIST_P(BB) \ | |
3397 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p | |
3398 | #define BB_VISITED_P(BB) \ | |
3399 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p | |
3400 | ||
4c0315d0 | 3401 | /* Given a TM load/store in STMT, return the value number for the address |
3402 | it accesses. */ | |
3403 | ||
3404 | static unsigned int | |
42acab1c | 3405 | tm_memopt_value_number (gimple *stmt, enum insert_option op) |
4c0315d0 | 3406 | { |
3407 | struct tm_memop tmpmem, *mem; | |
d9dd21a8 | 3408 | tm_memop **slot; |
4c0315d0 | 3409 | |
3410 | gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); | |
3411 | tmpmem.addr = gimple_call_arg (stmt, 0); | |
c1f445d2 | 3412 | slot = tm_memopt_value_numbers->find_slot (&tmpmem, op); |
4c0315d0 | 3413 | if (*slot) |
d9dd21a8 | 3414 | mem = *slot; |
4c0315d0 | 3415 | else if (op == INSERT) |
3416 | { | |
3417 | mem = XNEW (struct tm_memop); | |
3418 | *slot = mem; | |
3419 | mem->value_id = tm_memopt_value_id++; | |
3420 | mem->addr = tmpmem.addr; | |
3421 | } | |
3422 | else | |
3423 | gcc_unreachable (); | |
3424 | return mem->value_id; | |
3425 | } | |
3426 | ||
3427 | /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ | |
3428 | ||
3429 | static void | |
3430 | tm_memopt_accumulate_memops (basic_block bb) | |
3431 | { | |
3432 | gimple_stmt_iterator gsi; | |
3433 | ||
3434 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3435 | { | |
42acab1c | 3436 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 3437 | bitmap bits; |
3438 | unsigned int loc; | |
3439 | ||
3440 | if (is_tm_store (stmt)) | |
3441 | bits = STORE_LOCAL (bb); | |
3442 | else if (is_tm_load (stmt)) | |
3443 | bits = READ_LOCAL (bb); | |
3444 | else | |
3445 | continue; | |
3446 | ||
3447 | loc = tm_memopt_value_number (stmt, INSERT); | |
3448 | bitmap_set_bit (bits, loc); | |
3449 | if (dump_file) | |
3450 | { | |
3451 | fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", | |
3452 | is_tm_load (stmt) ? "LOAD" : "STORE", loc, | |
3453 | gimple_bb (stmt)->index); | |
3454 | print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0); | |
3455 | fprintf (dump_file, "\n"); | |
3456 | } | |
3457 | } | |
3458 | } | |
3459 | ||
3460 | /* Prettily dump the memopt set named SET_NAME.  BITS is the bitmap to dump. */
3461 | ||
3462 | static void | |
3463 | dump_tm_memopt_set (const char *set_name, bitmap bits) | |
3464 | { | |
3465 | unsigned i; | |
3466 | bitmap_iterator bi; | |
3467 | const char *comma = ""; | |
3468 | ||
3469 | fprintf (dump_file, "TM memopt: %s: [", set_name); | |
3470 | EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) | |
3471 | { | |
c1f445d2 | 3472 | hash_table<tm_memop_hasher>::iterator hi; |
d9dd21a8 | 3473 | struct tm_memop *mem = NULL; |
4c0315d0 | 3474 | |
3475 | /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ | |
c1f445d2 | 3476 | FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi) |
4c0315d0 | 3477 | if (mem->value_id == i) |
3478 | break; | |
3479 | gcc_assert (mem->value_id == i); | |
3480 | fprintf (dump_file, "%s", comma); | |
3481 | comma = ", "; | |
3482 | print_generic_expr (dump_file, mem->addr, 0); | |
3483 | } | |
3484 | fprintf (dump_file, "]\n"); | |
3485 | } | |
3486 | ||
3487 | /* Prettily dump all of the memopt sets in BLOCKS. */ | |
3488 | ||
3489 | static void | |
f1f41a6c | 3490 | dump_tm_memopt_sets (vec<basic_block> blocks) |
4c0315d0 | 3491 | { |
3492 | size_t i; | |
3493 | basic_block bb; | |
3494 | ||
f1f41a6c | 3495 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3496 | { |
3497 | fprintf (dump_file, "------------BB %d---------\n", bb->index); | |
3498 | dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); | |
3499 | dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); | |
3500 | dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); | |
3501 | dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); | |
3502 | dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); | |
3503 | dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); | |
3504 | } | |
3505 | } | |
3506 | ||
3507 | /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ | |
3508 | ||
3509 | static void | |
3510 | tm_memopt_compute_avin (basic_block bb) | |
3511 | { | |
3512 | edge e; | |
3513 | unsigned ix; | |
3514 | ||
3515 | /* Seed with the AVOUT of any predecessor. */ | |
3516 | for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) | |
3517 | { | |
3518 | e = EDGE_PRED (bb, ix); | |
3519 | /* Make sure we have already visited this BB, and that it is thus
3520 | initialized.
3521 | ||
3522 | If e->src->aux is NULL, this predecessor is actually on an | |
3523 | enclosing transaction. We only care about the current | |
3524 | transaction, so ignore it. */ | |
3525 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3526 | { | |
3527 | bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3528 | bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3529 | break; | |
3530 | } | |
3531 | } | |
3532 | ||
3533 | for (; ix < EDGE_COUNT (bb->preds); ix++) | |
3534 | { | |
3535 | e = EDGE_PRED (bb, ix); | |
3536 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3537 | { | |
3538 | bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3539 | bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3540 | } | |
3541 | } | |
3542 | ||
3543 | BB_VISITED_P (bb) = true; | |
3544 | } | |
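/* The seed-and-intersect idiom above, reduced to plain bitmasks
   (illustration only): AVIN is the intersection of AVOUT over the
   visited predecessors, seeded from the first one found.  */
#if 0
static unsigned int
example_avin (const unsigned int *pred_avout, const bool *pred_visited,
              unsigned int n)
{
  unsigned int avin = 0, ix = 0;
  for (; ix < n; ix++)
    if (pred_visited[ix])
      {
        avin = pred_avout[ix];    /* Seed from the first visited pred.  */
        break;
      }
  for (; ix < n; ix++)
    if (pred_visited[ix])
      avin &= pred_avout[ix];     /* Intersect the remainder.  */
  return avin;
}
#endif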
3545 | ||
3546 | /* Compute the STORE_ANTIC_IN for the basic block BB. */ | |
3547 | ||
3548 | static void | |
3549 | tm_memopt_compute_antin (basic_block bb) | |
3550 | { | |
3551 | edge e; | |
3552 | unsigned ix; | |
3553 | ||
3554 | /* Seed with the ANTIC_OUT of any successor. */ | |
3555 | for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) | |
3556 | { | |
3557 | e = EDGE_SUCC (bb, ix); | |
3558 | /* Make sure we have already visited this BB, and that it is thus
3559 | initialized. */
3560 | if (BB_VISITED_P (e->dest)) | |
3561 | { | |
3562 | bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3563 | break; | |
3564 | } | |
3565 | } | |
3566 | ||
3567 | for (; ix < EDGE_COUNT (bb->succs); ix++) | |
3568 | { | |
3569 | e = EDGE_SUCC (bb, ix); | |
3570 | if (BB_VISITED_P (e->dest)) | |
3571 | bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3572 | } | |
3573 | ||
3574 | BB_VISITED_P (bb) = true; | |
3575 | } | |
3576 | ||
3577 | /* Compute the AVAIL sets for every basic block in BLOCKS. | |
3578 | ||
3579 | We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: | |
3580 | ||
3581 | AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) | |
3582 | AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) | |
3583 | ||
3584 | This is basically what we do in lcm's compute_available(), but here | |
3585 | we calculate two sets of sets (one for STOREs and one for READs), | |
3586 | and we work on a region instead of the entire CFG. | |
3587 | ||
3588 | REGION is the TM region. | |
3589 | BLOCKS are the basic blocks in the region. */ | |
3590 | ||
3591 | static void | |
3592 | tm_memopt_compute_available (struct tm_region *region, | |
f1f41a6c | 3593 | vec<basic_block> blocks) |
4c0315d0 | 3594 | { |
3595 | edge e; | |
3596 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3597 | unsigned int qlen, i; | |
3598 | edge_iterator ei; | |
3599 | bool changed; | |
3600 | ||
3601 | /* Allocate a worklist array/queue. Entries are only added to the | |
3602 | list if they were not already on the list. So the size is | |
3603 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3604 | qlen = blocks.length () - 1; |
4c0315d0 | 3605 | qin = qout = worklist = |
3606 | XNEWVEC (basic_block, qlen); | |
3607 | ||
3608 | /* Put every block in the region on the worklist. */ | |
f1f41a6c | 3609 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3610 | { |
3611 | /* Seed AVAIL_OUT with the LOCAL set. */ | |
3612 | bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); | |
3613 | bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); | |
3614 | ||
3615 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3616 | /* No need to insert the entry block, since it has an AVIN of | |
3617 | null, and an AVOUT that has already been seeded in. */ | |
3618 | if (bb != region->entry_block) | |
3619 | *qin++ = bb; | |
3620 | } | |
3621 | ||
3622 | /* The entry block has been initialized with the local sets. */ | |
3623 | BB_VISITED_P (region->entry_block) = true; | |
3624 | ||
3625 | qin = worklist; | |
3626 | qend = &worklist[qlen]; | |
3627 | ||
3628 | /* Iterate until the worklist is empty. */ | |
3629 | while (qlen) | |
3630 | { | |
3631 | /* Take the first entry off the worklist. */ | |
3632 | bb = *qout++; | |
3633 | qlen--; | |
3634 | ||
3635 | if (qout >= qend) | |
3636 | qout = worklist; | |
3637 | ||
3638 | /* This block can be added to the worklist again if necessary. */ | |
3639 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3640 | tm_memopt_compute_avin (bb); | |
3641 | ||
3642 | /* Note: We do not add the LOCAL sets here because we already | |
3643 | seeded the AVAIL_OUT sets with them. */ | |
3644 | changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); | |
3645 | changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); | |
3646 | if (changed | |
3647 | && (region->exit_blocks == NULL | |
3648 | || !bitmap_bit_p (region->exit_blocks, bb->index))) | |
3649 | /* If the out state of this block changed, then we need to add | |
3650 | its successors to the worklist if they are not already in. */ | |
3651 | FOR_EACH_EDGE (e, ei, bb->succs) | |
34154e27 | 3652 | if (!AVAIL_IN_WORKLIST_P (e->dest) |
3653 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
4c0315d0 | 3654 | { |
3655 | *qin++ = e->dest; | |
3656 | AVAIL_IN_WORKLIST_P (e->dest) = true; | |
3657 | qlen++; | |
3658 | ||
3659 | if (qin >= qend) | |
3660 | qin = worklist; | |
3661 | } | |
3662 | } | |
3663 | ||
3664 | free (worklist); | |
3665 | ||
3666 | if (dump_file) | |
3667 | dump_tm_memopt_sets (blocks); | |
3668 | } | |
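/* Sketch of the fixed-capacity circular worklist used above and in
   tm_memopt_compute_antic: QIN and QOUT chase each other around one
   array.  The capacity is safe because AVAIL_IN_WORKLIST_P ensures
   each block is queued at most once at any given time.  */
#if 0
#define WL_CAP 8
static int wl[WL_CAP], *qin = wl, *qout = wl, *qend = wl + WL_CAP;
static unsigned int qlen;

static void
wl_push (int bb_index)
{
  *qin++ = bb_index;
  qlen++;
  if (qin >= qend)
    qin = wl;
}

static int
wl_pop (void)
{
  int bb_index = *qout++;
  qlen--;
  if (qout >= qend)
    qout = wl;
  return bb_index;
}
#endif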
3669 | ||
3670 | /* Compute ANTIC sets for every basic block in BLOCKS. | |
3671 | ||
3672 | We compute STORE_ANTIC_OUT as follows: | |
3673 | ||
3674 | STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) | |
3675 | STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) | |
3676 | ||
3677 | REGION is the TM region. | |
3678 | BLOCKS are the basic blocks in the region. */ | |
3679 | ||
3680 | static void | |
3681 | tm_memopt_compute_antic (struct tm_region *region, | |
f1f41a6c | 3682 | vec<basic_block> blocks) |
4c0315d0 | 3683 | { |
3684 | edge e; | |
3685 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3686 | unsigned int qlen; | |
3687 | int i; | |
3688 | edge_iterator ei; | |
3689 | ||
3690 | /* Allocate a worklist array/queue. Entries are only added to the | |
3691 | list if they were not already on the list. So the size is | |
3692 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3693 | qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); |
4c0315d0 | 3694 | |
f1f41a6c | 3695 | for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) |
4c0315d0 | 3696 | { |
f1f41a6c | 3697 | bb = blocks[i]; |
4c0315d0 | 3698 | |
3699 | /* Seed ANTIC_OUT with the LOCAL set. */ | |
3700 | bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); | |
3701 | ||
3702 | /* Put every block in the region on the worklist. */ | |
3703 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3704 | /* No need to insert exit blocks, since their ANTIC_IN is NULL, | |
3705 | and their ANTIC_OUT has already been seeded in. */ | |
3706 | if (region->exit_blocks | |
3707 | && !bitmap_bit_p (region->exit_blocks, bb->index)) | |
3708 | { | |
3709 | qlen++; | |
3710 | *qin++ = bb; | |
3711 | } | |
3712 | } | |
3713 | ||
3714 | /* The exit blocks have been initialized with the local sets. */ | |
3715 | if (region->exit_blocks) | |
3716 | { | |
3717 | unsigned int i; | |
3718 | bitmap_iterator bi; | |
3719 | EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) | |
f5a6b05f | 3720 | BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; |
4c0315d0 | 3721 | } |
3722 | ||
3723 | qin = worklist; | |
3724 | qend = &worklist[qlen]; | |
3725 | ||
3726 | /* Iterate until the worklist is empty. */ | |
3727 | while (qlen) | |
3728 | { | |
3729 | /* Take the first entry off the worklist. */ | |
3730 | bb = *qout++; | |
3731 | qlen--; | |
3732 | ||
3733 | if (qout >= qend) | |
3734 | qout = worklist; | |
3735 | ||
3736 | /* This block can be added to the worklist again if necessary. */ | |
3737 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3738 | tm_memopt_compute_antin (bb); | |
3739 | ||
3740 | /* Note: We do not add the LOCAL sets here because we already | |
3741 | seeded the ANTIC_OUT sets with them. */ | |
3742 | if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) | |
3743 | && bb != region->entry_block) | |
3744 | /* If the out state of this block changed, then we need to add | |
3745 | its predecessors to the worklist if they are not already in. */ | |
3746 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3747 | if (!AVAIL_IN_WORKLIST_P (e->src)) | |
3748 | { | |
3749 | *qin++ = e->src; | |
3750 | AVAIL_IN_WORKLIST_P (e->src) = true; | |
3751 | qlen++; | |
3752 | ||
3753 | if (qin >= qend) | |
3754 | qin = worklist; | |
3755 | } | |
3756 | } | |
3757 | ||
3758 | free (worklist); | |
3759 | ||
3760 | if (dump_file) | |
3761 | dump_tm_memopt_sets (blocks); | |
3762 | } | |
3763 | ||
3764 | /* Offsets of load variants from TM_LOAD. For example, | |
3765 | BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*. | |
3766 | See gtm-builtins.def. */ | |
3767 | #define TRANSFORM_RAR 1 | |
3768 | #define TRANSFORM_RAW 2 | |
3769 | #define TRANSFORM_RFW 3 | |
3770 | /* Offsets of store variants from TM_STORE. */ | |
3771 | #define TRANSFORM_WAR 1 | |
3772 | #define TRANSFORM_WAW 2 | |
3773 | ||
3774 | /* Inform about a load/store optimization. */ | |
3775 | ||
3776 | static void | |
42acab1c | 3777 | dump_tm_memopt_transform (gimple *stmt) |
4c0315d0 | 3778 | { |
3779 | if (dump_file) | |
3780 | { | |
3781 | fprintf (dump_file, "TM memopt: transforming: "); | |
3782 | print_gimple_stmt (dump_file, stmt, 0, 0); | |
3783 | fprintf (dump_file, "\n"); | |
3784 | } | |
3785 | } | |
3786 | ||
3787 | /* Perform a read/write optimization. Replaces the TM builtin in STMT | |
3788 | by a builtin that is OFFSET entries down in the builtins table in | |
3789 | gtm-builtins.def. */ | |
3790 | ||
3791 | static void | |
3792 | tm_memopt_transform_stmt (unsigned int offset, | |
1a91d914 | 3793 | gcall *stmt, |
4c0315d0 | 3794 | gimple_stmt_iterator *gsi) |
3795 | { | |
3796 | tree fn = gimple_call_fn (stmt); | |
3797 | gcc_assert (TREE_CODE (fn) == ADDR_EXPR); | |
3798 | TREE_OPERAND (fn, 0) | |
3799 | = builtin_decl_explicit ((enum built_in_function) | |
3800 | (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)) | |
3801 | + offset)); | |
3802 | gimple_call_set_fn (stmt, fn); | |
3803 | gsi_replace (gsi, stmt, true); | |
3804 | dump_tm_memopt_transform (stmt); | |
3805 | } | |
3806 | ||
3807 | /* Perform the actual TM memory optimization transformations in the | |
3808 | basic blocks in BLOCKS. */ | |
3809 | ||
3810 | static void | |
f1f41a6c | 3811 | tm_memopt_transform_blocks (vec<basic_block> blocks) |
4c0315d0 | 3812 | { |
3813 | size_t i; | |
3814 | basic_block bb; | |
3815 | gimple_stmt_iterator gsi; | |
3816 | ||
f1f41a6c | 3817 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3818 | { |
3819 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3820 | { | |
42acab1c | 3821 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 3822 | bitmap read_avail = READ_AVAIL_IN (bb); |
3823 | bitmap store_avail = STORE_AVAIL_IN (bb); | |
3824 | bitmap store_antic = STORE_ANTIC_OUT (bb); | |
3825 | unsigned int loc; | |
3826 | ||
3827 | if (is_tm_simple_load (stmt)) | |
3828 | { | |
1a91d914 | 3829 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3830 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3831 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3832 | tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi); |
4c0315d0 | 3833 | else if (store_antic && bitmap_bit_p (store_antic, loc)) |
3834 | { | |
1a91d914 | 3835 | tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi); |
4c0315d0 | 3836 | bitmap_set_bit (store_avail, loc); |
3837 | } | |
3838 | else if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3839 | tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi); |
4c0315d0 | 3840 | else |
3841 | bitmap_set_bit (read_avail, loc); | |
3842 | } | |
3843 | else if (is_tm_simple_store (stmt)) | |
3844 | { | |
1a91d914 | 3845 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3846 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3847 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3848 | tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi); |
4c0315d0 | 3849 | else |
3850 | { | |
3851 | if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3852 | tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi); |
4c0315d0 | 3853 | bitmap_set_bit (store_avail, loc); |
3854 | } | |
3855 | } | |
3856 | } | |
3857 | } | |
3858 | } | |
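/* The decision table implemented above, as a stand-alone sketch;
   the enum is hypothetical, and the real code rewrites the builtin
   in place via tm_memopt_transform_stmt.  */
#if 0
enum tm_hint { HINT_NONE, HINT_RAW, HINT_RFW, HINT_RAR, HINT_WAW, HINT_WAR };

static enum tm_hint
classify_load (bool store_avail, bool store_antic, bool read_avail)
{
  if (store_avail)
    return HINT_RAW;    /* Read after our own write.  */
  if (store_antic)
    return HINT_RFW;    /* Read, with a write certain to follow.  */
  if (read_avail)
    return HINT_RAR;    /* Read after read.  */
  return HINT_NONE;
}

static enum tm_hint
classify_store (bool store_avail, bool read_avail)
{
  if (store_avail)
    return HINT_WAW;    /* Write after write.  */
  if (read_avail)
    return HINT_WAR;    /* Write after read.  */
  return HINT_NONE;
}
#endif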
3859 | ||
3860 | /* Return a new set of bitmaps for a BB. */ | |
3861 | ||
3862 | static struct tm_memopt_bitmaps * | |
3863 | tm_memopt_init_sets (void) | |
3864 | { | |
3865 | struct tm_memopt_bitmaps *b | |
3866 | = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps); | |
3867 | b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3868 | b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3869 | b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3870 | b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3872 | b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3873 | b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3874 | b->read_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3875 | b->store_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3876 | return b; | |
3877 | } | |
3878 | ||
3879 | /* Free sets computed for each BB. */ | |
3880 | ||
3881 | static void | |
f1f41a6c | 3882 | tm_memopt_free_sets (vec<basic_block> blocks) |
4c0315d0 | 3883 | { |
3884 | size_t i; | |
3885 | basic_block bb; | |
3886 | ||
f1f41a6c | 3887 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3888 | bb->aux = NULL; |
3889 | } | |
3890 | ||
3891 | /* Clear the visited bit for every basic block in BLOCKS. */ | |
3892 | ||
3893 | static void | |
f1f41a6c | 3894 | tm_memopt_clear_visited (vec<basic_block> blocks) |
4c0315d0 | 3895 | { |
3896 | size_t i; | |
3897 | basic_block bb; | |
3898 | ||
f1f41a6c | 3899 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3900 | BB_VISITED_P (bb) = false; |
3901 | } | |
3902 | ||
3903 | /* Replace TM loads/stores with hints for the runtime.  We handle
3904 | things like read-after-write, write-after-read, read-after-read, | |
3905 | read-for-write, etc. */ | |
3906 | ||
3907 | static unsigned int | |
3908 | execute_tm_memopt (void) | |
3909 | { | |
3910 | struct tm_region *region; | |
f1f41a6c | 3911 | vec<basic_block> bbs; |
4c0315d0 | 3912 | |
3913 | tm_memopt_value_id = 0; | |
c1f445d2 | 3914 | tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10); |
4c0315d0 | 3915 | |
3916 | for (region = all_tm_regions; region; region = region->next) | |
3917 | { | |
3918 | /* All the TM stores/loads in the current region. */ | |
3919 | size_t i; | |
3920 | basic_block bb; | |
3921 | ||
3922 | bitmap_obstack_initialize (&tm_memopt_obstack); | |
3923 | ||
3924 | /* Save all BBs for the current region. */ | |
3925 | bbs = get_tm_region_blocks (region->entry_block, | |
3926 | region->exit_blocks, | |
3927 | region->irr_blocks, | |
3928 | NULL, | |
3929 | false); | |
3930 | ||
3931 | /* Collect all the memory operations. */ | |
f1f41a6c | 3932 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 3933 | { |
3934 | bb->aux = tm_memopt_init_sets (); | |
3935 | tm_memopt_accumulate_memops (bb); | |
3936 | } | |
3937 | ||
3938 | /* Solve data flow equations and transform each block accordingly. */ | |
3939 | tm_memopt_clear_visited (bbs); | |
3940 | tm_memopt_compute_available (region, bbs); | |
3941 | tm_memopt_clear_visited (bbs); | |
3942 | tm_memopt_compute_antic (region, bbs); | |
3943 | tm_memopt_transform_blocks (bbs); | |
3944 | ||
3945 | tm_memopt_free_sets (bbs); | |
f1f41a6c | 3946 | bbs.release (); |
4c0315d0 | 3947 | bitmap_obstack_release (&tm_memopt_obstack); |
c1f445d2 | 3948 | tm_memopt_value_numbers->empty (); |
4c0315d0 | 3949 | } |
3950 | ||
c1f445d2 | 3951 | delete tm_memopt_value_numbers; |
3952 | tm_memopt_value_numbers = NULL; | |
4c0315d0 | 3953 | return 0; |
3954 | } | |
3955 | ||
7620bc82 | 3956 | namespace { |
3957 | ||
3958 | const pass_data pass_data_tm_memopt = | |
cbe8bda8 | 3959 | { |
3960 | GIMPLE_PASS, /* type */ | |
3961 | "tmmemopt", /* name */ | |
3962 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3963 | TV_TRANS_MEM, /* tv_id */ |
3964 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3965 | 0, /* properties_provided */ | |
3966 | 0, /* properties_destroyed */ | |
3967 | 0, /* todo_flags_start */ | |
3968 | 0, /* todo_flags_finish */ | |
4c0315d0 | 3969 | }; |
3970 | ||
7620bc82 | 3971 | class pass_tm_memopt : public gimple_opt_pass |
cbe8bda8 | 3972 | { |
3973 | public: | |
9af5ce0c | 3974 | pass_tm_memopt (gcc::context *ctxt) |
3975 | : gimple_opt_pass (pass_data_tm_memopt, ctxt) | |
cbe8bda8 | 3976 | {} |
3977 | ||
3978 | /* opt_pass methods: */ | |
31315c24 | 3979 | virtual bool gate (function *) { return flag_tm && optimize > 0; } |
65b0537f | 3980 | virtual unsigned int execute (function *) { return execute_tm_memopt (); } |
cbe8bda8 | 3981 | |
3982 | }; // class pass_tm_memopt | |
3983 | ||
7620bc82 | 3984 | } // anon namespace |
3985 | ||
cbe8bda8 | 3986 | gimple_opt_pass * |
3987 | make_pass_tm_memopt (gcc::context *ctxt) | |
3988 | { | |
3989 | return new pass_tm_memopt (ctxt); | |
3990 | } | |
3991 | ||
4c0315d0 | 3992 | \f |
3993 | /* Interprocedural analysis for the creation of transactional clones.
3994 | The aim of this pass is to find which functions are referenced in | |
3995 | a non-irrevocable transaction context, and for those over which | |
3996 | we have control (or user directive), create a version of the | |
3997 | function which uses only the transactional interface to reference | |
3998 | protected memories. This analysis proceeds in several steps: | |
3999 | ||
4000 | (1) Collect the set of all possible transactional clones: | |
4001 | ||
4002 | (a) For each local public function marked tm_callable, push
4003 | it onto the tm_callee queue. | |
4004 | ||
4005 | (b) For all local functions, scan for calls in transaction blocks. | |
4006 | Push the caller and callee onto the tm_caller and tm_callee | |
4007 | queues. Count the number of callers for each callee. | |
4008 | ||
4009 | (c) For each local function on the callee list, assume we will | |
4010 | create a transactional clone. Push *all* calls onto the | |
4011 | callee queues; count the number of clone callers separately
4012 | from the number of original callers.
4013 | ||
4014 | (2) Propagate irrevocable status up the dominator tree: | |
4015 | ||
4016 | (a) Any external function on the callee list that is not marked | |
4017 | tm_callable is irrevocable. Push all callers of such onto | |
4018 | a worklist. | |
4019 | ||
4020 | (b) For each function on the worklist, mark each block that | |
4021 | contains an irrevocable call. Use the AND operator to | |
4022 | propagate that mark up the dominator tree. | |
4023 | ||
4024 | (c) If we reach the entry block for a possible transactional | |
4025 | clone, then the transactional clone is irrevocable, and | |
4026 | we should not create the clone after all. Push all | |
4027 | callers onto the worklist. | |
4028 | ||
4029 | (d) Place tm_irrevocable calls at the beginning of the relevant | |
4030 | blocks. Special case here is the entry block for the entire | |
4031 | transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for | |
4032 | the library to begin the region in serial mode. Decrement | |
4033 | the call count for all callees in the irrevocable region. | |
4034 | ||
4035 | (3) Create the transactional clones: | |
4036 | ||
4037 | Any tm_callee that still has a non-zero call count is cloned. | |
4038 | */ | |
4039 | ||
4040 | /* This structure is stored in the AUX field of each cgraph_node. */ | |
4041 | struct tm_ipa_cg_data | |
4042 | { | |
4043 | /* The clone of the function that got created. */ | |
4044 | struct cgraph_node *clone; | |
4045 | ||
4046 | /* The tm regions in the normal function. */ | |
4047 | struct tm_region *all_tm_regions; | |
4048 | ||
4049 | /* The blocks of the normal/clone functions that contain irrevocable | |
4050 | calls, or blocks that are post-dominated by irrevocable calls. */ | |
4051 | bitmap irrevocable_blocks_normal; | |
4052 | bitmap irrevocable_blocks_clone; | |
4053 | ||
4054 | /* The blocks of the normal function that are involved in transactions. */ | |
4055 | bitmap transaction_blocks_normal; | |
4056 | ||
4057 | /* The number of callers to the transactional clone of this function | |
4058 | from normal and transactional clones respectively. */ | |
4059 | unsigned tm_callers_normal; | |
4060 | unsigned tm_callers_clone; | |
4061 | ||
4062 | /* True if all calls to this function's transactional clone | |
4063 | are irrevocable. Also automatically true if the function | |
4064 | has no transactional clone. */ | |
4065 | bool is_irrevocable; | |
4066 | ||
4067 | /* Flags indicating the presence of this function in various queues. */ | |
4068 | bool in_callee_queue; | |
4069 | bool in_worklist; | |
4070 | ||
4071 | /* Flags indicating the kind of scan desired while in the worklist. */ | |
4072 | bool want_irr_scan_normal; | |
4073 | }; | |
4074 | ||
415d1b9a | 4075 | typedef vec<cgraph_node *> cgraph_node_queue; |
4c0315d0 | 4076 | |
4077 | /* Return the ipa data associated with NODE, allocating zeroed memory | |
3e426b86 | 4078 | if necessary. TRAVERSE_ALIASES is true if we must traverse aliases |
4079 | and set *NODE accordingly. */ | |
4c0315d0 | 4080 | |
4081 | static struct tm_ipa_cg_data * | |
3e426b86 | 4082 | get_cg_data (struct cgraph_node **node, bool traverse_aliases) |
4c0315d0 | 4083 | { |
3e426b86 | 4084 | struct tm_ipa_cg_data *d; |
4085 | ||
02774f2d | 4086 | if (traverse_aliases && (*node)->alias) |
415d1b9a | 4087 | *node = (*node)->get_alias_target (); |
3e426b86 | 4088 | |
02774f2d | 4089 | d = (struct tm_ipa_cg_data *) (*node)->aux; |
4c0315d0 | 4090 | |
4091 | if (d == NULL) | |
4092 | { | |
4093 | d = (struct tm_ipa_cg_data *) | |
4094 | obstack_alloc (&tm_obstack.obstack, sizeof (*d)); | |
02774f2d | 4095 | (*node)->aux = (void *) d; |
4c0315d0 | 4096 | memset (d, 0, sizeof (*d)); |
4097 | } | |
4098 | ||
4099 | return d; | |
4100 | } | |
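/* The lazy per-node AUX allocation above, reduced to its shape
   (illustration only; the real code allocates on tm_obstack and
   chases aliases first).  */
#if 0
struct node_like { void *aux; };
struct data_like { int x; };

static struct data_like *
example_get_data (struct node_like *n)
{
  if (n->aux == NULL)
    n->aux = xcalloc (1, sizeof (struct data_like));
  return (struct data_like *) n->aux;
}
#endif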
4101 | ||
4102 | /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that | |
4103 | it is already present. */ | |
4104 | ||
4105 | static void | |
4106 | maybe_push_queue (struct cgraph_node *node, | |
4107 | cgraph_node_queue *queue_p, bool *in_queue_p) | |
4108 | { | |
4109 | if (!*in_queue_p) | |
4110 | { | |
4111 | *in_queue_p = true; | |
f1f41a6c | 4112 | queue_p->safe_push (node); |
4c0315d0 | 4113 | } |
4114 | } | |
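/* Typical use: the per-node flag guarantees a node is pushed at most
   once, e.g.

     maybe_push_queue (node, callees_p, &d->in_callee_queue);

   as done in ipa_tm_scan_calls_block below.  */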
4115 | ||
0cd02a19 | 4116 | /* Duplicate the basic blocks in QUEUE for use in the uninstrumented |
4117 | code path.  QUEUE holds the basic blocks inside the transaction
4118 | represented by REGION.
4119 | ||
4120 | Later in split_code_paths() we will add the conditional to choose | |
4121 | between the two alternatives. */ | |
4122 | ||
4123 | static void | |
4124 | ipa_uninstrument_transaction (struct tm_region *region, | |
f1f41a6c | 4125 | vec<basic_block> queue) |
0cd02a19 | 4126 | { |
42acab1c | 4127 | gimple *transaction = region->transaction_stmt; |
0cd02a19 | 4128 | basic_block transaction_bb = gimple_bb (transaction); |
f1f41a6c | 4129 | int n = queue.length (); |
0cd02a19 | 4130 | basic_block *new_bbs = XNEWVEC (basic_block, n); |
4131 | ||
d99f53b2 | 4132 | copy_bbs (queue.address (), n, new_bbs, NULL, 0, NULL, NULL, transaction_bb, |
4133 | true); | |
0cd02a19 | 4134 | edge e = make_edge (transaction_bb, new_bbs[0], EDGE_TM_UNINSTRUMENTED); |
4135 | add_phi_args_after_copy (new_bbs, n, e); | |
4136 | ||
4137 | // Now we will have a GIMPLE_TRANSACTION with 3 possible edges out of it.
4138 | // a) EDGE_FALLTHRU into the transaction | |
4139 | // b) EDGE_TM_ABORT out of the transaction | |
4140 | // c) EDGE_TM_UNINSTRUMENTED into the uninstrumented blocks. | |
4141 | ||
4142 | free (new_bbs); | |
4143 | } | |
4144 | ||
4c0315d0 | 4145 | /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone. |
4146 | Queue all callees within block BB. */ | |
4147 | ||
4148 | static void | |
4149 | ipa_tm_scan_calls_block (cgraph_node_queue *callees_p, | |
4150 | basic_block bb, bool for_clone) | |
4151 | { | |
4152 | gimple_stmt_iterator gsi; | |
4153 | ||
4154 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4155 | { | |
42acab1c | 4156 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4157 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) |
4158 | { | |
4159 | tree fndecl = gimple_call_fndecl (stmt); | |
4160 | if (fndecl) | |
4161 | { | |
4162 | struct tm_ipa_cg_data *d; | |
4163 | unsigned *pcallers; | |
4164 | struct cgraph_node *node; | |
4165 | ||
4166 | if (is_tm_ending_fndecl (fndecl)) | |
4167 | continue; | |
4168 | if (find_tm_replacement_function (fndecl)) | |
4169 | continue; | |
4170 | ||
415d1b9a | 4171 | node = cgraph_node::get (fndecl); |
4c0315d0 | 4172 | gcc_assert (node != NULL); |
3e426b86 | 4173 | d = get_cg_data (&node, true); |
4c0315d0 | 4174 | |
4175 | pcallers = (for_clone ? &d->tm_callers_clone | |
4176 | : &d->tm_callers_normal); | |
4177 | *pcallers += 1; | |
4178 | ||
4179 | maybe_push_queue (node, callees_p, &d->in_callee_queue); | |
4180 | } | |
4181 | } | |
4182 | } | |
4183 | } | |
4184 | ||
4185 | /* Scan all calls in NODE that are within a transaction region, | |
4186 | and push the resulting nodes into the callee queue. */ | |
4187 | ||
4188 | static void | |
4189 | ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d, | |
4190 | cgraph_node_queue *callees_p) | |
4191 | { | |
4192 | struct tm_region *r; | |
4193 | ||
4194 | d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack); | |
4195 | d->all_tm_regions = all_tm_regions; | |
4196 | ||
4197 | for (r = all_tm_regions; r; r = r->next) | |
4198 | { | |
f1f41a6c | 4199 | vec<basic_block> bbs; |
4c0315d0 | 4200 | basic_block bb; |
4201 | unsigned i; | |
4202 | ||
4203 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL, | |
4204 | d->transaction_blocks_normal, false); | |
4205 | ||
0cd02a19 | 4206 | // Generate the uninstrumented code path for this transaction. |
4207 | ipa_uninstrument_transaction (r, bbs); | |
4208 | ||
f1f41a6c | 4209 | FOR_EACH_VEC_ELT (bbs, i, bb) |
4c0315d0 | 4210 | ipa_tm_scan_calls_block (callees_p, bb, false); |
4211 | ||
f1f41a6c | 4212 | bbs.release (); |
4c0315d0 | 4213 | } |
0cd02a19 | 4214 | |
4215 | // ??? copy_bbs should maintain cgraph edges for the blocks as it is | |
4216 | // copying them, rather than forcing us to do this externally. | |
35ee1c66 | 4217 | cgraph_edge::rebuild_edges (); |
0cd02a19 | 4218 | |
4219 | // ??? In ipa_uninstrument_transaction we don't try to update dominators | |
4220 | // because copy_bbs doesn't return a VEC like iterate_fix_dominators expects. | |
4221 | // Instead, just release dominators here so update_ssa recomputes them. | |
4222 | free_dominance_info (CDI_DOMINATORS); | |
4223 | ||
4224 | // When building the uninstrumented code path, copy_bbs will have invoked | |
4225 | // create_new_def_for starting an "ssa update context". There is only one | |
4226 | // instance of this context, so resolve ssa updates before moving on to | |
4227 | // the next function. | |
4228 | update_ssa (TODO_update_ssa); | |
4c0315d0 | 4229 | } |
4230 | ||
4231 | /* Scan all calls in NODE as if this is the transactional clone, | |
4232 | and push the destinations into the callee queue. */ | |
4233 | ||
4234 | static void | |
4235 | ipa_tm_scan_calls_clone (struct cgraph_node *node, | |
4236 | cgraph_node_queue *callees_p) | |
4237 | { | |
02774f2d | 4238 | struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
4c0315d0 | 4239 | basic_block bb; |
4240 | ||
4241 | FOR_EACH_BB_FN (bb, fn) | |
4242 | ipa_tm_scan_calls_block (callees_p, bb, true); | |
4243 | } | |
4244 | ||
4245 | /* The function NODE has been detected to be irrevocable. Push all | |
4246 | of its callers onto WORKLIST for the purpose of re-scanning them. */ | |
4247 | ||
4248 | static void | |
4249 | ipa_tm_note_irrevocable (struct cgraph_node *node, | |
4250 | cgraph_node_queue *worklist_p) | |
4251 | { | |
3e426b86 | 4252 | struct tm_ipa_cg_data *d = get_cg_data (&node, true); |
4c0315d0 | 4253 | struct cgraph_edge *e; |
4254 | ||
4255 | d->is_irrevocable = true; | |
4256 | ||
4257 | for (e = node->callers; e ; e = e->next_caller) | |
4258 | { | |
4259 | basic_block bb; | |
3e426b86 | 4260 | struct cgraph_node *caller; |
4c0315d0 | 4261 | |
4262 | /* Don't examine recursive calls. */ | |
4263 | if (e->caller == node) | |
4264 | continue; | |
4265 | /* Even if we think we can go irrevocable, believe the user | |
4266 | above all. */ | |
02774f2d | 4267 | if (is_tm_safe_or_pure (e->caller->decl)) |
4c0315d0 | 4268 | continue; |
4269 | ||
3e426b86 | 4270 | caller = e->caller; |
4271 | d = get_cg_data (&caller, true); | |
4c0315d0 | 4272 | |
4273 | /* Check if the call is within a transactional region in the
4274 | caller.  If so, schedule the caller for normal re-scan as well. */
4275 | bb = gimple_bb (e->call_stmt); | |
4276 | gcc_assert (bb != NULL); | |
4277 | if (d->transaction_blocks_normal | |
4278 | && bitmap_bit_p (d->transaction_blocks_normal, bb->index)) | |
4279 | d->want_irr_scan_normal = true; | |
4280 | ||
3e426b86 | 4281 | maybe_push_queue (caller, worklist_p, &d->in_worklist); |
4c0315d0 | 4282 | } |
4283 | } | |
4284 | ||
4285 | /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement | |
4286 | within the block is irrevocable. */ | |
4287 | ||
4288 | static bool | |
4289 | ipa_tm_scan_irr_block (basic_block bb) | |
4290 | { | |
4291 | gimple_stmt_iterator gsi; | |
4292 | tree fn; | |
4293 | ||
4294 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4295 | { | |
42acab1c | 4296 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4297 | switch (gimple_code (stmt)) |
4298 | { | |
e153bd50 | 4299 | case GIMPLE_ASSIGN: |
4300 | if (gimple_assign_single_p (stmt)) | |
4301 | { | |
4302 | tree lhs = gimple_assign_lhs (stmt); | |
4303 | tree rhs = gimple_assign_rhs1 (stmt); | |
639b72bd | 4304 | if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs)) |
e153bd50 | 4305 | return true; |
4306 | } | |
4307 | break; | |
4308 | ||
4c0315d0 | 4309 | case GIMPLE_CALL: |
e153bd50 | 4310 | { |
4311 | tree lhs = gimple_call_lhs (stmt); | |
639b72bd | 4312 | if (lhs && volatile_lvalue_p (lhs)) |
e153bd50 | 4313 | return true; |
4c0315d0 | 4314 | |
e153bd50 | 4315 | if (is_tm_pure_call (stmt)) |
4316 | break; | |
4c0315d0 | 4317 | |
e153bd50 | 4318 | fn = gimple_call_fn (stmt); |
4c0315d0 | 4319 | |
e153bd50 | 4320 | /* Functions with the attribute are by definition irrevocable. */ |
4321 | if (is_tm_irrevocable (fn)) | |
4322 | return true; | |
4c0315d0 | 4323 | |
e153bd50 | 4324 | /* For direct function calls, go ahead and check for replacement |
4325 | functions, or transitive irrevocable functions. For indirect | |
4326 | functions, we'll ask the runtime. */ | |
4327 | if (TREE_CODE (fn) == ADDR_EXPR) | |
4328 | { | |
4329 | struct tm_ipa_cg_data *d; | |
4330 | struct cgraph_node *node; | |
4c0315d0 | 4331 | |
e153bd50 | 4332 | fn = TREE_OPERAND (fn, 0); |
4333 | if (is_tm_ending_fndecl (fn)) | |
4334 | break; | |
4335 | if (find_tm_replacement_function (fn)) | |
4336 | break; | |
40879ac6 | 4337 | |
415d1b9a | 4338 | node = cgraph_node::get (fn); |
e153bd50 | 4339 | d = get_cg_data (&node, true); |
4340 | ||
4341 | /* Return true if irrevocable, but above all, believe | |
4342 | the user. */ | |
4343 | if (d->is_irrevocable | |
4344 | && !is_tm_safe_or_pure (fn)) | |
4345 | return true; | |
4346 | } | |
4347 | break; | |
4348 | } | |
4c0315d0 | 4349 | |
4350 | case GIMPLE_ASM: | |
4351 | /* ??? The Approved Method of indicating that an inline | |
4352 | assembly statement is not relevant to the transaction | |
4353 | is to wrap it in a __tm_waiver block. This is not | |
4354 | yet implemented, so we can't check for it. */ | |
43156aa3 | 4355 | if (is_tm_safe (current_function_decl)) |
4356 | { | |
4357 | tree t = build1 (NOP_EXPR, void_type_node, size_zero_node); | |
4358 | SET_EXPR_LOCATION (t, gimple_location (stmt)); | |
43156aa3 | 4359 | error ("%Kasm not allowed in %<transaction_safe%> function", t); |
4360 | } | |
4c0315d0 | 4361 | return true; |
4362 | ||
4363 | default: | |
4364 | break; | |
4365 | } | |
4366 | } | |
4367 | ||
4368 | return false; | |
4369 | } | |
4370 | ||
4371 | /* For each of the blocks seeded within PQUEUE, walk the CFG looking
4372 | for new irrevocable blocks, marking them in NEW_IRR. Don't bother | |
4373 | scanning past OLD_IRR or EXIT_BLOCKS. */ | |
4374 | ||
4375 | static bool | |
f1f41a6c | 4376 | ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr, |
4c0315d0 | 4377 | bitmap old_irr, bitmap exit_blocks) |
4378 | { | |
4379 | bool any_new_irr = false; | |
4380 | edge e; | |
4381 | edge_iterator ei; | |
4382 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
4383 | ||
4384 | do | |
4385 | { | |
f1f41a6c | 4386 | basic_block bb = pqueue->pop (); |
4c0315d0 | 4387 | |
4388 | /* Don't re-scan blocks we know already are irrevocable. */ | |
4389 | if (old_irr && bitmap_bit_p (old_irr, bb->index)) | |
4390 | continue; | |
4391 | ||
4392 | if (ipa_tm_scan_irr_block (bb)) | |
4393 | { | |
4394 | bitmap_set_bit (new_irr, bb->index); | |
4395 | any_new_irr = true; | |
4396 | } | |
4397 | else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index)) | |
4398 | { | |
4399 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4400 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
4401 | { | |
4402 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 4403 | pqueue->safe_push (e->dest); |
4c0315d0 | 4404 | } |
4405 | } | |
4406 | } | |
f1f41a6c | 4407 | while (!pqueue->is_empty ()); |
4c0315d0 | 4408 | |
4409 | BITMAP_FREE (visited_blocks); | |
4410 | ||
4411 | return any_new_irr; | |
4412 | } | |
4413 | ||
4414 | /* Propagate the irrevocable property both up and down the dominator tree. | |
4415 | ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
4416 | are the exit blocks of the TM regions; OLD_IRR is the result of a previous scan of the dominator
4417 | tree which has been fully propagated; NEW_IRR is the set of new blocks | |
4418 | which are gaining the irrevocable property during the current scan. */ | |
4419 | ||
4420 | static void | |
4421 | ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr, | |
4422 | bitmap old_irr, bitmap exit_blocks) | |
4423 | { | |
f1f41a6c | 4424 | vec<basic_block> bbs; |
4c0315d0 | 4425 | bitmap all_region_blocks; |
4426 | ||
4427 | /* If this block is in the old set, no need to rescan. */ | |
4428 | if (old_irr && bitmap_bit_p (old_irr, entry_block->index)) | |
4429 | return; | |
4430 | ||
4431 | all_region_blocks = BITMAP_ALLOC (&tm_obstack); | |
4432 | bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL, | |
4433 | all_region_blocks, false); | |
4434 | do | |
4435 | { | |
f1f41a6c | 4436 | basic_block bb = bbs.pop (); |
4c0315d0 | 4437 | bool this_irr = bitmap_bit_p (new_irr, bb->index); |
4438 | bool all_son_irr = false; | |
4439 | edge_iterator ei; | |
4440 | edge e; | |
4441 | ||
4442 | /* Propagate up. If my children are, I am too, but we must have | |
4443 | at least one child that is. */ | |
      if (!this_irr)
	{
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (!bitmap_bit_p (new_irr, e->dest->index))
		{
		  all_son_irr = false;
		  break;
		}
	      else
		all_son_irr = true;
	    }
	  if (all_son_irr)
	    {
	      /* Add block to new_irr if it hasn't already been processed.  */
	      if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
		{
		  bitmap_set_bit (new_irr, bb->index);
		  this_irr = true;
		}
	    }
	}

      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
	{
	  basic_block son;
	  for (son = first_dom_son (CDI_DOMINATORS, bb);
	       son;
	       son = next_dom_son (CDI_DOMINATORS, son))
	    {
	      /* Make sure block is actually in a TM region, and it
		 isn't already in old_irr.  */
	      if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
		  && bitmap_bit_p (all_region_blocks, son->index))
		bitmap_set_bit (new_irr, son->index);
	    }
	}
    }
  while (!bbs.is_empty ());

  BITMAP_FREE (all_region_blocks);
  bbs.release ();
}

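/* Worked example of the two propagation rules above (illustrative
   comment only): for a diamond-shaped region

	  B1
	 /  \
	B2   B3
	 \  /
	  B4

   suppose ipa_tm_scan_irr_blocks marked B2 and B3 in NEW_IRR.  The
   upward rule then marks B1, because every successor of B1 is
   irrevocable, and the downward rule marks B4, because it is
   immediately dominated by the now-irrevocable B1.  The whole region
   ends up running irrevocably.  */
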
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl)
	    {
	      struct tm_ipa_cg_data *d;
	      unsigned *pcallers;
	      struct cgraph_node *tnode;

	      if (is_tm_ending_fndecl (fndecl))
		continue;
	      if (find_tm_replacement_function (fndecl))
		continue;

	      tnode = cgraph_node::get (fndecl);
	      d = get_cg_data (&tnode, true);

	      pcallers = (for_clone ? &d->tm_callers_clone
			  : &d->tm_callers_normal);

	      gcc_assert (*pcallers > 0);
	      *pcallers -= 1;
	    }
	}
    }
}

/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */

static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bool ret = false;

  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  d = get_cg_data (&node, true);
  auto_vec<basic_block, 10> queue;
  new_irr = BITMAP_ALLOC (&tm_obstack);

  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
	{
	  ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
				new_irr,
				old_irr, NULL);
	  ret = bitmap_bit_p (new_irr,
			      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index);
	}
    }
  else
    {
      struct tm_region *region;

      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
	{
	  queue.quick_push (region->entry_block);
	  if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
				      region->exit_blocks))
	    ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
				  region->exit_blocks);
	}
    }

  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;

      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i),
				       for_clone);

      if (old_irr)
	{
	  bitmap_ior_into (old_irr, new_irr);
	  BITMAP_FREE (new_irr);
	}
      else if (for_clone)
	d->irrevocable_blocks_clone = new_irr;
      else
	d->irrevocable_blocks_normal = new_irr;

      if (dump_file && new_irr)
	{
	  const char *dname;
	  bitmap_iterator bmi;
	  unsigned i;

	  dname = lang_hooks.decl_printable_name (current_function_decl, 2);
	  EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
	    fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
	}
    }
  else
    BITMAP_FREE (new_irr);

  pop_cfun ();

  return ret;
}

/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */

static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;

  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;

  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;

  /* If we aren't seeing the final version of the function, we don't
     know what it will contain at runtime.  */
  if (node->get_availability () < AVAIL_AVAILABLE)
    return true;

  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;

  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable mode.  */
  if (d->irrevocable_blocks_clone)
    return true;

  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;

  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias));

  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}

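/* Illustrative source-level view of the ladder above (hypothetical
   declarations, excluded from the build): which attribute decides the
   answer for each function's transactional clone.  */
#if 0
__attribute__((transaction_pure))     int f_pure (int);  /* false: ECF_TM_PURE  */
__attribute__((transaction_safe))     int f_safe (int);  /* false: is_tm_safe  */
__attribute__((transaction_unsafe))   int f_irr (int);   /* true: is_tm_irrevocable  */
__attribute__((transaction_callable)) int f_call (int);  /* true: is_tm_callable  */
extern int f_extern (int);	/* true: body not available for analysis  */
#endif
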
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined not to be safe.  */

static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  for (e = node->callees; e ; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
	&& e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
		"unsafe function call %qD within "
		"%<transaction_safe%> function", e->callee->decl);
}

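/* Example (hypothetical functions, compiled with -fgnu-tm; not part of
   the build) of the diagnostic emitted above:

     extern void unmarked (void);
     __attribute__((transaction_safe))
     void safe_fn (void)
     {
       unmarked ();   // error: unsafe function call 'unmarked' within
		      //        'transaction_safe' function
     }

   Annotating UNMARKED with transaction_safe or transaction_pure, or
   providing a transaction_wrap replacement, silences the error.  */
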
/* Diagnose calls from atomic transactions to unmarked functions
   that are determined not to be safe.  */

static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
			     struct tm_region *all_tm_regions)
{
  struct tm_region *r;

  for (r = all_tm_regions; r ; r = r->next)
    if (gimple_transaction_subcode (r->get_transaction_stmt ())
	& GTMA_IS_RELAXED)
      {
	/* Atomic transactions can be nested inside relaxed.  */
	if (r->inner)
	  ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
	vec<basic_block> bbs;
	gimple_stmt_iterator gsi;
	basic_block bb;
	size_t i;

	bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
				    r->irr_blocks, NULL, false);

	for (i = 0; bbs.iterate (i, &bb); ++i)
	  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      tree fndecl;

	      if (gimple_code (stmt) == GIMPLE_ASM)
		{
		  error_at (gimple_location (stmt),
			    "asm not allowed in atomic transaction");
		  continue;
		}

	      if (!is_gimple_call (stmt))
		continue;
	      fndecl = gimple_call_fndecl (stmt);

	      /* Indirect function calls have been diagnosed already.  */
	      if (!fndecl)
		continue;

	      /* Stop at the end of the transaction.  */
	      if (is_tm_ending_fndecl (fndecl))
		{
		  if (bitmap_bit_p (r->exit_blocks, bb->index))
		    break;
		  continue;
		}

	      /* Marked functions have been diagnosed already.  */
	      if (is_tm_pure_call (stmt))
		continue;
	      if (is_tm_callable (fndecl))
		continue;

	      if (cgraph_node::local_info (fndecl)->tm_may_enter_irr)
		error_at (gimple_location (stmt),
			  "unsafe function call %qD within "
			  "atomic transaction", fndecl);
	    }

	bbs.release ();
      }
}

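/* Example (hypothetical, not built) of the atomic-transaction checks
   above.  Unlike __transaction_relaxed, an atomic region may not fall
   back to serial-irrevocable mode, so both statements are diagnosed:

     extern void unmarked (void);
     void g (void)
     {
       __transaction_atomic {
	 __asm__ ("");   // error: asm not allowed in atomic transaction
	 unmarked ();    // error: unsafe function call 'unmarked' within
			 //        atomic transaction
       }
     }  */
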
/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
   OLD_DECL.  The returned value is an IDENTIFIER_NODE from the shared
   identifier table, so the caller need not manage its storage.  */

static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;

  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);

  if (dc == NULL)
    {
      char length[8];

    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;	/* Skip _Z */

      switch (dc->type)
	{
	case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
	case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
	  /* Don't play silly games, you!  */
	  goto do_unencoded;

	case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
	  /* I'd really like to know if we can ever be passed one of
	     these from the C++ front end.  The Logical Thing would
	     seem that hidden-alias should be outer-most, so that we
	     get hidden-alias of a transaction-clone and not vice-versa.  */
	  old_asm_name += 2;
	  break;

	default:
	  break;
	}

      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);

  new_asm_id = get_identifier (tm_name);
  free (tm_name);

  return new_asm_id;
}

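/* A minimal sketch of the mangling (illustrative, excluded from the
   build; the input identifiers are hypothetical).  Plain symbols get a
   "_ZGTt<len>" prefix; C++ symbols keep their encoding after "_ZGTt".  */
#if 0
static void
example_tm_mangle (void)
{
  /* Plain C symbol: "bar" -> "_ZGTt3bar".  */
  tree tm_id = tm_mangle (get_identifier ("bar"));
  gcc_assert (strcmp (IDENTIFIER_POINTER (tm_id), "_ZGTt3bar") == 0);

  /* C++ mangled "foo()": "_Z3foov" -> "_ZGTt3foov".  */
  tm_id = tm_mangle (get_identifier ("_Z3foov"));
  gcc_assert (strcmp (IDENTIFIER_POINTER (tm_id), "_ZGTt3foov") == 0);
}
#endif
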
static inline void
ipa_tm_mark_force_output_node (struct cgraph_node *node)
{
  node->mark_force_output ();
  node->analyzed = true;
}

static inline void
ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node)
{
  node->forced_by_abi = true;
  node->analyzed = true;
}

/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};

/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;

  if (!node->cpp_implicit_alias)
    return false;

  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
			 TREE_CODE (old_decl), tm_name,
			 TREE_TYPE (old_decl));

  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);

  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (decl_comdat_group_id (old_decl)));

  new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->externally_visible = info->old_node->externally_visible;
  new_node->no_reorder = info->old_node->no_reorder;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;

  record_tm_clone_pair (old_decl, new_decl);

  if (info->old_node->force_output
      || info->old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (info->old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);
  return false;
}

/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */

static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;

  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);

  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;

  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    varpool_node::get (new_decl)->set_comdat_group
      (tm_mangle (DECL_COMDAT_GROUP (old_decl)));

  gcc_assert (!old_node->ipa_transforms_to_apply.exists ());
  new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
  new_node->local.local = false;
  new_node->externally_visible = old_node->externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  if (!old_node->implicit_section)
    new_node->set_section (old_node->get_section ());
  get_cg_data (&old_node, true)->clone = new_node;

  if (old_node->get_availability () >= AVAIL_INTERPOSABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
	{
	  DECL_EXTERNAL (new_decl) = 0;
	  TREE_PUBLIC (new_decl) = 0;
	  DECL_WEAK (new_decl) = 0;
	}

      tree_function_versioning (old_decl, new_decl,
				NULL, false, NULL,
				false, NULL, NULL);
    }

  record_tm_clone_pair (old_decl, new_decl);

  symtab->call_cgraph_insertion_hooks (new_node);
  if (old_node->force_output
      || old_node->ref_list.first_referring ())
    ipa_tm_mark_force_output_node (new_node);
  if (old_node->forced_by_abi)
    ipa_tm_mark_forced_by_abi_node (new_node);

  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias,
						  &data, true);
  }
}

/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */

static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
			basic_block bb)
{
  gimple_stmt_iterator gsi;
  gcall *g;

  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
			 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));

  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create
		       (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
		     g, 0,
		     compute_call_stmt_bb_frequency (node->decl,
						     gimple_bb (g)));
}

/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */

static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
			       struct tm_region *region,
			       gimple_stmt_iterator *gsi, gcall *stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gcall *g;
  gassign *g2;
  bool safe;

  old_fn = gimple_call_fn (stmt);

  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);

      /* By transforming the call into a TM_GETTMCLONE, we are
	 technically taking the address of the original function and
	 its clone.  Explain this so inlining will know this function
	 is needed.  */
      cgraph_node::get (fndecl)->mark_address_taken ();
      if (clone)
	cgraph_node::get (clone)->mark_address_taken ();
    }

  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
				    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node);

  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);

  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);

  gsi_insert_before (gsi, g, GSI_SAME_STMT);

  node->create_edge (cgraph_node::get_create (gettm_fn), g, 0,
		     compute_call_stmt_bb_frequency (node->decl,
						     gimple_bb (g)));

  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn));
  g2 = gimple_build_assign (callfn,
			    fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);

  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);

  gimple_call_set_fn (stmt, callfn);

  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
	&& !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
      {
	tree temp;

	temp = create_tmp_reg (rettype);
	gimple_call_set_lhs (stmt, temp);

	g2 = gimple_build_assign (lhs,
				  fold_build1 (VIEW_CONVERT_EXPR,
					       TREE_TYPE (lhs), temp));
	gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      }
  }

  update_stmt (stmt);
  cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt);
  if (e && e->indirect_info)
    e->indirect_info->polymorphic = false;

  return true;
}

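/* User-level view of the transformation above (hypothetical function
   pointer FP, not part of the build).  Because the callee behind FP is
   unknown at compile time, the call is routed through the runtime:

     void call_indirect (void (*fp) (void))
     {
       __transaction_relaxed { fp (); }
     }

   becomes, roughly,

     tmp_ = _ITM_getTMCloneOrIrrevocable (fp);
     ((void (*) (void)) tmp_) ();

   inside the transaction.  A transaction_safe function-pointer type
   would use _ITM_getTMCloneSafe instead and would not set the
   GTMA_MAY_ENTER_IRREVOCABLE flag.  */
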
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */

static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
				 struct tm_region *region,
				 gimple_stmt_iterator *gsi,
				 bool *need_ssa_rename_p)
{
  gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi));
  struct cgraph_node *new_node;
  struct cgraph_edge *e = node->get_edge (stmt);
  tree fndecl = gimple_call_fndecl (stmt);

  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
	ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }

  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;

  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }

  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_node::get_create (fndecl);

      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.

	 We can't do this earlier in record_tm_replacement because
	 cgraph_remove_unreachable_nodes is called before we inject
	 references to the node.  Further, we can't do this in some
	 nice central place in ipa_tm_execute because we don't have
	 the exact list of wrapper functions that would be used.
	 Marking more wrappers than necessary results in the creation
	 of unnecessary cgraph_nodes, which can cause some of the
	 other IPA passes to crash.

	 We do need to mark these nodes so that we get the proper
	 result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
	 CALLEE as may_enter_irr?  Surely we should be marking the
	 CALLER.  Also note that find_tm_replacement_function also
	 contains mappings into the TM runtime, e.g. memcpy.  These
	 we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;

      d = get_cg_data (&tnode, true);
      new_node = d->clone;

      /* As we've already skipped pure calls and appropriate builtins,
	 and we've already marked irrevocable blocks, if we can't come
	 up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
	{
	  *need_ssa_rename_p |=
	    ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
	  return;
	}

      fndecl = new_node->decl;
    }

  e->redirect_callee (new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}

/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */

static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
			  basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;

  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (!is_gimple_call (stmt))
	continue;
      if (is_tm_pure_call (stmt))
	continue;

      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }

  return need_ssa_rename;
}

/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */

static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
			basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  auto_vec<basic_block> queue;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  queue.safe_push (bb);
  do
    {
      bb = queue.pop ();

      need_ssa_rename |=
	ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);

      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
	continue;

      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
	continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!bitmap_bit_p (visited_blocks, e->dest->index))
	  {
	    bitmap_set_bit (visited_blocks, e->dest->index);
	    queue.safe_push (e->dest);
	  }
    }
  while (!queue.is_empty ());

  BITMAP_FREE (visited_blocks);

  return need_ssa_rename;
}

/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
	  && bitmap_bit_p (d->irrevocable_blocks_normal,
			   region->entry_block->index))
	{
	  transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE
				   | GTMA_MAY_ENTER_IRREVOCABLE
				   | GTMA_HAS_NO_INSTRUMENTATION);
	  continue;
	}

      need_ssa_rename |=
	ipa_tm_transform_calls (node, region, region->entry_block,
				d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone)
    return;

  push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL,
			    single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
			    d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
}

/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = cgraph_node_queue ();
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = cgraph_node_queue ();

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

  cgraph_node::checking_verify_cgraph_nodes ();

  bitmap_obstack_initialize (&tm_obstack);
  initialize_original_copy_tables ();

  /* For all local functions marked tm_callable, queue them.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (is_tm_callable (node->decl)
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	/* ... marked tm_pure, record that fact for the runtime by
	   indicating that the pure function is its own tm_callable.
	   No need to do this if the function's address can't be taken.  */
	if (is_tm_pure (node->decl))
	  {
	    if (!node->local.local)
	      record_tm_clone_pair (node->decl, node->decl);
	    continue;
	  }

	push_cfun (DECL_STRUCT_FUNCTION (node->decl));
	calculate_dominance_info (CDI_DOMINATORS);

	tm_region_init (NULL);
	if (all_tm_regions)
	  {
	    d = get_cg_data (&node, true);

	    /* Scan for calls that are in each transaction, and
	       generate the uninstrumented code path.  */
	    ipa_tm_scan_calls_transaction (d, &tm_callees);

	    /* Put it in the worklist so we can scan the function
	       later (ipa_tm_scan_irr_function) and mark the
	       irrevocable blocks.  */
	    maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	    d->want_irr_scan_normal = true;
	  }

	pop_cfun ();
      }

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      a = node->get_availability ();
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
	 (ipa_tm_scan_irr_function) and mark the irrevocable
	 blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
	 irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
	       && !is_tm_safe_or_pure (node->decl))
	ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_INTERPOSABLE)
	{
	  if (!tree_versionable_function_p (node->decl))
	    ipa_tm_note_irrevocable (node, &irr_worklist);
	  else if (!d->is_irrevocable)
	    {
	      /* If this is an alias, make sure its base is queued as well.
		 We need not scan the callees now, as the base will do.  */
	      if (node->alias)
		{
		  node = cgraph_node::get (node->thunk.alias);
		  d = get_cg_data (&node, true);
		  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
		  continue;
		}

	      /* Add all nodes called by this function into
		 tm_callees as well.  */
	      ipa_tm_scan_calls_clone (node, &tm_callees);
	    }
	}
    }

  /* Iterate scans until no more work to be done.  Prefer not to use
     vec::pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
	{
	  d->want_irr_scan_normal = false;
	  ipa_tm_scan_irr_function (node, false);
	}
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
	ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  irr_worklist.truncate (0);
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (ipa_tm_mayenterirr_function (node))
	{
	  d = get_cg_data (&node, true);
	  gcc_assert (d->in_worklist == false);
	  maybe_push_queue (node, &irr_worklist, &d->in_worklist);
	}
    }

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < irr_worklist.length (); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;

      if (i > 256 && i == irr_worklist.length () / 8)
	{
	  irr_worklist.block_remove (0, i);
	  i = 0;
	}

      node = irr_worklist[i];
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e ; e = e->next_caller)
	{
	  caller = e->caller;
	  if (!is_tm_safe_or_pure (caller->decl)
	      && !caller->local.tm_may_enter_irr)
	    {
	      d = get_cg_data (&caller, true);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}

      /* Propagate back to referring aliases as well.  */
      FOR_EACH_ALIAS (node, ref)
	{
	  caller = dyn_cast<cgraph_node *> (ref->referring);
	  if (!caller->local.tm_may_enter_irr)
	    {
	      /* ?? Do not traverse aliases here.  */
	      d = get_cg_data (&caller, false);
	      maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
	    }
	}
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (is_tm_safe (node->decl))
	  ipa_tm_diagnose_tm_safe (node);
	else if (d->all_tm_regions)
	  ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      bool doit = false;

      node = tm_callees[i];
      if (node->cpp_implicit_alias)
	continue;

      a = node->get_availability ();
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
	doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
	doit = true;
      else if (!d->is_irrevocable
	       && d->tm_callers_normal + d->tm_callers_clone > 0)
	doit = true;

      if (doit)
	ipa_tm_create_version (node);
    }

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < tm_callees.length (); ++i)
    {
      node = tm_callees[i];
      if (node->analyzed)
	{
	  d = get_cg_data (&node, true);
	  if (d->clone)
	    ipa_tm_transform_clone (node);
	}
    }
  FOR_EACH_DEFINED_FUNCTION (node)
    if (node->lowered
	&& node->get_availability () >= AVAIL_INTERPOSABLE)
      {
	d = get_cg_data (&node, true);
	if (d->all_tm_regions)
	  ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  tm_callees.release ();
  irr_worklist.release ();
  bitmap_obstack_release (&tm_obstack);
  free_original_copy_tables ();

  FOR_EACH_FUNCTION (node)
    node->aux = NULL;

  cgraph_node::checking_verify_cgraph_nodes ();

  return 0;
}

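/* End-to-end sketch of the pass on a tiny translation unit (hypothetical
   names, excluded from the build).  ipa_tm_execute queues WORK because it
   is marked tm_callable, discovers LEAF while scanning WORK's prospective
   clone, emits the clones _ZGTt4work and _ZGTt4leaf, and redirects the
   instrumented call inside CALLER's transaction to _ZGTt4work.  */
#if 0
static int counter;

static void leaf (void) { counter++; }

__attribute__((transaction_callable))
static void work (void) { leaf (); }

void caller (void)
{
  __transaction_relaxed { work (); }
}
#endif
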
namespace {

const pass_data pass_data_ipa_tm =
{
  SIMPLE_IPA_PASS, /* type */
  "tmipa", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TRANS_MEM, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_tm : public simple_ipa_opt_pass
{
public:
  pass_ipa_tm (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tm; }
  virtual unsigned int execute (function *) { return ipa_tm_execute (); }

}; // class pass_ipa_tm

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_tm (gcc::context *ctxt)
{
  return new pass_ipa_tm (ctxt);
}

#include "gt-trans-mem.h"