Commit | Line | Data |
---|---|---|
4c0315d0 | 1 | /* Passes for transactional memory support. |
fbd26352 | 2 | Copyright (C) 2008-2019 Free Software Foundation, Inc. |
99e3199b | 3 | Contributed by Richard Henderson <rth@redhat.com> |
4 | and Aldy Hernandez <aldyh@redhat.com>. | |
4c0315d0 | 5 | |
6 | This file is part of GCC. | |
7 | ||
8 | GCC is free software; you can redistribute it and/or modify it under | |
9 | the terms of the GNU General Public License as published by the Free | |
10 | Software Foundation; either version 3, or (at your option) any later | |
11 | version. | |
12 | ||
13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with GCC; see the file COPYING3. If not see | |
20 | <http://www.gnu.org/licenses/>. */ | |
21 | ||
22 | #include "config.h" | |
23 | #include "system.h" | |
24 | #include "coretypes.h" | |
9ef16211 | 25 | #include "backend.h" |
7c29e30e | 26 | #include "target.h" |
27 | #include "rtl.h" | |
b20a8bb4 | 28 | #include "tree.h" |
9ef16211 | 29 | #include "gimple.h" |
7c29e30e | 30 | #include "cfghooks.h" |
31 | #include "tree-pass.h" | |
9ef16211 | 32 | #include "ssa.h" |
7c29e30e | 33 | #include "cgraph.h" |
34 | #include "gimple-pretty-print.h" | |
35 | #include "diagnostic-core.h" | |
b20a8bb4 | 36 | #include "fold-const.h" |
bc61cadb | 37 | #include "tree-eh.h" |
9ed99284 | 38 | #include "calls.h" |
a8783bee | 39 | #include "gimplify.h" |
dcf1a1ec | 40 | #include "gimple-iterator.h" |
e795d6e1 | 41 | #include "gimplify-me.h" |
dcf1a1ec | 42 | #include "gimple-walk.h" |
073c1fd5 | 43 | #include "tree-cfg.h" |
073c1fd5 | 44 | #include "tree-into-ssa.h" |
4c0315d0 | 45 | #include "tree-inline.h" |
4c0315d0 | 46 | #include "demangle.h" |
47 | #include "output.h" | |
48 | #include "trans-mem.h" | |
49 | #include "params.h" | |
4c0315d0 | 50 | #include "langhooks.h" |
79f958cb | 51 | #include "cfgloop.h" |
424a4a92 | 52 | #include "tree-ssa-address.h" |
30a86690 | 53 | #include "stringpool.h" |
54 | #include "attribs.h" | |
4c0315d0 | 55 | |
4c0315d0 | 56 | #define A_RUNINSTRUMENTEDCODE 0x0001 |
57 | #define A_RUNUNINSTRUMENTEDCODE 0x0002 | |
58 | #define A_SAVELIVEVARIABLES 0x0004 | |
59 | #define A_RESTORELIVEVARIABLES 0x0008 | |
60 | #define A_ABORTTRANSACTION 0x0010 | |
61 | ||
62 | #define AR_USERABORT 0x0001 | |
63 | #define AR_USERRETRY 0x0002 | |
64 | #define AR_TMCONFLICT 0x0004 | |
65 | #define AR_EXCEPTIONBLOCKABORT 0x0008 | |
66 | #define AR_OUTERABORT 0x0010 | |
67 | ||
68 | #define MODE_SERIALIRREVOCABLE 0x0000 | |
69 | ||
70 | ||
71 | /* The representation of a transaction changes several times during the | |
72 | lowering process. In the beginning, in the front-end we have the | |
73 | GENERIC tree TRANSACTION_EXPR. For example, | |
74 | ||
75 | __transaction { | |
76 | local++; | |
77 | if (++global == 10) | |
78 | __tm_abort; | |
79 | } | |
80 | ||
81 | During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is | |
82 | trivially replaced with a GIMPLE_TRANSACTION node. | |
83 | ||
84 | During pass_lower_tm, we examine the body of transactions looking | |
85 | for aborts. Transactions that do not contain an abort may be | |
86 | merged into an outer transaction. We also add a TRY-FINALLY node | |
87 | to arrange for the transaction to be committed on any exit. | |
88 | ||
89 | [??? Think about how this arrangement affects throw-with-commit | |
90 | and throw-with-abort operations. In this case we want the TRY to | |
91 | handle gotos, but not to catch any exceptions because the transaction | |
92 | will already be closed.] | |
93 | ||
94 | GIMPLE_TRANSACTION [label=NULL] { | |
95 | try { | |
96 | local = local + 1; | |
97 | t0 = global; | |
98 | t1 = t0 + 1; | |
99 | global = t1; | |
100 | if (t1 == 10) | |
101 | __builtin___tm_abort (); | |
102 | } finally { | |
103 | __builtin___tm_commit (); | |
104 | } | |
105 | } | |
106 | ||
107 | During pass_lower_eh, we create EH regions for the transactions, | |
108 | intermixed with the regular EH stuff. This gives us a nice persistent | |
109 | mapping (all the way through rtl) from transactional memory operation | |
110 | back to the transaction, which allows us to get the abnormal edges | |
111 | correct to model transaction aborts and restarts: | |
112 | ||
113 | GIMPLE_TRANSACTION [label=over] | |
114 | local = local + 1; | |
115 | t0 = global; | |
116 | t1 = t0 + 1; | |
117 | global = t1; | |
118 | if (t1 == 10) | |
119 | __builtin___tm_abort (); | |
120 | __builtin___tm_commit (); | |
121 | over: | |
122 | ||
123 | This is the end of all_lowering_passes, and so is what is present | |
124 | during the IPA passes, and through all of the optimization passes. | |
125 | ||
126 | During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all | |
127 | functions and mark functions for cloning. | |
128 | ||
129 | At the end of gimple optimization, before exiting SSA form, | |
130 | pass_tm_edges replaces statements that perform transactional | |
131 | memory operations with the appropriate TM builtins, and swaps | |
132 | out function calls with their transactional clones. At this | |
133 | point we introduce the abnormal transaction restart edges and | |
134 | complete lowering of the GIMPLE_TRANSACTION node. | |
135 | ||
136 | x = __builtin___tm_start (MAY_ABORT); | |
137 | eh_label: | |
138 | if (x & abort_transaction) | |
139 | goto over; | |
140 | local = local + 1; | |
141 | t0 = __builtin___tm_load (global); | |
142 | t1 = t0 + 1; | |
143 | __builtin___tm_store (&global, t1); | |
144 | if (t1 == 10) | |
145 | __builtin___tm_abort (); | |
146 | __builtin___tm_commit (); | |
147 | over: | |
148 | */ | |
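/* For reference, a minimal source-level example (illustrative only; it
   assumes the GNU TM keywords accepted under -fgnu-tm rather than the
   older __transaction/__tm_abort spelling used above):

     int global, local;

     void
     f (void)
     {
       __transaction_atomic {
	 local++;
	 if (++global == 10)
	   __transaction_cancel;
       }
     }

   The front end turns the body into the TRANSACTION_EXPR form shown at
   the top of this comment, and the passes described above take it from
   there.  */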
149 | ||
0cd02a19 | 150 | static void *expand_regions (struct tm_region *, |
151 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 152 | void *, bool); |
0cd02a19 | 153 | |
4c0315d0 | 154 | \f |
155 | /* Return the attributes we want to examine for X, or NULL if it's not | |
156 | something we examine. We look at function types, but allow pointers | |
157 | to function types and function decls and peek through. */ | |
158 | ||
159 | static tree | |
160 | get_attrs_for (const_tree x) | |
161 | { | |
547355ae | 162 | if (x == NULL_TREE) |
163 | return NULL_TREE; | |
164 | ||
4c0315d0 | 165 | switch (TREE_CODE (x)) |
166 | { | |
167 | case FUNCTION_DECL: | |
168 | return TYPE_ATTRIBUTES (TREE_TYPE (x)); | |
4c0315d0 | 169 | |
170 | default: | |
171 | if (TYPE_P (x)) | |
547355ae | 172 | return NULL_TREE; |
4c0315d0 | 173 | x = TREE_TYPE (x); |
174 | if (TREE_CODE (x) != POINTER_TYPE) | |
547355ae | 175 | return NULL_TREE; |
4c0315d0 | 176 | /* FALLTHRU */ |
177 | ||
178 | case POINTER_TYPE: | |
179 | x = TREE_TYPE (x); | |
180 | if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE) | |
547355ae | 181 | return NULL_TREE; |
4c0315d0 | 182 | /* FALLTHRU */ |
183 | ||
184 | case FUNCTION_TYPE: | |
185 | case METHOD_TYPE: | |
186 | return TYPE_ATTRIBUTES (x); | |
187 | } | |
188 | } | |
189 | ||
190 | /* Return true if X has been marked TM_PURE. */ | |
191 | ||
192 | bool | |
193 | is_tm_pure (const_tree x) | |
194 | { | |
195 | unsigned flags; | |
196 | ||
197 | switch (TREE_CODE (x)) | |
198 | { | |
199 | case FUNCTION_DECL: | |
200 | case FUNCTION_TYPE: | |
201 | case METHOD_TYPE: | |
202 | break; | |
203 | ||
204 | default: | |
205 | if (TYPE_P (x)) | |
206 | return false; | |
207 | x = TREE_TYPE (x); | |
208 | if (TREE_CODE (x) != POINTER_TYPE) | |
209 | return false; | |
210 | /* FALLTHRU */ | |
211 | ||
212 | case POINTER_TYPE: | |
213 | x = TREE_TYPE (x); | |
214 | if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE) | |
215 | return false; | |
216 | break; | |
217 | } | |
218 | ||
219 | flags = flags_from_decl_or_type (x); | |
220 | return (flags & ECF_TM_PURE) != 0; | |
221 | } | |
222 | ||
223 | /* Return true if X has been marked TM_IRREVOCABLE. */ | |
224 | ||
225 | static bool | |
226 | is_tm_irrevocable (tree x) | |
227 | { | |
228 | tree attrs = get_attrs_for (x); | |
229 | ||
230 | if (attrs && lookup_attribute ("transaction_unsafe", attrs)) | |
231 | return true; | |
232 | ||
233 | /* A call to the irrevocable builtin is, by definition, | |
234 | irrevocable. */ | |
235 | if (TREE_CODE (x) == ADDR_EXPR) | |
236 | x = TREE_OPERAND (x, 0); | |
237 | if (TREE_CODE (x) == FUNCTION_DECL | |
a0e9bfbb | 238 | && fndecl_built_in_p (x, BUILT_IN_TM_IRREVOCABLE)) |
4c0315d0 | 239 | return true; |
240 | ||
241 | return false; | |
242 | } | |
243 | ||
244 | /* Return true if X has been marked TM_SAFE. */ | |
245 | ||
246 | bool | |
247 | is_tm_safe (const_tree x) | |
248 | { | |
249 | if (flag_tm) | |
250 | { | |
251 | tree attrs = get_attrs_for (x); | |
252 | if (attrs) | |
253 | { | |
254 | if (lookup_attribute ("transaction_safe", attrs)) | |
255 | return true; | |
256 | if (lookup_attribute ("transaction_may_cancel_outer", attrs)) | |
257 | return true; | |
258 | } | |
259 | } | |
260 | return false; | |
261 | } | |
262 | ||
263 | /* Return true if CALL is const, or tm_pure. */ | |
264 | ||
265 | static bool | |
42acab1c | 266 | is_tm_pure_call (gimple *call) |
4c0315d0 | 267 | { |
1706ef0f | 268 | return (gimple_call_flags (call) & (ECF_CONST | ECF_TM_PURE)) != 0; |
4c0315d0 | 269 | } |
270 | ||
271 | /* Return true if X has been marked TM_CALLABLE. */ | |
272 | ||
273 | static bool | |
274 | is_tm_callable (tree x) | |
275 | { | |
276 | tree attrs = get_attrs_for (x); | |
277 | if (attrs) | |
278 | { | |
279 | if (lookup_attribute ("transaction_callable", attrs)) | |
280 | return true; | |
281 | if (lookup_attribute ("transaction_safe", attrs)) | |
282 | return true; | |
283 | if (lookup_attribute ("transaction_may_cancel_outer", attrs)) | |
284 | return true; | |
285 | } | |
286 | return false; | |
287 | } | |
288 | ||
289 | /* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */ | |
290 | ||
291 | bool | |
292 | is_tm_may_cancel_outer (tree x) | |
293 | { | |
294 | tree attrs = get_attrs_for (x); | |
295 | if (attrs) | |
296 | return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL; | |
297 | return false; | |
298 | } | |
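/* A sketch of how the attributes tested by the predicates above are
   typically spelled in user code (illustrative declarations only):

     void f (void) __attribute__((transaction_safe));
     void g (void) __attribute__((transaction_callable));
     void h (void) __attribute__((transaction_unsafe));
     void k (void) __attribute__((transaction_may_cancel_outer));

   With -fgnu-tm, is_tm_safe is true for f and k, is_tm_callable for
   f, g and k, and is_tm_irrevocable for h.  */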
299 | ||
300 | /* Return true for built-in functions that "end" a transaction. */ | |
301 | ||
302 | bool | |
303 | is_tm_ending_fndecl (tree fndecl) | |
304 | { | |
305 | if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL) | |
306 | switch (DECL_FUNCTION_CODE (fndecl)) | |
307 | { | |
308 | case BUILT_IN_TM_COMMIT: | |
309 | case BUILT_IN_TM_COMMIT_EH: | |
310 | case BUILT_IN_TM_ABORT: | |
311 | case BUILT_IN_TM_IRREVOCABLE: | |
312 | return true; | |
313 | default: | |
314 | break; | |
315 | } | |
316 | ||
317 | return false; | |
318 | } | |
319 | ||
50a50143 | 320 | /* Return true if STMT is a built-in function call that "ends" a |
321 | transaction. */ | |
322 | ||
323 | bool | |
42acab1c | 324 | is_tm_ending (gimple *stmt) |
50a50143 | 325 | { |
326 | tree fndecl; | |
327 | ||
328 | if (gimple_code (stmt) != GIMPLE_CALL) | |
329 | return false; | |
330 | ||
331 | fndecl = gimple_call_fndecl (stmt); | |
332 | return (fndecl != NULL_TREE | |
333 | && is_tm_ending_fndecl (fndecl)); | |
334 | } | |
335 | ||
4c0315d0 | 336 | /* Return true if STMT is a TM load. */ |
337 | ||
338 | static bool | |
42acab1c | 339 | is_tm_load (gimple *stmt) |
4c0315d0 | 340 | { |
341 | tree fndecl; | |
342 | ||
343 | if (gimple_code (stmt) != GIMPLE_CALL) | |
344 | return false; | |
345 | ||
346 | fndecl = gimple_call_fndecl (stmt); | |
a0e9bfbb | 347 | return (fndecl |
348 | && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL) | |
4c0315d0 | 349 | && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl))); |
350 | } | |
351 | ||
352 | /* Same as above, but for simple TM loads, that is, not the | |
353 | after-write, after-read, etc optimized variants. */ | |
354 | ||
355 | static bool | |
42acab1c | 356 | is_tm_simple_load (gimple *stmt) |
4c0315d0 | 357 | { |
358 | tree fndecl; | |
359 | ||
360 | if (gimple_code (stmt) != GIMPLE_CALL) | |
361 | return false; | |
362 | ||
363 | fndecl = gimple_call_fndecl (stmt); | |
a0e9bfbb | 364 | if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) |
4c0315d0 | 365 | { |
366 | enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); | |
367 | return (fcode == BUILT_IN_TM_LOAD_1 | |
368 | || fcode == BUILT_IN_TM_LOAD_2 | |
369 | || fcode == BUILT_IN_TM_LOAD_4 | |
370 | || fcode == BUILT_IN_TM_LOAD_8 | |
371 | || fcode == BUILT_IN_TM_LOAD_FLOAT | |
372 | || fcode == BUILT_IN_TM_LOAD_DOUBLE | |
373 | || fcode == BUILT_IN_TM_LOAD_LDOUBLE | |
374 | || fcode == BUILT_IN_TM_LOAD_M64 | |
375 | || fcode == BUILT_IN_TM_LOAD_M128 | |
376 | || fcode == BUILT_IN_TM_LOAD_M256); | |
377 | } | |
378 | return false; | |
379 | } | |
380 | ||
381 | /* Return true if STMT is a TM store. */ | |
382 | ||
383 | static bool | |
42acab1c | 384 | is_tm_store (gimple *stmt) |
4c0315d0 | 385 | { |
386 | tree fndecl; | |
387 | ||
388 | if (gimple_code (stmt) != GIMPLE_CALL) | |
389 | return false; | |
390 | ||
391 | fndecl = gimple_call_fndecl (stmt); | |
a0e9bfbb | 392 | return (fndecl |
393 | && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL) | |
4c0315d0 | 394 | && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl))); |
395 | } | |
396 | ||
397 | /* Same as above, but for simple TM stores, that is, not the | |
398 | after-write, after-read, etc optimized variants. */ | |
399 | ||
400 | static bool | |
42acab1c | 401 | is_tm_simple_store (gimple *stmt) |
4c0315d0 | 402 | { |
403 | tree fndecl; | |
404 | ||
405 | if (gimple_code (stmt) != GIMPLE_CALL) | |
406 | return false; | |
407 | ||
408 | fndecl = gimple_call_fndecl (stmt); | |
a0e9bfbb | 409 | if (fndecl |
410 | && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)) | |
4c0315d0 | 411 | { |
412 | enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); | |
413 | return (fcode == BUILT_IN_TM_STORE_1 | |
414 | || fcode == BUILT_IN_TM_STORE_2 | |
415 | || fcode == BUILT_IN_TM_STORE_4 | |
416 | || fcode == BUILT_IN_TM_STORE_8 | |
417 | || fcode == BUILT_IN_TM_STORE_FLOAT | |
418 | || fcode == BUILT_IN_TM_STORE_DOUBLE | |
419 | || fcode == BUILT_IN_TM_STORE_LDOUBLE | |
420 | || fcode == BUILT_IN_TM_STORE_M64 | |
421 | || fcode == BUILT_IN_TM_STORE_M128 | |
422 | || fcode == BUILT_IN_TM_STORE_M256); | |
423 | } | |
424 | return false; | |
425 | } | |
426 | ||
427 | /* Return true if FNDECL is BUILT_IN_TM_ABORT. */ | |
428 | ||
429 | static bool | |
430 | is_tm_abort (tree fndecl) | |
431 | { | |
a0e9bfbb | 432 | return (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_TM_ABORT)); |
4c0315d0 | 433 | } |
434 | ||
435 | /* Build a GENERIC tree for a user abort. This is called by front ends | |
436 | while transforming the __tm_abort statement. */ | |
437 | ||
438 | tree | |
439 | build_tm_abort_call (location_t loc, bool is_outer) | |
440 | { | |
441 | return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1, | |
442 | build_int_cst (integer_type_node, | |
443 | AR_USERABORT | |
444 | | (is_outer ? AR_OUTERABORT : 0))); | |
445 | } | |
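/* For example (a sketch, assuming the front ends map the user-level
   cancel statement onto this helper as described above):

     __transaction_cancel            ->  __builtin___tm_abort (0x0001)
     __transaction_cancel [[outer]]  ->  __builtin___tm_abort (0x0011)

   i.e. AR_USERABORT, optionally combined with AR_OUTERABORT.  */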
4c0315d0 | 446 | \f |
2fbe7a32 | 447 | /* Map for arbitrary function replacement under TM, as created |
4c0315d0 | 448 | by the tm_wrap attribute. */ |
449 | ||
eae1ecb4 | 450 | struct tm_wrapper_hasher : ggc_cache_ptr_hash<tree_map> |
d1023d12 | 451 | { |
452 | static inline hashval_t hash (tree_map *m) { return m->hash; } | |
453 | static inline bool | |
454 | equal (tree_map *a, tree_map *b) | |
455 | { | |
456 | return a->base.from == b->base.from; | |
457 | } | |
458 | ||
99378011 | 459 | static int |
460 | keep_cache_entry (tree_map *&m) | |
461 | { | |
462 | return ggc_marked_p (m->base.from); | |
463 | } | |
d1023d12 | 464 | }; |
465 | ||
466 | static GTY((cache)) hash_table<tm_wrapper_hasher> *tm_wrap_map; | |
4c0315d0 | 467 | |
468 | void | |
469 | record_tm_replacement (tree from, tree to) | |
470 | { | |
471 | struct tree_map **slot, *h; | |
472 | ||
473 | /* Do not inline wrapper functions that will get replaced in the TM | |
474 | pass. | |
475 | ||
476 | Suppose you have foo() that will get replaced into tmfoo(). Make | |
477 | sure the inliner doesn't try to outsmart us and inline foo() | |
478 | before we get a chance to do the TM replacement. */ | |
479 | DECL_UNINLINABLE (from) = 1; | |
480 | ||
481 | if (tm_wrap_map == NULL) | |
d1023d12 | 482 | tm_wrap_map = hash_table<tm_wrapper_hasher>::create_ggc (32); |
4c0315d0 | 483 | |
25a27413 | 484 | h = ggc_alloc<tree_map> (); |
4c0315d0 | 485 | h->hash = htab_hash_pointer (from); |
486 | h->base.from = from; | |
487 | h->to = to; | |
488 | ||
d1023d12 | 489 | slot = tm_wrap_map->find_slot_with_hash (h, h->hash, INSERT); |
4c0315d0 | 490 | *slot = h; |
491 | } | |
492 | ||
493 | /* Return a TM-aware replacement function for DECL. */ | |
494 | ||
495 | static tree | |
496 | find_tm_replacement_function (tree fndecl) | |
497 | { | |
498 | if (tm_wrap_map) | |
499 | { | |
500 | struct tree_map *h, in; | |
501 | ||
502 | in.base.from = fndecl; | |
503 | in.hash = htab_hash_pointer (fndecl); | |
d1023d12 | 504 | h = tm_wrap_map->find_with_hash (&in, in.hash); |
4c0315d0 | 505 | if (h) |
506 | return h->to; | |
507 | } | |
508 | ||
509 | /* ??? We may well want TM versions of most of the common <string.h> | |
510 | functions. For now, we already have these defined. */ | |
511 | /* Adjust expand_call_tm() attributes as necessary for the cases | |
512 | handled here: */ | |
513 | if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL) | |
514 | switch (DECL_FUNCTION_CODE (fndecl)) | |
515 | { | |
516 | case BUILT_IN_MEMCPY: | |
517 | return builtin_decl_explicit (BUILT_IN_TM_MEMCPY); | |
518 | case BUILT_IN_MEMMOVE: | |
519 | return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE); | |
520 | case BUILT_IN_MEMSET: | |
521 | return builtin_decl_explicit (BUILT_IN_TM_MEMSET); | |
522 | default: | |
523 | return NULL; | |
524 | } | |
525 | ||
526 | return NULL; | |
527 | } | |
528 | ||
529 | /* When appropriate, record TM replacement for memory allocation functions. | |
530 | ||
531 | FROM is the FNDECL to wrap. */ | |
532 | void | |
533 | tm_malloc_replacement (tree from) | |
534 | { | |
535 | const char *str; | |
536 | tree to; | |
537 | ||
538 | if (TREE_CODE (from) != FUNCTION_DECL) | |
539 | return; | |
540 | ||
541 | /* If we have a previous replacement, the user must be explicitly | |
542 | wrapping malloc/calloc/free. They better know what they're | |
543 | doing... */ | |
544 | if (find_tm_replacement_function (from)) | |
545 | return; | |
546 | ||
547 | str = IDENTIFIER_POINTER (DECL_NAME (from)); | |
548 | ||
549 | if (!strcmp (str, "malloc")) | |
550 | to = builtin_decl_explicit (BUILT_IN_TM_MALLOC); | |
551 | else if (!strcmp (str, "calloc")) | |
552 | to = builtin_decl_explicit (BUILT_IN_TM_CALLOC); | |
553 | else if (!strcmp (str, "free")) | |
554 | to = builtin_decl_explicit (BUILT_IN_TM_FREE); | |
555 | else | |
556 | return; | |
557 | ||
558 | TREE_NOTHROW (to) = 0; | |
559 | ||
560 | record_tm_replacement (from, to); | |
561 | } | |
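/* Taken together, the two routines above yield, for calls made inside a
   transaction, replacements along these (illustrative) lines:

     memcpy (dst, src, n)   ->  the BUILT_IN_TM_MEMCPY builtin
     p = malloc (n)         ->  the BUILT_IN_TM_MALLOC builtin
     free (p)               ->  the BUILT_IN_TM_FREE builtin

   Only the mapping is recorded here (and the original marked
   uninlinable); the actual call substitution happens later, in the TM
   expansion of calls.  */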
562 | \f | |
563 | /* Diagnostics for tm_safe functions/regions. Called by the front end | |
564 | once we've lowered the function to high-gimple. */ | |
565 | ||
566 | /* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq. | |
567 | Process exactly one statement. WI->INFO is set to non-null when in | |
568 | the context of a tm_safe function, and null for a __transaction block. */ | |
569 | ||
570 | #define DIAG_TM_OUTER 1 | |
571 | #define DIAG_TM_SAFE 2 | |
572 | #define DIAG_TM_RELAXED 4 | |
573 | ||
574 | struct diagnose_tm | |
575 | { | |
576 | unsigned int summary_flags : 8; | |
577 | unsigned int block_flags : 8; | |
578 | unsigned int func_flags : 8; | |
4c0315d0 | 579 | unsigned int saw_volatile : 1; |
42acab1c | 580 | gimple *stmt; |
4c0315d0 | 581 | }; |
582 | ||
639b72bd | 583 | /* Return true if T is a volatile lvalue of some kind. */ |
e153bd50 | 584 | |
585 | static bool | |
639b72bd | 586 | volatile_lvalue_p (tree t) |
e153bd50 | 587 | { |
639b72bd | 588 | return ((SSA_VAR_P (t) || REFERENCE_CLASS_P (t)) |
e153bd50 | 589 | && TREE_THIS_VOLATILE (TREE_TYPE (t))); |
590 | } | |
591 | ||
4c0315d0 | 592 | /* Tree callback function for diagnose_tm pass. */ |
593 | ||
594 | static tree | |
639b72bd | 595 | diagnose_tm_1_op (tree *tp, int *walk_subtrees, void *data) |
4c0315d0 | 596 | { |
597 | struct walk_stmt_info *wi = (struct walk_stmt_info *) data; | |
598 | struct diagnose_tm *d = (struct diagnose_tm *) wi->info; | |
4c0315d0 | 599 | |
639b72bd | 600 | if (TYPE_P (*tp)) |
601 | *walk_subtrees = false; | |
602 | else if (volatile_lvalue_p (*tp) | |
603 | && !d->saw_volatile) | |
4c0315d0 | 604 | { |
605 | d->saw_volatile = 1; | |
639b72bd | 606 | if (d->block_flags & DIAG_TM_SAFE) |
607 | error_at (gimple_location (d->stmt), | |
608 | "invalid use of volatile lvalue inside transaction"); | |
609 | else if (d->func_flags & DIAG_TM_SAFE) | |
610 | error_at (gimple_location (d->stmt), | |
19efce70 | 611 | "invalid use of volatile lvalue inside %<transaction_safe%> " |
639b72bd | 612 | "function"); |
4c0315d0 | 613 | } |
614 | ||
615 | return NULL_TREE; | |
616 | } | |
617 | ||
0e80b01d | 618 | static inline bool |
619 | is_tm_safe_or_pure (const_tree x) | |
620 | { | |
621 | return is_tm_safe (x) || is_tm_pure (x); | |
622 | } | |
623 | ||
4c0315d0 | 624 | static tree |
625 | diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
626 | struct walk_stmt_info *wi) | |
627 | { | |
42acab1c | 628 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 629 | struct diagnose_tm *d = (struct diagnose_tm *) wi->info; |
630 | ||
631 | /* Save stmt for use in leaf analysis. */ | |
632 | d->stmt = stmt; | |
633 | ||
634 | switch (gimple_code (stmt)) | |
635 | { | |
636 | case GIMPLE_CALL: | |
637 | { | |
638 | tree fn = gimple_call_fn (stmt); | |
639 | ||
640 | if ((d->summary_flags & DIAG_TM_OUTER) == 0 | |
641 | && is_tm_may_cancel_outer (fn)) | |
642 | error_at (gimple_location (stmt), | |
643 | "%<transaction_may_cancel_outer%> function call not within" | |
644 | " outer transaction or %<transaction_may_cancel_outer%>"); | |
645 | ||
646 | if (d->summary_flags & DIAG_TM_SAFE) | |
647 | { | |
648 | bool is_safe, direct_call_p; | |
649 | tree replacement; | |
650 | ||
651 | if (TREE_CODE (fn) == ADDR_EXPR | |
652 | && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL) | |
653 | { | |
654 | direct_call_p = true; | |
655 | replacement = TREE_OPERAND (fn, 0); | |
656 | replacement = find_tm_replacement_function (replacement); | |
657 | if (replacement) | |
658 | fn = replacement; | |
659 | } | |
660 | else | |
661 | { | |
662 | direct_call_p = false; | |
663 | replacement = NULL_TREE; | |
664 | } | |
665 | ||
666 | if (is_tm_safe_or_pure (fn)) | |
667 | is_safe = true; | |
668 | else if (is_tm_callable (fn) || is_tm_irrevocable (fn)) | |
669 | { | |
670 | /* A function explicitly marked transaction_callable as | |
671 | opposed to transaction_safe is being defined to be | |
672 | unsafe as part of its ABI, regardless of its contents. */ | |
673 | is_safe = false; | |
674 | } | |
675 | else if (direct_call_p) | |
676 | { | |
c579aed5 | 677 | if (IS_TYPE_OR_DECL_P (fn) |
678 | && flags_from_decl_or_type (fn) & ECF_TM_BUILTIN) | |
4c0315d0 | 679 | is_safe = true; |
680 | else if (replacement) | |
681 | { | |
682 | /* ??? At present we've been considering replacements | |
683 | merely transaction_callable, and therefore might | |
684 | enter irrevocable. The tm_wrap attribute has not | |
685 | yet made it into the new language spec. */ | |
686 | is_safe = false; | |
687 | } | |
688 | else | |
689 | { | |
690 | /* ??? Diagnostics for unmarked direct calls moved into | |
691 | the IPA pass. Section 3.2 of the spec details how | |
692 | functions not marked should be considered "implicitly | |
693 | safe" based on having examined the function body. */ | |
694 | is_safe = true; | |
695 | } | |
696 | } | |
697 | else | |
698 | { | |
699 | /* An unmarked indirect call. Consider it unsafe even | |
700 | though optimization may yet figure out how to inline. */ | |
701 | is_safe = false; | |
702 | } | |
703 | ||
704 | if (!is_safe) | |
705 | { | |
706 | if (TREE_CODE (fn) == ADDR_EXPR) | |
707 | fn = TREE_OPERAND (fn, 0); | |
708 | if (d->block_flags & DIAG_TM_SAFE) | |
2d3bf658 | 709 | { |
710 | if (direct_call_p) | |
711 | error_at (gimple_location (stmt), | |
712 | "unsafe function call %qD within " | |
713 | "atomic transaction", fn); | |
714 | else | |
300eddba | 715 | { |
9ae1b28a | 716 | if ((!DECL_P (fn) || DECL_NAME (fn)) |
717 | && TREE_CODE (fn) != SSA_NAME) | |
300eddba | 718 | error_at (gimple_location (stmt), |
719 | "unsafe function call %qE within " | |
720 | "atomic transaction", fn); | |
721 | else | |
722 | error_at (gimple_location (stmt), | |
723 | "unsafe indirect function call within " | |
724 | "atomic transaction"); | |
725 | } | |
2d3bf658 | 726 | } |
4c0315d0 | 727 | else |
2d3bf658 | 728 | { |
729 | if (direct_call_p) | |
730 | error_at (gimple_location (stmt), | |
731 | "unsafe function call %qD within " | |
732 | "%<transaction_safe%> function", fn); | |
733 | else | |
300eddba | 734 | { |
9ae1b28a | 735 | if ((!DECL_P (fn) || DECL_NAME (fn)) |
736 | && TREE_CODE (fn) != SSA_NAME) | |
300eddba | 737 | error_at (gimple_location (stmt), |
738 | "unsafe function call %qE within " | |
739 | "%<transaction_safe%> function", fn); | |
740 | else | |
741 | error_at (gimple_location (stmt), | |
742 | "unsafe indirect function call within " | |
743 | "%<transaction_safe%> function"); | |
744 | } | |
2d3bf658 | 745 | } |
4c0315d0 | 746 | } |
747 | } | |
748 | } | |
749 | break; | |
750 | ||
751 | case GIMPLE_ASM: | |
752 | /* ??? We ought to come up with a way to add attributes to | |
753 | asm statements, and then add "transaction_safe" to it. | |
754 | Either that or get the language spec to resurrect __tm_waiver. */ | |
755 | if (d->block_flags & DIAG_TM_SAFE) | |
756 | error_at (gimple_location (stmt), | |
85b9be9b | 757 | "%<asm%> not allowed in atomic transaction"); |
4c0315d0 | 758 | else if (d->func_flags & DIAG_TM_SAFE) |
759 | error_at (gimple_location (stmt), | |
85b9be9b | 760 | "%<asm%> not allowed in %<transaction_safe%> function"); |
4c0315d0 | 761 | break; |
762 | ||
763 | case GIMPLE_TRANSACTION: | |
764 | { | |
1a91d914 | 765 | gtransaction *trans_stmt = as_a <gtransaction *> (stmt); |
4c0315d0 | 766 | unsigned char inner_flags = DIAG_TM_SAFE; |
767 | ||
1a91d914 | 768 | if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_RELAXED) |
4c0315d0 | 769 | { |
770 | if (d->block_flags & DIAG_TM_SAFE) | |
771 | error_at (gimple_location (stmt), | |
772 | "relaxed transaction in atomic transaction"); | |
773 | else if (d->func_flags & DIAG_TM_SAFE) | |
774 | error_at (gimple_location (stmt), | |
775 | "relaxed transaction in %<transaction_safe%> function"); | |
4c0315d0 | 776 | inner_flags = DIAG_TM_RELAXED; |
777 | } | |
1a91d914 | 778 | else if (gimple_transaction_subcode (trans_stmt) & GTMA_IS_OUTER) |
4c0315d0 | 779 | { |
780 | if (d->block_flags) | |
781 | error_at (gimple_location (stmt), | |
782 | "outer transaction in transaction"); | |
783 | else if (d->func_flags & DIAG_TM_OUTER) | |
784 | error_at (gimple_location (stmt), | |
785 | "outer transaction in " | |
786 | "%<transaction_may_cancel_outer%> function"); | |
787 | else if (d->func_flags & DIAG_TM_SAFE) | |
788 | error_at (gimple_location (stmt), | |
789 | "outer transaction in %<transaction_safe%> function"); | |
4c0315d0 | 790 | inner_flags |= DIAG_TM_OUTER; |
791 | } | |
792 | ||
793 | *handled_ops_p = true; | |
1a91d914 | 794 | if (gimple_transaction_body (trans_stmt)) |
4c0315d0 | 795 | { |
796 | struct walk_stmt_info wi_inner; | |
797 | struct diagnose_tm d_inner; | |
798 | ||
799 | memset (&d_inner, 0, sizeof (d_inner)); | |
800 | d_inner.func_flags = d->func_flags; | |
801 | d_inner.block_flags = d->block_flags | inner_flags; | |
802 | d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags; | |
803 | ||
804 | memset (&wi_inner, 0, sizeof (wi_inner)); | |
805 | wi_inner.info = &d_inner; | |
806 | ||
1a91d914 | 807 | walk_gimple_seq (gimple_transaction_body (trans_stmt), |
4c0315d0 | 808 | diagnose_tm_1, diagnose_tm_1_op, &wi_inner); |
4c0315d0 | 809 | } |
810 | } | |
811 | break; | |
812 | ||
813 | default: | |
814 | break; | |
815 | } | |
816 | ||
817 | return NULL_TREE; | |
818 | } | |
819 | ||
820 | static unsigned int | |
821 | diagnose_tm_blocks (void) | |
822 | { | |
823 | struct walk_stmt_info wi; | |
824 | struct diagnose_tm d; | |
825 | ||
826 | memset (&d, 0, sizeof (d)); | |
827 | if (is_tm_may_cancel_outer (current_function_decl)) | |
828 | d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE; | |
829 | else if (is_tm_safe (current_function_decl)) | |
830 | d.func_flags = DIAG_TM_SAFE; | |
831 | d.summary_flags = d.func_flags; | |
832 | ||
833 | memset (&wi, 0, sizeof (wi)); | |
834 | wi.info = &d; | |
835 | ||
836 | walk_gimple_seq (gimple_body (current_function_decl), | |
837 | diagnose_tm_1, diagnose_tm_1_op, &wi); | |
838 | ||
4c0315d0 | 839 | return 0; |
840 | } | |
841 | ||
7620bc82 | 842 | namespace { |
843 | ||
844 | const pass_data pass_data_diagnose_tm_blocks = | |
cbe8bda8 | 845 | { |
846 | GIMPLE_PASS, /* type */ | |
847 | "*diagnose_tm_blocks", /* name */ | |
848 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 849 | TV_TRANS_MEM, /* tv_id */ |
850 | PROP_gimple_any, /* properties_required */ | |
851 | 0, /* properties_provided */ | |
852 | 0, /* properties_destroyed */ | |
853 | 0, /* todo_flags_start */ | |
854 | 0, /* todo_flags_finish */ | |
4c0315d0 | 855 | }; |
cbe8bda8 | 856 | |
7620bc82 | 857 | class pass_diagnose_tm_blocks : public gimple_opt_pass |
cbe8bda8 | 858 | { |
859 | public: | |
9af5ce0c | 860 | pass_diagnose_tm_blocks (gcc::context *ctxt) |
861 | : gimple_opt_pass (pass_data_diagnose_tm_blocks, ctxt) | |
cbe8bda8 | 862 | {} |
863 | ||
864 | /* opt_pass methods: */ | |
31315c24 | 865 | virtual bool gate (function *) { return flag_tm; } |
65b0537f | 866 | virtual unsigned int execute (function *) { return diagnose_tm_blocks (); } |
cbe8bda8 | 867 | |
868 | }; // class pass_diagnose_tm_blocks | |
869 | ||
7620bc82 | 870 | } // anon namespace |
871 | ||
cbe8bda8 | 872 | gimple_opt_pass * |
873 | make_pass_diagnose_tm_blocks (gcc::context *ctxt) | |
874 | { | |
875 | return new pass_diagnose_tm_blocks (ctxt); | |
876 | } | |
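/* A sketch of code the pass above diagnoses (assuming the -fgnu-tm
   keywords; the exact wording of each error is in diagnose_tm_1 and
   diagnose_tm_1_op):

     extern void (*fp) (void);
     volatile int v;

     void f (void)
     {
       __transaction_atomic {
	 v = 1;         // invalid use of volatile lvalue inside transaction
	 fp ();         // unsafe function call within atomic transaction
	 __asm__ ("");  // 'asm' not allowed in atomic transaction
	 __transaction_relaxed { }  // relaxed transaction in atomic transaction
       }
     }
   */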
4c0315d0 | 877 | \f |
878 | /* Instead of instrumenting thread private memory, we save the | |
879 | addresses in a log which we later use to save/restore the addresses | |
880 | upon transaction start/restart. | |
881 | ||
882 | The log is keyed by address, where each element contains individual | |
883 | statements among different code paths that perform the store. | |
884 | ||
885 | This log is later used to generate either plain save/restore of the | |
886 | addresses upon transaction start/restart, or calls to the ITM_L* | |
887 | logging functions. | |
888 | ||
889 | So for something like: | |
890 | ||
891 | struct large { int x[1000]; }; | |
892 | struct large lala = { 0 }; | |
893 | __transaction { | |
894 | lala.x[i] = 123; | |
895 | ... | |
896 | } | |
897 | ||
898 | We can either save/restore: | |
899 | ||
900 | lala = { 0 }; | |
901 | trxn = _ITM_startTransaction (); | |
902 | if (trxn & a_saveLiveVariables) | |
903 | tmp_lala1 = lala.x[i]; | |
904 | else if (a & a_restoreLiveVariables) | |
905 | lala.x[i] = tmp_lala1; | |
906 | ||
907 | or use the logging functions: | |
908 | ||
909 | lala = { 0 }; | |
910 | trxn = _ITM_startTransaction (); | |
911 | _ITM_LU4 (&lala.x[i]); | |
912 | ||
913 | Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as far | |
914 | up the dominator tree as possible, to shadow all of the writes to a given | |
915 | location (thus reducing the total number of logging calls), but not | |
916 | so high as to be called on a path that does not perform a | |
917 | write. */ | |
918 | ||
919 | /* One individual log entry. We may have multiple statements for the | |
920 | same location if neither dominates the other (on different | |
921 | execution paths). */ | |
04009ada | 922 | struct tm_log_entry |
4c0315d0 | 923 | { |
924 | /* Address to save. */ | |
925 | tree addr; | |
926 | /* Entry block for the transaction this address occurs in. */ | |
927 | basic_block entry_block; | |
928 | /* Dominating statements the store occurs in. */ | |
42acab1c | 929 | vec<gimple *> stmts; |
4c0315d0 | 930 | /* Initially, while we are building the log, we place a nonzero |
931 | value here to mean that this address *will* be saved with a | |
932 | save/restore sequence. Later, when generating the save sequence | |
933 | we place the SSA temp generated here. */ | |
934 | tree save_var; | |
04009ada | 935 | }; |
4c0315d0 | 936 | |
4c0315d0 | 937 | |
d9dd21a8 | 938 | /* Log entry hashtable helpers. */ |
4c0315d0 | 939 | |
576d4555 | 940 | struct log_entry_hasher : pointer_hash <tm_log_entry> |
4c0315d0 | 941 | { |
9969c043 | 942 | static inline hashval_t hash (const tm_log_entry *); |
943 | static inline bool equal (const tm_log_entry *, const tm_log_entry *); | |
944 | static inline void remove (tm_log_entry *); | |
d9dd21a8 | 945 | }; |
4c0315d0 | 946 | |
947 | /* Htab support. Return hash value for a `tm_log_entry'. */ | |
d9dd21a8 | 948 | inline hashval_t |
9969c043 | 949 | log_entry_hasher::hash (const tm_log_entry *log) |
4c0315d0 | 950 | { |
4c0315d0 | 951 | return iterative_hash_expr (log->addr, 0); |
952 | } | |
953 | ||
954 | /* Htab support. Return true if two log entries are the same. */ | |
d9dd21a8 | 955 | inline bool |
9969c043 | 956 | log_entry_hasher::equal (const tm_log_entry *log1, const tm_log_entry *log2) |
4c0315d0 | 957 | { |
4c0315d0 | 958 | /* FIXME: |
959 | ||
960 | rth: I suggest that we get rid of the component refs etc. | |
961 | I.e. resolve the reference to base + offset. | |
962 | ||
963 | We may need to actually finish a merge with mainline for this, | |
964 | since we'd like to be presented with Richi's MEM_REF_EXPRs more | |
965 | often than not. But in the meantime your tm_log_entry could save | |
966 | the results of get_inner_reference. | |
967 | ||
968 | See: g++.dg/tm/pr46653.C | |
969 | */ | |
970 | ||
971 | /* Special case plain equality because operand_equal_p() below will | |
972 | return FALSE if the addresses are equal but they have | |
973 | side-effects (e.g. a volatile address). */ | |
974 | if (log1->addr == log2->addr) | |
975 | return true; | |
976 | ||
977 | return operand_equal_p (log1->addr, log2->addr, 0); | |
978 | } | |
979 | ||
980 | /* Htab support. Free one tm_log_entry. */ | |
d9dd21a8 | 981 | inline void |
9969c043 | 982 | log_entry_hasher::remove (tm_log_entry *lp) |
4c0315d0 | 983 | { |
f1f41a6c | 984 | lp->stmts.release (); |
4c0315d0 | 985 | free (lp); |
986 | } | |
987 | ||
d9dd21a8 | 988 | |
989 | /* The actual log. */ | |
c1f445d2 | 990 | static hash_table<log_entry_hasher> *tm_log; |
d9dd21a8 | 991 | |
992 | /* Addresses to log with a save/restore sequence. These should be in | |
993 | dominator order. */ | |
994 | static vec<tree> tm_log_save_addresses; | |
995 | ||
996 | enum thread_memory_type | |
997 | { | |
998 | mem_non_local = 0, | |
999 | mem_thread_local, | |
1000 | mem_transaction_local, | |
1001 | mem_max | |
1002 | }; | |
1003 | ||
04009ada | 1004 | struct tm_new_mem_map |
d9dd21a8 | 1005 | { |
1006 | /* SSA_NAME being dereferenced. */ | |
1007 | tree val; | |
1008 | enum thread_memory_type local_new_memory; | |
04009ada | 1009 | }; |
d9dd21a8 | 1010 | |
1011 | /* Hashtable helpers. */ | |
1012 | ||
04009ada | 1013 | struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map> |
d9dd21a8 | 1014 | { |
04009ada | 1015 | static inline hashval_t hash (const tm_new_mem_map *); |
1016 | static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *); | |
d9dd21a8 | 1017 | }; |
1018 | ||
1019 | inline hashval_t | |
04009ada | 1020 | tm_mem_map_hasher::hash (const tm_new_mem_map *v) |
d9dd21a8 | 1021 | { |
1022 | return (intptr_t)v->val >> 4; | |
1023 | } | |
1024 | ||
1025 | inline bool | |
04009ada | 1026 | tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c) |
d9dd21a8 | 1027 | { |
1028 | return v->val == c->val; | |
1029 | } | |
1030 | ||
1031 | /* Map for an SSA_NAME originally pointing to a non-aliased new piece | |
1032 | of memory (malloc, alloca, etc.). */ | |
c1f445d2 | 1033 | static hash_table<tm_mem_map_hasher> *tm_new_mem_hash; |
d9dd21a8 | 1034 | |
4c0315d0 | 1035 | /* Initialize logging data structures. */ |
1036 | static void | |
1037 | tm_log_init (void) | |
1038 | { | |
c1f445d2 | 1039 | tm_log = new hash_table<log_entry_hasher> (10); |
1040 | tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5); | |
f1f41a6c | 1041 | tm_log_save_addresses.create (5); |
4c0315d0 | 1042 | } |
1043 | ||
1044 | /* Free logging data structures. */ | |
1045 | static void | |
1046 | tm_log_delete (void) | |
1047 | { | |
c1f445d2 | 1048 | delete tm_log; |
1049 | tm_log = NULL; | |
1050 | delete tm_new_mem_hash; | |
1051 | tm_new_mem_hash = NULL; | |
f1f41a6c | 1052 | tm_log_save_addresses.release (); |
4c0315d0 | 1053 | } |
1054 | ||
1055 | /* Return true if MEM is a transaction invariant memory for the TM | |
1056 | region starting at REGION_ENTRY_BLOCK. */ | |
1057 | static bool | |
1058 | transaction_invariant_address_p (const_tree mem, basic_block region_entry_block) | |
1059 | { | |
1060 | if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF) | |
1061 | && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME) | |
1062 | { | |
1063 | basic_block def_bb; | |
1064 | ||
1065 | def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0))); | |
1066 | return def_bb != region_entry_block | |
1067 | && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb); | |
1068 | } | |
1069 | ||
1070 | mem = strip_invariant_refs (mem); | |
1071 | return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem)); | |
1072 | } | |
1073 | ||
1074 | /* Given an address ADDR in STMT, find it in the memory log or add it, | |
1075 | making sure to keep only the addresses highest in the dominator | |
1076 | tree. | |
1077 | ||
1078 | ENTRY_BLOCK is the entry_block for the transaction. | |
1079 | ||
1080 | If we find the address in the log, make sure it's either the same | |
1081 | address, or an equivalent one that dominates ADDR. | |
1082 | ||
1083 | If we find the address, but neither ADDR dominates the found | |
1084 | address, nor the found one dominates ADDR, we're on different | |
1085 | execution paths. Add it. | |
1086 | ||
1087 | If known, ENTRY_BLOCK is the entry block for the region, otherwise | |
1088 | NULL. */ | |
1089 | static void | |
42acab1c | 1090 | tm_log_add (basic_block entry_block, tree addr, gimple *stmt) |
4c0315d0 | 1091 | { |
d9dd21a8 | 1092 | tm_log_entry **slot; |
4c0315d0 | 1093 | struct tm_log_entry l, *lp; |
1094 | ||
1095 | l.addr = addr; | |
c1f445d2 | 1096 | slot = tm_log->find_slot (&l, INSERT); |
4c0315d0 | 1097 | if (!*slot) |
1098 | { | |
1099 | tree type = TREE_TYPE (addr); | |
1100 | ||
1101 | lp = XNEW (struct tm_log_entry); | |
1102 | lp->addr = addr; | |
1103 | *slot = lp; | |
1104 | ||
1105 | /* Small invariant addresses can be handled as save/restores. */ | |
1106 | if (entry_block | |
1107 | && transaction_invariant_address_p (lp->addr, entry_block) | |
1108 | && TYPE_SIZE_UNIT (type) != NULL | |
cd4547bf | 1109 | && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) |
aa59f000 | 1110 | && ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE_UNIT (type)) |
4c0315d0 | 1111 | < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE)) |
1112 | /* We must be able to copy this type normally. I.e., no | |
1113 | special constructors and the like. */ | |
1114 | && !TREE_ADDRESSABLE (type)) | |
1115 | { | |
54a0a932 | 1116 | lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save"); |
f1f41a6c | 1117 | lp->stmts.create (0); |
4c0315d0 | 1118 | lp->entry_block = entry_block; |
1119 | /* Save addresses separately in dominator order so we don't | |
1120 | get confused by overlapping addresses in the save/restore | |
1121 | sequence. */ | |
f1f41a6c | 1122 | tm_log_save_addresses.safe_push (lp->addr); |
4c0315d0 | 1123 | } |
1124 | else | |
1125 | { | |
1126 | /* Use the logging functions. */ | |
f1f41a6c | 1127 | lp->stmts.create (5); |
1128 | lp->stmts.quick_push (stmt); | |
4c0315d0 | 1129 | lp->save_var = NULL; |
1130 | } | |
1131 | } | |
1132 | else | |
1133 | { | |
1134 | size_t i; | |
42acab1c | 1135 | gimple *oldstmt; |
4c0315d0 | 1136 | |
d9dd21a8 | 1137 | lp = *slot; |
4c0315d0 | 1138 | |
1139 | /* If we're generating a save/restore sequence, we don't care | |
1140 | about statements. */ | |
1141 | if (lp->save_var) | |
1142 | return; | |
1143 | ||
f1f41a6c | 1144 | for (i = 0; lp->stmts.iterate (i, &oldstmt); ++i) |
4c0315d0 | 1145 | { |
1146 | if (stmt == oldstmt) | |
1147 | return; | |
1148 | /* We already have a store to the same address, higher up the | |
1149 | dominator tree. Nothing to do. */ | |
1150 | if (dominated_by_p (CDI_DOMINATORS, | |
1151 | gimple_bb (stmt), gimple_bb (oldstmt))) | |
1152 | return; | |
1153 | /* We should be processing blocks in dominator tree order. */ | |
1154 | gcc_assert (!dominated_by_p (CDI_DOMINATORS, | |
1155 | gimple_bb (oldstmt), gimple_bb (stmt))); | |
1156 | } | |
1157 | /* Store is on a different code path. */ | |
f1f41a6c | 1158 | lp->stmts.safe_push (stmt); |
4c0315d0 | 1159 | } |
1160 | } | |
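/* For instance (an illustrative summary of the choice made above; the
   threshold is PARAM_TM_MAX_AGGREGATE_SIZE, presumably exposed as
   --param tm-max-aggregate-size):

     int i;                            // small, transaction-invariant address:
                                       //   saved/restored via a "tm_save" temp
     struct large { int x[1000]; } l;  // too big for save/restore:
                                       //   instrumented with BUILT_IN_TM_LOG*
                                       //   calls instead

   matching the two code sequences shown in the comment before
   tm_log_entry.  */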
1161 | ||
1162 | /* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME | |
1163 | result, insert the new statements before GSI. */ | |
1164 | ||
1165 | static tree | |
1166 | gimplify_addr (gimple_stmt_iterator *gsi, tree x) | |
1167 | { | |
1168 | if (TREE_CODE (x) == TARGET_MEM_REF) | |
1169 | x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x); | |
1170 | else | |
1171 | x = build_fold_addr_expr (x); | |
1172 | return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT); | |
1173 | } | |
1174 | ||
1175 | /* Instrument one address with the logging functions. | |
1176 | ADDR is the address to save. | |
1177 | STMT is the statement before which to place it. */ | |
1178 | static void | |
42acab1c | 1179 | tm_log_emit_stmt (tree addr, gimple *stmt) |
4c0315d0 | 1180 | { |
1181 | tree type = TREE_TYPE (addr); | |
4c0315d0 | 1182 | gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
42acab1c | 1183 | gimple *log; |
4c0315d0 | 1184 | enum built_in_function code = BUILT_IN_TM_LOG; |
1185 | ||
1186 | if (type == float_type_node) | |
1187 | code = BUILT_IN_TM_LOG_FLOAT; | |
1188 | else if (type == double_type_node) | |
1189 | code = BUILT_IN_TM_LOG_DOUBLE; | |
1190 | else if (type == long_double_type_node) | |
1191 | code = BUILT_IN_TM_LOG_LDOUBLE; | |
e5fee0fe | 1192 | else if (TYPE_SIZE (type) != NULL |
1193 | && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
4c0315d0 | 1194 | { |
e5fee0fe | 1195 | unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type)); |
1196 | ||
1197 | if (TREE_CODE (type) == VECTOR_TYPE) | |
4c0315d0 | 1198 | { |
e5fee0fe | 1199 | switch (type_size) |
4c0315d0 | 1200 | { |
e5fee0fe | 1201 | case 64: |
1202 | code = BUILT_IN_TM_LOG_M64; | |
1203 | break; | |
1204 | case 128: | |
1205 | code = BUILT_IN_TM_LOG_M128; | |
1206 | break; | |
1207 | case 256: | |
1208 | code = BUILT_IN_TM_LOG_M256; | |
1209 | break; | |
1210 | default: | |
1211 | goto unhandled_vec; | |
1212 | } | |
1213 | if (!builtin_decl_explicit_p (code)) | |
1214 | goto unhandled_vec; | |
1215 | } | |
1216 | else | |
1217 | { | |
1218 | unhandled_vec: | |
1219 | switch (type_size) | |
1220 | { | |
c847d969 | 1221 | case 8: |
e5fee0fe | 1222 | code = BUILT_IN_TM_LOG_1; |
1223 | break; | |
c847d969 | 1224 | case 16: |
e5fee0fe | 1225 | code = BUILT_IN_TM_LOG_2; |
1226 | break; | |
c847d969 | 1227 | case 32: |
e5fee0fe | 1228 | code = BUILT_IN_TM_LOG_4; |
1229 | break; | |
c847d969 | 1230 | case 64: |
e5fee0fe | 1231 | code = BUILT_IN_TM_LOG_8; |
1232 | break; | |
4c0315d0 | 1233 | } |
4c0315d0 | 1234 | } |
1235 | } | |
1236 | ||
e5fee0fe | 1237 | if (code != BUILT_IN_TM_LOG && !builtin_decl_explicit_p (code)) |
1238 | code = BUILT_IN_TM_LOG; | |
1239 | tree decl = builtin_decl_explicit (code); | |
1240 | ||
4c0315d0 | 1241 | addr = gimplify_addr (&gsi, addr); |
1242 | if (code == BUILT_IN_TM_LOG) | |
e5fee0fe | 1243 | log = gimple_build_call (decl, 2, addr, TYPE_SIZE_UNIT (type)); |
4c0315d0 | 1244 | else |
e5fee0fe | 1245 | log = gimple_build_call (decl, 1, addr); |
4c0315d0 | 1246 | gsi_insert_before (&gsi, log, GSI_SAME_STMT); |
1247 | } | |
1248 | ||
1249 | /* Go through the log and instrument addresses that must be instrumented | |
1250 | with the logging functions. Leave the save/restore addresses for | |
1251 | later. */ | |
1252 | static void | |
1253 | tm_log_emit (void) | |
1254 | { | |
c1f445d2 | 1255 | hash_table<log_entry_hasher>::iterator hi; |
4c0315d0 | 1256 | struct tm_log_entry *lp; |
1257 | ||
c1f445d2 | 1258 | FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi) |
4c0315d0 | 1259 | { |
1260 | size_t i; | |
42acab1c | 1261 | gimple *stmt; |
4c0315d0 | 1262 | |
1263 | if (dump_file) | |
1264 | { | |
1265 | fprintf (dump_file, "TM thread private mem logging: "); | |
1ffa4346 | 1266 | print_generic_expr (dump_file, lp->addr); |
4c0315d0 | 1267 | fprintf (dump_file, "\n"); |
1268 | } | |
1269 | ||
1270 | if (lp->save_var) | |
1271 | { | |
1272 | if (dump_file) | |
1273 | fprintf (dump_file, "DUMPING to variable\n"); | |
1274 | continue; | |
1275 | } | |
1276 | else | |
1277 | { | |
1278 | if (dump_file) | |
1279 | fprintf (dump_file, "DUMPING with logging functions\n"); | |
f1f41a6c | 1280 | for (i = 0; lp->stmts.iterate (i, &stmt); ++i) |
4c0315d0 | 1281 | tm_log_emit_stmt (lp->addr, stmt); |
1282 | } | |
1283 | } | |
1284 | } | |
1285 | ||
1286 | /* Emit the save sequence for the corresponding addresses in the log. | |
1287 | ENTRY_BLOCK is the entry block for the transaction. | |
1288 | BB is the basic block to insert the code in. */ | |
1289 | static void | |
1290 | tm_log_emit_saves (basic_block entry_block, basic_block bb) | |
1291 | { | |
1292 | size_t i; | |
1293 | gimple_stmt_iterator gsi = gsi_last_bb (bb); | |
42acab1c | 1294 | gimple *stmt; |
4c0315d0 | 1295 | struct tm_log_entry l, *lp; |
1296 | ||
f1f41a6c | 1297 | for (i = 0; i < tm_log_save_addresses.length (); ++i) |
4c0315d0 | 1298 | { |
f1f41a6c | 1299 | l.addr = tm_log_save_addresses[i]; |
c1f445d2 | 1300 | lp = *(tm_log->find_slot (&l, NO_INSERT)); |
4c0315d0 | 1301 | gcc_assert (lp->save_var != NULL); |
1302 | ||
1303 | /* We only care about variables in the current transaction. */ | |
1304 | if (lp->entry_block != entry_block) | |
1305 | continue; | |
1306 | ||
1307 | stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr)); | |
1308 | ||
1309 | /* Make sure we can create an SSA_NAME for this type. For | |
1310 | instance, aggregates aren't allowed, in which case the system | |
1311 | will create a VOP for us and everything will just work. */ | |
1312 | if (is_gimple_reg_type (TREE_TYPE (lp->save_var))) | |
1313 | { | |
1314 | lp->save_var = make_ssa_name (lp->save_var, stmt); | |
1315 | gimple_assign_set_lhs (stmt, lp->save_var); | |
1316 | } | |
1317 | ||
1318 | gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); | |
1319 | } | |
1320 | } | |
1321 | ||
1322 | /* Emit the restore sequence for the corresponding addresses in the log. | |
1323 | ENTRY_BLOCK is the entry block for the transaction. | |
1324 | BB is the basic block to insert the code in. */ | |
1325 | static void | |
1326 | tm_log_emit_restores (basic_block entry_block, basic_block bb) | |
1327 | { | |
1328 | int i; | |
1329 | struct tm_log_entry l, *lp; | |
1330 | gimple_stmt_iterator gsi; | |
42acab1c | 1331 | gimple *stmt; |
4c0315d0 | 1332 | |
f1f41a6c | 1333 | for (i = tm_log_save_addresses.length () - 1; i >= 0; i--) |
4c0315d0 | 1334 | { |
f1f41a6c | 1335 | l.addr = tm_log_save_addresses[i]; |
c1f445d2 | 1336 | lp = *(tm_log->find_slot (&l, NO_INSERT)); |
4c0315d0 | 1337 | gcc_assert (lp->save_var != NULL); |
1338 | ||
1339 | /* We only care about variables in the current transaction. */ | |
1340 | if (lp->entry_block != entry_block) | |
1341 | continue; | |
1342 | ||
1343 | /* Restores are in LIFO order from the saves in case we have | |
1344 | overlaps. */ | |
1345 | gsi = gsi_start_bb (bb); | |
1346 | ||
1347 | stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var); | |
1348 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
1349 | } | |
1350 | } | |
1351 | ||
4c0315d0 | 1352 | \f |
1353 | static tree lower_sequence_tm (gimple_stmt_iterator *, bool *, | |
1354 | struct walk_stmt_info *); | |
1355 | static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *, | |
1356 | struct walk_stmt_info *); | |
1357 | ||
1358 | /* Evaluate an address X being dereferenced and determine if it | |
1359 | originally points to a non-aliased new chunk of memory (malloc, | |
1360 | alloca, etc). | |
1361 | ||
1362 | Return MEM_THREAD_LOCAL if it points to a thread-local address. | |
1363 | Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address. | |
1364 | Return MEM_NON_LOCAL otherwise. | |
1365 | ||
1366 | ENTRY_BLOCK is the entry block to the transaction containing the | |
1367 | dereference of X. */ | |
1368 | static enum thread_memory_type | |
1369 | thread_private_new_memory (basic_block entry_block, tree x) | |
1370 | { | |
42acab1c | 1371 | gimple *stmt = NULL; |
4c0315d0 | 1372 | enum tree_code code; |
04009ada | 1373 | tm_new_mem_map **slot; |
1374 | tm_new_mem_map elt, *elt_p; | |
4c0315d0 | 1375 | tree val = x; |
1376 | enum thread_memory_type retval = mem_transaction_local; | |
1377 | ||
1378 | if (!entry_block | |
1379 | || TREE_CODE (x) != SSA_NAME | |
1380 | /* Possible uninitialized use, or a function argument. In | |
1381 | either case, we don't care. */ | |
1382 | || SSA_NAME_IS_DEFAULT_DEF (x)) | |
1383 | return mem_non_local; | |
1384 | ||
1385 | /* Look in cache first. */ | |
1386 | elt.val = x; | |
c1f445d2 | 1387 | slot = tm_new_mem_hash->find_slot (&elt, INSERT); |
d9dd21a8 | 1388 | elt_p = *slot; |
4c0315d0 | 1389 | if (elt_p) |
1390 | return elt_p->local_new_memory; | |
1391 | ||
1392 | /* Optimistically assume the memory is transaction local during | |
1393 | processing. This catches recursion into this variable. */ | |
04009ada | 1394 | *slot = elt_p = XNEW (tm_new_mem_map); |
4c0315d0 | 1395 | elt_p->val = val; |
1396 | elt_p->local_new_memory = mem_transaction_local; | |
1397 | ||
1398 | /* Search DEF chain to find the original definition of this address. */ | |
1399 | do | |
1400 | { | |
1401 | if (ptr_deref_may_alias_global_p (x)) | |
1402 | { | |
1403 | /* Address escapes. This is not thread-private. */ | |
1404 | retval = mem_non_local; | |
1405 | goto new_memory_ret; | |
1406 | } | |
1407 | ||
1408 | stmt = SSA_NAME_DEF_STMT (x); | |
1409 | ||
1410 | /* If the malloc call is outside the transaction, this is | |
1411 | thread-local. */ | |
1412 | if (retval != mem_thread_local | |
1413 | && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block)) | |
1414 | retval = mem_thread_local; | |
1415 | ||
1416 | if (is_gimple_assign (stmt)) | |
1417 | { | |
1418 | code = gimple_assign_rhs_code (stmt); | |
1419 | /* x = foo ==> foo */ | |
1420 | if (code == SSA_NAME) | |
1421 | x = gimple_assign_rhs1 (stmt); | |
1422 | /* x = foo + n ==> foo */ | |
1423 | else if (code == POINTER_PLUS_EXPR) | |
1424 | x = gimple_assign_rhs1 (stmt); | |
1425 | /* x = (cast*) foo ==> foo */ | |
d09ef31a | 1426 | else if (code == VIEW_CONVERT_EXPR || CONVERT_EXPR_CODE_P (code)) |
4c0315d0 | 1427 | x = gimple_assign_rhs1 (stmt); |
43ee99ea | 1428 | /* x = c ? op1 : op2 == > op1 or op2 just like a PHI */ |
1429 | else if (code == COND_EXPR) | |
1430 | { | |
1431 | tree op1 = gimple_assign_rhs2 (stmt); | |
1432 | tree op2 = gimple_assign_rhs3 (stmt); | |
1433 | enum thread_memory_type mem; | |
1434 | retval = thread_private_new_memory (entry_block, op1); | |
1435 | if (retval == mem_non_local) | |
1436 | goto new_memory_ret; | |
1437 | mem = thread_private_new_memory (entry_block, op2); | |
1438 | retval = MIN (retval, mem); | |
1439 | goto new_memory_ret; | |
1440 | } | |
4c0315d0 | 1441 | else |
1442 | { | |
1443 | retval = mem_non_local; | |
1444 | goto new_memory_ret; | |
1445 | } | |
1446 | } | |
1447 | else | |
1448 | { | |
1449 | if (gimple_code (stmt) == GIMPLE_PHI) | |
1450 | { | |
1451 | unsigned int i; | |
1452 | enum thread_memory_type mem; | |
1453 | tree phi_result = gimple_phi_result (stmt); | |
1454 | ||
1455 | /* If any of the ancestors are non-local, we are sure to | |
1456 | be non-local. Otherwise we can avoid doing anything | |
1457 | and inherit what has already been generated. */ | |
1458 | retval = mem_max; | |
1459 | for (i = 0; i < gimple_phi_num_args (stmt); ++i) | |
1460 | { | |
1461 | tree op = PHI_ARG_DEF (stmt, i); | |
1462 | ||
1463 | /* Exclude self-assignment. */ | |
1464 | if (phi_result == op) | |
1465 | continue; | |
1466 | ||
1467 | mem = thread_private_new_memory (entry_block, op); | |
1468 | if (mem == mem_non_local) | |
1469 | { | |
1470 | retval = mem; | |
1471 | goto new_memory_ret; | |
1472 | } | |
1473 | retval = MIN (retval, mem); | |
1474 | } | |
1475 | goto new_memory_ret; | |
1476 | } | |
1477 | break; | |
1478 | } | |
1479 | } | |
1480 | while (TREE_CODE (x) == SSA_NAME); | |
1481 | ||
1482 | if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC) | |
1483 | /* Thread-local or transaction-local. */ | |
1484 | ; | |
1485 | else | |
1486 | retval = mem_non_local; | |
1487 | ||
1488 | new_memory_ret: | |
1489 | elt_p->local_new_memory = retval; | |
1490 | return retval; | |
1491 | } | |
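/* Illustrative examples of the classification above (assuming the
   -fgnu-tm keywords and that the pointers involved do not escape):

     p = malloc (n);                      // allocated before the transaction
     __transaction_atomic { *p = 1; }     //   -> mem_thread_local

     __transaction_atomic {
       q = malloc (n);                    // allocated inside the transaction
       *q = 1;                            //   -> mem_transaction_local
     }

     __transaction_atomic { *gptr = 1; }  // gptr is a global pointer
                                          //   -> mem_non_local
   */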
1492 | ||
1493 | /* Determine whether X has to be instrumented using a read | |
1494 | or write barrier. | |
1495 | ||
1496 | ENTRY_BLOCK is the entry block for the region in which STMT resides, | |
1497 | or NULL if unknown. | |
1498 | ||
1499 | STMT is the statement in which X occurs. It is used for thread | |
1500 | private memory instrumentation. If no TPM instrumentation is | |
1501 | desired, STMT should be null. */ | |
1502 | static bool | |
42acab1c | 1503 | requires_barrier (basic_block entry_block, tree x, gimple *stmt) |
4c0315d0 | 1504 | { |
1505 | tree orig = x; | |
1506 | while (handled_component_p (x)) | |
1507 | x = TREE_OPERAND (x, 0); | |
1508 | ||
1509 | switch (TREE_CODE (x)) | |
1510 | { | |
1511 | case INDIRECT_REF: | |
1512 | case MEM_REF: | |
1513 | { | |
1514 | enum thread_memory_type ret; | |
1515 | ||
1516 | ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0)); | |
1517 | if (ret == mem_non_local) | |
1518 | return true; | |
1519 | if (stmt && ret == mem_thread_local) | |
1520 | /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */ | |
1521 | tm_log_add (entry_block, orig, stmt); | |
1522 | ||
1523 | /* Transaction-locals require nothing at all. For malloc, a | |
1524 | transaction restart frees the memory and we reallocate. | |
1525 | For alloca, the stack pointer gets reset by the retry and | |
1526 | we reallocate. */ | |
1527 | return false; | |
1528 | } | |
1529 | ||
1530 | case TARGET_MEM_REF: | |
1531 | if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR) | |
1532 | return true; | |
1533 | x = TREE_OPERAND (TMR_BASE (x), 0); | |
1534 | if (TREE_CODE (x) == PARM_DECL) | |
1535 | return false; | |
53e9c5c4 | 1536 | gcc_assert (VAR_P (x)); |
4c0315d0 | 1537 | /* FALLTHRU */ |
1538 | ||
1539 | case PARM_DECL: | |
1540 | case RESULT_DECL: | |
1541 | case VAR_DECL: | |
1542 | if (DECL_BY_REFERENCE (x)) | |
1543 | { | |
1544 | /* ??? This value is a pointer, but aggregate_value_p has been | |
1545 | jigged to return true which confuses needs_to_live_in_memory. | |
1546 | This ought to be cleaned up generically. | |
1547 | ||
1548 | FIXME: Verify this still happens after the next mainline | |
1549 | merge. Testcase ie g++.dg/tm/pr47554.C. | |
1550 | */ | |
1551 | return false; | |
1552 | } | |
1553 | ||
1554 | if (is_global_var (x)) | |
e0f3ea3e | 1555 | return !TREE_READONLY (x); |
4c0315d0 | 1556 | if (/* FIXME: This condition should actually go below in the |
1557 | tm_log_add() call, however is_call_clobbered() depends on | |
1558 | aliasing info which is not available during | |
1559 | gimplification. Since requires_barrier() gets called | |
1560 | during lower_sequence_tm/gimplification, leave the call | |
1561 | to needs_to_live_in_memory until we eliminate | |
1562 | lower_sequence_tm altogether. */ | |
3c8b8a41 | 1563 | needs_to_live_in_memory (x)) |
4c0315d0 | 1564 | return true; |
e0f3ea3e | 1565 | else |
1566 | { | |
1567 | /* For local memory that doesn't escape (aka thread private | |
1568 | memory), we can either save the value at the beginning of | |
1569 | the transaction and restore on restart, or call a tm | |
1570 | function to dynamically save and restore on restart | |
1571 | (ITM_L*). */ | |
1572 | if (stmt) | |
1573 | tm_log_add (entry_block, orig, stmt); | |
1574 | return false; | |
1575 | } | |
4c0315d0 | 1576 | |
1577 | default: | |
1578 | return false; | |
1579 | } | |
1580 | } | |
1581 | ||
1582 | /* Mark the GIMPLE_ASSIGN statement as appropriate for being inside | |
1583 | a transaction region. */ | |
1584 | ||
1585 | static void | |
1586 | examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi) | |
1587 | { | |
42acab1c | 1588 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1589 | |
1590 | if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL)) | |
1591 | *state |= GTMA_HAVE_LOAD; | |
1592 | if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL)) | |
1593 | *state |= GTMA_HAVE_STORE; | |
1594 | } | |
1595 | ||
1596 | /* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */ | |
1597 | ||
1598 | static void | |
1599 | examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi) | |
1600 | { | |
42acab1c | 1601 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1602 | tree fn; |
1603 | ||
1604 | if (is_tm_pure_call (stmt)) | |
1605 | return; | |
1606 | ||
1607 | /* Check if this call is a transaction abort. */ | |
1608 | fn = gimple_call_fndecl (stmt); | |
1609 | if (is_tm_abort (fn)) | |
1610 | *state |= GTMA_HAVE_ABORT; | |
1611 | ||
1612 | /* Note that the call may both load and store memory.  */
1613 | *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE; | |
1614 | } | |
1615 | ||
a08574d7 | 1616 | /* Iterate through the statements in the sequence, moving labels |
1617 | (and thus edges) of transactions from "label_norm" to "label_uninst". */ | |
1618 | ||
1619 | static tree | |
1620 | make_tm_uninst (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1621 | struct walk_stmt_info *) | |
1622 | { | |
1623 | gimple *stmt = gsi_stmt (*gsi); | |
1624 | ||
1625 | if (gtransaction *txn = dyn_cast <gtransaction *> (stmt)) | |
1626 | { | |
1627 | *handled_ops_p = true; | |
1628 | txn->label_uninst = txn->label_norm; | |
1629 | txn->label_norm = NULL; | |
1630 | } | |
1631 | else | |
1632 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1633 | ||
1634 | return NULL_TREE; | |
1635 | } | |
1636 | ||
4c0315d0 | 1637 | /* Lower a GIMPLE_TRANSACTION statement. */ |
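/* For illustration, the lowered form looks roughly like this (a sketch
   only; the exact sequence depends on flag_exceptions, nesting, and on
   whether an uninstrumented copy is created):

     __transaction_atomic { BODY }

   becomes

     GIMPLE_TRANSACTION [label_norm=L1, label_uninst=L2, label_over=L3]
     L1:
       try { BODY } finally { BUILT_IN_TM_COMMIT (); }
       goto L3;
     L2:
       try { BODY' } finally { BUILT_IN_TM_COMMIT (); }
     L3:

   where BODY' is the uninstrumented copy of the body, and the builtin
   expands to the corresponding _ITM_* libitm routine.  */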
1638 | ||
1639 | static void | |
1640 | lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi) | |
1641 | { | |
42acab1c | 1642 | gimple *g; |
1a91d914 | 1643 | gtransaction *stmt = as_a <gtransaction *> (gsi_stmt (*gsi)); |
4c0315d0 | 1644 | unsigned int *outer_state = (unsigned int *) wi->info; |
1645 | unsigned int this_state = 0; | |
1646 | struct walk_stmt_info this_wi; | |
1647 | ||
1648 | /* First, lower the body. The scanning that we do inside gives | |
1649 | us some idea of what we're dealing with. */ | |
1650 | memset (&this_wi, 0, sizeof (this_wi)); | |
1651 | this_wi.info = (void *) &this_state; | |
e3a19533 | 1652 | walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt), |
1653 | lower_sequence_tm, NULL, &this_wi); | |
4c0315d0 | 1654 | |
1655 | /* If there was absolutely nothing transaction related inside the | |
1656 | transaction, we may elide it. Likewise if this is a nested | |
1657 | transaction and does not contain an abort. */ | |
1658 | if (this_state == 0 | |
1659 | || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL)) | |
1660 | { | |
1661 | if (outer_state) | |
1662 | *outer_state |= this_state; | |
1663 | ||
1664 | gsi_insert_seq_before (gsi, gimple_transaction_body (stmt), | |
1665 | GSI_SAME_STMT); | |
1666 | gimple_transaction_set_body (stmt, NULL); | |
1667 | ||
1668 | gsi_remove (gsi, true); | |
1669 | wi->removed_stmt = true; | |
1670 | return; | |
1671 | } | |
1672 | ||
1673 | /* Wrap the body of the transaction in a try-finally node so that | |
1674 | the commit call is always properly called. */ | |
1675 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0); | |
1676 | if (flag_exceptions) | |
1677 | { | |
1678 | tree ptr; | |
1679 | gimple_seq n_seq, e_seq; | |
1680 | ||
1681 | n_seq = gimple_seq_alloc_with_stmt (g); | |
e3a19533 | 1682 | e_seq = NULL; |
4c0315d0 | 1683 | |
1684 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER), | |
1685 | 1, integer_zero_node); | |
f9e245b2 | 1686 | ptr = create_tmp_var (ptr_type_node); |
4c0315d0 | 1687 | gimple_call_set_lhs (g, ptr); |
1688 | gimple_seq_add_stmt (&e_seq, g); | |
1689 | ||
1690 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH), | |
1691 | 1, ptr); | |
1692 | gimple_seq_add_stmt (&e_seq, g); | |
1693 | ||
1694 | g = gimple_build_eh_else (n_seq, e_seq); | |
1695 | } | |
1696 | ||
1697 | g = gimple_build_try (gimple_transaction_body (stmt), | |
1698 | gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY); | |
4c0315d0 | 1699 | |
a08574d7 | 1700 | /* For a (potentially) outer transaction, create two paths. */ |
1701 | gimple_seq uninst = NULL; | |
1702 | if (outer_state == NULL) | |
1703 | { | |
1704 | uninst = copy_gimple_seq_and_replace_locals (g); | |
1705 | /* In the uninstrumented copy, reset inner transactions to have only | |
1706 | an uninstrumented code path. */ | |
1707 | memset (&this_wi, 0, sizeof (this_wi)); | |
1708 | walk_gimple_seq (uninst, make_tm_uninst, NULL, &this_wi); | |
1709 | } | |
1710 | ||
1711 | tree label1 = create_artificial_label (UNKNOWN_LOCATION); | |
1712 | gsi_insert_after (gsi, gimple_build_label (label1), GSI_CONTINUE_LINKING); | |
1713 | gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING); | |
1714 | gimple_transaction_set_label_norm (stmt, label1); | |
4c0315d0 | 1715 | |
1716 | /* If the transaction calls abort or if this is an outer transaction, | |
1717 | add an "over" label afterwards. */ | |
a08574d7 | 1718 | tree label3 = NULL; |
1719 | if ((this_state & GTMA_HAVE_ABORT) | |
1720 | || outer_state == NULL | |
9af5ce0c | 1721 | || (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)) |
4c0315d0 | 1722 | { |
a08574d7 | 1723 | label3 = create_artificial_label (UNKNOWN_LOCATION); |
1724 | gimple_transaction_set_label_over (stmt, label3); | |
4c0315d0 | 1725 | } |
1726 | ||
a08574d7 | 1727 | if (uninst != NULL) |
1728 | { | |
1729 | gsi_insert_after (gsi, gimple_build_goto (label3), GSI_CONTINUE_LINKING); | |
1730 | ||
1731 | tree label2 = create_artificial_label (UNKNOWN_LOCATION); | |
1732 | gsi_insert_after (gsi, gimple_build_label (label2), GSI_CONTINUE_LINKING); | |
1733 | gsi_insert_seq_after (gsi, uninst, GSI_CONTINUE_LINKING); | |
1734 | gimple_transaction_set_label_uninst (stmt, label2); | |
1735 | } | |
1736 | ||
1737 | if (label3 != NULL) | |
1738 | gsi_insert_after (gsi, gimple_build_label (label3), GSI_CONTINUE_LINKING); | |
1739 | ||
1740 | gimple_transaction_set_body (stmt, NULL); | |
1741 | ||
4c0315d0 | 1742 | /* Record the set of operations found for use later. */ |
1743 | this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK; | |
1744 | gimple_transaction_set_subcode (stmt, this_state); | |
1745 | } | |
1746 | ||
1747 | /* Iterate through the statements in the sequence, lowering them all | |
1748 | as appropriate for being in a transaction. */ | |
1749 | ||
1750 | static tree | |
1751 | lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1752 | struct walk_stmt_info *wi) | |
1753 | { | |
1754 | unsigned int *state = (unsigned int *) wi->info; | |
42acab1c | 1755 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1756 | |
1757 | *handled_ops_p = true; | |
1758 | switch (gimple_code (stmt)) | |
1759 | { | |
1760 | case GIMPLE_ASSIGN: | |
1761 | /* Only memory reads/writes need to be instrumented. */ | |
1762 | if (gimple_assign_single_p (stmt)) | |
1763 | examine_assign_tm (state, gsi); | |
1764 | break; | |
1765 | ||
1766 | case GIMPLE_CALL: | |
1767 | examine_call_tm (state, gsi); | |
1768 | break; | |
1769 | ||
1770 | case GIMPLE_ASM: | |
1771 | *state |= GTMA_MAY_ENTER_IRREVOCABLE; | |
1772 | break; | |
1773 | ||
1774 | case GIMPLE_TRANSACTION: | |
1775 | lower_transaction (gsi, wi); | |
1776 | break; | |
1777 | ||
1778 | default: | |
1779 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1780 | break; | |
1781 | } | |
1782 | ||
1783 | return NULL_TREE; | |
1784 | } | |
1785 | ||
1786 | /* Iterate through the statements in the sequence, lowering them all | |
1787 | as appropriate for being outside of a transaction. */ | |
1788 | ||
1789 | static tree | |
1790 | lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p, | |
1791 | struct walk_stmt_info * wi) | |
1792 | { | |
42acab1c | 1793 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 1794 | |
1795 | if (gimple_code (stmt) == GIMPLE_TRANSACTION) | |
1796 | { | |
1797 | *handled_ops_p = true; | |
1798 | lower_transaction (gsi, wi); | |
1799 | } | |
1800 | else | |
1801 | *handled_ops_p = !gimple_has_substatements (stmt); | |
1802 | ||
1803 | return NULL_TREE; | |
1804 | } | |
1805 | ||
1806 | /* Main entry point for flattening GIMPLE_TRANSACTION constructs. After | |
1807 | this, GIMPLE_TRANSACTION nodes still exist, but the nested body has | |
1808 | been moved out, and all the data required for constructing a proper | |
1809 | CFG has been recorded. */ | |
1810 | ||
1811 | static unsigned int | |
1812 | execute_lower_tm (void) | |
1813 | { | |
1814 | struct walk_stmt_info wi; | |
e3a19533 | 1815 | gimple_seq body; |
4c0315d0 | 1816 | |
1817 | /* Transactional clones aren't created until a later pass. */ | |
1818 | gcc_assert (!decl_is_tm_clone (current_function_decl)); | |
1819 | ||
e3a19533 | 1820 | body = gimple_body (current_function_decl); |
4c0315d0 | 1821 | memset (&wi, 0, sizeof (wi)); |
e3a19533 | 1822 | walk_gimple_seq_mod (&body, lower_sequence_no_tm, NULL, &wi); |
1823 | gimple_set_body (current_function_decl, body); | |
4c0315d0 | 1824 | |
1825 | return 0; | |
1826 | } | |
1827 | ||
7620bc82 | 1828 | namespace { |
1829 | ||
1830 | const pass_data pass_data_lower_tm = | |
cbe8bda8 | 1831 | { |
1832 | GIMPLE_PASS, /* type */ | |
1833 | "tmlower", /* name */ | |
1834 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 1835 | TV_TRANS_MEM, /* tv_id */ |
1836 | PROP_gimple_lcf, /* properties_required */ | |
1837 | 0, /* properties_provided */ | |
1838 | 0, /* properties_destroyed */ | |
1839 | 0, /* todo_flags_start */ | |
1840 | 0, /* todo_flags_finish */ | |
4c0315d0 | 1841 | }; |
cbe8bda8 | 1842 | |
7620bc82 | 1843 | class pass_lower_tm : public gimple_opt_pass |
cbe8bda8 | 1844 | { |
1845 | public: | |
9af5ce0c | 1846 | pass_lower_tm (gcc::context *ctxt) |
1847 | : gimple_opt_pass (pass_data_lower_tm, ctxt) | |
cbe8bda8 | 1848 | {} |
1849 | ||
1850 | /* opt_pass methods: */ | |
31315c24 | 1851 | virtual bool gate (function *) { return flag_tm; } |
65b0537f | 1852 | virtual unsigned int execute (function *) { return execute_lower_tm (); } |
cbe8bda8 | 1853 | |
1854 | }; // class pass_lower_tm | |
1855 | ||
7620bc82 | 1856 | } // anon namespace |
1857 | ||
cbe8bda8 | 1858 | gimple_opt_pass * |
1859 | make_pass_lower_tm (gcc::context *ctxt) | |
1860 | { | |
1861 | return new pass_lower_tm (ctxt); | |
1862 | } | |
4c0315d0 | 1863 | \f |
1864 | /* Collect region information for each transaction. */ | |
1865 | ||
1866 | struct tm_region | |
1867 | { | |
1a91d914 | 1868 | public: |
1869 | ||
1870 | /* The field "transaction_stmt" is initially a gtransaction *, | |
1871 | but eventually gets lowered to a gcall * (to BUILT_IN_TM_START).
1872 | ||
1873 | Helper method to get it as a gtransaction *, with code-checking | |
1874 | in a checked-build. */ | |
1875 | ||
1876 | gtransaction * | |
1877 | get_transaction_stmt () const | |
1878 | { | |
1879 | return as_a <gtransaction *> (transaction_stmt); | |
1880 | } | |
1881 | ||
1882 | public: | |
1883 | ||
4c0315d0 | 1884 | /* Link to the next unnested transaction. */ |
1885 | struct tm_region *next; | |
1886 | ||
1887 | /* Link to the next inner transaction. */ | |
1888 | struct tm_region *inner; | |
1889 | ||
1890 | /* Link to the next outer transaction. */ | |
1891 | struct tm_region *outer; | |
1892 | ||
0cd02a19 | 1893 | /* The GIMPLE_TRANSACTION statement beginning this transaction. |
1894 | After TM_MARK, this gets replaced by a call to | |
1a91d914 | 1895 | BUILT_IN_TM_START. |
1896 | Hence this will be either a gtransaction * or a gcall *.  */
42acab1c | 1897 | gimple *transaction_stmt; |
4c0315d0 | 1898 | |
0cd02a19 | 1899 | /* After TM_MARK expands the GIMPLE_TRANSACTION into a call to |
1900 | BUILT_IN_TM_START, this field is true if the transaction is an | |
1901 | outer transaction. */ | |
1902 | bool original_transaction_was_outer; | |
1903 | ||
1904 | /* Return value from BUILT_IN_TM_START. */ | |
1905 | tree tm_state; | |
1906 | ||
1907 | /* The entry block to this region. This will always be the first | |
1908 | block of the body of the transaction. */ | |
4c0315d0 | 1909 | basic_block entry_block; |
1910 | ||
0cd02a19 | 1911 | /* The first block after an expanded call to _ITM_beginTransaction. */ |
1912 | basic_block restart_block; | |
1913 | ||
4c0315d0 | 1914 | /* The set of all blocks that end the region; NULL if only EXIT_BLOCK. |
1915 | These blocks are still a part of the region (i.e., the border is | |
1916 | inclusive). Note that this set is only complete for paths in the CFG | |
1917 | starting at ENTRY_BLOCK, and that there is no exit block recorded for | |
1918 | the edge to the "over" label. */ | |
1919 | bitmap exit_blocks; | |
1920 | ||
1921 | /* The set of all blocks that have a TM_IRREVOCABLE call.  */
1922 | bitmap irr_blocks; | |
1923 | }; | |
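/* For example (one possible layout; nested transactions only survive
   lowering when they cannot be merged into their parent, e.g. because
   they contain an abort):

     __transaction_atomic {                              // R1
       __transaction_atomic { __transaction_cancel; }    // R2
     }
     __transaction_atomic { x++; }                       // R3

   yields R2->outer == R1 and R1->inner == R2, while R1 and R3 are
   chained through their "next" links with outer == NULL.  */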
1924 | ||
1925 | /* True if there are pending edge statements to be committed for the | |
1926 | current function being scanned in the tmmark pass. */ | |
1927 | bool pending_edge_inserts_p; | |
1928 | ||
1929 | static struct tm_region *all_tm_regions; | |
1930 | static bitmap_obstack tm_obstack; | |
1931 | ||
1932 | ||
9d75589a | 1933 | /* A subroutine of tm_region_init. Record the existence of the |
4c0315d0 | 1934 | GIMPLE_TRANSACTION statement in a tree of tm_region elements. */ |
1935 | ||
1936 | static struct tm_region * | |
1a91d914 | 1937 | tm_region_init_0 (struct tm_region *outer, basic_block bb, |
1938 | gtransaction *stmt) | |
4c0315d0 | 1939 | { |
1940 | struct tm_region *region; | |
1941 | ||
1942 | region = (struct tm_region *) | |
1943 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
1944 | ||
1945 | if (outer) | |
1946 | { | |
1947 | region->next = outer->inner; | |
1948 | outer->inner = region; | |
1949 | } | |
1950 | else | |
1951 | { | |
1952 | region->next = all_tm_regions; | |
1953 | all_tm_regions = region; | |
1954 | } | |
1955 | region->inner = NULL; | |
1956 | region->outer = outer; | |
1957 | ||
1958 | region->transaction_stmt = stmt; | |
0cd02a19 | 1959 | region->original_transaction_was_outer = false; |
1960 | region->tm_state = NULL; | |
4c0315d0 | 1961 | |
1962 | /* There are either one or two edges out of the block containing | |
1963 | the GIMPLE_TRANSACTION, one to the actual region and one to the | |
1964 | "over" label if the region contains an abort. The former will | |
1965 | always be the one marked FALLTHRU. */ | |
1966 | region->entry_block = FALLTHRU_EDGE (bb)->dest; | |
1967 | ||
1968 | region->exit_blocks = BITMAP_ALLOC (&tm_obstack); | |
1969 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
1970 | ||
1971 | return region; | |
1972 | } | |
1973 | ||
1974 | /* A subroutine of tm_region_init. Record all the exit and | |
1975 | irrevocable blocks in BB into the region's exit_blocks and | |
1976 | irr_blocks bitmaps. Returns the new region being scanned. */ | |
1977 | ||
1978 | static struct tm_region * | |
1979 | tm_region_init_1 (struct tm_region *region, basic_block bb) | |
1980 | { | |
1981 | gimple_stmt_iterator gsi; | |
42acab1c | 1982 | gimple *g; |
4c0315d0 | 1983 | |
1984 | if (!region | |
1985 | || (!region->irr_blocks && !region->exit_blocks)) | |
1986 | return region; | |
1987 | ||
1988 | /* Check to see if this is the end of a region by seeing if it | |
1989 | contains a call to __builtin_tm_commit{,_eh}. Note that the | |
1990 | outermost region for DECL_IS_TM_CLONE need not collect this. */ | |
1991 | for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
1992 | { | |
1993 | g = gsi_stmt (gsi); | |
1994 | if (gimple_code (g) == GIMPLE_CALL) | |
1995 | { | |
1996 | tree fn = gimple_call_fndecl (g); | |
a0e9bfbb | 1997 | if (fn && fndecl_built_in_p (fn, BUILT_IN_NORMAL)) |
4c0315d0 | 1998 | { |
1999 | if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT | |
2000 | || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH) | |
2001 | && region->exit_blocks) | |
2002 | { | |
2003 | bitmap_set_bit (region->exit_blocks, bb->index); | |
2004 | region = region->outer; | |
2005 | break; | |
2006 | } | |
2007 | if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE) | |
2008 | bitmap_set_bit (region->irr_blocks, bb->index); | |
2009 | } | |
2010 | } | |
2011 | } | |
2012 | return region; | |
2013 | } | |
2014 | ||
2015 | /* Collect all of the transaction regions within the current function | |
2016 | and record them in ALL_TM_REGIONS. The REGION parameter may specify | |
2017 | an "outermost" region for use by tm clones. */ | |
2018 | ||
2019 | static void | |
2020 | tm_region_init (struct tm_region *region) | |
2021 | { | |
42acab1c | 2022 | gimple *g; |
4c0315d0 | 2023 | edge_iterator ei; |
2024 | edge e; | |
2025 | basic_block bb; | |
c2078b80 | 2026 | auto_vec<basic_block> queue; |
4c0315d0 | 2027 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
2028 | struct tm_region *old_region; | |
04009ada | 2029 | auto_vec<tm_region *> bb_regions; |
4c0315d0 | 2030 | |
de60f90c | 2031 | /* We could store this information in bb->aux, but we may get called |
2032 | through get_all_tm_blocks() from another pass that may be already | |
2033 | using bb->aux. */ | |
fe672ac0 | 2034 | bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
de60f90c | 2035 | |
c3f86f60 | 2036 | all_tm_regions = region; |
2037 | bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); | |
f1f41a6c | 2038 | queue.safe_push (bb); |
c3f86f60 | 2039 | bitmap_set_bit (visited_blocks, bb->index); |
f1f41a6c | 2040 | bb_regions[bb->index] = region; |
c3f86f60 | 2041 | |
4c0315d0 | 2042 | do |
2043 | { | |
f1f41a6c | 2044 | bb = queue.pop (); |
2045 | region = bb_regions[bb->index]; | |
2046 | bb_regions[bb->index] = NULL; | |
4c0315d0 | 2047 | |
2048 | /* Record exit and irrevocable blocks. */ | |
2049 | region = tm_region_init_1 (region, bb); | |
2050 | ||
2051 | /* Check whether the last statement in the block begins a new region. */
2052 | g = last_stmt (bb); | |
2053 | old_region = region; | |
1a91d914 | 2054 | if (g) |
2055 | if (gtransaction *trans_stmt = dyn_cast <gtransaction *> (g)) | |
2056 | region = tm_region_init_0 (region, bb, trans_stmt); | |
4c0315d0 | 2057 | |
2058 | /* Process subsequent blocks. */ | |
2059 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2060 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
2061 | { | |
2062 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2063 | queue.safe_push (e->dest); |
4c0315d0 | 2064 | |
2065 | /* If the current block started a new region, make sure that only | |
2066 | the entry block of the new region is associated with this region. | |
2067 | Other successors are still part of the old region. */ | |
2068 | if (old_region != region && e->dest != region->entry_block) | |
f1f41a6c | 2069 | bb_regions[e->dest->index] = old_region; |
4c0315d0 | 2070 | else |
f1f41a6c | 2071 | bb_regions[e->dest->index] = region; |
4c0315d0 | 2072 | } |
2073 | } | |
f1f41a6c | 2074 | while (!queue.is_empty ()); |
4c0315d0 | 2075 | BITMAP_FREE (visited_blocks); |
2076 | } | |
2077 | ||
2078 | /* The "gate" function for all transactional memory expansion and optimization | |
2079 | passes. We collect region information for each top-level transaction, and | |
2080 | if we don't find any, we skip all of the TM passes. Each region will have | |
2081 | all of the exit blocks recorded, and the originating statement. */ | |
2082 | ||
2083 | static bool | |
2084 | gate_tm_init (void) | |
2085 | { | |
2086 | if (!flag_tm) | |
2087 | return false; | |
2088 | ||
2089 | calculate_dominance_info (CDI_DOMINATORS); | |
2090 | bitmap_obstack_initialize (&tm_obstack); | |
2091 | ||
2092 | /* If the function is a TM_CLONE, then the entire function is the region. */ | |
2093 | if (decl_is_tm_clone (current_function_decl)) | |
2094 | { | |
2095 | struct tm_region *region = (struct tm_region *) | |
2096 | obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region)); | |
2097 | memset (region, 0, sizeof (*region)); | |
34154e27 | 2098 | region->entry_block = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
4c0315d0 | 2099 | /* For a clone, the entire function is the region. But even if |
2100 | we don't need to record any exit blocks, we may need to | |
2101 | record irrevocable blocks. */ | |
2102 | region->irr_blocks = BITMAP_ALLOC (&tm_obstack); | |
2103 | ||
2104 | tm_region_init (region); | |
2105 | } | |
2106 | else | |
2107 | { | |
2108 | tm_region_init (NULL); | |
2109 | ||
2110 | /* If we didn't find any regions, cleanup and skip the whole tree | |
2111 | of tm-related optimizations. */ | |
2112 | if (all_tm_regions == NULL) | |
2113 | { | |
2114 | bitmap_obstack_release (&tm_obstack); | |
2115 | return false; | |
2116 | } | |
2117 | } | |
2118 | ||
2119 | return true; | |
2120 | } | |
2121 | ||
7620bc82 | 2122 | namespace { |
2123 | ||
2124 | const pass_data pass_data_tm_init = | |
cbe8bda8 | 2125 | { |
2126 | GIMPLE_PASS, /* type */ | |
2127 | "*tminit", /* name */ | |
2128 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 2129 | TV_TRANS_MEM, /* tv_id */ |
2130 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
2131 | 0, /* properties_provided */ | |
2132 | 0, /* properties_destroyed */ | |
2133 | 0, /* todo_flags_start */ | |
2134 | 0, /* todo_flags_finish */ | |
4c0315d0 | 2135 | }; |
cbe8bda8 | 2136 | |
7620bc82 | 2137 | class pass_tm_init : public gimple_opt_pass |
cbe8bda8 | 2138 | { |
2139 | public: | |
9af5ce0c | 2140 | pass_tm_init (gcc::context *ctxt) |
2141 | : gimple_opt_pass (pass_data_tm_init, ctxt) | |
cbe8bda8 | 2142 | {} |
2143 | ||
2144 | /* opt_pass methods: */ | |
31315c24 | 2145 | virtual bool gate (function *) { return gate_tm_init (); } |
cbe8bda8 | 2146 | |
2147 | }; // class pass_tm_init | |
2148 | ||
7620bc82 | 2149 | } // anon namespace |
2150 | ||
cbe8bda8 | 2151 | gimple_opt_pass * |
2152 | make_pass_tm_init (gcc::context *ctxt) | |
2153 | { | |
2154 | return new pass_tm_init (ctxt); | |
2155 | } | |
4c0315d0 | 2156 | \f |
2157 | /* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
2158 | represented by REGION.  */
2159 | ||
2160 | static inline void | |
2161 | transaction_subcode_ior (struct tm_region *region, unsigned flags) | |
2162 | { | |
2163 | if (region && region->transaction_stmt) | |
2164 | { | |
1a91d914 | 2165 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
2166 | flags |= gimple_transaction_subcode (transaction_stmt); | |
2167 | gimple_transaction_set_subcode (transaction_stmt, flags); | |
4c0315d0 | 2168 | } |
2169 | } | |
2170 | ||
2171 | /* Construct a memory load in a transactional context. Return the | |
2172 | gimple statement performing the load, or NULL if there is no | |
2173 | TM_LOAD builtin of the appropriate size to do the load. | |
2174 | ||
2175 | LOC is the location to use for the new statement(s). */ | |
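/* For instance (a sketch; the builtin chosen depends on the access size,
   and the temporary with its VIEW_CONVERT_EXPR only appears when the
   builtin's return type differs from the type of RHS):

     x = *p;    // 4-byte scalar load

   becomes roughly

     _tmp = BUILT_IN_TM_LOAD_4 (p);          // the matching _ITM_* routine
     x = VIEW_CONVERT_EXPR<int>(_tmp);
*/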
2176 | ||
1a91d914 | 2177 | static gcall * |
4c0315d0 | 2178 | build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2179 | { | |
e5fee0fe | 2180 | tree t, type = TREE_TYPE (rhs); |
1a91d914 | 2181 | gcall *gcall; |
4c0315d0 | 2182 | |
e5fee0fe | 2183 | built_in_function code; |
4c0315d0 | 2184 | if (type == float_type_node) |
2185 | code = BUILT_IN_TM_LOAD_FLOAT; | |
2186 | else if (type == double_type_node) | |
2187 | code = BUILT_IN_TM_LOAD_DOUBLE; | |
2188 | else if (type == long_double_type_node) | |
2189 | code = BUILT_IN_TM_LOAD_LDOUBLE; | |
e5fee0fe | 2190 | else |
4c0315d0 | 2191 | { |
e5fee0fe | 2192 | if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type))) |
2193 | return NULL; | |
2194 | unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type)); | |
2195 | ||
2196 | if (TREE_CODE (type) == VECTOR_TYPE) | |
4c0315d0 | 2197 | { |
e5fee0fe | 2198 | switch (type_size) |
2199 | { | |
2200 | case 64: | |
2201 | code = BUILT_IN_TM_LOAD_M64; | |
2202 | break; | |
2203 | case 128: | |
2204 | code = BUILT_IN_TM_LOAD_M128; | |
2205 | break; | |
2206 | case 256: | |
2207 | code = BUILT_IN_TM_LOAD_M256; | |
2208 | break; | |
2209 | default: | |
2210 | goto unhandled_vec; | |
2211 | } | |
2212 | if (!builtin_decl_explicit_p (code)) | |
2213 | goto unhandled_vec; | |
2214 | } | |
2215 | else | |
2216 | { | |
2217 | unhandled_vec: | |
2218 | switch (type_size) | |
2219 | { | |
2220 | case 8: | |
2221 | code = BUILT_IN_TM_LOAD_1; | |
2222 | break; | |
2223 | case 16: | |
2224 | code = BUILT_IN_TM_LOAD_2; | |
2225 | break; | |
2226 | case 32: | |
2227 | code = BUILT_IN_TM_LOAD_4; | |
2228 | break; | |
2229 | case 64: | |
2230 | code = BUILT_IN_TM_LOAD_8; | |
2231 | break; | |
2232 | default: | |
2233 | return NULL; | |
2234 | } | |
4c0315d0 | 2235 | } |
2236 | } | |
2237 | ||
e5fee0fe | 2238 | tree decl = builtin_decl_explicit (code); |
2239 | gcc_assert (decl); | |
4c0315d0 | 2240 | |
2241 | t = gimplify_addr (gsi, rhs); | |
2242 | gcall = gimple_build_call (decl, 1, t); | |
2243 | gimple_set_location (gcall, loc); | |
2244 | ||
2245 | t = TREE_TYPE (TREE_TYPE (decl)); | |
2246 | if (useless_type_conversion_p (type, t)) | |
2247 | { | |
2248 | gimple_call_set_lhs (gcall, lhs); | |
2249 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2250 | } | |
2251 | else | |
2252 | { | |
42acab1c | 2253 | gimple *g; |
4c0315d0 | 2254 | tree temp; |
2255 | ||
f9e245b2 | 2256 | temp = create_tmp_reg (t); |
4c0315d0 | 2257 | gimple_call_set_lhs (gcall, temp); |
2258 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2259 | ||
2260 | t = fold_build1 (VIEW_CONVERT_EXPR, type, temp); | |
2261 | g = gimple_build_assign (lhs, t); | |
2262 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2263 | } | |
2264 | ||
2265 | return gcall; | |
2266 | } | |
2267 | ||
2268 | ||
2269 | /* Similarly for storing TYPE in a transactional context. */ | |
2270 | ||
1a91d914 | 2271 | static gcall * |
4c0315d0 | 2272 | build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) |
2273 | { | |
4c0315d0 | 2274 | tree t, fn, type = TREE_TYPE (rhs), simple_type; |
1a91d914 | 2275 | gcall *gcall; |
4c0315d0 | 2276 | |
e5fee0fe | 2277 | built_in_function code; |
4c0315d0 | 2278 | if (type == float_type_node) |
2279 | code = BUILT_IN_TM_STORE_FLOAT; | |
2280 | else if (type == double_type_node) | |
2281 | code = BUILT_IN_TM_STORE_DOUBLE; | |
2282 | else if (type == long_double_type_node) | |
2283 | code = BUILT_IN_TM_STORE_LDOUBLE; | |
e5fee0fe | 2284 | else |
4c0315d0 | 2285 | { |
e5fee0fe | 2286 | if (TYPE_SIZE (type) == NULL || !tree_fits_uhwi_p (TYPE_SIZE (type))) |
2287 | return NULL; | |
2288 | unsigned HOST_WIDE_INT type_size = tree_to_uhwi (TYPE_SIZE (type)); | |
2289 | ||
2290 | if (TREE_CODE (type) == VECTOR_TYPE) | |
4c0315d0 | 2291 | { |
e5fee0fe | 2292 | switch (type_size) |
2293 | { | |
2294 | case 64: | |
2295 | code = BUILT_IN_TM_STORE_M64; | |
2296 | break; | |
2297 | case 128: | |
2298 | code = BUILT_IN_TM_STORE_M128; | |
2299 | break; | |
2300 | case 256: | |
2301 | code = BUILT_IN_TM_STORE_M256; | |
2302 | break; | |
2303 | default: | |
2304 | goto unhandled_vec; | |
2305 | } | |
2306 | if (!builtin_decl_explicit_p (code)) | |
2307 | goto unhandled_vec; | |
2308 | } | |
2309 | else | |
2310 | { | |
2311 | unhandled_vec: | |
2312 | switch (type_size) | |
2313 | { | |
2314 | case 8: | |
2315 | code = BUILT_IN_TM_STORE_1; | |
2316 | break; | |
2317 | case 16: | |
2318 | code = BUILT_IN_TM_STORE_2; | |
2319 | break; | |
2320 | case 32: | |
2321 | code = BUILT_IN_TM_STORE_4; | |
2322 | break; | |
2323 | case 64: | |
2324 | code = BUILT_IN_TM_STORE_8; | |
2325 | break; | |
2326 | default: | |
2327 | return NULL; | |
2328 | } | |
4c0315d0 | 2329 | } |
2330 | } | |
2331 | ||
e5fee0fe | 2332 | fn = builtin_decl_explicit (code); |
2333 | gcc_assert (fn); | |
4c0315d0 | 2334 | |
2335 | simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn)))); | |
2336 | ||
2337 | if (TREE_CODE (rhs) == CONSTRUCTOR) | |
2338 | { | |
2339 | /* Handle the easy initialization to zero. */ | |
f1f41a6c | 2340 | if (!CONSTRUCTOR_ELTS (rhs)) |
4c0315d0 | 2341 | rhs = build_int_cst (simple_type, 0); |
2342 | else | |
2343 | { | |
2344 | /* ...otherwise punt to the caller and probably use | |
2345 | BUILT_IN_TM_MEMMOVE, because we can't wrap a | |
2346 | VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce | |
2347 | valid gimple. */ | |
2348 | return NULL; | |
2349 | } | |
2350 | } | |
2351 | else if (!useless_type_conversion_p (simple_type, type)) | |
2352 | { | |
42acab1c | 2353 | gimple *g; |
4c0315d0 | 2354 | tree temp; |
2355 | ||
f9e245b2 | 2356 | temp = create_tmp_reg (simple_type); |
4c0315d0 | 2357 | t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs); |
2358 | g = gimple_build_assign (temp, t); | |
2359 | gimple_set_location (g, loc); | |
2360 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
2361 | ||
2362 | rhs = temp; | |
2363 | } | |
2364 | ||
2365 | t = gimplify_addr (gsi, lhs); | |
2366 | gcall = gimple_build_call (fn, 2, t, rhs); | |
2367 | gimple_set_location (gcall, loc); | |
2368 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2369 | ||
2370 | return gcall; | |
2371 | } | |
2372 | ||
2373 | ||
2374 | /* Expand an assignment statement into transactional builtins. */ | |
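/* For example (a sketch only; the actual builtins picked depend on the
   access size and on which of LHS/RHS require barriers):

     g = x;     // store only:  BUILT_IN_TM_STORE_4 (&g, x)
     x = g;     // load only:   x = BUILT_IN_TM_LOAD_4 (&g)
     s1 = s2;   // both sides:  BUILT_IN_TM_MEMMOVE (&s1, &s2, sizeof (s1))
*/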
2375 | ||
2376 | static void | |
2377 | expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi) | |
2378 | { | |
42acab1c | 2379 | gimple *stmt = gsi_stmt (*gsi); |
4c0315d0 | 2380 | location_t loc = gimple_location (stmt); |
2381 | tree lhs = gimple_assign_lhs (stmt); | |
2382 | tree rhs = gimple_assign_rhs1 (stmt); | |
2383 | bool store_p = requires_barrier (region->entry_block, lhs, NULL); | |
2384 | bool load_p = requires_barrier (region->entry_block, rhs, NULL); | |
42acab1c | 2385 | gimple *gcall = NULL; |
4c0315d0 | 2386 | |
2387 | if (!load_p && !store_p) | |
2388 | { | |
2389 | /* Add thread private addresses to log if applicable. */ | |
2390 | requires_barrier (region->entry_block, lhs, stmt); | |
2391 | gsi_next (gsi); | |
2392 | return; | |
2393 | } | |
2394 | ||
e5fee0fe | 2395 | if (load_p) |
2396 | transaction_subcode_ior (region, GTMA_HAVE_LOAD); | |
2397 | if (store_p) | |
2398 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2399 | ||
0cd02a19 | 2400 | // Remove original load/store statement. |
4c0315d0 | 2401 | gsi_remove (gsi, true); |
2402 | ||
e5fee0fe | 2403 | // Attempt to use a simple load/store helper function. |
4c0315d0 | 2404 | if (load_p && !store_p) |
e5fee0fe | 2405 | gcall = build_tm_load (loc, lhs, rhs, gsi); |
4c0315d0 | 2406 | else if (store_p && !load_p) |
e5fee0fe | 2407 | gcall = build_tm_store (loc, lhs, rhs, gsi); |
2408 | ||
2409 | // If gcall has not been set, then we do not have a simple helper | |
2410 | // function available for the type. This may be true of larger | |
2411 | // structures, vectors, and non-standard float types. | |
4c0315d0 | 2412 | if (!gcall) |
2413 | { | |
e5fee0fe | 2414 | tree lhs_addr, rhs_addr, ltmp = NULL, copy_fn; |
4c0315d0 | 2415 | |
e5fee0fe | 2416 | // If this is a type that we couldn't handle above, but it's |
2417 | // in a register, we must spill it to memory for the copy. | |
2418 | if (is_gimple_reg (lhs)) | |
2419 | { | |
2420 | ltmp = create_tmp_var (TREE_TYPE (lhs)); | |
2421 | lhs_addr = build_fold_addr_expr (ltmp); | |
2422 | } | |
2423 | else | |
2424 | lhs_addr = gimplify_addr (gsi, lhs); | |
2425 | if (is_gimple_reg (rhs)) | |
2426 | { | |
2427 | tree rtmp = create_tmp_var (TREE_TYPE (rhs)); | |
2428 | rhs_addr = build_fold_addr_expr (rtmp); | |
2429 | gcall = gimple_build_assign (rtmp, rhs); | |
2430 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
2431 | } | |
2432 | else | |
2433 | rhs_addr = gimplify_addr (gsi, rhs); | |
ea580cf7 | 2434 | |
e5fee0fe | 2435 | // Choose the appropriate memory transfer function. |
2436 | if (load_p && store_p) | |
ea580cf7 | 2437 | { |
e5fee0fe | 2438 | // ??? Figure out if there's any possible overlap between |
2439 | // the LHS and the RHS and if not, use MEMCPY. | |
2440 | copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMMOVE); | |
2441 | } | |
2442 | else if (load_p) | |
2443 | { | |
2444 | // Note that the store is non-transactional and cannot overlap. | |
2445 | copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RTWN); | |
ea580cf7 | 2446 | } |
2447 | else | |
2448 | { | |
e5fee0fe | 2449 | // Note that the load is non-transactional and cannot overlap. |
2450 | copy_fn = builtin_decl_explicit (BUILT_IN_TM_MEMCPY_RNWT); | |
ea580cf7 | 2451 | } |
e5fee0fe | 2452 | |
2453 | gcall = gimple_build_call (copy_fn, 3, lhs_addr, rhs_addr, | |
4c0315d0 | 2454 | TYPE_SIZE_UNIT (TREE_TYPE (lhs))); |
2455 | gimple_set_location (gcall, loc); | |
2456 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); | |
ea580cf7 | 2457 | |
e5fee0fe | 2458 | if (ltmp) |
ea580cf7 | 2459 | { |
e5fee0fe | 2460 | gcall = gimple_build_assign (lhs, ltmp); |
ea580cf7 | 2461 | gsi_insert_before (gsi, gcall, GSI_SAME_STMT); |
2462 | } | |
4c0315d0 | 2463 | } |
2464 | ||
e5fee0fe | 2465 | // Now that we have the load/store in its instrumented form, add |
2466 | // thread private addresses to the log if applicable. | |
4c0315d0 | 2467 | if (!store_p) |
2468 | requires_barrier (region->entry_block, lhs, gcall); | |
4c0315d0 | 2469 | } |
2470 | ||
2471 | ||
2472 | /* Expand a call statement as appropriate for a transaction. That is, | |
2473 | either verify that the call does not affect the transaction, or | |
2474 | redirect the call to a clone that handles transactions, or change | |
2475 | the transaction state to IRREVOCABLE. Return true if the call is | |
2476 | one of the builtins that end a transaction. */ | |
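/* For example (a sketch): inside a transaction,

     g = foo (x);

   is rewritten as

     tmp = foo (x);
     g = tmp;       // then expanded by expand_assign_tm into a TM store

   so that the store to G is instrumented without disturbing the call;
   redirecting the call itself to a transactional clone is handled by
   the IPA-tm machinery, not here.  */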
2477 | ||
2478 | static bool | |
2479 | expand_call_tm (struct tm_region *region, | |
2480 | gimple_stmt_iterator *gsi) | |
2481 | { | |
1a91d914 | 2482 | gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi)); |
4c0315d0 | 2483 | tree lhs = gimple_call_lhs (stmt); |
2484 | tree fn_decl; | |
2485 | struct cgraph_node *node; | |
2486 | bool retval = false; | |
2487 | ||
2488 | fn_decl = gimple_call_fndecl (stmt); | |
2489 | ||
2490 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY) | |
2491 | || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE)) | |
2492 | transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD); | |
2493 | if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET)) | |
2494 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2495 | ||
2496 | if (is_tm_pure_call (stmt)) | |
2497 | return false; | |
2498 | ||
2499 | if (fn_decl) | |
2500 | retval = is_tm_ending_fndecl (fn_decl); | |
2501 | if (!retval) | |
2502 | { | |
2503 | /* Assume all non-const/pure calls write to memory, except | |
2504 | transaction ending builtins. */ | |
2505 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2506 | } | |
2507 | ||
2508 | /* For indirect calls, we already generated a call into the runtime. */ | |
2509 | if (!fn_decl) | |
2510 | { | |
2511 | tree fn = gimple_call_fn (stmt); | |
2512 | ||
2513 | /* We are guaranteed never to go irrevocable on a safe or pure | |
2514 | call, and the pure call was handled above. */ | |
2515 | if (is_tm_safe (fn)) | |
2516 | return false; | |
2517 | else | |
2518 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2519 | ||
2520 | return false; | |
2521 | } | |
2522 | ||
415d1b9a | 2523 | node = cgraph_node::get (fn_decl); |
fce5ce8e | 2524 | /* All calls should have a cgraph node here.  */
2525 | if (!node) | |
2526 | { | |
2527 | /* We can have a nodeless call here if some pass after IPA-tm | |
2528 | added uninstrumented calls. For example, loop distribution | |
2529 | can transform certain loop constructs into __builtin_mem* | |
2530 | calls. In this case, see if we have a suitable TM | |
2531 | replacement and fill in the gaps. */ | |
2532 | gcc_assert (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL); | |
2533 | enum built_in_function code = DECL_FUNCTION_CODE (fn_decl); | |
2534 | gcc_assert (code == BUILT_IN_MEMCPY | |
2535 | || code == BUILT_IN_MEMMOVE | |
2536 | || code == BUILT_IN_MEMSET); | |
2537 | ||
2538 | tree repl = find_tm_replacement_function (fn_decl); | |
2539 | if (repl) | |
2540 | { | |
2541 | gimple_call_set_fndecl (stmt, repl); | |
2542 | update_stmt (stmt); | |
415d1b9a | 2543 | node = cgraph_node::create (repl); |
fce5ce8e | 2544 | node->local.tm_may_enter_irr = false; |
2545 | return expand_call_tm (region, gsi); | |
2546 | } | |
2547 | gcc_unreachable (); | |
2548 | } | |
4c0315d0 | 2549 | if (node->local.tm_may_enter_irr) |
2550 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
2551 | ||
2552 | if (is_tm_abort (fn_decl)) | |
2553 | { | |
2554 | transaction_subcode_ior (region, GTMA_HAVE_ABORT); | |
2555 | return true; | |
2556 | } | |
2557 | ||
2558 | /* Instrument the store if needed. | |
2559 | ||
2560 | If the assignment happens inside the function call (return slot | |
2561 | optimization), there is no instrumentation to be done, since | |
2562 | the callee should have done the right thing. */ | |
2563 | if (lhs && requires_barrier (region->entry_block, lhs, stmt) | |
2564 | && !gimple_call_return_slot_opt_p (stmt)) | |
2565 | { | |
f9e245b2 | 2566 | tree tmp = create_tmp_reg (TREE_TYPE (lhs)); |
4c0315d0 | 2567 | location_t loc = gimple_location (stmt); |
2568 | edge fallthru_edge = NULL; | |
1a91d914 | 2569 | gassign *assign_stmt; |
4c0315d0 | 2570 | |
2571 | /* Remember if the call was going to throw. */ | |
aac19106 | 2572 | if (stmt_can_throw_internal (cfun, stmt)) |
4c0315d0 | 2573 | { |
2574 | edge_iterator ei; | |
2575 | edge e; | |
2576 | basic_block bb = gimple_bb (stmt); | |
2577 | ||
2578 | FOR_EACH_EDGE (e, ei, bb->succs) | |
2579 | if (e->flags & EDGE_FALLTHRU) | |
2580 | { | |
2581 | fallthru_edge = e; | |
2582 | break; | |
2583 | } | |
2584 | } | |
2585 | ||
2586 | gimple_call_set_lhs (stmt, tmp); | |
2587 | update_stmt (stmt); | |
1a91d914 | 2588 | assign_stmt = gimple_build_assign (lhs, tmp); |
2589 | gimple_set_location (assign_stmt, loc); | |
4c0315d0 | 2590 | |
2591 | /* We cannot throw in the middle of a BB. If the call was going | |
2592 | to throw, place the instrumentation on the fallthru edge, so | |
2593 | the call remains the last statement in the block. */ | |
2594 | if (fallthru_edge) | |
2595 | { | |
1a91d914 | 2596 | gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (assign_stmt); |
4c0315d0 | 2597 | gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq); |
2598 | expand_assign_tm (region, &fallthru_gsi); | |
2599 | gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq); | |
2600 | pending_edge_inserts_p = true; | |
2601 | } | |
2602 | else | |
2603 | { | |
1a91d914 | 2604 | gsi_insert_after (gsi, assign_stmt, GSI_CONTINUE_LINKING); |
4c0315d0 | 2605 | expand_assign_tm (region, gsi); |
2606 | } | |
2607 | ||
2608 | transaction_subcode_ior (region, GTMA_HAVE_STORE); | |
2609 | } | |
2610 | ||
2611 | return retval; | |
2612 | } | |
2613 | ||
2614 | ||
2615 | /* Expand all statements in BB as appropriate for being inside | |
2616 | a transaction. */ | |
2617 | ||
2618 | static void | |
2619 | expand_block_tm (struct tm_region *region, basic_block bb) | |
2620 | { | |
2621 | gimple_stmt_iterator gsi; | |
2622 | ||
2623 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ) | |
2624 | { | |
42acab1c | 2625 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 2626 | switch (gimple_code (stmt)) |
2627 | { | |
2628 | case GIMPLE_ASSIGN: | |
2629 | /* Only memory reads/writes need to be instrumented. */ | |
73fd1e9a | 2630 | if (gimple_assign_single_p (stmt) |
2631 | && !gimple_clobber_p (stmt)) | |
4c0315d0 | 2632 | { |
2633 | expand_assign_tm (region, &gsi); | |
2634 | continue; | |
2635 | } | |
2636 | break; | |
2637 | ||
2638 | case GIMPLE_CALL: | |
2639 | if (expand_call_tm (region, &gsi)) | |
2640 | return; | |
2641 | break; | |
2642 | ||
2643 | case GIMPLE_ASM: | |
2644 | gcc_unreachable (); | |
2645 | ||
2646 | default: | |
2647 | break; | |
2648 | } | |
2649 | if (!gsi_end_p (gsi)) | |
2650 | gsi_next (&gsi); | |
2651 | } | |
2652 | } | |
2653 | ||
2654 | /* Return the list of basic-blocks in REGION. | |
2655 | ||
2656 | STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
79f4a793 | 2657 | blocks following a TM_IRREVOCABLE call.
2658 | ||
2659 | INCLUDE_UNINSTRUMENTED_P is TRUE if we should include the | |
2660 | uninstrumented code path blocks in the list of basic blocks | |
2661 | returned, false otherwise. */ | |
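/* A typical use, as in compute_transaction_bits below (sketch):

     vec<basic_block> bbs
       = get_tm_region_blocks (region->entry_block, region->exit_blocks,
                               region->irr_blocks, NULL,
                               /*stop_at_irr_p=*/true);
     // ... walk BBS ...
     bbs.release ();
*/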
4c0315d0 | 2662 | |
f1f41a6c | 2663 | static vec<basic_block> |
4c0315d0 | 2664 | get_tm_region_blocks (basic_block entry_block, |
2665 | bitmap exit_blocks, | |
2666 | bitmap irr_blocks, | |
2667 | bitmap all_region_blocks, | |
79f4a793 | 2668 | bool stop_at_irrevocable_p, |
2669 | bool include_uninstrumented_p = true) | |
4c0315d0 | 2670 | { |
1e094109 | 2671 | vec<basic_block> bbs = vNULL; |
4c0315d0 | 2672 | unsigned i; |
2673 | edge e; | |
2674 | edge_iterator ei; | |
2675 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
2676 | ||
2677 | i = 0; | |
f1f41a6c | 2678 | bbs.safe_push (entry_block); |
4c0315d0 | 2679 | bitmap_set_bit (visited_blocks, entry_block->index); |
2680 | ||
2681 | do | |
2682 | { | |
f1f41a6c | 2683 | basic_block bb = bbs[i++]; |
4c0315d0 | 2684 | |
2685 | if (exit_blocks && | |
2686 | bitmap_bit_p (exit_blocks, bb->index)) | |
2687 | continue; | |
2688 | ||
2689 | if (stop_at_irrevocable_p | |
2690 | && irr_blocks | |
2691 | && bitmap_bit_p (irr_blocks, bb->index)) | |
2692 | continue; | |
2693 | ||
2694 | FOR_EACH_EDGE (e, ei, bb->succs) | |
79f4a793 | 2695 | if ((include_uninstrumented_p |
2696 | || !(e->flags & EDGE_TM_UNINSTRUMENTED)) | |
2697 | && !bitmap_bit_p (visited_blocks, e->dest->index)) | |
4c0315d0 | 2698 | { |
2699 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 2700 | bbs.safe_push (e->dest); |
4c0315d0 | 2701 | } |
2702 | } | |
f1f41a6c | 2703 | while (i < bbs.length ()); |
4c0315d0 | 2704 | |
2705 | if (all_region_blocks) | |
2706 | bitmap_ior_into (all_region_blocks, visited_blocks); | |
2707 | ||
2708 | BITMAP_FREE (visited_blocks); | |
2709 | return bbs; | |
2710 | } | |
2711 | ||
79f4a793 | 2712 | // Callback data for collect_bb2reg. |
2713 | struct bb2reg_stuff | |
2714 | { | |
04009ada | 2715 | vec<tm_region *> *bb2reg; |
79f4a793 | 2716 | bool include_uninstrumented_p; |
2717 | }; | |
2718 | ||
0cd02a19 | 2719 | // Callback for expand_regions, collect innermost region data for each bb. |
2720 | static void * | |
2721 | collect_bb2reg (struct tm_region *region, void *data) | |
2722 | { | |
79f4a793 | 2723 | struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data; |
04009ada | 2724 | vec<tm_region *> *bb2reg = stuff->bb2reg; |
f1f41a6c | 2725 | vec<basic_block> queue; |
0cd02a19 | 2726 | unsigned int i; |
2727 | basic_block bb; | |
2728 | ||
2729 | queue = get_tm_region_blocks (region->entry_block, | |
2730 | region->exit_blocks, | |
2731 | region->irr_blocks, | |
2732 | NULL, | |
79f4a793 | 2733 | /*stop_at_irr_p=*/true, |
2734 | stuff->include_uninstrumented_p); | |
0cd02a19 | 2735 | |
2736 | // We expect expand_region to perform a post-order traversal of the region | |
2737 | // tree. Therefore the last region seen for any bb is the innermost. | |
f1f41a6c | 2738 | FOR_EACH_VEC_ELT (queue, i, bb) |
2739 | (*bb2reg)[bb->index] = region; | |
0cd02a19 | 2740 | |
f1f41a6c | 2741 | queue.release (); |
0cd02a19 | 2742 | return NULL; |
2743 | } | |
2744 | ||
2745 | // Returns a vector, indexed by BB->INDEX, of the innermost tm_region to | |
2746 | // which a basic block belongs. Note that we only consider the instrumented | |
79f4a793 | 2747 | // code paths for the region; the uninstrumented code paths are ignored if |
2748 | // INCLUDE_UNINSTRUMENTED_P is false. | |
0cd02a19 | 2749 | // |
2750 | // ??? This data is very similar to the bb_regions array that is collected | |
2751 | // during tm_region_init. Or, rather, this data is similar to what could | |
2752 | // be used within tm_region_init. The actual computation in tm_region_init | |
2753 | // begins and ends with bb_regions entirely full of NULL pointers, due to | |
2754 | // the way in which pointers are swapped in and out of the array. | |
2755 | // | |
2756 | // ??? Our callers expect that blocks are not shared between transactions. | |
2757 | // When the optimizers get too smart, and blocks are shared, then during | |
2758 | // the tm_mark phase we'll add log entries to only one of the two transactions, | |
2759 | // and in the tm_edge phase we'll add edges to the CFG that create invalid | |
2760 | // cycles.  The symptom is SSA defs that do not dominate their uses.
2761 | // Note that the optimizers were locally correct with their transformation, | |
2762 | // as we have no info within the program that suggests that the blocks cannot | |
2763 | // be shared. | |
2764 | // | |
2765 | // ??? There is currently a hack inside tree-ssa-pre.c to work around the | |
2766 | // only known instance of this block sharing. | |
2767 | ||
04009ada | 2768 | static vec<tm_region *> |
79f4a793 | 2769 | get_bb_regions_instrumented (bool traverse_clones, |
2770 | bool include_uninstrumented_p) | |
0cd02a19 | 2771 | { |
fe672ac0 | 2772 | unsigned n = last_basic_block_for_fn (cfun); |
79f4a793 | 2773 | struct bb2reg_stuff stuff; |
04009ada | 2774 | vec<tm_region *> ret; |
0cd02a19 | 2775 | |
f1f41a6c | 2776 | ret.create (n); |
2777 | ret.safe_grow_cleared (n); | |
79f4a793 | 2778 | stuff.bb2reg = &ret; |
2779 | stuff.include_uninstrumented_p = include_uninstrumented_p; | |
2780 | expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones); | |
0cd02a19 | 2781 | |
2782 | return ret; | |
2783 | } | |
2784 | ||
de60f90c | 2785 | /* Set the BB_IN_TRANSACTION flag on every basic block that appears
2786 | within a transaction.  */
2787 | ||
2788 | void | |
2789 | compute_transaction_bits (void) | |
2790 | { | |
2791 | struct tm_region *region; | |
f1f41a6c | 2792 | vec<basic_block> queue; |
de60f90c | 2793 | unsigned int i; |
de60f90c | 2794 | basic_block bb; |
2795 | ||
2796 | /* ?? Perhaps we need to abstract gate_tm_init further, because we | |
2797 | certainly don't need it to calculate CDI_DOMINATOR info. */ | |
2798 | gate_tm_init (); | |
2799 | ||
fc00614f | 2800 | FOR_EACH_BB_FN (bb, cfun) |
6ad451f8 | 2801 | bb->flags &= ~BB_IN_TRANSACTION; |
2802 | ||
de60f90c | 2803 | for (region = all_tm_regions; region; region = region->next) |
2804 | { | |
2805 | queue = get_tm_region_blocks (region->entry_block, | |
2806 | region->exit_blocks, | |
2807 | region->irr_blocks, | |
2808 | NULL, | |
2809 | /*stop_at_irr_p=*/true); | |
f1f41a6c | 2810 | for (i = 0; queue.iterate (i, &bb); ++i) |
6ad451f8 | 2811 | bb->flags |= BB_IN_TRANSACTION; |
f1f41a6c | 2812 | queue.release (); |
de60f90c | 2813 | } |
2814 | ||
2815 | if (all_tm_regions) | |
2816 | bitmap_obstack_release (&tm_obstack); | |
2817 | } | |
2818 | ||
0cd02a19 | 2819 | /* Replace the GIMPLE_TRANSACTION in this region with the corresponding |
2820 | call to BUILT_IN_TM_START. */ | |
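/* The generated code looks roughly like this (a sketch; which tests are
   emitted depends on the edges present and on whether anything was
   logged; PR_* denotes the property flags computed below):

     tm_state = _ITM_beginTransaction (PR_* flags);
     if (tm_state & A_RESTORELIVEVARIABLES)
       <restore logged values>;
     if (tm_state & A_ABORTTRANSACTION)
       goto over;
     if (tm_state & A_RUNUNINSTRUMENTEDCODE)
       goto uninstrumented;
     else
       goto instrumented;
*/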
2821 | ||
2822 | static void * | |
2823 | expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
2824 | { | |
2825 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
2826 | basic_block transaction_bb = gimple_bb (region->transaction_stmt); | |
2827 | tree tm_state = region->tm_state; | |
2828 | tree tm_state_type = TREE_TYPE (tm_state); | |
2829 | edge abort_edge = NULL; | |
2830 | edge inst_edge = NULL; | |
2831 | edge uninst_edge = NULL; | |
2832 | edge fallthru_edge = NULL; | |
2833 | ||
2834 | // Identify the various successors of the transaction start. | |
2835 | { | |
2836 | edge_iterator i; | |
2837 | edge e; | |
2838 | FOR_EACH_EDGE (e, i, transaction_bb->succs) | |
2839 | { | |
2840 | if (e->flags & EDGE_TM_ABORT) | |
2841 | abort_edge = e; | |
2842 | else if (e->flags & EDGE_TM_UNINSTRUMENTED) | |
2843 | uninst_edge = e; | |
2844 | else | |
2845 | inst_edge = e; | |
2846 | if (e->flags & EDGE_FALLTHRU) | |
2847 | fallthru_edge = e; | |
2848 | } | |
2849 | } | |
2850 | ||
2851 | /* ??? There are plenty of bits here we're not computing. */ | |
2852 | { | |
1a91d914 | 2853 | int subcode = gimple_transaction_subcode (region->get_transaction_stmt ()); |
0cd02a19 | 2854 | int flags = 0; |
2855 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
2856 | flags |= PR_DOESGOIRREVOCABLE; | |
2857 | if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0) | |
2858 | flags |= PR_HASNOIRREVOCABLE; | |
2859 | /* If the transaction does not have an abort in lexical scope and is not | |
2860 | marked as an outer transaction, then it will never abort. */ | |
2861 | if ((subcode & GTMA_HAVE_ABORT) == 0 && (subcode & GTMA_IS_OUTER) == 0) | |
2862 | flags |= PR_HASNOABORT; | |
2863 | if ((subcode & GTMA_HAVE_STORE) == 0) | |
2864 | flags |= PR_READONLY; | |
1910089e | 2865 | if (inst_edge && !(subcode & GTMA_HAS_NO_INSTRUMENTATION)) |
0cd02a19 | 2866 | flags |= PR_INSTRUMENTEDCODE; |
2867 | if (uninst_edge) | |
2868 | flags |= PR_UNINSTRUMENTEDCODE; | |
2869 | if (subcode & GTMA_IS_OUTER) | |
2870 | region->original_transaction_was_outer = true; | |
2871 | tree t = build_int_cst (tm_state_type, flags); | |
1a91d914 | 2872 | gcall *call = gimple_build_call (tm_start, 1, t); |
0cd02a19 | 2873 | gimple_call_set_lhs (call, tm_state); |
2874 | gimple_set_location (call, gimple_location (region->transaction_stmt)); | |
2875 | ||
2876 | // Replace the GIMPLE_TRANSACTION with the call to BUILT_IN_TM_START. | |
2877 | gimple_stmt_iterator gsi = gsi_last_bb (transaction_bb); | |
2878 | gcc_assert (gsi_stmt (gsi) == region->transaction_stmt); | |
2879 | gsi_insert_before (&gsi, call, GSI_SAME_STMT); | |
2880 | gsi_remove (&gsi, true); | |
2881 | region->transaction_stmt = call; | |
2882 | } | |
2883 | ||
2884 | // Generate log saves. | |
f1f41a6c | 2885 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2886 | tm_log_emit_saves (region->entry_block, transaction_bb); |
2887 | ||
2888 | // In the beginning, we've no tests to perform on transaction restart. | |
2889 | // Note that after this point, transaction_bb becomes the "most recent | |
2890 | // block containing tests for the transaction". | |
2891 | region->restart_block = region->entry_block; | |
2892 | ||
2893 | // Generate log restores. | |
f1f41a6c | 2894 | if (!tm_log_save_addresses.is_empty ()) |
0cd02a19 | 2895 | { |
2896 | basic_block test_bb = create_empty_bb (transaction_bb); | |
2897 | basic_block code_bb = create_empty_bb (test_bb); | |
2898 | basic_block join_bb = create_empty_bb (code_bb); | |
b3083327 | 2899 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
2900 | add_bb_to_loop (code_bb, transaction_bb->loop_father); | |
2901 | add_bb_to_loop (join_bb, transaction_bb->loop_father); | |
0cd02a19 | 2902 | if (region->restart_block == region->entry_block) |
2903 | region->restart_block = test_bb; | |
2904 | ||
f9e245b2 | 2905 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2906 | tree t2 = build_int_cst (tm_state_type, A_RESTORELIVEVARIABLES); |
42acab1c | 2907 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2908 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2909 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2910 | ||
2911 | t2 = build_int_cst (tm_state_type, 0); | |
2912 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2913 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2914 | ||
2915 | tm_log_emit_restores (region->entry_block, code_bb); | |
2916 | ||
2917 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
2918 | edge et = make_edge (test_bb, code_bb, EDGE_TRUE_VALUE); | |
2919 | edge ef = make_edge (test_bb, join_bb, EDGE_FALSE_VALUE); | |
2920 | redirect_edge_pred (fallthru_edge, join_bb); | |
2921 | ||
0cd02a19 | 2922 | join_bb->count = test_bb->count = transaction_bb->count; |
2923 | ||
720cfc43 | 2924 | ei->probability = profile_probability::always (); |
2925 | et->probability = profile_probability::likely (); | |
2926 | ef->probability = profile_probability::unlikely (); | |
0cd02a19 | 2927 | |
ea5d3981 | 2928 | code_bb->count = et->count (); |
0cd02a19 | 2929 | |
2930 | transaction_bb = join_bb; | |
2931 | } | |
2932 | ||
2933 | // If we have an ABORT edge, create a test to perform the abort. | |
2934 | if (abort_edge) | |
2935 | { | |
2936 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2937 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2938 | if (region->restart_block == region->entry_block) |
2939 | region->restart_block = test_bb; | |
2940 | ||
f9e245b2 | 2941 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2942 | tree t2 = build_int_cst (tm_state_type, A_ABORTTRANSACTION); |
42acab1c | 2943 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2944 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2945 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2946 | ||
2947 | t2 = build_int_cst (tm_state_type, 0); | |
2948 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2949 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2950 | ||
2951 | edge ei = make_edge (transaction_bb, test_bb, EDGE_FALLTHRU); | |
0cd02a19 | 2952 | test_bb->count = transaction_bb->count; |
720cfc43 | 2953 | ei->probability = profile_probability::always (); |
0cd02a19 | 2954 | |
2955 | // Not the abort edge.  If both are live, choose one at random, as
2956 | // we'll be fixing that up below.
2957 | redirect_edge_pred (fallthru_edge, test_bb); | |
2958 | fallthru_edge->flags = EDGE_FALSE_VALUE; | |
720cfc43 | 2959 | fallthru_edge->probability = profile_probability::very_likely (); |
0cd02a19 | 2960 | |
2961 | // Abort/over edge. | |
2962 | redirect_edge_pred (abort_edge, test_bb); | |
2963 | abort_edge->flags = EDGE_TRUE_VALUE; | |
720cfc43 | 2964 | abort_edge->probability = profile_probability::unlikely (); |
0cd02a19 | 2965 | |
2966 | transaction_bb = test_bb; | |
2967 | } | |
2968 | ||
2969 | // If we have both instrumented and uninstrumented code paths, select one. | |
2970 | if (inst_edge && uninst_edge) | |
2971 | { | |
2972 | basic_block test_bb = create_empty_bb (transaction_bb); | |
b3083327 | 2973 | add_bb_to_loop (test_bb, transaction_bb->loop_father); |
0cd02a19 | 2974 | if (region->restart_block == region->entry_block) |
2975 | region->restart_block = test_bb; | |
2976 | ||
f9e245b2 | 2977 | tree t1 = create_tmp_reg (tm_state_type); |
0cd02a19 | 2978 | tree t2 = build_int_cst (tm_state_type, A_RUNUNINSTRUMENTEDCODE); |
2979 | ||
42acab1c | 2980 | gimple *stmt = gimple_build_assign (t1, BIT_AND_EXPR, tm_state, t2); |
0cd02a19 | 2981 | gimple_stmt_iterator gsi = gsi_last_bb (test_bb); |
2982 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2983 | ||
2984 | t2 = build_int_cst (tm_state_type, 0); | |
2985 | stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL); | |
2986 | gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); | |
2987 | ||
2988 | // Create the edge into test_bb first, as we want to copy values | |
2989 | // out of the fallthru edge. | |
2990 | edge e = make_edge (transaction_bb, test_bb, fallthru_edge->flags); | |
2991 | e->probability = fallthru_edge->probability; | |
ea5d3981 | 2992 | test_bb->count = fallthru_edge->count (); |
0cd02a19 | 2993 | |
2994 | // Now update the edges to the inst/uninst implementations.
2995 | // For now assume that the paths are equally likely.  When using HTM,
2996 | // we'll try the uninst path first and fall back to the inst path if HTM
2997 | // buffers are exceeded.  Without HTM we start with the inst path and
2998 | // use the uninst path when falling back to serial mode.
2999 | redirect_edge_pred (inst_edge, test_bb); | |
3000 | inst_edge->flags = EDGE_FALSE_VALUE; | |
720cfc43 | 3001 | inst_edge->probability = profile_probability::even (); |
0cd02a19 | 3002 | |
3003 | redirect_edge_pred (uninst_edge, test_bb); | |
3004 | uninst_edge->flags = EDGE_TRUE_VALUE; | |
720cfc43 | 3005 | uninst_edge->probability = profile_probability::even (); |
0cd02a19 | 3006 | } |
3007 | ||
3008 | // If we have no previous special cases, and we have PHIs at the beginning | |
3009 | // of the atomic region, this means we have a loop at the beginning of the | |
3010 | // atomic region that shares the first block. This can cause problems when | |
3011 | // the transaction restart abnormal edges are added in the tm_edges pass. | |
3012 | // Solve this by adding a new empty block to receive the abnormal edges. | |
3013 | if (region->restart_block == region->entry_block | |
3014 | && phi_nodes (region->entry_block)) | |
3015 | { | |
3016 | basic_block empty_bb = create_empty_bb (transaction_bb); | |
3017 | region->restart_block = empty_bb; | |
b3083327 | 3018 | add_bb_to_loop (empty_bb, transaction_bb->loop_father); |
0cd02a19 | 3019 | |
3020 | redirect_edge_pred (fallthru_edge, empty_bb); | |
3021 | make_edge (transaction_bb, empty_bb, EDGE_FALLTHRU); | |
3022 | } | |
3023 | ||
3024 | return NULL; | |
3025 | } | |
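
/* Editor's illustrative sketch, not part of this file: the dispatch that the
   code above builds, written as ordinary C.  `tm_state' stands in for the
   value returned by BUILT_IN_TM_START; the X_* masks, `dispatch' and the
   three handlers are invented placeholders (the real masks are the A_*
   defines near the top of this file).  Compiles and runs on its own.  */

#include <stdio.h>

#define X_RUNINSTRUMENTEDCODE   (1u << 0)
#define X_RUNUNINSTRUMENTEDCODE (1u << 1)
#define X_ABORTTRANSACTION      (1u << 2)

static void over (void)        { puts ("transaction aborted; skip the body"); }
static void uninst_body (void) { puts ("run the uninstrumented code path"); }
static void inst_body (void)   { puts ("run the instrumented code path"); }

/* Mirror of the generated control flow: test the abort bit first, then
   choose between the instrumented and uninstrumented paths.  */
static void
dispatch (unsigned tm_state)
{
  if (tm_state & X_ABORTTRANSACTION)
    over ();
  else if (tm_state & X_RUNUNINSTRUMENTEDCODE)
    uninst_body ();
  else
    inst_body ();
}

int
main (void)
{
  dispatch (X_RUNINSTRUMENTEDCODE);
  dispatch (X_RUNUNINSTRUMENTEDCODE);
  dispatch (X_ABORTTRANSACTION);
  return 0;
}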
3026 | ||
3027 | /* Generate the temporary to be used for the return value of | |
3028 | BUILT_IN_TM_START. */ | |
3029 | ||
3030 | static void * | |
3031 | generate_tm_state (struct tm_region *region, void *data ATTRIBUTE_UNUSED) | |
3032 | { | |
3033 | tree tm_start = builtin_decl_explicit (BUILT_IN_TM_START); | |
3034 | region->tm_state = | |
3035 | create_tmp_reg (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state"); | |
3036 | ||
3037 | // Reset the subcode, post optimizations. We'll fill this in | |
3038 | // again as we process blocks. | |
3039 | if (region->exit_blocks) | |
3040 | { | |
1a91d914 | 3041 | gtransaction *transaction_stmt = region->get_transaction_stmt (); |
3042 | unsigned int subcode = gimple_transaction_subcode (transaction_stmt); | |
0cd02a19 | 3043 | |
3044 | if (subcode & GTMA_DOES_GO_IRREVOCABLE) | |
3045 | subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE | |
1910089e | 3046 | | GTMA_MAY_ENTER_IRREVOCABLE |
3047 | | GTMA_HAS_NO_INSTRUMENTATION); | |
0cd02a19 | 3048 | else |
3049 | subcode &= GTMA_DECLARATION_MASK; | |
1a91d914 | 3050 | gimple_transaction_set_subcode (transaction_stmt, subcode); |
0cd02a19 | 3051 | } |
3052 | ||
3053 | return NULL; | |
3054 | } | |
3055 | ||
3056 | // Propagate flags from inner transactions outwards. | |
3057 | static void | |
3058 | propagate_tm_flags_out (struct tm_region *region) | |
3059 | { | |
3060 | if (region == NULL) | |
3061 | return; | |
3062 | propagate_tm_flags_out (region->inner); | |
3063 | ||
3064 | if (region->outer && region->outer->transaction_stmt) | |
3065 | { | |
1a91d914 | 3066 | unsigned s |
3067 | = gimple_transaction_subcode (region->get_transaction_stmt ()); | |
0cd02a19 | 3068 | s &= (GTMA_HAVE_ABORT | GTMA_HAVE_LOAD | GTMA_HAVE_STORE |
3069 | | GTMA_MAY_ENTER_IRREVOCABLE); | |
1a91d914 | 3070 | s |= gimple_transaction_subcode (region->outer->get_transaction_stmt ()); |
3071 | gimple_transaction_set_subcode (region->outer->get_transaction_stmt (), | |
3072 | s); | |
0cd02a19 | 3073 | } |
3074 | ||
3075 | propagate_tm_flags_out (region->next); | |
3076 | } | |
3077 | ||
4c0315d0 | 3078 | /* Entry point to the MARK phase of TM expansion. Here we replace |
3079 | transactional memory statements with calls to builtins, and function | |
3080 | calls with their transactional clones (if available). But we don't | |
3081 | yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */ | |
3082 | ||
3083 | static unsigned int | |
3084 | execute_tm_mark (void) | |
3085 | { | |
4c0315d0 | 3086 | pending_edge_inserts_p = false; |
3087 | ||
00d83cc8 | 3088 | expand_regions (all_tm_regions, generate_tm_state, NULL, |
3089 | /*traverse_clones=*/true); | |
4c0315d0 | 3090 | |
0cd02a19 | 3091 | tm_log_init (); |
4c0315d0 | 3092 | |
04009ada | 3093 | vec<tm_region *> bb_regions |
79f4a793 | 3094 | = get_bb_regions_instrumented (/*traverse_clones=*/true, |
3095 | /*include_uninstrumented_p=*/false); | |
0cd02a19 | 3096 | struct tm_region *r; |
3097 | unsigned i; | |
4c0315d0 | 3098 | |
0cd02a19 | 3099 | // Expand memory operations into calls into the runtime. |
3100 | // This collects log entries as well. | |
f1f41a6c | 3101 | FOR_EACH_VEC_ELT (bb_regions, i, r) |
ded1a556 | 3102 | { |
3103 | if (r != NULL) | |
3104 | { | |
3105 | if (r->transaction_stmt) | |
3106 | { | |
1a91d914 | 3107 | unsigned sub |
3108 | = gimple_transaction_subcode (r->get_transaction_stmt ()); | |
ded1a556 | 3109 | |
3110 | /* If we're sure to go irrevocable, there won't be | |
3111 | anything to expand, since the run-time will go | |
3112 | irrevocable right away. */ | |
3113 | if (sub & GTMA_DOES_GO_IRREVOCABLE | |
3114 | && sub & GTMA_MAY_ENTER_IRREVOCABLE) | |
3115 | continue; | |
3116 | } | |
f5a6b05f | 3117 | expand_block_tm (r, BASIC_BLOCK_FOR_FN (cfun, i)); |
ded1a556 | 3118 | } |
3119 | } | |
0cd02a19 | 3120 | |
4aac6cf8 | 3121 | bb_regions.release (); |
3122 | ||
0cd02a19 | 3123 | // Propagate flags from inner transactions outwards. |
3124 | propagate_tm_flags_out (all_tm_regions); | |
3125 | ||
3126 | // Expand GIMPLE_TRANSACTIONs into calls into the runtime. | |
00d83cc8 | 3127 | expand_regions (all_tm_regions, expand_transaction, NULL, |
3128 | /*traverse_clones=*/false); | |
0cd02a19 | 3129 | |
3130 | tm_log_emit (); | |
3131 | tm_log_delete (); | |
4c0315d0 | 3132 | |
3133 | if (pending_edge_inserts_p) | |
3134 | gsi_commit_edge_inserts (); | |
0cd02a19 | 3135 | free_dominance_info (CDI_DOMINATORS); |
4c0315d0 | 3136 | return 0; |
3137 | } | |
3138 | ||
7620bc82 | 3139 | namespace { |
3140 | ||
3141 | const pass_data pass_data_tm_mark = | |
cbe8bda8 | 3142 | { |
3143 | GIMPLE_PASS, /* type */ | |
3144 | "tmmark", /* name */ | |
3145 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3146 | TV_TRANS_MEM, /* tv_id */ |
3147 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3148 | 0, /* properties_provided */ | |
3149 | 0, /* properties_destroyed */ | |
3150 | 0, /* todo_flags_start */ | |
8b88439e | 3151 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3152 | }; |
cbe8bda8 | 3153 | |
7620bc82 | 3154 | class pass_tm_mark : public gimple_opt_pass |
cbe8bda8 | 3155 | { |
3156 | public: | |
9af5ce0c | 3157 | pass_tm_mark (gcc::context *ctxt) |
3158 | : gimple_opt_pass (pass_data_tm_mark, ctxt) | |
cbe8bda8 | 3159 | {} |
3160 | ||
3161 | /* opt_pass methods: */ | |
65b0537f | 3162 | virtual unsigned int execute (function *) { return execute_tm_mark (); } |
cbe8bda8 | 3163 | |
3164 | }; // class pass_tm_mark | |
3165 | ||
7620bc82 | 3166 | } // anon namespace |
3167 | ||
cbe8bda8 | 3168 | gimple_opt_pass * |
3169 | make_pass_tm_mark (gcc::context *ctxt) | |
3170 | { | |
3171 | return new pass_tm_mark (ctxt); | |
3172 | } | |
4c0315d0 | 3173 | \f |
0cd02a19 | 3174 | |
3175 | /* Create an abnormal edge from STMT at ITER, splitting the block | |
3176 | as necessary. Adjust *PNEXT as needed for the split block. */ | |
4c0315d0 | 3177 | |
3178 | static inline void | |
42acab1c | 3179 | split_bb_make_tm_edge (gimple *stmt, basic_block dest_bb, |
0cd02a19 | 3180 | gimple_stmt_iterator iter, gimple_stmt_iterator *pnext) |
4c0315d0 | 3181 | { |
0cd02a19 | 3182 | basic_block bb = gimple_bb (stmt); |
3183 | if (!gsi_one_before_end_p (iter)) | |
3184 | { | |
3185 | edge e = split_block (bb, stmt); | |
3186 | *pnext = gsi_start_bb (e->dest); | |
3187 | } | |
eaa16535 | 3188 | edge e = make_edge (bb, dest_bb, EDGE_ABNORMAL); |
3189 | if (e) | |
ea5d3981 | 3190 | e->probability = profile_probability::guessed_never (); |
4c0315d0 | 3191 | |
0cd02a19 | 3192 | // Record the need for the edge for the benefit of the rtl passes. |
4c0315d0 | 3193 | if (cfun->gimple_df->tm_restart == NULL) |
b7aa58e4 | 3194 | cfun->gimple_df->tm_restart |
3195 | = hash_table<tm_restart_hasher>::create_ggc (31); | |
4c0315d0 | 3196 | |
0cd02a19 | 3197 | struct tm_restart_node dummy; |
4c0315d0 | 3198 | dummy.stmt = stmt; |
0cd02a19 | 3199 | dummy.label_or_list = gimple_block_label (dest_bb); |
3200 | ||
b7aa58e4 | 3201 | tm_restart_node **slot = cfun->gimple_df->tm_restart->find_slot (&dummy, |
3202 | INSERT); | |
3203 | struct tm_restart_node *n = *slot; | |
4c0315d0 | 3204 | if (n == NULL) |
3205 | { | |
25a27413 | 3206 | n = ggc_alloc<tm_restart_node> (); |
4c0315d0 | 3207 | *n = dummy; |
3208 | } | |
3209 | else | |
3210 | { | |
3211 | tree old = n->label_or_list; | |
3212 | if (TREE_CODE (old) == LABEL_DECL) | |
0cd02a19 | 3213 | old = tree_cons (NULL, old, NULL); |
4c0315d0 | 3214 | n->label_or_list = tree_cons (NULL, dummy.label_or_list, old); |
3215 | } | |
4c0315d0 | 3216 | } |
3217 | ||
4c0315d0 | 3218 | /* Split block BB as necessary for every builtin function we added, and |
3219 | wire up the abnormal back edges implied by the transaction restart. */ | |
3220 | ||
3221 | static void | |
0cd02a19 | 3222 | expand_block_edges (struct tm_region *const region, basic_block bb) |
4c0315d0 | 3223 | { |
0cd02a19 | 3224 | gimple_stmt_iterator gsi, next_gsi; |
4c0315d0 | 3225 | |
0cd02a19 | 3226 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi = next_gsi) |
4c0315d0 | 3227 | { |
42acab1c | 3228 | gimple *stmt = gsi_stmt (gsi); |
1a91d914 | 3229 | gcall *call_stmt; |
4c0315d0 | 3230 | |
0cd02a19 | 3231 | next_gsi = gsi; |
3232 | gsi_next (&next_gsi); | |
3233 | ||
3234 | // ??? Shouldn't we split for any non-pure, non-irrevocable function? | |
1a91d914 | 3235 | call_stmt = dyn_cast <gcall *> (stmt); |
3236 | if ((!call_stmt) | |
3237 | || (gimple_call_flags (call_stmt) & ECF_TM_BUILTIN) == 0) | |
0cd02a19 | 3238 | continue; |
3239 | ||
50b8400c | 3240 | if (gimple_call_builtin_p (call_stmt, BUILT_IN_TM_ABORT)) |
4c0315d0 | 3241 | { |
0cd02a19 | 3242 | // If we have a ``__transaction_cancel [[outer]]'', there is only
3243 | // one abnormal edge: to the transaction marked OUTER. | |
3244 | // All compiler-generated instances of BUILT_IN_TM_ABORT have a | |
3245 | // constant argument, which we can examine here. Users invoking | |
3246 | // TM_ABORT directly get what they deserve. | |
1a91d914 | 3247 | tree arg = gimple_call_arg (call_stmt, 0); |
0cd02a19 | 3248 | if (TREE_CODE (arg) == INTEGER_CST |
3249 | && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 | |
3250 | && !decl_is_tm_clone (current_function_decl)) | |
4c0315d0 | 3251 | { |
0cd02a19 | 3252 | // Find the GTMA_IS_OUTER transaction. |
3253 | for (struct tm_region *o = region; o; o = o->outer) | |
3254 | if (o->original_transaction_was_outer) | |
3255 | { | |
1a91d914 | 3256 | split_bb_make_tm_edge (call_stmt, o->restart_block, |
0cd02a19 | 3257 | gsi, &next_gsi); |
3258 | break; | |
3259 | } | |
3260 | ||
3261 | // Otherwise, the front-end should have semantically checked | |
3262 | // outer aborts, but in either case the target region is not | |
3263 | // within this function. | |
3264 | continue; | |
4c0315d0 | 3265 | } |
3266 | ||
0cd02a19 | 3267 | // Non-outer TM aborts have an abnormal edge to the inner-most |
3268 | // transaction, the one being aborted. | |
1a91d914 | 3269 | split_bb_make_tm_edge (call_stmt, region->restart_block, gsi, |
3270 | &next_gsi); | |
4c0315d0 | 3271 | } |
3272 | ||
0cd02a19 | 3273 | // All TM builtins have an abnormal edge to the outer-most transaction. |
3274 | // We never restart inner transactions. For tm clones, we know a priori | |
3275 | // that the outer-most transaction is outside the function. | |
3276 | if (decl_is_tm_clone (current_function_decl)) | |
3277 | continue; | |
4c0315d0 | 3278 | |
0cd02a19 | 3279 | if (cfun->gimple_df->tm_restart == NULL) |
3280 | cfun->gimple_df->tm_restart | |
b7aa58e4 | 3281 | = hash_table<tm_restart_hasher>::create_ggc (31); |
4c0315d0 | 3282 | |
0cd02a19 | 3283 | // All TM builtins have an abnormal edge to the outer-most transaction. |
3284 | // We never restart inner transactions. | |
3285 | for (struct tm_region *o = region; o; o = o->outer) | |
3286 | if (!o->outer) | |
3287 | { | |
1a91d914 | 3288 | split_bb_make_tm_edge (call_stmt, o->restart_block, gsi, &next_gsi); |
0cd02a19 | 3289 | break; |
3290 | } | |
4c0315d0 | 3291 | |
0cd02a19 | 3292 | // Delete any tail-call annotation that may have been added. |
3293 | // The tail-call pass may have mis-identified the commit as being | |
3294 | // a candidate because we had not yet added this restart edge. | |
1a91d914 | 3295 | gimple_call_set_tail (call_stmt, false); |
4c0315d0 | 3296 | } |
3297 | } | |
3298 | ||
3299 | /* Entry point to the final expansion of transactional nodes. */ | |
3300 | ||
7620bc82 | 3301 | namespace { |
3302 | ||
3303 | const pass_data pass_data_tm_edges = | |
cbe8bda8 | 3304 | { |
3305 | GIMPLE_PASS, /* type */ | |
3306 | "tmedge", /* name */ | |
3307 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 3308 | TV_TRANS_MEM, /* tv_id */ |
3309 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
3310 | 0, /* properties_provided */ | |
3311 | 0, /* properties_destroyed */ | |
3312 | 0, /* todo_flags_start */ | |
8b88439e | 3313 | TODO_update_ssa, /* todo_flags_finish */ |
4c0315d0 | 3314 | }; |
cbe8bda8 | 3315 | |
7620bc82 | 3316 | class pass_tm_edges : public gimple_opt_pass |
cbe8bda8 | 3317 | { |
3318 | public: | |
9af5ce0c | 3319 | pass_tm_edges (gcc::context *ctxt) |
3320 | : gimple_opt_pass (pass_data_tm_edges, ctxt) | |
cbe8bda8 | 3321 | {} |
3322 | ||
3323 | /* opt_pass methods: */ | |
65b0537f | 3324 | virtual unsigned int execute (function *); |
cbe8bda8 | 3325 | |
3326 | }; // class pass_tm_edges | |
3327 | ||
65b0537f | 3328 | unsigned int |
3329 | pass_tm_edges::execute (function *fun) | |
3330 | { | |
04009ada | 3331 | vec<tm_region *> bb_regions |
65b0537f | 3332 | = get_bb_regions_instrumented (/*traverse_clones=*/false, |
3333 | /*include_uninstrumented_p=*/true); | |
3334 | struct tm_region *r; | |
3335 | unsigned i; | |
3336 | ||
3337 | FOR_EACH_VEC_ELT (bb_regions, i, r) | |
3338 | if (r != NULL) | |
3339 | expand_block_edges (r, BASIC_BLOCK_FOR_FN (fun, i)); | |
3340 | ||
3341 | bb_regions.release (); | |
3342 | ||
3343 | /* We've got to release the dominance info now, to indicate that it | |
3344 | must be rebuilt completely. Otherwise we'll crash trying to update | |
3345 | the SSA web in the TODO section following this pass. */ | |
3346 | free_dominance_info (CDI_DOMINATORS); | |
6d9dcf16 | 3347 | /* We've also wrecked loops badly by inserting abnormal edges. */
3348 | loops_state_set (LOOPS_NEED_FIXUP); | |
65b0537f | 3349 | bitmap_obstack_release (&tm_obstack); |
3350 | all_tm_regions = NULL; | |
3351 | ||
3352 | return 0; | |
3353 | } | |
3354 | ||
7620bc82 | 3355 | } // anon namespace |
3356 | ||
cbe8bda8 | 3357 | gimple_opt_pass * |
3358 | make_pass_tm_edges (gcc::context *ctxt) | |
3359 | { | |
3360 | return new pass_tm_edges (ctxt); | |
3361 | } | |
0cd02a19 | 3362 | \f |
3363 | /* Helper function for expand_regions. Expand REGION and recurse to | |
3364 | the inner region. Call CALLBACK on each region. CALLBACK returns | |
3365 | NULL to continue the traversal, otherwise a non-null value which | |
00d83cc8 | 3366 | this function will return as well. TRAVERSE_CLONES is true if we |
3367 | should traverse transactional clones. */ | |
0cd02a19 | 3368 | |
3369 | static void * | |
3370 | expand_regions_1 (struct tm_region *region, | |
3371 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3372 | void *data, |
3373 | bool traverse_clones) | |
0cd02a19 | 3374 | { |
3375 | void *retval = NULL; | |
00d83cc8 | 3376 | if (region->exit_blocks |
3377 | || (traverse_clones && decl_is_tm_clone (current_function_decl))) | |
0cd02a19 | 3378 | { |
3379 | retval = callback (region, data); | |
3380 | if (retval) | |
3381 | return retval; | |
3382 | } | |
3383 | if (region->inner) | |
3384 | { | |
00d83cc8 | 3385 | retval = expand_regions (region->inner, callback, data, traverse_clones); |
0cd02a19 | 3386 | if (retval) |
3387 | return retval; | |
3388 | } | |
3389 | return retval; | |
3390 | } | |
3391 | ||
3392 | /* Traverse the regions enclosed and including REGION. Execute | |
3393 | CALLBACK for each region, passing DATA. CALLBACK returns NULL to | |
3394 | continue the traversal, otherwise a non-null value which this | |
00d83cc8 | 3395 | function will return as well. TRAVERSE_CLONES is true if we should |
3396 | traverse transactional clones. */ | |
0cd02a19 | 3397 | |
3398 | static void * | |
3399 | expand_regions (struct tm_region *region, | |
3400 | void *(*callback)(struct tm_region *, void *), | |
00d83cc8 | 3401 | void *data, |
3402 | bool traverse_clones) | |
0cd02a19 | 3403 | { |
3404 | void *retval = NULL; | |
3405 | while (region) | |
3406 | { | |
00d83cc8 | 3407 | retval = expand_regions_1 (region, callback, data, traverse_clones); |
0cd02a19 | 3408 | if (retval) |
3409 | return retval; | |
3410 | region = region->next; | |
3411 | } | |
3412 | return retval; | |
3413 | } | |
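
/* Editor's illustrative sketch, not part of this file: the traversal contract
   of expand_regions/expand_regions_1 on a toy region tree.  `toy_region',
   `walk' and `find_id' are invented for the example, and the guard on
   exit_blocks/clones is omitted; only the shape of the walk carries over:
   visit a region, recurse into `inner', follow `next', and stop at the
   first non-null callback result.  */

#include <stdio.h>
#include <stddef.h>

struct toy_region
{
  int id;
  struct toy_region *inner;   /* First nested region.  */
  struct toy_region *next;    /* Next sibling region.  */
};

static void *
walk (struct toy_region *r, void *(*cb) (struct toy_region *, void *),
      void *data)
{
  for (; r; r = r->next)
    {
      void *ret = cb (r, data);
      if (!ret)
        ret = walk (r->inner, cb, data);
      if (ret)
        return ret;   /* A non-null result stops the whole traversal.  */
    }
  return NULL;
}

/* Callback: return the region whose id matches *(int *) DATA.  */
static void *
find_id (struct toy_region *r, void *data)
{
  return r->id == *(int *) data ? r : NULL;
}

int
main (void)
{
  struct toy_region c = { 3, NULL, NULL };
  struct toy_region b = { 2, &c, NULL };
  struct toy_region a = { 1, &b, NULL };
  int want = 3;
  struct toy_region *hit = (struct toy_region *) walk (&a, find_id, &want);
  printf ("found region %d\n", hit ? hit->id : -1);
  return 0;
}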
3414 | ||
4c0315d0 | 3415 | \f |
3416 | /* A unique TM memory operation. */ | |
04009ada | 3417 | struct tm_memop |
4c0315d0 | 3418 | { |
3419 | /* Unique ID that all memory operations to the same location have. */ | |
3420 | unsigned int value_id; | |
3421 | /* Address of load/store. */ | |
3422 | tree addr; | |
04009ada | 3423 | }; |
4c0315d0 | 3424 | |
d9dd21a8 | 3425 | /* TM memory operation hashtable helpers. */ |
3426 | ||
298e7f9a | 3427 | struct tm_memop_hasher : free_ptr_hash <tm_memop> |
d9dd21a8 | 3428 | { |
9969c043 | 3429 | static inline hashval_t hash (const tm_memop *); |
3430 | static inline bool equal (const tm_memop *, const tm_memop *); | |
d9dd21a8 | 3431 | }; |
3432 | ||
3433 | /* Htab support. Return a hash value for a `tm_memop'. */ | |
3434 | inline hashval_t | |
9969c043 | 3435 | tm_memop_hasher::hash (const tm_memop *mem) |
d9dd21a8 | 3436 | { |
3437 | tree addr = mem->addr; | |
3438 | /* We drill down to the SSA_NAME/DECL for the hash, but equality is | |
3439 | actually done with operand_equal_p (see tm_memop_eq). */ | |
3440 | if (TREE_CODE (addr) == ADDR_EXPR) | |
3441 | addr = TREE_OPERAND (addr, 0); | |
3442 | return iterative_hash_expr (addr, 0); | |
3443 | } | |
3444 | ||
3445 | /* Htab support. Return true if two tm_memop's are the same. */ | |
3446 | inline bool | |
9969c043 | 3447 | tm_memop_hasher::equal (const tm_memop *mem1, const tm_memop *mem2) |
d9dd21a8 | 3448 | { |
3449 | return operand_equal_p (mem1->addr, mem2->addr, 0); | |
3450 | } | |
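
/* Editor's illustrative sketch, not part of this file: the property the two
   helpers above depend on -- the hash may inspect less of the key than the
   equality test, provided keys that compare equal always hash alike.  Here
   the hash looks only at the first character of a string while equality
   compares the whole string; std::unordered_set stands in for GCC's
   hash_table, and `coarse_hash' is invented for the example.  */

#include <string>
#include <unordered_set>
#include <cstdio>

struct coarse_hash
{
  size_t operator() (const std::string &s) const
  {
    /* Coarser than equality: distinct strings may collide, which only
       costs lookup time, never correctness.  */
    return s.empty () ? 0 : (size_t) (unsigned char) s[0];
  }
};

int
main ()
{
  std::unordered_set<std::string, coarse_hash> seen;
  seen.insert ("alpha");
  seen.insert ("apple");   /* Collides with "alpha" but stays distinct.  */
  seen.insert ("alpha");   /* Equal to the first entry; not added again.  */
  std::printf ("%zu entries\n", seen.size ());   /* Prints 2.  */
  return 0;
}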
3451 | ||
4c0315d0 | 3452 | /* Sets for solving data flow equations in the memory optimization pass. */ |
3453 | struct tm_memopt_bitmaps | |
3454 | { | |
3455 | /* Stores available to this BB upon entry. Basically, stores that | |
3456 | dominate this BB. */ | |
3457 | bitmap store_avail_in; | |
3458 | /* Stores available at the end of this BB. */ | |
3459 | bitmap store_avail_out; | |
3460 | bitmap store_antic_in; | |
3461 | bitmap store_antic_out; | |
3462 | /* Reads available to this BB upon entry. Basically, reads that | |
3463 | dominate this BB. */ | |
3464 | bitmap read_avail_in; | |
3465 | /* Reads available at the end of this BB. */ | |
3466 | bitmap read_avail_out; | |
3467 | /* Reads performed in this BB. */ | |
3468 | bitmap read_local; | |
3469 | /* Writes performed in this BB. */ | |
3470 | bitmap store_local; | |
3471 | ||
3472 | /* Temporary storage for pass. */ | |
3473 | /* Is the current BB in the worklist? */ | |
3474 | bool avail_in_worklist_p; | |
3475 | /* Have we visited this BB? */ | |
3476 | bool visited_p; | |
3477 | }; | |
3478 | ||
3479 | static bitmap_obstack tm_memopt_obstack; | |
3480 | ||
3481 | /* Unique counter for TM loads and stores. Loads and stores of the | |
3482 | same address get the same ID. */ | |
3483 | static unsigned int tm_memopt_value_id; | |
c1f445d2 | 3484 | static hash_table<tm_memop_hasher> *tm_memopt_value_numbers; |
4c0315d0 | 3485 | |
3486 | #define STORE_AVAIL_IN(BB) \ | |
3487 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in | |
3488 | #define STORE_AVAIL_OUT(BB) \ | |
3489 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out | |
3490 | #define STORE_ANTIC_IN(BB) \ | |
3491 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in | |
3492 | #define STORE_ANTIC_OUT(BB) \ | |
3493 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out | |
3494 | #define READ_AVAIL_IN(BB) \ | |
3495 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in | |
3496 | #define READ_AVAIL_OUT(BB) \ | |
3497 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out | |
3498 | #define READ_LOCAL(BB) \ | |
3499 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local | |
3500 | #define STORE_LOCAL(BB) \ | |
3501 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local | |
3502 | #define AVAIL_IN_WORKLIST_P(BB) \ | |
3503 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p | |
3504 | #define BB_VISITED_P(BB) \ | |
3505 | ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p | |
3506 | ||
4c0315d0 | 3507 | /* Given a TM load/store in STMT, return the value number for the address |
3508 | it accesses. */ | |
3509 | ||
3510 | static unsigned int | |
42acab1c | 3511 | tm_memopt_value_number (gimple *stmt, enum insert_option op) |
4c0315d0 | 3512 | { |
3513 | struct tm_memop tmpmem, *mem; | |
d9dd21a8 | 3514 | tm_memop **slot; |
4c0315d0 | 3515 | |
3516 | gcc_assert (is_tm_load (stmt) || is_tm_store (stmt)); | |
3517 | tmpmem.addr = gimple_call_arg (stmt, 0); | |
c1f445d2 | 3518 | slot = tm_memopt_value_numbers->find_slot (&tmpmem, op); |
4c0315d0 | 3519 | if (*slot) |
d9dd21a8 | 3520 | mem = *slot; |
4c0315d0 | 3521 | else if (op == INSERT) |
3522 | { | |
3523 | mem = XNEW (struct tm_memop); | |
3524 | *slot = mem; | |
3525 | mem->value_id = tm_memopt_value_id++; | |
3526 | mem->addr = tmpmem.addr; | |
3527 | } | |
3528 | else | |
3529 | gcc_unreachable (); | |
3530 | return mem->value_id; | |
3531 | } | |
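
/* Editor's illustrative sketch, not part of this file: the value-numbering
   discipline of tm_memopt_value_number, with textual addresses and std::map
   standing in for trees and hash_table.  An unseen key is handed a fresh id
   (the INSERT case); a repeated key gets back the id it was given before.
   All names below are invented for the example.  */

#include <map>
#include <string>
#include <cstdio>

static std::map<std::string, unsigned> value_numbers;
static unsigned next_value_id;

static unsigned
value_number (const std::string &addr)
{
  std::map<std::string, unsigned>::iterator it = value_numbers.find (addr);
  if (it != value_numbers.end ())
    return it->second;          /* Same address, same id.  */
  unsigned id = next_value_id++;
  value_numbers[addr] = id;
  return id;
}

int
main ()
{
  std::printf ("&x -> %u\n", value_number ("&x"));   /* 0 */
  std::printf ("&y -> %u\n", value_number ("&y"));   /* 1 */
  std::printf ("&x -> %u\n", value_number ("&x"));   /* 0 again */
  return 0;
}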
3532 | ||
3533 | /* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */ | |
3534 | ||
3535 | static void | |
3536 | tm_memopt_accumulate_memops (basic_block bb) | |
3537 | { | |
3538 | gimple_stmt_iterator gsi; | |
3539 | ||
3540 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3541 | { | |
42acab1c | 3542 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 3543 | bitmap bits; |
3544 | unsigned int loc; | |
3545 | ||
3546 | if (is_tm_store (stmt)) | |
3547 | bits = STORE_LOCAL (bb); | |
3548 | else if (is_tm_load (stmt)) | |
3549 | bits = READ_LOCAL (bb); | |
3550 | else | |
3551 | continue; | |
3552 | ||
3553 | loc = tm_memopt_value_number (stmt, INSERT); | |
3554 | bitmap_set_bit (bits, loc); | |
3555 | if (dump_file) | |
3556 | { | |
3557 | fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=", | |
3558 | is_tm_load (stmt) ? "LOAD" : "STORE", loc, | |
3559 | gimple_bb (stmt)->index); | |
1ffa4346 | 3560 | print_generic_expr (dump_file, gimple_call_arg (stmt, 0)); |
4c0315d0 | 3561 | fprintf (dump_file, "\n"); |
3562 | } | |
3563 | } | |
3564 | } | |
3565 | ||
3566 | /* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */ | |
3567 | ||
3568 | static void | |
3569 | dump_tm_memopt_set (const char *set_name, bitmap bits) | |
3570 | { | |
3571 | unsigned i; | |
3572 | bitmap_iterator bi; | |
3573 | const char *comma = ""; | |
3574 | ||
3575 | fprintf (dump_file, "TM memopt: %s: [", set_name); | |
3576 | EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi) | |
3577 | { | |
c1f445d2 | 3578 | hash_table<tm_memop_hasher>::iterator hi; |
d9dd21a8 | 3579 | struct tm_memop *mem = NULL; |
4c0315d0 | 3580 | |
3581 | /* Yeah, yeah, yeah. Whatever. This is just for debugging. */ | |
c1f445d2 | 3582 | FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi) |
4c0315d0 | 3583 | if (mem->value_id == i) |
3584 | break; | |
3585 | gcc_assert (mem->value_id == i); | |
3586 | fprintf (dump_file, "%s", comma); | |
3587 | comma = ", "; | |
1ffa4346 | 3588 | print_generic_expr (dump_file, mem->addr); |
4c0315d0 | 3589 | } |
3590 | fprintf (dump_file, "]\n"); | |
3591 | } | |
3592 | ||
3593 | /* Prettily dump all of the memopt sets in BLOCKS. */ | |
3594 | ||
3595 | static void | |
f1f41a6c | 3596 | dump_tm_memopt_sets (vec<basic_block> blocks) |
4c0315d0 | 3597 | { |
3598 | size_t i; | |
3599 | basic_block bb; | |
3600 | ||
f1f41a6c | 3601 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3602 | { |
3603 | fprintf (dump_file, "------------BB %d---------\n", bb->index); | |
3604 | dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb)); | |
3605 | dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb)); | |
3606 | dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb)); | |
3607 | dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb)); | |
3608 | dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb)); | |
3609 | dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb)); | |
3610 | } | |
3611 | } | |
3612 | ||
3613 | /* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */ | |
3614 | ||
3615 | static void | |
3616 | tm_memopt_compute_avin (basic_block bb) | |
3617 | { | |
3618 | edge e; | |
3619 | unsigned ix; | |
3620 | ||
3621 | /* Seed with the AVOUT of any predecessor. */ | |
3622 | for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++) | |
3623 | { | |
3624 | e = EDGE_PRED (bb, ix); | |
3625 | /* Make sure we have already visited this BB, and it is thus | |
3626 | initialized. | |
3627 | ||
3628 | If e->src->aux is NULL, this predecessor is actually on an | |
3629 | enclosing transaction. We only care about the current | |
3630 | transaction, so ignore it. */ | |
3631 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3632 | { | |
3633 | bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3634 | bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3635 | break; | |
3636 | } | |
3637 | } | |
3638 | ||
3639 | for (; ix < EDGE_COUNT (bb->preds); ix++) | |
3640 | { | |
3641 | e = EDGE_PRED (bb, ix); | |
3642 | if (e->src->aux && BB_VISITED_P (e->src)) | |
3643 | { | |
3644 | bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src)); | |
3645 | bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src)); | |
3646 | } | |
3647 | } | |
3648 | ||
3649 | BB_VISITED_P (bb) = true; | |
3650 | } | |
3651 | ||
3652 | /* Compute the STORE_ANTIC_IN for the basic block BB. */ | |
3653 | ||
3654 | static void | |
3655 | tm_memopt_compute_antin (basic_block bb) | |
3656 | { | |
3657 | edge e; | |
3658 | unsigned ix; | |
3659 | ||
3660 | /* Seed with the ANTIC_OUT of any successor. */ | |
3661 | for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++) | |
3662 | { | |
3663 | e = EDGE_SUCC (bb, ix); | |
3664 | /* Make sure we have already visited this BB, and it is thus | |
3665 | initialized. */ | |
3666 | if (BB_VISITED_P (e->dest)) | |
3667 | { | |
3668 | bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3669 | break; | |
3670 | } | |
3671 | } | |
3672 | ||
3673 | for (; ix < EDGE_COUNT (bb->succs); ix++) | |
3674 | { | |
3675 | e = EDGE_SUCC (bb, ix); | |
3676 | if (BB_VISITED_P (e->dest)) | |
3677 | bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest)); | |
3678 | } | |
3679 | ||
3680 | BB_VISITED_P (bb) = true; | |
3681 | } | |
3682 | ||
3683 | /* Compute the AVAIL sets for every basic block in BLOCKS. | |
3684 | ||
3685 | We compute {STORE,READ}_AVAIL_{OUT,IN} as follows: | |
3686 | ||
3687 | AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]) | |
3688 | AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]) | |
3689 | ||
3690 | This is basically what we do in lcm's compute_available(), but here | |
3691 | we calculate two sets of sets (one for STOREs and one for READs), | |
3692 | and we work on a region instead of the entire CFG. | |
3693 | ||
3694 | REGION is the TM region. | |
3695 | BLOCKS are the basic blocks in the region. */ | |
3696 | ||
3697 | static void | |
3698 | tm_memopt_compute_available (struct tm_region *region, | |
f1f41a6c | 3699 | vec<basic_block> blocks) |
4c0315d0 | 3700 | { |
3701 | edge e; | |
3702 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3703 | unsigned int qlen, i; | |
3704 | edge_iterator ei; | |
3705 | bool changed; | |
3706 | ||
3707 | /* Allocate a worklist array/queue. Entries are only added to the | |
3708 | list if they were not already on the list. So the size is | |
3709 | bounded by the number of basic blocks in the region. */ | |
c7d7137a | 3710 | gcc_assert (!blocks.is_empty ()); |
f1f41a6c | 3711 | qlen = blocks.length () - 1; |
c7d7137a | 3712 | qin = qout = worklist = XNEWVEC (basic_block, qlen); |
4c0315d0 | 3713 | |
3714 | /* Put every block in the region on the worklist. */ | |
f1f41a6c | 3715 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3716 | { |
3717 | /* Seed AVAIL_OUT with the LOCAL set. */ | |
3718 | bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb)); | |
3719 | bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb)); | |
3720 | ||
3721 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3722 | /* No need to insert the entry block, since it has an AVIN of | |
3723 | null, and an AVOUT that has already been seeded in. */ | |
3724 | if (bb != region->entry_block) | |
3725 | *qin++ = bb; | |
3726 | } | |
3727 | ||
3728 | /* The entry block has been initialized with the local sets. */ | |
3729 | BB_VISITED_P (region->entry_block) = true; | |
3730 | ||
3731 | qin = worklist; | |
3732 | qend = &worklist[qlen]; | |
3733 | ||
3734 | /* Iterate until the worklist is empty. */ | |
3735 | while (qlen) | |
3736 | { | |
3737 | /* Take the first entry off the worklist. */ | |
3738 | bb = *qout++; | |
3739 | qlen--; | |
3740 | ||
3741 | if (qout >= qend) | |
3742 | qout = worklist; | |
3743 | ||
3744 | /* This block can be added to the worklist again if necessary. */ | |
3745 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3746 | tm_memopt_compute_avin (bb); | |
3747 | ||
3748 | /* Note: We do not add the LOCAL sets here because we already | |
3749 | seeded the AVAIL_OUT sets with them. */ | |
3750 | changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb)); | |
3751 | changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb)); | |
3752 | if (changed | |
3753 | && (region->exit_blocks == NULL | |
3754 | || !bitmap_bit_p (region->exit_blocks, bb->index))) | |
3755 | /* If the out state of this block changed, then we need to add | |
3756 | its successors to the worklist if they are not already in. */ | |
3757 | FOR_EACH_EDGE (e, ei, bb->succs) | |
34154e27 | 3758 | if (!AVAIL_IN_WORKLIST_P (e->dest) |
3759 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
4c0315d0 | 3760 | { |
3761 | *qin++ = e->dest; | |
3762 | AVAIL_IN_WORKLIST_P (e->dest) = true; | |
3763 | qlen++; | |
3764 | ||
3765 | if (qin >= qend) | |
3766 | qin = worklist; | |
3767 | } | |
3768 | } | |
3769 | ||
3770 | free (worklist); | |
3771 | ||
3772 | if (dump_file) | |
3773 | dump_tm_memopt_sets (blocks); | |
3774 | } | |
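
/* Editor's illustrative sketch, not part of this file: the AVAIL equations
   above, solved by naive iteration to a fixed point on a hard-coded diamond
   CFG (0 -> {1,2}, 1 -> 3, 2 -> 3).  Sets are plain bitmasks over value ids
   and the LOCAL sets are invented; the real pass uses bitmaps and a
   worklist, but the result is the same.  */

#include <cstdio>
#include <vector>

int
main ()
{
  const std::vector<std::vector<int> > preds = { {}, {0}, {0}, {1, 2} };
  /* Stores performed locally in each block, as bitmasks of value ids.  */
  const unsigned local[4] = { 0x1, 0x2, 0x0, 0x0 };

  unsigned avail_in[4] = { 0, 0, 0, 0 };
  unsigned avail_out[4] = { 0, 0, 0, 0 };

  bool changed = true;
  while (changed)
    {
      changed = false;
      for (int bb = 0; bb < 4; bb++)
        {
          /* AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors]).  */
          unsigned in = preds[bb].empty () ? 0 : ~0u;
          for (size_t i = 0; i < preds[bb].size (); i++)
            in &= avail_out[preds[bb][i]];
          /* AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb]).  */
          unsigned out = in | local[bb];
          if (in != avail_in[bb] || out != avail_out[bb])
            changed = true;
          avail_in[bb] = in;
          avail_out[bb] = out;
        }
    }

  for (int bb = 0; bb < 4; bb++)
    std::printf ("bb %d: avail_in=0x%x avail_out=0x%x\n",
                 bb, avail_in[bb], avail_out[bb]);
  /* Only value 0x1 reaches block 3: it is stored in block 0, which dominates
     both paths, while 0x2 is stored only along the path through block 1.  */
  return 0;
}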
3775 | ||
3776 | /* Compute ANTIC sets for every basic block in BLOCKS. | |
3777 | ||
3778 | We compute STORE_ANTIC_OUT as follows: | |
3779 | ||
3780 | STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb]) | |
3781 | STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors]) | |
3782 | ||
3783 | REGION is the TM region. | |
3784 | BLOCKS are the basic blocks in the region. */ | |
3785 | ||
3786 | static void | |
3787 | tm_memopt_compute_antic (struct tm_region *region, | |
f1f41a6c | 3788 | vec<basic_block> blocks) |
4c0315d0 | 3789 | { |
3790 | edge e; | |
3791 | basic_block *worklist, *qin, *qout, *qend, bb; | |
3792 | unsigned int qlen; | |
3793 | int i; | |
3794 | edge_iterator ei; | |
3795 | ||
3796 | /* Allocate a worklist array/queue. Entries are only added to the | |
3797 | list if they were not already on the list. So the size is | |
3798 | bounded by the number of basic blocks in the region. */ | |
f1f41a6c | 3799 | qin = qout = worklist = XNEWVEC (basic_block, blocks.length ()); |
4c0315d0 | 3800 | |
f1f41a6c | 3801 | for (qlen = 0, i = blocks.length () - 1; i >= 0; --i) |
4c0315d0 | 3802 | { |
f1f41a6c | 3803 | bb = blocks[i]; |
4c0315d0 | 3804 | |
3805 | /* Seed ANTIC_OUT with the LOCAL set. */ | |
3806 | bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb)); | |
3807 | ||
3808 | /* Put every block in the region on the worklist. */ | |
3809 | AVAIL_IN_WORKLIST_P (bb) = true; | |
3810 | /* No need to insert exit blocks, since their ANTIC_IN is NULL, | |
3811 | and their ANTIC_OUT has already been seeded in. */ | |
3812 | if (region->exit_blocks | |
3813 | && !bitmap_bit_p (region->exit_blocks, bb->index)) | |
3814 | { | |
3815 | qlen++; | |
3816 | *qin++ = bb; | |
3817 | } | |
3818 | } | |
3819 | ||
3820 | /* The exit blocks have been initialized with the local sets. */ | |
3821 | if (region->exit_blocks) | |
3822 | { | |
3823 | unsigned int i; | |
3824 | bitmap_iterator bi; | |
3825 | EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi) | |
f5a6b05f | 3826 | BB_VISITED_P (BASIC_BLOCK_FOR_FN (cfun, i)) = true; |
4c0315d0 | 3827 | } |
3828 | ||
3829 | qin = worklist; | |
3830 | qend = &worklist[qlen]; | |
3831 | ||
3832 | /* Iterate until the worklist is empty. */ | |
3833 | while (qlen) | |
3834 | { | |
3835 | /* Take the first entry off the worklist. */ | |
3836 | bb = *qout++; | |
3837 | qlen--; | |
3838 | ||
3839 | if (qout >= qend) | |
3840 | qout = worklist; | |
3841 | ||
3842 | /* This block can be added to the worklist again if necessary. */ | |
3843 | AVAIL_IN_WORKLIST_P (bb) = false; | |
3844 | tm_memopt_compute_antin (bb); | |
3845 | ||
3846 | /* Note: We do not add the LOCAL sets here because we already | |
3847 | seeded the ANTIC_OUT sets with them. */ | |
3848 | if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb)) | |
3849 | && bb != region->entry_block) | |
3850 | /* If the out state of this block changed, then we need to add | |
3851 | its predecessors to the worklist if they are not already in. */ | |
3852 | FOR_EACH_EDGE (e, ei, bb->preds) | |
3853 | if (!AVAIL_IN_WORKLIST_P (e->src)) | |
3854 | { | |
3855 | *qin++ = e->src; | |
3856 | AVAIL_IN_WORKLIST_P (e->src) = true; | |
3857 | qlen++; | |
3858 | ||
3859 | if (qin >= qend) | |
3860 | qin = worklist; | |
3861 | } | |
3862 | } | |
3863 | ||
3864 | free (worklist); | |
3865 | ||
3866 | if (dump_file) | |
3867 | dump_tm_memopt_sets (blocks); | |
3868 | } | |
3869 | ||
3870 | /* Offsets of load variants from TM_LOAD. For example, | |
3871 | BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*. | |
3872 | See gtm-builtins.def. */ | |
3873 | #define TRANSFORM_RAR 1 | |
3874 | #define TRANSFORM_RAW 2 | |
3875 | #define TRANSFORM_RFW 3 | |
3876 | /* Offsets of store variants from TM_STORE. */ | |
3877 | #define TRANSFORM_WAR 1 | |
3878 | #define TRANSFORM_WAW 2 | |
3879 | ||
3880 | /* Inform about a load/store optimization. */ | |
3881 | ||
3882 | static void | |
42acab1c | 3883 | dump_tm_memopt_transform (gimple *stmt) |
4c0315d0 | 3884 | { |
3885 | if (dump_file) | |
3886 | { | |
3887 | fprintf (dump_file, "TM memopt: transforming: "); | |
1ffa4346 | 3888 | print_gimple_stmt (dump_file, stmt, 0); |
4c0315d0 | 3889 | fprintf (dump_file, "\n"); |
3890 | } | |
3891 | } | |
3892 | ||
3893 | /* Perform a read/write optimization. Replaces the TM builtin in STMT | |
3894 | by a builtin that is OFFSET entries down in the builtins table in | |
3895 | gtm-builtins.def. */ | |
3896 | ||
3897 | static void | |
3898 | tm_memopt_transform_stmt (unsigned int offset, | |
1a91d914 | 3899 | gcall *stmt, |
4c0315d0 | 3900 | gimple_stmt_iterator *gsi) |
3901 | { | |
3902 | tree fn = gimple_call_fn (stmt); | |
3903 | gcc_assert (TREE_CODE (fn) == ADDR_EXPR); | |
3904 | TREE_OPERAND (fn, 0) | |
3905 | = builtin_decl_explicit ((enum built_in_function) | |
3906 | (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0)) | |
3907 | + offset)); | |
3908 | gimple_call_set_fn (stmt, fn); | |
3909 | gsi_replace (gsi, stmt, true); | |
3910 | dump_tm_memopt_transform (stmt); | |
3911 | } | |
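
/* Editor's illustrative sketch, not part of this file: selecting a variant by
   adding a fixed offset to an enumerator, which is the trick
   tm_memopt_transform_stmt plays with DECL_FUNCTION_CODE and the TRANSFORM_*
   offsets.  The toy enum, its layout and the names are invented; the idea
   only works because each variant sits at a known distance from its base
   entry, as the real builtins do in gtm-builtins.def.  */

#include <cstdio>

enum toy_builtin
{
  TOY_TM_LOAD,       /* Base entry.  */
  TOY_TM_LOAD_RAR,   /* Base + 1: read after read.  */
  TOY_TM_LOAD_RAW,   /* Base + 2: read after write.  */
  TOY_TM_LOAD_RFW,   /* Base + 3: read for write.  */
  TOY_TM_LAST
};

static const char *const toy_builtin_name[TOY_TM_LAST]
  = { "tm_load", "tm_load_rar", "tm_load_raw", "tm_load_rfw" };

static toy_builtin
transform (toy_builtin base, int offset)
{
  return (toy_builtin) (base + offset);
}

int
main ()
{
  /* A load whose address was already stored in this transaction.  */
  toy_builtin raw = transform (TOY_TM_LOAD, 2 /* read-after-write */);
  std::printf ("load after a dominating store becomes %s\n",
               toy_builtin_name[raw]);
  return 0;
}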
3912 | ||
3913 | /* Perform the actual TM memory optimization transformations in the | |
3914 | basic blocks in BLOCKS. */ | |
3915 | ||
3916 | static void | |
f1f41a6c | 3917 | tm_memopt_transform_blocks (vec<basic_block> blocks) |
4c0315d0 | 3918 | { |
3919 | size_t i; | |
3920 | basic_block bb; | |
3921 | gimple_stmt_iterator gsi; | |
3922 | ||
f1f41a6c | 3923 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3924 | { |
3925 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
3926 | { | |
42acab1c | 3927 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 3928 | bitmap read_avail = READ_AVAIL_IN (bb); |
3929 | bitmap store_avail = STORE_AVAIL_IN (bb); | |
3930 | bitmap store_antic = STORE_ANTIC_OUT (bb); | |
3931 | unsigned int loc; | |
3932 | ||
3933 | if (is_tm_simple_load (stmt)) | |
3934 | { | |
1a91d914 | 3935 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3936 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3937 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3938 | tm_memopt_transform_stmt (TRANSFORM_RAW, call_stmt, &gsi); |
4c0315d0 | 3939 | else if (store_antic && bitmap_bit_p (store_antic, loc)) |
3940 | { | |
1a91d914 | 3941 | tm_memopt_transform_stmt (TRANSFORM_RFW, call_stmt, &gsi); |
4c0315d0 | 3942 | bitmap_set_bit (store_avail, loc); |
3943 | } | |
3944 | else if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3945 | tm_memopt_transform_stmt (TRANSFORM_RAR, call_stmt, &gsi); |
4c0315d0 | 3946 | else |
3947 | bitmap_set_bit (read_avail, loc); | |
3948 | } | |
3949 | else if (is_tm_simple_store (stmt)) | |
3950 | { | |
1a91d914 | 3951 | gcall *call_stmt = as_a <gcall *> (stmt); |
4c0315d0 | 3952 | loc = tm_memopt_value_number (stmt, NO_INSERT); |
3953 | if (store_avail && bitmap_bit_p (store_avail, loc)) | |
1a91d914 | 3954 | tm_memopt_transform_stmt (TRANSFORM_WAW, call_stmt, &gsi); |
4c0315d0 | 3955 | else |
3956 | { | |
3957 | if (read_avail && bitmap_bit_p (read_avail, loc)) | |
1a91d914 | 3958 | tm_memopt_transform_stmt (TRANSFORM_WAR, call_stmt, &gsi); |
4c0315d0 | 3959 | bitmap_set_bit (store_avail, loc); |
3960 | } | |
3961 | } | |
3962 | } | |
3963 | } | |
3964 | } | |
3965 | ||
3966 | /* Return a new set of bitmaps for a BB. */ | |
3967 | ||
3968 | static struct tm_memopt_bitmaps * | |
3969 | tm_memopt_init_sets (void) | |
3970 | { | |
3971 | struct tm_memopt_bitmaps *b | |
3972 | = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps); | |
3973 | b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3974 | b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3975 | b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3976 | b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3978 | b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack); | |
3979 | b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack); | |
3980 | b->read_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3981 | b->store_local = BITMAP_ALLOC (&tm_memopt_obstack); | |
3982 | return b; | |
3983 | } | |
3984 | ||
3985 | /* Free sets computed for each BB. */ | |
3986 | ||
3987 | static void | |
f1f41a6c | 3988 | tm_memopt_free_sets (vec<basic_block> blocks) |
4c0315d0 | 3989 | { |
3990 | size_t i; | |
3991 | basic_block bb; | |
3992 | ||
f1f41a6c | 3993 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 3994 | bb->aux = NULL; |
3995 | } | |
3996 | ||
3997 | /* Clear the visited bit for every basic block in BLOCKS. */ | |
3998 | ||
3999 | static void | |
f1f41a6c | 4000 | tm_memopt_clear_visited (vec<basic_block> blocks) |
4c0315d0 | 4001 | { |
4002 | size_t i; | |
4003 | basic_block bb; | |
4004 | ||
f1f41a6c | 4005 | for (i = 0; blocks.iterate (i, &bb); ++i) |
4c0315d0 | 4006 | BB_VISITED_P (bb) = false; |
4007 | } | |
4008 | ||
4009 | /* Replace TM load/stores with hints for the runtime. We handle | |
4010 | things like read-after-write, write-after-read, read-after-read, | |
4011 | read-for-write, etc. */ | |
4012 | ||
4013 | static unsigned int | |
4014 | execute_tm_memopt (void) | |
4015 | { | |
4016 | struct tm_region *region; | |
f1f41a6c | 4017 | vec<basic_block> bbs; |
4c0315d0 | 4018 | |
4019 | tm_memopt_value_id = 0; | |
c1f445d2 | 4020 | tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10); |
4c0315d0 | 4021 | |
4022 | for (region = all_tm_regions; region; region = region->next) | |
4023 | { | |
4024 | /* All the TM stores/loads in the current region. */ | |
4025 | size_t i; | |
4026 | basic_block bb; | |
4027 | ||
4028 | bitmap_obstack_initialize (&tm_memopt_obstack); | |
4029 | ||
4030 | /* Save all BBs for the current region. */ | |
4031 | bbs = get_tm_region_blocks (region->entry_block, | |
4032 | region->exit_blocks, | |
4033 | region->irr_blocks, | |
4034 | NULL, | |
4035 | false); | |
4036 | ||
4037 | /* Collect all the memory operations. */ | |
f1f41a6c | 4038 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 4039 | { |
4040 | bb->aux = tm_memopt_init_sets (); | |
4041 | tm_memopt_accumulate_memops (bb); | |
4042 | } | |
4043 | ||
4044 | /* Solve data flow equations and transform each block accordingly. */ | |
4045 | tm_memopt_clear_visited (bbs); | |
4046 | tm_memopt_compute_available (region, bbs); | |
4047 | tm_memopt_clear_visited (bbs); | |
4048 | tm_memopt_compute_antic (region, bbs); | |
4049 | tm_memopt_transform_blocks (bbs); | |
4050 | ||
4051 | tm_memopt_free_sets (bbs); | |
f1f41a6c | 4052 | bbs.release (); |
4c0315d0 | 4053 | bitmap_obstack_release (&tm_memopt_obstack); |
c1f445d2 | 4054 | tm_memopt_value_numbers->empty (); |
4c0315d0 | 4055 | } |
4056 | ||
c1f445d2 | 4057 | delete tm_memopt_value_numbers; |
4058 | tm_memopt_value_numbers = NULL; | |
4c0315d0 | 4059 | return 0; |
4060 | } | |
4061 | ||
7620bc82 | 4062 | namespace { |
4063 | ||
4064 | const pass_data pass_data_tm_memopt = | |
cbe8bda8 | 4065 | { |
4066 | GIMPLE_PASS, /* type */ | |
4067 | "tmmemopt", /* name */ | |
4068 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 4069 | TV_TRANS_MEM, /* tv_id */ |
4070 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
4071 | 0, /* properties_provided */ | |
4072 | 0, /* properties_destroyed */ | |
4073 | 0, /* todo_flags_start */ | |
4074 | 0, /* todo_flags_finish */ | |
4c0315d0 | 4075 | }; |
4076 | ||
7620bc82 | 4077 | class pass_tm_memopt : public gimple_opt_pass |
cbe8bda8 | 4078 | { |
4079 | public: | |
9af5ce0c | 4080 | pass_tm_memopt (gcc::context *ctxt) |
4081 | : gimple_opt_pass (pass_data_tm_memopt, ctxt) | |
cbe8bda8 | 4082 | {} |
4083 | ||
4084 | /* opt_pass methods: */ | |
31315c24 | 4085 | virtual bool gate (function *) { return flag_tm && optimize > 0; } |
65b0537f | 4086 | virtual unsigned int execute (function *) { return execute_tm_memopt (); } |
cbe8bda8 | 4087 | |
4088 | }; // class pass_tm_memopt | |
4089 | ||
7620bc82 | 4090 | } // anon namespace |
4091 | ||
cbe8bda8 | 4092 | gimple_opt_pass * |
4093 | make_pass_tm_memopt (gcc::context *ctxt) | |
4094 | { | |
4095 | return new pass_tm_memopt (ctxt); | |
4096 | } | |
4097 | ||
4c0315d0 | 4098 | \f |
4099 | /* Interprocedural analysis for the creation of transactional clones. | |
4100 | The aim of this pass is to find which functions are referenced in | |
4101 | a non-irrevocable transaction context, and for those over which | |
4102 | we have control (or user directive), create a version of the | |
4103 | function which uses only the transactional interface to reference | |
4104 | protected memories. This analysis proceeds in several steps: | |
4105 | ||
4106 | (1) Collect the set of all possible transactional clones: | |
4107 | ||
4108 | (a) For all local public functions marked tm_callable, push | |
4109 | them onto the tm_callee queue. | |
4110 | ||
4111 | (b) For all local functions, scan for calls in transaction blocks. | |
4112 | Push the caller and callee onto the tm_caller and tm_callee | |
4113 | queues. Count the number of callers for each callee. | |
4114 | ||
4115 | (c) For each local function on the callee list, assume we will | |
4116 | create a transactional clone. Push *all* calls onto the | |
4117 | callee queues; count the number of clone callers separately | |
4118 | from the number of original callers. | |
4119 | ||
4120 | (2) Propagate irrevocable status up the dominator tree: | |
4121 | ||
4122 | (a) Any external function on the callee list that is not marked | |
4123 | tm_callable is irrevocable. Push all callers of such onto | |
4124 | a worklist. | |
4125 | ||
4126 | (b) For each function on the worklist, mark each block that | |
4127 | contains an irrevocable call. Use the AND operator to | |
4128 | propagate that mark up the dominator tree. | |
4129 | ||
4130 | (c) If we reach the entry block for a possible transactional | |
4131 | clone, then the transactional clone is irrevocable, and | |
4132 | we should not create the clone after all. Push all | |
4133 | callers onto the worklist. | |
4134 | ||
4135 | (d) Place tm_irrevocable calls at the beginning of the relevant | |
4136 | blocks. The special case here is the entry block for the entire | |
4137 | transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for | |
4138 | the library to begin the region in serial mode. Decrement | |
4139 | the call count for all callees in the irrevocable region. | |
4140 | ||
4141 | (3) Create the transactional clones: | |
4142 | ||
4143 | Any tm_callee that still has a non-zero call count is cloned. | |
4144 | */ | |
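
/* Editor's illustrative, source-level sketch of the situation the analysis
   above handles; it is not part of this file and the names are invented.
   Built with -fgnu-tm (compile-only, e.g. "gcc -fgnu-tm -c"), `bump' is
   callable from a transaction and so gets a transactional clone, while the
   external `trace' has no known body and no TM attribute, so the relaxed
   transaction that calls it goes through the irrevocable (serial) path.  */

extern void trace (const char *);   /* Unknown body: treated as irrevocable.  */

static int counter;

__attribute__ ((transaction_safe)) static void
bump (int n)                        /* Receives a transactional clone.  */
{
  counter += n;
}

void
update (int n)
{
  __transaction_atomic
    {
      bump (n);                     /* Redirected to the clone of `bump'.  */
    }

  __transaction_relaxed
    {
      trace ("updated");            /* Forces serial, irrevocable mode.  */
      bump (1);
    }
}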
4145 | ||
4146 | /* This structure is stored in the AUX field of each cgraph_node. */ | |
4147 | struct tm_ipa_cg_data | |
4148 | { | |
4149 | /* The clone of the function that got created. */ | |
4150 | struct cgraph_node *clone; | |
4151 | ||
4152 | /* The tm regions in the normal function. */ | |
4153 | struct tm_region *all_tm_regions; | |
4154 | ||
4155 | /* The blocks of the normal/clone functions that contain irrevocable | |
4156 | calls, or blocks that are post-dominated by irrevocable calls. */ | |
4157 | bitmap irrevocable_blocks_normal; | |
4158 | bitmap irrevocable_blocks_clone; | |
4159 | ||
4160 | /* The blocks of the normal function that are involved in transactions. */ | |
4161 | bitmap transaction_blocks_normal; | |
4162 | ||
4163 | /* The number of callers to the transactional clone of this function | |
4164 | from normal and transactional clones respectively. */ | |
4165 | unsigned tm_callers_normal; | |
4166 | unsigned tm_callers_clone; | |
4167 | ||
4168 | /* True if all calls to this function's transactional clone | |
4169 | are irrevocable. Also automatically true if the function | |
4170 | has no transactional clone. */ | |
4171 | bool is_irrevocable; | |
4172 | ||
4173 | /* Flags indicating the presence of this function in various queues. */ | |
4174 | bool in_callee_queue; | |
4175 | bool in_worklist; | |
4176 | ||
4177 | /* Flags indicating the kind of scan desired while in the worklist. */ | |
4178 | bool want_irr_scan_normal; | |
4179 | }; | |
4180 | ||
415d1b9a | 4181 | typedef vec<cgraph_node *> cgraph_node_queue; |
4c0315d0 | 4182 | |
4183 | /* Return the ipa data associated with NODE, allocating zeroed memory | |
3e426b86 | 4184 | if necessary. TRAVERSE_ALIASES is true if we must traverse aliases |
4185 | and set *NODE accordingly. */ | |
4c0315d0 | 4186 | |
4187 | static struct tm_ipa_cg_data * | |
3e426b86 | 4188 | get_cg_data (struct cgraph_node **node, bool traverse_aliases) |
4c0315d0 | 4189 | { |
3e426b86 | 4190 | struct tm_ipa_cg_data *d; |
4191 | ||
02774f2d | 4192 | if (traverse_aliases && (*node)->alias) |
415d1b9a | 4193 | *node = (*node)->get_alias_target (); |
3e426b86 | 4194 | |
02774f2d | 4195 | d = (struct tm_ipa_cg_data *) (*node)->aux; |
4c0315d0 | 4196 | |
4197 | if (d == NULL) | |
4198 | { | |
4199 | d = (struct tm_ipa_cg_data *) | |
4200 | obstack_alloc (&tm_obstack.obstack, sizeof (*d)); | |
02774f2d | 4201 | (*node)->aux = (void *) d; |
4c0315d0 | 4202 | memset (d, 0, sizeof (*d)); |
4203 | } | |
4204 | ||
4205 | return d; | |
4206 | } | |
4207 | ||
4208 | /* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that | |
4209 | it is already present. */ | |
4210 | ||
4211 | static void | |
4212 | maybe_push_queue (struct cgraph_node *node, | |
4213 | cgraph_node_queue *queue_p, bool *in_queue_p) | |
4214 | { | |
4215 | if (!*in_queue_p) | |
4216 | { | |
4217 | *in_queue_p = true; | |
f1f41a6c | 4218 | queue_p->safe_push (node); |
4c0315d0 | 4219 | } |
4220 | } | |
4221 | ||
4222 | /* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone. | |
4223 | Queue all callees within block BB. */ | |
4224 | ||
4225 | static void | |
4226 | ipa_tm_scan_calls_block (cgraph_node_queue *callees_p, | |
4227 | basic_block bb, bool for_clone) | |
4228 | { | |
4229 | gimple_stmt_iterator gsi; | |
4230 | ||
4231 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4232 | { | |
42acab1c | 4233 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4234 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) |
4235 | { | |
4236 | tree fndecl = gimple_call_fndecl (stmt); | |
4237 | if (fndecl) | |
4238 | { | |
4239 | struct tm_ipa_cg_data *d; | |
4240 | unsigned *pcallers; | |
4241 | struct cgraph_node *node; | |
4242 | ||
4243 | if (is_tm_ending_fndecl (fndecl)) | |
4244 | continue; | |
4245 | if (find_tm_replacement_function (fndecl)) | |
4246 | continue; | |
4247 | ||
415d1b9a | 4248 | node = cgraph_node::get (fndecl); |
4c0315d0 | 4249 | gcc_assert (node != NULL); |
3e426b86 | 4250 | d = get_cg_data (&node, true); |
4c0315d0 | 4251 | |
4252 | pcallers = (for_clone ? &d->tm_callers_clone | |
4253 | : &d->tm_callers_normal); | |
4254 | *pcallers += 1; | |
4255 | ||
4256 | maybe_push_queue (node, callees_p, &d->in_callee_queue); | |
4257 | } | |
4258 | } | |
4259 | } | |
4260 | } | |
4261 | ||
4262 | /* Scan all calls in NODE that are within a transaction region, | |
4263 | and push the resulting nodes into the callee queue. */ | |
4264 | ||
4265 | static void | |
4266 | ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d, | |
4267 | cgraph_node_queue *callees_p) | |
4268 | { | |
4c0315d0 | 4269 | d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack); |
4270 | d->all_tm_regions = all_tm_regions; | |
4271 | ||
a08574d7 | 4272 | for (tm_region *r = all_tm_regions; r; r = r->next) |
4c0315d0 | 4273 | { |
f1f41a6c | 4274 | vec<basic_block> bbs; |
4c0315d0 | 4275 | basic_block bb; |
4276 | unsigned i; | |
4277 | ||
4278 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL, | |
a08574d7 | 4279 | d->transaction_blocks_normal, false, false); |
0cd02a19 | 4280 | |
f1f41a6c | 4281 | FOR_EACH_VEC_ELT (bbs, i, bb) |
4c0315d0 | 4282 | ipa_tm_scan_calls_block (callees_p, bb, false); |
4283 | ||
f1f41a6c | 4284 | bbs.release (); |
4c0315d0 | 4285 | } |
4286 | } | |
4287 | ||
4288 | /* Scan all calls in NODE as if this is the transactional clone, | |
4289 | and push the destinations into the callee queue. */ | |
4290 | ||
4291 | static void | |
4292 | ipa_tm_scan_calls_clone (struct cgraph_node *node, | |
4293 | cgraph_node_queue *callees_p) | |
4294 | { | |
02774f2d | 4295 | struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
4c0315d0 | 4296 | basic_block bb; |
4297 | ||
4298 | FOR_EACH_BB_FN (bb, fn) | |
4299 | ipa_tm_scan_calls_block (callees_p, bb, true); | |
4300 | } | |
4301 | ||
4302 | /* The function NODE has been detected to be irrevocable. Push all | |
4303 | of its callers onto WORKLIST for the purpose of re-scanning them. */ | |
4304 | ||
4305 | static void | |
4306 | ipa_tm_note_irrevocable (struct cgraph_node *node, | |
4307 | cgraph_node_queue *worklist_p) | |
4308 | { | |
3e426b86 | 4309 | struct tm_ipa_cg_data *d = get_cg_data (&node, true); |
4c0315d0 | 4310 | struct cgraph_edge *e; |
4311 | ||
4312 | d->is_irrevocable = true; | |
4313 | ||
4314 | for (e = node->callers; e ; e = e->next_caller) | |
4315 | { | |
4316 | basic_block bb; | |
3e426b86 | 4317 | struct cgraph_node *caller; |
4c0315d0 | 4318 | |
4319 | /* Don't examine recursive calls. */ | |
4320 | if (e->caller == node) | |
4321 | continue; | |
4322 | /* Even if we think we can go irrevocable, believe the user | |
4323 | above all. */ | |
02774f2d | 4324 | if (is_tm_safe_or_pure (e->caller->decl)) |
4c0315d0 | 4325 | continue; |
4326 | ||
3e426b86 | 4327 | caller = e->caller; |
4328 | d = get_cg_data (&caller, true); | |
4c0315d0 | 4329 | |
4330 | /* Check if the call site is within a transactional region. If so, | |
4331 | schedule the function for normal re-scan as well. */ | |
4332 | bb = gimple_bb (e->call_stmt); | |
4333 | gcc_assert (bb != NULL); | |
4334 | if (d->transaction_blocks_normal | |
4335 | && bitmap_bit_p (d->transaction_blocks_normal, bb->index)) | |
4336 | d->want_irr_scan_normal = true; | |
4337 | ||
3e426b86 | 4338 | maybe_push_queue (caller, worklist_p, &d->in_worklist); |
4c0315d0 | 4339 | } |
4340 | } | |
4341 | ||
4342 | /* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement | |
4343 | within the block is irrevocable. */ | |
4344 | ||
4345 | static bool | |
4346 | ipa_tm_scan_irr_block (basic_block bb) | |
4347 | { | |
4348 | gimple_stmt_iterator gsi; | |
4349 | tree fn; | |
4350 | ||
4351 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4352 | { | |
42acab1c | 4353 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4354 | switch (gimple_code (stmt)) |
4355 | { | |
e153bd50 | 4356 | case GIMPLE_ASSIGN: |
4357 | if (gimple_assign_single_p (stmt)) | |
4358 | { | |
4359 | tree lhs = gimple_assign_lhs (stmt); | |
4360 | tree rhs = gimple_assign_rhs1 (stmt); | |
639b72bd | 4361 | if (volatile_lvalue_p (lhs) || volatile_lvalue_p (rhs)) |
e153bd50 | 4362 | return true; |
4363 | } | |
4364 | break; | |
4365 | ||
4c0315d0 | 4366 | case GIMPLE_CALL: |
e153bd50 | 4367 | { |
4368 | tree lhs = gimple_call_lhs (stmt); | |
639b72bd | 4369 | if (lhs && volatile_lvalue_p (lhs)) |
e153bd50 | 4370 | return true; |
4c0315d0 | 4371 | |
e153bd50 | 4372 | if (is_tm_pure_call (stmt)) |
4373 | break; | |
4c0315d0 | 4374 | |
e153bd50 | 4375 | fn = gimple_call_fn (stmt); |
4c0315d0 | 4376 | |
e153bd50 | 4377 | /* Functions with the attribute are by definition irrevocable. */ |
4378 | if (is_tm_irrevocable (fn)) | |
4379 | return true; | |
4c0315d0 | 4380 | |
e153bd50 | 4381 | /* For direct function calls, go ahead and check for replacement |
4382 | functions, or transitive irrevocable functions. For indirect | |
4383 | functions, we'll ask the runtime. */ | |
4384 | if (TREE_CODE (fn) == ADDR_EXPR) | |
4385 | { | |
4386 | struct tm_ipa_cg_data *d; | |
4387 | struct cgraph_node *node; | |
4c0315d0 | 4388 | |
e153bd50 | 4389 | fn = TREE_OPERAND (fn, 0); |
4390 | if (is_tm_ending_fndecl (fn)) | |
4391 | break; | |
4392 | if (find_tm_replacement_function (fn)) | |
4393 | break; | |
40879ac6 | 4394 | |
415d1b9a | 4395 | node = cgraph_node::get (fn); |
e153bd50 | 4396 | d = get_cg_data (&node, true); |
4397 | ||
4398 | /* Return true if irrevocable, but above all, believe | |
4399 | the user. */ | |
4400 | if (d->is_irrevocable | |
4401 | && !is_tm_safe_or_pure (fn)) | |
4402 | return true; | |
4403 | } | |
4404 | break; | |
4405 | } | |
4c0315d0 | 4406 | |
4407 | case GIMPLE_ASM: | |
4408 | /* ??? The Approved Method of indicating that an inline | |
4409 | assembly statement is not relevant to the transaction | |
4410 | is to wrap it in a __tm_waiver block. This is not | |
4411 | yet implemented, so we can't check for it. */ | |
43156aa3 | 4412 | if (is_tm_safe (current_function_decl)) |
4413 | { | |
4414 | tree t = build1 (NOP_EXPR, void_type_node, size_zero_node); | |
4415 | SET_EXPR_LOCATION (t, gimple_location (stmt)); | |
85b9be9b | 4416 | error ("%K%<asm%> not allowed in %<transaction_safe%> function", |
4417 | t); | |
43156aa3 | 4418 | } |
4c0315d0 | 4419 | return true; |
4420 | ||
4421 | default: | |
4422 | break; | |
4423 | } | |
4424 | } | |
4425 | ||
4426 | return false; | |
4427 | } | |
4428 | ||
4429 | /* For each of the blocks seeded within PQUEUE, walk the CFG looking | |
4430 | for new irrevocable blocks, marking them in NEW_IRR. Don't bother | |
4431 | scanning past OLD_IRR or EXIT_BLOCKS. */ | |
4432 | ||
4433 | static bool | |
f1f41a6c | 4434 | ipa_tm_scan_irr_blocks (vec<basic_block> *pqueue, bitmap new_irr, |
4c0315d0 | 4435 | bitmap old_irr, bitmap exit_blocks) |
4436 | { | |
4437 | bool any_new_irr = false; | |
4438 | edge e; | |
4439 | edge_iterator ei; | |
4440 | bitmap visited_blocks = BITMAP_ALLOC (NULL); | |
4441 | ||
4442 | do | |
4443 | { | |
f1f41a6c | 4444 | basic_block bb = pqueue->pop (); |
4c0315d0 | 4445 | |
4446 | /* Don't re-scan blocks we know already are irrevocable. */ | |
4447 | if (old_irr && bitmap_bit_p (old_irr, bb->index)) | |
4448 | continue; | |
4449 | ||
4450 | if (ipa_tm_scan_irr_block (bb)) | |
4451 | { | |
4452 | bitmap_set_bit (new_irr, bb->index); | |
4453 | any_new_irr = true; | |
4454 | } | |
4455 | else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index)) | |
4456 | { | |
4457 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4458 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
4459 | { | |
4460 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 4461 | pqueue->safe_push (e->dest); |
4c0315d0 | 4462 | } |
4463 | } | |
4464 | } | |
f1f41a6c | 4465 | while (!pqueue->is_empty ()); |
4c0315d0 | 4466 | |
4467 | BITMAP_FREE (visited_blocks); | |
4468 | ||
4469 | return any_new_irr; | |
4470 | } | |
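/* A minimal standalone sketch (not GCC code) of the worklist walk used
   above: pop a block, scan it, and enqueue unvisited successors unless
   the block went irrevocable or is an exit block.  The toy adjacency
   list and the MAX_BLOCKS limit are made up purely for illustration.  */
#include <stdbool.h>

#define MAX_BLOCKS 64

struct toy_cfg
{
  int nsuccs[MAX_BLOCKS];
  int succ[MAX_BLOCKS][4];
};

static bool
walk_blocks (const struct toy_cfg *cfg, int entry,
	     bool (*scan) (int), const bool *exit_block)
{
  int queue[MAX_BLOCKS], top = 0;
  bool visited[MAX_BLOCKS] = { false };
  bool any_new = false;

  visited[entry] = true;
  queue[top++] = entry;
  do
    {
      int bb = queue[--top];
      if (scan (bb))
	any_new = true;		/* Block went irrevocable; don't scan past.  */
      else if (!exit_block[bb])
	for (int i = 0; i < cfg->nsuccs[bb]; i++)
	  if (!visited[cfg->succ[bb][i]])
	    {
	      visited[cfg->succ[bb][i]] = true;
	      queue[top++] = cfg->succ[bb][i];
	    }
    }
  while (top > 0);
  return any_new;
}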
4471 | ||
4472 | /* Propagate the irrevocable property both up and down the dominator tree. | |
4473 | ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS | |
4474 | are the exit blocks of the TM region; OLD_IRR is the result of a previous, | |
4475 | fully propagated scan of the dominator tree; NEW_IRR is the set of blocks | |
4476 | which gain the irrevocable property during the current scan. */ | |
4477 | ||
4478 | static void | |
4479 | ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr, | |
4480 | bitmap old_irr, bitmap exit_blocks) | |
4481 | { | |
f1f41a6c | 4482 | vec<basic_block> bbs; |
4c0315d0 | 4483 | bitmap all_region_blocks; |
4484 | ||
4485 | /* If this block is in the old set, no need to rescan. */ | |
4486 | if (old_irr && bitmap_bit_p (old_irr, entry_block->index)) | |
4487 | return; | |
4488 | ||
4489 | all_region_blocks = BITMAP_ALLOC (&tm_obstack); | |
4490 | bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL, | |
4491 | all_region_blocks, false); | |
4492 | do | |
4493 | { | |
f1f41a6c | 4494 | basic_block bb = bbs.pop (); |
4c0315d0 | 4495 | bool this_irr = bitmap_bit_p (new_irr, bb->index); |
4496 | bool all_son_irr = false; | |
4497 | edge_iterator ei; | |
4498 | edge e; | |
4499 | ||
4500 | /* Propagate up. If all of my successors are irrevocable, so am I, | |
4501 | but there must be at least one successor that is. */ | |
4502 | if (!this_irr) | |
4503 | { | |
4504 | FOR_EACH_EDGE (e, ei, bb->succs) | |
4505 | { | |
4506 | if (!bitmap_bit_p (new_irr, e->dest->index)) | |
4507 | { | |
4508 | all_son_irr = false; | |
4509 | break; | |
4510 | } | |
4511 | else | |
4512 | all_son_irr = true; | |
4513 | } | |
4514 | if (all_son_irr) | |
4515 | { | |
4516 | /* Add block to new_irr if it hasn't already been processed. */ | |
4517 | if (!old_irr || !bitmap_bit_p (old_irr, bb->index)) | |
4518 | { | |
4519 | bitmap_set_bit (new_irr, bb->index); | |
4520 | this_irr = true; | |
4521 | } | |
4522 | } | |
4523 | } | |
4524 | ||
4525 | /* Propagate down to everyone we immediately dominate. */ | |
4526 | if (this_irr) | |
4527 | { | |
4528 | basic_block son; | |
4529 | for (son = first_dom_son (CDI_DOMINATORS, bb); | |
4530 | son; | |
4531 | son = next_dom_son (CDI_DOMINATORS, son)) | |
4532 | { | |
4533 | /* Make sure block is actually in a TM region, and it | |
4534 | isn't already in old_irr. */ | |
4535 | if ((!old_irr || !bitmap_bit_p (old_irr, son->index)) | |
4536 | && bitmap_bit_p (all_region_blocks, son->index)) | |
4537 | bitmap_set_bit (new_irr, son->index); | |
4538 | } | |
4539 | } | |
4540 | } | |
f1f41a6c | 4541 | while (!bbs.is_empty ()); |
4c0315d0 | 4542 | |
4543 | BITMAP_FREE (all_region_blocks); | |
f1f41a6c | 4544 | bbs.release (); |
4c0315d0 | 4545 | } |
4546 | ||
4547 | static void | |
4548 | ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone) | |
4549 | { | |
4550 | gimple_stmt_iterator gsi; | |
4551 | ||
4552 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
4553 | { | |
42acab1c | 4554 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4555 | if (is_gimple_call (stmt) && !is_tm_pure_call (stmt)) |
4556 | { | |
4557 | tree fndecl = gimple_call_fndecl (stmt); | |
4558 | if (fndecl) | |
4559 | { | |
4560 | struct tm_ipa_cg_data *d; | |
4561 | unsigned *pcallers; | |
3e426b86 | 4562 | struct cgraph_node *tnode; |
4c0315d0 | 4563 | |
4564 | if (is_tm_ending_fndecl (fndecl)) | |
4565 | continue; | |
4566 | if (find_tm_replacement_function (fndecl)) | |
4567 | continue; | |
4568 | ||
415d1b9a | 4569 | tnode = cgraph_node::get (fndecl); |
3e426b86 | 4570 | d = get_cg_data (&tnode, true); |
4571 | ||
4c0315d0 | 4572 | pcallers = (for_clone ? &d->tm_callers_clone |
4573 | : &d->tm_callers_normal); | |
4574 | ||
4575 | gcc_assert (*pcallers > 0); | |
4576 | *pcallers -= 1; | |
4577 | } | |
4578 | } | |
4579 | } | |
4580 | } | |
4581 | ||
4582 | /* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions, | |
4583 | as well as other irrevocable actions such as inline assembly. Mark all | |
4584 | such blocks as irrevocable and decrement the number of calls to | |
4585 | transactional clones. Return true if, for the transactional clone, the | |
4586 | entire function is irrevocable. */ | |
4587 | ||
4588 | static bool | |
4589 | ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone) | |
4590 | { | |
4591 | struct tm_ipa_cg_data *d; | |
4592 | bitmap new_irr, old_irr; | |
4c0315d0 | 4593 | bool ret = false; |
4594 | ||
40879ac6 | 4595 | /* Builtin operators (operator new, and such). */ |
02774f2d | 4596 | if (DECL_STRUCT_FUNCTION (node->decl) == NULL |
4597 | || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL) | |
40879ac6 | 4598 | return false; |
4599 | ||
02774f2d | 4600 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 4601 | calculate_dominance_info (CDI_DOMINATORS); |
4602 | ||
3e426b86 | 4603 | d = get_cg_data (&node, true); |
4997014d | 4604 | auto_vec<basic_block, 10> queue; |
4c0315d0 | 4605 | new_irr = BITMAP_ALLOC (&tm_obstack); |
4606 | ||
4607 | /* Scan each tm region, propagating irrevocable status through the tree. */ | |
4608 | if (for_clone) | |
4609 | { | |
4610 | old_irr = d->irrevocable_blocks_clone; | |
34154e27 | 4611 | queue.quick_push (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))); |
4c0315d0 | 4612 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL)) |
4613 | { | |
34154e27 | 4614 | ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), |
4615 | new_irr, | |
4c0315d0 | 4616 | old_irr, NULL); |
34154e27 | 4617 | ret = bitmap_bit_p (new_irr, |
4618 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->index); | |
4c0315d0 | 4619 | } |
4620 | } | |
4621 | else | |
4622 | { | |
4623 | struct tm_region *region; | |
4624 | ||
4625 | old_irr = d->irrevocable_blocks_normal; | |
4626 | for (region = d->all_tm_regions; region; region = region->next) | |
4627 | { | |
f1f41a6c | 4628 | queue.quick_push (region->entry_block); |
4c0315d0 | 4629 | if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, |
4630 | region->exit_blocks)) | |
4631 | ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr, | |
4632 | region->exit_blocks); | |
4633 | } | |
4634 | } | |
4635 | ||
4636 | /* If we found any new irrevocable blocks, reduce the call count for | |
4637 | transactional clones within the irrevocable blocks. Save the new | |
4638 | set of irrevocable blocks for next time. */ | |
4639 | if (!bitmap_empty_p (new_irr)) | |
4640 | { | |
4641 | bitmap_iterator bmi; | |
4642 | unsigned i; | |
4643 | ||
4644 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
f5a6b05f | 4645 | ipa_tm_decrement_clone_counts (BASIC_BLOCK_FOR_FN (cfun, i), |
4646 | for_clone); | |
4c0315d0 | 4647 | |
4648 | if (old_irr) | |
4649 | { | |
4650 | bitmap_ior_into (old_irr, new_irr); | |
4651 | BITMAP_FREE (new_irr); | |
4652 | } | |
4653 | else if (for_clone) | |
4654 | d->irrevocable_blocks_clone = new_irr; | |
4655 | else | |
4656 | d->irrevocable_blocks_normal = new_irr; | |
4657 | ||
4658 | if (dump_file && new_irr) | |
4659 | { | |
4660 | const char *dname; | |
4661 | bitmap_iterator bmi; | |
4662 | unsigned i; | |
4663 | ||
4664 | dname = lang_hooks.decl_printable_name (current_function_decl, 2); | |
4665 | EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi) | |
4666 | fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i); | |
4667 | } | |
4668 | } | |
4669 | else | |
4670 | BITMAP_FREE (new_irr); | |
4671 | ||
4c0315d0 | 4672 | pop_cfun (); |
4c0315d0 | 4673 | |
4674 | return ret; | |
4675 | } | |
4676 | ||
4677 | /* Return true if, for the transactional clone of NODE, any call | |
4678 | may enter irrevocable mode. */ | |
4679 | ||
4680 | static bool | |
4681 | ipa_tm_mayenterirr_function (struct cgraph_node *node) | |
4682 | { | |
3e426b86 | 4683 | struct tm_ipa_cg_data *d; |
4684 | tree decl; | |
4685 | unsigned flags; | |
4686 | ||
4687 | d = get_cg_data (&node, true); | |
02774f2d | 4688 | decl = node->decl; |
3e426b86 | 4689 | flags = flags_from_decl_or_type (decl); |
4c0315d0 | 4690 | |
4691 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
4692 | at this point, but handling these functions when written in by the | |
4693 | user makes it easier to build unit tests. */ | |
4694 | if (flags & ECF_TM_BUILTIN) | |
4695 | return false; | |
4696 | ||
4697 | /* Filter out all functions that are marked. */ | |
4698 | if (flags & ECF_TM_PURE) | |
4699 | return false; | |
4700 | if (is_tm_safe (decl)) | |
4701 | return false; | |
4702 | if (is_tm_irrevocable (decl)) | |
4703 | return true; | |
4704 | if (is_tm_callable (decl)) | |
4705 | return true; | |
4706 | if (find_tm_replacement_function (decl)) | |
4707 | return true; | |
4708 | ||
4709 | /* If we aren't seeing the final version of the function we don't | |
4710 | know what it will contain at runtime. */ | |
415d1b9a | 4711 | if (node->get_availability () < AVAIL_AVAILABLE) |
4c0315d0 | 4712 | return true; |
4713 | ||
4714 | /* If the function must go irrevocable, then of course true. */ | |
4715 | if (d->is_irrevocable) | |
4716 | return true; | |
4717 | ||
4718 | /* If there are any blocks marked irrevocable, then the function | |
4719 | as a whole may enter irrevocable. */ | |
4720 | if (d->irrevocable_blocks_clone) | |
4721 | return true; | |
4722 | ||
4723 | /* We may have previously marked this function as tm_may_enter_irr; | |
4724 | see pass_diagnose_tm_blocks. */ | |
4725 | if (node->local.tm_may_enter_irr) | |
4726 | return true; | |
4727 | ||
4728 | /* Recurse on the main body for aliases. In general, this will | |
4729 | result in one of the bits above being set so that we will not | |
4730 | have to recurse next time. */ | |
02774f2d | 4731 | if (node->alias) |
415d1b9a | 4732 | return ipa_tm_mayenterirr_function (cgraph_node::get (node->thunk.alias)); |
4c0315d0 | 4733 | |
4734 | /* What remains is unmarked local functions without items that force | |
4735 | the function to go irrevocable. */ | |
4736 | return false; | |
4737 | } | |
4738 | ||
4739 | /* Diagnose calls from transaction_safe functions to unmarked | |
4740 | functions that are determined to not be safe. */ | |
4741 | ||
4742 | static void | |
4743 | ipa_tm_diagnose_tm_safe (struct cgraph_node *node) | |
4744 | { | |
4745 | struct cgraph_edge *e; | |
4746 | ||
4747 | for (e = node->callees; e ; e = e->next_callee) | |
02774f2d | 4748 | if (!is_tm_callable (e->callee->decl) |
4c0315d0 | 4749 | && e->callee->local.tm_may_enter_irr) |
4750 | error_at (gimple_location (e->call_stmt), | |
4751 | "unsafe function call %qD within " | |
02774f2d | 4752 | "%<transaction_safe%> function", e->callee->decl); |
4c0315d0 | 4753 | } |
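/* Illustration only (hypothetical user code, compiled separately with
   -fgnu-tm): the loop above reports the call below, because extern_fn
   carries no TM attribute and may therefore enter irrevocable mode,
   which a transaction_safe function must never do.  */
#if 0
extern void extern_fn (void);	/* unannotated, assumed unsafe */

__attribute__((transaction_safe)) void
safe_fn (void)
{
  extern_fn ();		/* error: unsafe function call 'extern_fn'
			   within 'transaction_safe' function */
}
#endif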
4754 | ||
4755 | /* Diagnose calls from atomic transactions to unmarked functions | |
4756 | that are determined to not be safe. */ | |
4757 | ||
4758 | static void | |
4759 | ipa_tm_diagnose_transaction (struct cgraph_node *node, | |
4760 | struct tm_region *all_tm_regions) | |
4761 | { | |
4762 | struct tm_region *r; | |
4763 | ||
4764 | for (r = all_tm_regions; r ; r = r->next) | |
1a91d914 | 4765 | if (gimple_transaction_subcode (r->get_transaction_stmt ()) |
4766 | & GTMA_IS_RELAXED) | |
4c0315d0 | 4767 | { |
4768 | /* Atomic transactions can be nested inside relaxed. */ | |
4769 | if (r->inner) | |
4770 | ipa_tm_diagnose_transaction (node, r->inner); | |
4771 | } | |
4772 | else | |
4773 | { | |
f1f41a6c | 4774 | vec<basic_block> bbs; |
4c0315d0 | 4775 | gimple_stmt_iterator gsi; |
4776 | basic_block bb; | |
4777 | size_t i; | |
4778 | ||
4779 | bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, | |
4780 | r->irr_blocks, NULL, false); | |
4781 | ||
f1f41a6c | 4782 | for (i = 0; bbs.iterate (i, &bb); ++i) |
4c0315d0 | 4783 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
4784 | { | |
42acab1c | 4785 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 4786 | tree fndecl; |
4787 | ||
4788 | if (gimple_code (stmt) == GIMPLE_ASM) | |
4789 | { | |
4790 | error_at (gimple_location (stmt), | |
85b9be9b | 4791 | "%<asm%> not allowed in atomic transaction"); |
4c0315d0 | 4792 | continue; |
4793 | } | |
4794 | ||
4795 | if (!is_gimple_call (stmt)) | |
4796 | continue; | |
4797 | fndecl = gimple_call_fndecl (stmt); | |
4798 | ||
4799 | /* Indirect function calls have been diagnosed already. */ | |
4800 | if (!fndecl) | |
4801 | continue; | |
4802 | ||
4803 | /* Stop at the end of the transaction. */ | |
4804 | if (is_tm_ending_fndecl (fndecl)) | |
4805 | { | |
4806 | if (bitmap_bit_p (r->exit_blocks, bb->index)) | |
4807 | break; | |
4808 | continue; | |
4809 | } | |
4810 | ||
4811 | /* Marked functions have been diagnosed already. */ | |
4812 | if (is_tm_pure_call (stmt)) | |
4813 | continue; | |
4814 | if (is_tm_callable (fndecl)) | |
4815 | continue; | |
4816 | ||
35ee1c66 | 4817 | if (cgraph_node::local_info (fndecl)->tm_may_enter_irr) |
4c0315d0 | 4818 | error_at (gimple_location (stmt), |
4819 | "unsafe function call %qD within " | |
4820 | "atomic transaction", fndecl); | |
4821 | } | |
4822 | ||
f1f41a6c | 4823 | bbs.release (); |
4c0315d0 | 4824 | } |
4825 | } | |
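/* Illustration only (hypothetical user code, compiled separately with
   -fgnu-tm): inside an atomic (non-relaxed) transaction the walk above
   rejects inline asm outright and reports calls to functions that may
   enter irrevocable mode.  */
#if 0
extern void extern_fn (void);	/* unannotated, assumed unsafe */

void
atomic_example (void)
{
  __transaction_atomic
    {
      __asm__ ("nop");	/* error: 'asm' not allowed in atomic transaction */
      extern_fn ();	/* error: unsafe function call 'extern_fn'
			   within atomic transaction */
    }
}
#endif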
4826 | ||
4827 | /* Return a transactionally mangled name for the assembler name | |
4828 | OLD_ASM_ID. The returned value is an IDENTIFIER_NODE tree, not a | |
4829 | malloced string; the temporary buffer is freed before returning. */ | |
4830 | ||
4831 | static tree | |
4832 | tm_mangle (tree old_asm_id) | |
4833 | { | |
4834 | const char *old_asm_name; | |
4835 | char *tm_name; | |
4836 | void *alloc = NULL; | |
4837 | struct demangle_component *dc; | |
4838 | tree new_asm_id; | |
4839 | ||
4840 | /* Determine if the symbol is already a valid C++ mangled name. Do this | |
4841 | even for C, which might be interfacing with C++ code via appropriately | |
4842 | ugly identifiers. */ | |
4843 | /* ??? We could probably do just as well checking for "_Z" and be done. */ | |
4844 | old_asm_name = IDENTIFIER_POINTER (old_asm_id); | |
4845 | dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc); | |
4846 | ||
4847 | if (dc == NULL) | |
4848 | { | |
7a413494 | 4849 | char length[12]; |
4c0315d0 | 4850 | |
4851 | do_unencoded: | |
4852 | sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id)); | |
4853 | tm_name = concat ("_ZGTt", length, old_asm_name, NULL); | |
4854 | } | |
4855 | else | |
4856 | { | |
4857 | old_asm_name += 2; /* Skip _Z */ | |
4858 | ||
4859 | switch (dc->type) | |
4860 | { | |
4861 | case DEMANGLE_COMPONENT_TRANSACTION_CLONE: | |
4862 | case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE: | |
4863 | /* Don't play silly games, you! */ | |
4864 | goto do_unencoded; | |
4865 | ||
4866 | case DEMANGLE_COMPONENT_HIDDEN_ALIAS: | |
4867 | /* I'd really like to know if we can ever be passed one of | |
4868 | these from the C++ front end. The Logical Thing would | |
4869 | seem that hidden-alias should be outer-most, so that we | |
4870 | get hidden-alias of a transaction-clone and not vice-versa. */ | |
4871 | old_asm_name += 2; | |
4872 | break; | |
4873 | ||
4874 | default: | |
4875 | break; | |
4876 | } | |
4877 | ||
4878 | tm_name = concat ("_ZGTt", old_asm_name, NULL); | |
4879 | } | |
4880 | free (alloc); | |
4881 | ||
4882 | new_asm_id = get_identifier (tm_name); | |
4883 | free (tm_name); | |
4884 | ||
4885 | return new_asm_id; | |
4886 | } | |
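/* A minimal standalone sketch (not GCC code) of the mangling scheme
   implemented above, using the simple "_Z" prefix test the ??? comment
   alludes to instead of a full demangle: an already-mangled C++ name
   keeps its encoding under a "_ZGTt" prefix, while a plain identifier
   gets the length-prefixed form.  So "foo" becomes "_ZGTt3foo" and
   "_Z3barv" becomes "_ZGTt3barv".  (The special cases above for
   transaction-clone and hidden-alias components are ignored here.)  */
#include <stdio.h>
#include <string.h>

static void
sketch_tm_mangle (const char *name, char *out, size_t outlen)
{
  if (strncmp (name, "_Z", 2) == 0)
    snprintf (out, outlen, "_ZGTt%s", name + 2);
  else
    snprintf (out, outlen, "_ZGTt%zu%s", strlen (name), name);
}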
4887 | ||
4888 | static inline void | |
8efa224a | 4889 | ipa_tm_mark_force_output_node (struct cgraph_node *node) |
4c0315d0 | 4890 | { |
415d1b9a | 4891 | node->mark_force_output (); |
02774f2d | 4892 | node->analyzed = true; |
4c0315d0 | 4893 | } |
4894 | ||
6a1c0403 | 4895 | static inline void |
4896 | ipa_tm_mark_forced_by_abi_node (struct cgraph_node *node) | |
4897 | { | |
02774f2d | 4898 | node->forced_by_abi = true; |
4899 | node->analyzed = true; | |
6a1c0403 | 4900 | } |
4901 | ||
4c0315d0 | 4902 | /* Callback data for ipa_tm_create_version_alias. */ |
4903 | struct create_version_alias_info | |
4904 | { | |
4905 | struct cgraph_node *old_node; | |
4906 | tree new_decl; | |
4907 | }; | |
4908 | ||
caf29404 | 4909 | /* A subroutine of ipa_tm_create_version, called via |
4c0315d0 | 4910 | cgraph_for_node_and_aliases. Create new tm clones for each of |
4911 | the existing aliases. */ | |
4912 | static bool | |
4913 | ipa_tm_create_version_alias (struct cgraph_node *node, void *data) | |
4914 | { | |
4915 | struct create_version_alias_info *info | |
4916 | = (struct create_version_alias_info *)data; | |
4917 | tree old_decl, new_decl, tm_name; | |
4918 | struct cgraph_node *new_node; | |
4919 | ||
02774f2d | 4920 | if (!node->cpp_implicit_alias) |
4c0315d0 | 4921 | return false; |
4922 | ||
02774f2d | 4923 | old_decl = node->decl; |
4c0315d0 | 4924 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); |
4925 | new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl), | |
4926 | TREE_CODE (old_decl), tm_name, | |
4927 | TREE_TYPE (old_decl)); | |
4928 | ||
4929 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4930 | SET_DECL_RTL (new_decl, NULL); | |
4931 | ||
4932 | /* Based loosely on C++'s make_alias_for(). */ | |
4933 | TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl); | |
f7c22b34 | 4934 | DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl); |
4935 | DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl); | |
4c0315d0 | 4936 | TREE_READONLY (new_decl) = TREE_READONLY (old_decl); |
4937 | DECL_EXTERNAL (new_decl) = 0; | |
4938 | DECL_ARTIFICIAL (new_decl) = 1; | |
4939 | TREE_ADDRESSABLE (new_decl) = 1; | |
4940 | TREE_USED (new_decl) = 1; | |
4941 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4942 | ||
4943 | /* Perform the same remapping to the comdat group. */ | |
260d0713 | 4944 | if (DECL_ONE_ONLY (new_decl)) |
97221fd7 | 4945 | varpool_node::get (new_decl)->set_comdat_group |
4946 | (tm_mangle (decl_comdat_group_id (old_decl))); | |
4c0315d0 | 4947 | |
415d1b9a | 4948 | new_node = cgraph_node::create_same_body_alias (new_decl, info->new_decl); |
4c0315d0 | 4949 | new_node->tm_clone = true; |
02774f2d | 4950 | new_node->externally_visible = info->old_node->externally_visible; |
6b722052 | 4951 | new_node->no_reorder = info->old_node->no_reorder; |
3e426b86 | 4952 | /* ?? Do not traverse aliases here. */ |
4953 | get_cg_data (&node, false)->clone = new_node; | |
4c0315d0 | 4954 | |
4955 | record_tm_clone_pair (old_decl, new_decl); | |
4956 | ||
02774f2d | 4957 | if (info->old_node->force_output |
51ce5652 | 4958 | || info->old_node->ref_list.first_referring ()) |
8efa224a | 4959 | ipa_tm_mark_force_output_node (new_node); |
02774f2d | 4960 | if (info->old_node->forced_by_abi) |
6a1c0403 | 4961 | ipa_tm_mark_forced_by_abi_node (new_node); |
4c0315d0 | 4962 | return false; |
4963 | } | |
4964 | ||
4965 | /* Create a copy of the function (possibly declaration only) of OLD_NODE, | |
4966 | appropriate for the transactional clone. */ | |
4967 | ||
4968 | static void | |
4969 | ipa_tm_create_version (struct cgraph_node *old_node) | |
4970 | { | |
4971 | tree new_decl, old_decl, tm_name; | |
4972 | struct cgraph_node *new_node; | |
4973 | ||
02774f2d | 4974 | old_decl = old_node->decl; |
4c0315d0 | 4975 | new_decl = copy_node (old_decl); |
4976 | ||
4977 | /* DECL_ASSEMBLER_NAME needs to be set before we call | |
4978 | cgraph_copy_node_for_versioning below, because cgraph_node will | |
4979 | fill the assembler_name_hash. */ | |
4980 | tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl)); | |
4981 | SET_DECL_ASSEMBLER_NAME (new_decl, tm_name); | |
4982 | SET_DECL_RTL (new_decl, NULL); | |
4983 | TREE_SYMBOL_REFERENCED (tm_name) = 1; | |
4984 | ||
4985 | /* Perform the same remapping to the comdat group. */ | |
260d0713 | 4986 | if (DECL_ONE_ONLY (new_decl)) |
97221fd7 | 4987 | varpool_node::get (new_decl)->set_comdat_group |
4988 | (tm_mangle (DECL_COMDAT_GROUP (old_decl))); | |
4c0315d0 | 4989 | |
2cf4c3b3 | 4990 | gcc_assert (!old_node->ipa_transforms_to_apply.exists ()); |
415d1b9a | 4991 | new_node = old_node->create_version_clone (new_decl, vNULL, NULL); |
281dea26 | 4992 | new_node->local.local = false; |
02774f2d | 4993 | new_node->externally_visible = old_node->externally_visible; |
4c0315d0 | 4994 | new_node->lowered = true; |
4995 | new_node->tm_clone = 1; | |
033ab5d3 | 4996 | if (!old_node->implicit_section) |
4997 | new_node->set_section (old_node->get_section ()); | |
3e426b86 | 4998 | get_cg_data (&old_node, true)->clone = new_node; |
4c0315d0 | 4999 | |
415d1b9a | 5000 | if (old_node->get_availability () >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5001 | { |
5002 | /* Remap extern inline to static inline. */ | |
5003 | /* ??? Is it worth trying to use make_decl_one_only? */ | |
5004 | if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl)) | |
5005 | { | |
5006 | DECL_EXTERNAL (new_decl) = 0; | |
5007 | TREE_PUBLIC (new_decl) = 0; | |
7a571443 | 5008 | DECL_WEAK (new_decl) = 0; |
4c0315d0 | 5009 | } |
5010 | ||
f1f41a6c | 5011 | tree_function_versioning (old_decl, new_decl, |
5012 | NULL, false, NULL, | |
5013 | false, NULL, NULL); | |
4c0315d0 | 5014 | } |
5015 | ||
5016 | record_tm_clone_pair (old_decl, new_decl); | |
5017 | ||
35ee1c66 | 5018 | symtab->call_cgraph_insertion_hooks (new_node); |
02774f2d | 5019 | if (old_node->force_output |
51ce5652 | 5020 | || old_node->ref_list.first_referring ()) |
8efa224a | 5021 | ipa_tm_mark_force_output_node (new_node); |
02774f2d | 5022 | if (old_node->forced_by_abi) |
6a1c0403 | 5023 | ipa_tm_mark_forced_by_abi_node (new_node); |
4c0315d0 | 5024 | |
5025 | /* Do the same thing, but for any aliases of the original node. */ | |
5026 | { | |
5027 | struct create_version_alias_info data; | |
5028 | data.old_node = old_node; | |
5029 | data.new_decl = new_decl; | |
415d1b9a | 5030 | old_node->call_for_symbol_thunks_and_aliases (ipa_tm_create_version_alias, |
5031 | &data, true); | |
4c0315d0 | 5032 | } |
5033 | } | |
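/* Illustration only (hypothetical user code): a function the user marks

     __attribute__((transaction_callable)) int inc (int x) { return x + 1; }

   is cloned here; the clone shares the original body via
   tree_function_versioning, and per tm_mangle above its assembler name
   would be "_ZGTt3inc" when compiled as plain C.  */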
5034 | ||
5035 | /* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */ | |
5036 | ||
5037 | static void | |
5038 | ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region, | |
5039 | basic_block bb) | |
5040 | { | |
5041 | gimple_stmt_iterator gsi; | |
1a91d914 | 5042 | gcall *g; |
4c0315d0 | 5043 | |
5044 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
5045 | ||
5046 | g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE), | |
5047 | 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE)); | |
5048 | ||
5049 | split_block_after_labels (bb); | |
5050 | gsi = gsi_after_labels (bb); | |
5051 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); | |
5052 | ||
415d1b9a | 5053 | node->create_edge (cgraph_node::get_create |
5054 | (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)), | |
151b9ff5 | 5055 | g, gimple_bb (g)->count); |
4c0315d0 | 5056 | } |
5057 | ||
5058 | /* Construct a call to TM_GETTMCLONE and insert it before GSI. */ | |
5059 | ||
5060 | static bool | |
5061 | ipa_tm_insert_gettmclone_call (struct cgraph_node *node, | |
5062 | struct tm_region *region, | |
1a91d914 | 5063 | gimple_stmt_iterator *gsi, gcall *stmt) |
4c0315d0 | 5064 | { |
5065 | tree gettm_fn, ret, old_fn, callfn; | |
1a91d914 | 5066 | gcall *g; |
5067 | gassign *g2; | |
4c0315d0 | 5068 | bool safe; |
5069 | ||
5070 | old_fn = gimple_call_fn (stmt); | |
5071 | ||
5072 | if (TREE_CODE (old_fn) == ADDR_EXPR) | |
5073 | { | |
5074 | tree fndecl = TREE_OPERAND (old_fn, 0); | |
5075 | tree clone = get_tm_clone_pair (fndecl); | |
5076 | ||
5077 | /* By transforming the call into a TM_GETTMCLONE, we are | |
5078 | technically taking the address of the original function and | |
5079 | its clone. Explain this so inlining will know this function | |
5080 | is needed. */ | |
415d1b9a | 5081 | cgraph_node::get (fndecl)->mark_address_taken () ; |
4c0315d0 | 5082 | if (clone) |
415d1b9a | 5083 | cgraph_node::get (clone)->mark_address_taken (); |
4c0315d0 | 5084 | } |
5085 | ||
5086 | safe = is_tm_safe (TREE_TYPE (old_fn)); | |
5087 | gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE | |
5088 | : BUILT_IN_TM_GETTMCLONE_IRR); | |
f9e245b2 | 5089 | ret = create_tmp_var (ptr_type_node); |
4c0315d0 | 5090 | |
5091 | if (!safe) | |
5092 | transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE); | |
5093 | ||
5094 | /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */ | |
5095 | if (TREE_CODE (old_fn) == OBJ_TYPE_REF) | |
5096 | old_fn = OBJ_TYPE_REF_EXPR (old_fn); | |
5097 | ||
5098 | g = gimple_build_call (gettm_fn, 1, old_fn); | |
5099 | ret = make_ssa_name (ret, g); | |
5100 | gimple_call_set_lhs (g, ret); | |
5101 | ||
5102 | gsi_insert_before (gsi, g, GSI_SAME_STMT); | |
5103 | ||
151b9ff5 | 5104 | node->create_edge (cgraph_node::get_create (gettm_fn), g, gimple_bb (g)->count); |
4c0315d0 | 5105 | |
5106 | /* Cast return value from tm_gettmclone* into appropriate function | |
5107 | pointer. */ | |
f9e245b2 | 5108 | callfn = create_tmp_var (TREE_TYPE (old_fn)); |
4c0315d0 | 5109 | g2 = gimple_build_assign (callfn, |
5110 | fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret)); | |
5111 | callfn = make_ssa_name (callfn, g2); | |
5112 | gimple_assign_set_lhs (g2, callfn); | |
5113 | gsi_insert_before (gsi, g2, GSI_SAME_STMT); | |
5114 | ||
5115 | /* ??? This is a hack to preserve the NOTHROW bit on the call, | |
5116 | which we would have derived from the decl. Failure to save | |
5117 | this bit means we might have to split the basic block. */ | |
5118 | if (gimple_call_nothrow_p (stmt)) | |
5119 | gimple_call_set_nothrow (stmt, true); | |
5120 | ||
5121 | gimple_call_set_fn (stmt, callfn); | |
5122 | ||
5123 | /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS | |
5124 | for a call statement. Fix it. */ | |
5125 | { | |
5126 | tree lhs = gimple_call_lhs (stmt); | |
5127 | tree rettype = TREE_TYPE (gimple_call_fntype (stmt)); | |
5128 | if (lhs | |
5129 | && !useless_type_conversion_p (TREE_TYPE (lhs), rettype)) | |
5130 | { | |
5131 | tree temp; | |
5132 | ||
f9e245b2 | 5133 | temp = create_tmp_reg (rettype); |
4c0315d0 | 5134 | gimple_call_set_lhs (stmt, temp); |
5135 | ||
5136 | g2 = gimple_build_assign (lhs, | |
5137 | fold_build1 (VIEW_CONVERT_EXPR, | |
5138 | TREE_TYPE (lhs), temp)); | |
5139 | gsi_insert_after (gsi, g2, GSI_SAME_STMT); | |
5140 | } | |
5141 | } | |
5142 | ||
5143 | update_stmt (stmt); | |
d8b5abdb | 5144 | cgraph_edge *e = cgraph_node::get (current_function_decl)->get_edge (stmt); |
5145 | if (e && e->indirect_info) | |
5146 | e->indirect_info->polymorphic = false; | |
4c0315d0 | 5147 | |
5148 | return true; | |
5149 | } | |
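/* Illustration only: conceptually, an indirect call inside a transaction

     ret = (*fnptr) (args);

   is rewritten by the function above into roughly

     clone = BUILT_IN_TM_GETTMCLONE_SAFE-or-IRR (fnptr);
     callfn = (type of fnptr) clone;
     ret = (*callfn) (args);

   where the GETTMCLONE builtin asks the TM runtime at execution time
   for the transactional clone to call in place of the original target.
   When the pointed-to type is not transaction_safe, the enclosing
   transaction is additionally flagged as possibly entering irrevocable
   mode.  */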
5150 | ||
5151 | /* Helper function for ipa_tm_transform_calls*. Given a call | |
5152 | statement in GSI which resides inside transaction REGION, redirect | |
5153 | the call to either its wrapper function, or its clone. */ | |
5154 | ||
5155 | static void | |
5156 | ipa_tm_transform_calls_redirect (struct cgraph_node *node, | |
5157 | struct tm_region *region, | |
5158 | gimple_stmt_iterator *gsi, | |
5159 | bool *need_ssa_rename_p) | |
5160 | { | |
1a91d914 | 5161 | gcall *stmt = as_a <gcall *> (gsi_stmt (*gsi)); |
4c0315d0 | 5162 | struct cgraph_node *new_node; |
415d1b9a | 5163 | struct cgraph_edge *e = node->get_edge (stmt); |
4c0315d0 | 5164 | tree fndecl = gimple_call_fndecl (stmt); |
5165 | ||
5166 | /* For indirect calls, pass the address through the runtime. */ | |
5167 | if (fndecl == NULL) | |
5168 | { | |
5169 | *need_ssa_rename_p |= | |
5170 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
5171 | return; | |
5172 | } | |
5173 | ||
5174 | /* Handle some TM builtins. Ordinarily these aren't actually generated | |
5175 | at this point, but handling these functions when written in by the | |
5176 | user makes it easier to build unit tests. */ | |
5177 | if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN) | |
5178 | return; | |
5179 | ||
5180 | /* Fixup recursive calls inside clones. */ | |
5181 | /* ??? Why did cgraph_copy_node_for_versioning update the call edges | |
5182 | for recursion but not update the call statements themselves? */ | |
5183 | if (e->caller == e->callee && decl_is_tm_clone (current_function_decl)) | |
5184 | { | |
5185 | gimple_call_set_fndecl (stmt, current_function_decl); | |
5186 | return; | |
5187 | } | |
5188 | ||
5189 | /* If there is a replacement, use it. */ | |
5190 | fndecl = find_tm_replacement_function (fndecl); | |
5191 | if (fndecl) | |
5192 | { | |
415d1b9a | 5193 | new_node = cgraph_node::get_create (fndecl); |
4c0315d0 | 5194 | |
5195 | /* ??? Mark all transaction_wrap functions tm_may_enter_irr. | |
5196 | ||
5197 | We can't do this earlier in record_tm_replacement because | |
5198 | cgraph_remove_unreachable_nodes is called before we inject | |
5199 | references to the node. Further, we can't do this in some | |
5200 | nice central place in ipa_tm_execute because we don't have | |
5201 | the exact list of wrapper functions that would be used. | |
5202 | Marking more wrappers than necessary results in the creation | |
5203 | of unnecessary cgraph_nodes, which can cause some of the | |
5204 | other IPA passes to crash. | |
5205 | ||
5206 | We do need to mark these nodes so that we get the proper | |
5207 | result in expand_call_tm. */ | |
5208 | /* ??? This seems broken. How is it that we're marking the | |
5209 | CALLEE as may_enter_irr? Surely we should be marking the | |
5210 | CALLER. Also note that find_tm_replacement_function also | |
5211 | contains mappings into the TM runtime, e.g. memcpy. These | |
5212 | we know won't go irrevocable. */ | |
5213 | new_node->local.tm_may_enter_irr = 1; | |
5214 | } | |
5215 | else | |
5216 | { | |
3e426b86 | 5217 | struct tm_ipa_cg_data *d; |
5218 | struct cgraph_node *tnode = e->callee; | |
5219 | ||
5220 | d = get_cg_data (&tnode, true); | |
4c0315d0 | 5221 | new_node = d->clone; |
5222 | ||
5223 | /* As we've already skipped pure calls and appropriate builtins, | |
5224 | and we've already marked irrevocable blocks, if we can't come | |
5225 | up with a static replacement, then ask the runtime. */ | |
5226 | if (new_node == NULL) | |
5227 | { | |
5228 | *need_ssa_rename_p |= | |
5229 | ipa_tm_insert_gettmclone_call (node, region, gsi, stmt); | |
4c0315d0 | 5230 | return; |
5231 | } | |
5232 | ||
02774f2d | 5233 | fndecl = new_node->decl; |
4c0315d0 | 5234 | } |
5235 | ||
35ee1c66 | 5236 | e->redirect_callee (new_node); |
4c0315d0 | 5237 | gimple_call_set_fndecl (stmt, fndecl); |
5238 | } | |
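/* Illustration only: for a direct call inside a transaction the logic
   above picks, in order, the current function itself (recursive calls
   within a clone), a registered replacement (for instance one named by
   a transaction_wrap attribute), or the transactional clone built by
   ipa_tm_create_version; if no clone exists, the call falls back to the
   runtime lookup in ipa_tm_insert_gettmclone_call.  */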
5239 | ||
5240 | /* Helper function for ipa_tm_transform_calls. For a given BB, | |
5241 | install calls to tm_irrevocable when IRR_BLOCKS are reached, | |
5242 | redirect other calls to the generated transactional clone. */ | |
5243 | ||
5244 | static bool | |
5245 | ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region, | |
5246 | basic_block bb, bitmap irr_blocks) | |
5247 | { | |
5248 | gimple_stmt_iterator gsi; | |
5249 | bool need_ssa_rename = false; | |
5250 | ||
5251 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5252 | { | |
5253 | ipa_tm_insert_irr_call (node, region, bb); | |
5254 | return true; | |
5255 | } | |
5256 | ||
5257 | for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
5258 | { | |
42acab1c | 5259 | gimple *stmt = gsi_stmt (gsi); |
4c0315d0 | 5260 | |
5261 | if (!is_gimple_call (stmt)) | |
5262 | continue; | |
5263 | if (is_tm_pure_call (stmt)) | |
5264 | continue; | |
5265 | ||
5266 | /* Redirect edges to the appropriate replacement or clone. */ | |
5267 | ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename); | |
5268 | } | |
5269 | ||
5270 | return need_ssa_rename; | |
5271 | } | |
5272 | ||
5273 | /* Walk the CFG for REGION, beginning at BB. Install calls to | |
5274 | tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to | |
5275 | the generated transactional clone. */ | |
5276 | ||
5277 | static bool | |
5278 | ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region, | |
5279 | basic_block bb, bitmap irr_blocks) | |
5280 | { | |
5281 | bool need_ssa_rename = false; | |
5282 | edge e; | |
5283 | edge_iterator ei; | |
c2078b80 | 5284 | auto_vec<basic_block> queue; |
4c0315d0 | 5285 | bitmap visited_blocks = BITMAP_ALLOC (NULL); |
5286 | ||
f1f41a6c | 5287 | queue.safe_push (bb); |
4c0315d0 | 5288 | do |
5289 | { | |
f1f41a6c | 5290 | bb = queue.pop (); |
4c0315d0 | 5291 | |
5292 | need_ssa_rename |= | |
5293 | ipa_tm_transform_calls_1 (node, region, bb, irr_blocks); | |
5294 | ||
5295 | if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index)) | |
5296 | continue; | |
5297 | ||
5298 | if (region && bitmap_bit_p (region->exit_blocks, bb->index)) | |
5299 | continue; | |
5300 | ||
5301 | FOR_EACH_EDGE (e, ei, bb->succs) | |
5302 | if (!bitmap_bit_p (visited_blocks, e->dest->index)) | |
5303 | { | |
5304 | bitmap_set_bit (visited_blocks, e->dest->index); | |
f1f41a6c | 5305 | queue.safe_push (e->dest); |
4c0315d0 | 5306 | } |
5307 | } | |
f1f41a6c | 5308 | while (!queue.is_empty ()); |
4c0315d0 | 5309 | |
4c0315d0 | 5310 | BITMAP_FREE (visited_blocks); |
5311 | ||
5312 | return need_ssa_rename; | |
5313 | } | |
5314 | ||
5315 | /* Transform the calls within the TM regions within NODE. */ | |
5316 | ||
5317 | static void | |
5318 | ipa_tm_transform_transaction (struct cgraph_node *node) | |
5319 | { | |
3e426b86 | 5320 | struct tm_ipa_cg_data *d; |
4c0315d0 | 5321 | struct tm_region *region; |
5322 | bool need_ssa_rename = false; | |
5323 | ||
3e426b86 | 5324 | d = get_cg_data (&node, true); |
5325 | ||
02774f2d | 5326 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 5327 | calculate_dominance_info (CDI_DOMINATORS); |
5328 | ||
5329 | for (region = d->all_tm_regions; region; region = region->next) | |
5330 | { | |
5331 | /* If we're sure to go irrevocable, don't transform anything. */ | |
5332 | if (d->irrevocable_blocks_normal | |
5333 | && bitmap_bit_p (d->irrevocable_blocks_normal, | |
5334 | region->entry_block->index)) | |
5335 | { | |
1910089e | 5336 | transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE |
5337 | | GTMA_MAY_ENTER_IRREVOCABLE | |
5338 | | GTMA_HAS_NO_INSTRUMENTATION); | |
4c0315d0 | 5339 | continue; |
5340 | } | |
5341 | ||
5342 | need_ssa_rename |= | |
5343 | ipa_tm_transform_calls (node, region, region->entry_block, | |
5344 | d->irrevocable_blocks_normal); | |
5345 | } | |
5346 | ||
5347 | if (need_ssa_rename) | |
5348 | update_ssa (TODO_update_ssa_only_virtuals); | |
5349 | ||
5350 | pop_cfun (); | |
4c0315d0 | 5351 | } |
5352 | ||
5353 | /* Transform the calls within the transactional clone of NODE. */ | |
5354 | ||
5355 | static void | |
5356 | ipa_tm_transform_clone (struct cgraph_node *node) | |
5357 | { | |
3e426b86 | 5358 | struct tm_ipa_cg_data *d; |
4c0315d0 | 5359 | bool need_ssa_rename; |
5360 | ||
3e426b86 | 5361 | d = get_cg_data (&node, true); |
5362 | ||
4c0315d0 | 5363 | /* If this function makes no calls and has no irrevocable blocks, |
5364 | then there's nothing to do. */ | |
5365 | /* ??? Remove non-aborting top-level transactions. */ | |
2670559d | 5366 | if (!node->callees && !node->indirect_calls && !d->irrevocable_blocks_clone) |
4c0315d0 | 5367 | return; |
5368 | ||
02774f2d | 5369 | push_cfun (DECL_STRUCT_FUNCTION (d->clone->decl)); |
4c0315d0 | 5370 | calculate_dominance_info (CDI_DOMINATORS); |
5371 | ||
5372 | need_ssa_rename = | |
34154e27 | 5373 | ipa_tm_transform_calls (d->clone, NULL, |
5374 | single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)), | |
4c0315d0 | 5375 | d->irrevocable_blocks_clone); |
5376 | ||
5377 | if (need_ssa_rename) | |
5378 | update_ssa (TODO_update_ssa_only_virtuals); | |
5379 | ||
5380 | pop_cfun (); | |
4c0315d0 | 5381 | } |
5382 | ||
5383 | /* Main entry point for the transactional memory IPA pass. */ | |
5384 | ||
5385 | static unsigned int | |
5386 | ipa_tm_execute (void) | |
5387 | { | |
9af5ce0c | 5388 | cgraph_node_queue tm_callees = cgraph_node_queue (); |
4c0315d0 | 5389 | /* List of functions that will go irrevocable. */ |
9af5ce0c | 5390 | cgraph_node_queue irr_worklist = cgraph_node_queue (); |
4c0315d0 | 5391 | |
5392 | struct cgraph_node *node; | |
5393 | struct tm_ipa_cg_data *d; | |
5394 | enum availability a; | |
5395 | unsigned int i; | |
5396 | ||
382ecba7 | 5397 | cgraph_node::checking_verify_cgraph_nodes (); |
4c0315d0 | 5398 | |
5399 | bitmap_obstack_initialize (&tm_obstack); | |
0cd02a19 | 5400 | initialize_original_copy_tables (); |
4c0315d0 | 5401 | |
5402 | /* For all local functions marked tm_callable, queue them. */ | |
7c455d87 | 5403 | FOR_EACH_DEFINED_FUNCTION (node) |
02774f2d | 5404 | if (is_tm_callable (node->decl) |
415d1b9a | 5405 | && node->get_availability () >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5406 | { |
3e426b86 | 5407 | d = get_cg_data (&node, true); |
4c0315d0 | 5408 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5409 | } | |
5410 | ||
5411 | /* For all local reachable functions... */ | |
7c455d87 | 5412 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5413 | if (node->lowered |
415d1b9a | 5414 | && node->get_availability () >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5415 | { |
5416 | /* ... marked tm_pure, record that fact for the runtime by | |
5417 | indicating that the pure function is its own tm_callable. | |
5418 | No need to do this if the function's address can't be taken. */ | |
02774f2d | 5419 | if (is_tm_pure (node->decl)) |
4c0315d0 | 5420 | { |
5421 | if (!node->local.local) | |
02774f2d | 5422 | record_tm_clone_pair (node->decl, node->decl); |
4c0315d0 | 5423 | continue; |
5424 | } | |
5425 | ||
02774f2d | 5426 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
4c0315d0 | 5427 | calculate_dominance_info (CDI_DOMINATORS); |
5428 | ||
5429 | tm_region_init (NULL); | |
5430 | if (all_tm_regions) | |
5431 | { | |
3e426b86 | 5432 | d = get_cg_data (&node, true); |
4c0315d0 | 5433 | |
0cd02a19 | 5434 | /* Scan for calls that are in each transaction, and |
5435 | generate the uninstrumented code path. */ | |
4c0315d0 | 5436 | ipa_tm_scan_calls_transaction (d, &tm_callees); |
5437 | ||
40879ac6 | 5438 | /* Put it in the worklist so we can scan the function |
5439 | later (ipa_tm_scan_irr_function) and mark the | |
5440 | irrevocable blocks. */ | |
5441 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5442 | d->want_irr_scan_normal = true; | |
4c0315d0 | 5443 | } |
5444 | ||
5445 | pop_cfun (); | |
4c0315d0 | 5446 | } |
5447 | ||
5448 | /* For every local function on the callee list, scan as if we will be | |
5449 | creating a transactional clone, queueing all new functions we find | |
5450 | along the way. */ | |
f1f41a6c | 5451 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5452 | { |
f1f41a6c | 5453 | node = tm_callees[i]; |
415d1b9a | 5454 | a = node->get_availability (); |
3e426b86 | 5455 | d = get_cg_data (&node, true); |
4c0315d0 | 5456 | |
40879ac6 | 5457 | /* Put it in the worklist so we can scan the function later |
5458 | (ipa_tm_scan_irr_function) and mark the irrevocable | |
5459 | blocks. */ | |
5460 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
4c0315d0 | 5461 | |
5462 | /* Some callees cannot be arbitrarily cloned. These will always be | |
5463 | irrevocable. Mark these now, so that we need not scan them. */ | |
02774f2d | 5464 | if (is_tm_irrevocable (node->decl)) |
4c0315d0 | 5465 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5466 | else if (a <= AVAIL_NOT_AVAILABLE | |
02774f2d | 5467 | && !is_tm_safe_or_pure (node->decl)) |
4c0315d0 | 5468 | ipa_tm_note_irrevocable (node, &irr_worklist); |
415d1b9a | 5469 | else if (a >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5470 | { |
02774f2d | 5471 | if (!tree_versionable_function_p (node->decl)) |
4c0315d0 | 5472 | ipa_tm_note_irrevocable (node, &irr_worklist); |
5473 | else if (!d->is_irrevocable) | |
5474 | { | |
5475 | /* If this is an alias, make sure its base is queued as well. | |
5476 | We need not scan the callees now, as the base will do. */ | |
02774f2d | 5477 | if (node->alias) |
4c0315d0 | 5478 | { |
415d1b9a | 5479 | node = cgraph_node::get (node->thunk.alias); |
3e426b86 | 5480 | d = get_cg_data (&node, true); |
4c0315d0 | 5481 | maybe_push_queue (node, &tm_callees, &d->in_callee_queue); |
5482 | continue; | |
5483 | } | |
5484 | ||
5485 | /* Add all nodes called by this function into | |
5486 | tm_callees as well. */ | |
5487 | ipa_tm_scan_calls_clone (node, &tm_callees); | |
5488 | } | |
5489 | } | |
5490 | } | |
5491 | ||
5492 | /* Iterate scans until no more work to be done. Prefer not to use | |
f1f41a6c | 5493 | vec::pop because the worklist tends to follow a breadth-first |
4c0315d0 | 5494 | search of the callgraph, which should allow convergence with a | |
5495 | minimum number of scans. But we also don't want the worklist | |
5496 | array to grow without bound, so we shift the array up periodically. */ | |
f1f41a6c | 5497 | for (i = 0; i < irr_worklist.length (); ++i) |
4c0315d0 | 5498 | { |
f1f41a6c | 5499 | if (i > 256 && i == irr_worklist.length () / 8) |
4c0315d0 | 5500 | { |
f1f41a6c | 5501 | irr_worklist.block_remove (0, i); |
4c0315d0 | 5502 | i = 0; |
5503 | } | |
5504 | ||
f1f41a6c | 5505 | node = irr_worklist[i]; |
3e426b86 | 5506 | d = get_cg_data (&node, true); |
4c0315d0 | 5507 | d->in_worklist = false; |
5508 | ||
5509 | if (d->want_irr_scan_normal) | |
5510 | { | |
5511 | d->want_irr_scan_normal = false; | |
5512 | ipa_tm_scan_irr_function (node, false); | |
5513 | } | |
5514 | if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true)) | |
5515 | ipa_tm_note_irrevocable (node, &irr_worklist); | |
5516 | } | |
5517 | ||
5518 | /* For every function on the callee list, collect the tm_may_enter_irr | |
5519 | bit on the node. */ | |
f1f41a6c | 5520 | irr_worklist.truncate (0); |
5521 | for (i = 0; i < tm_callees.length (); ++i) | |
4c0315d0 | 5522 | { |
f1f41a6c | 5523 | node = tm_callees[i]; |
4c0315d0 | 5524 | if (ipa_tm_mayenterirr_function (node)) |
5525 | { | |
3e426b86 | 5526 | d = get_cg_data (&node, true); |
4c0315d0 | 5527 | gcc_assert (d->in_worklist == false); |
5528 | maybe_push_queue (node, &irr_worklist, &d->in_worklist); | |
5529 | } | |
5530 | } | |
5531 | ||
5532 | /* Propagate the tm_may_enter_irr bit to callers until stable. */ | |
f1f41a6c | 5533 | for (i = 0; i < irr_worklist.length (); ++i) |
4c0315d0 | 5534 | { |
5535 | struct cgraph_node *caller; | |
5536 | struct cgraph_edge *e; | |
e4a2b488 | 5537 | struct ipa_ref *ref; |
4c0315d0 | 5538 | |
f1f41a6c | 5539 | if (i > 256 && i == irr_worklist.length () / 8) |
4c0315d0 | 5540 | { |
f1f41a6c | 5541 | irr_worklist.block_remove (0, i); |
4c0315d0 | 5542 | i = 0; |
5543 | } | |
5544 | ||
f1f41a6c | 5545 | node = irr_worklist[i]; |
3e426b86 | 5546 | d = get_cg_data (&node, true); |
4c0315d0 | 5547 | d->in_worklist = false; |
5548 | node->local.tm_may_enter_irr = true; | |
5549 | ||
5550 | /* Propagate back to normal callers. */ | |
5551 | for (e = node->callers; e ; e = e->next_caller) | |
5552 | { | |
5553 | caller = e->caller; | |
02774f2d | 5554 | if (!is_tm_safe_or_pure (caller->decl) |
4c0315d0 | 5555 | && !caller->local.tm_may_enter_irr) |
5556 | { | |
3e426b86 | 5557 | d = get_cg_data (&caller, true); |
4c0315d0 | 5558 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5559 | } | |
5560 | } | |
5561 | ||
5562 | /* Propagate back to referring aliases as well. */ | |
e4a2b488 | 5563 | FOR_EACH_ALIAS (node, ref) |
4c0315d0 | 5564 | { |
415d1b9a | 5565 | caller = dyn_cast<cgraph_node *> (ref->referring); |
e4a2b488 | 5566 | if (!caller->local.tm_may_enter_irr) |
4c0315d0 | 5567 | { |
3e426b86 | 5568 | /* ?? Do not traverse aliases here. */ |
5569 | d = get_cg_data (&caller, false); | |
4c0315d0 | 5570 | maybe_push_queue (caller, &irr_worklist, &d->in_worklist); |
5571 | } | |
5572 | } | |
5573 | } | |
5574 | ||
5575 | /* Now validate all tm_safe functions, and all atomic regions in | |
5576 | other functions. */ | |
7c455d87 | 5577 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5578 | if (node->lowered |
415d1b9a | 5579 | && node->get_availability () >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5580 | { |
3e426b86 | 5581 | d = get_cg_data (&node, true); |
02774f2d | 5582 | if (is_tm_safe (node->decl)) |
4c0315d0 | 5583 | ipa_tm_diagnose_tm_safe (node); |
5584 | else if (d->all_tm_regions) | |
5585 | ipa_tm_diagnose_transaction (node, d->all_tm_regions); | |
5586 | } | |
5587 | ||
5588 | /* Create clones. Do those that are not irrevocable and have a | |
5589 | positive call count. Do those publicly visible functions that | |
5590 | the user directed us to clone. */ | |
f1f41a6c | 5591 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5592 | { |
5593 | bool doit = false; | |
5594 | ||
f1f41a6c | 5595 | node = tm_callees[i]; |
02774f2d | 5596 | if (node->cpp_implicit_alias) |
4c0315d0 | 5597 | continue; |
5598 | ||
415d1b9a | 5599 | a = node->get_availability (); |
3e426b86 | 5600 | d = get_cg_data (&node, true); |
4c0315d0 | 5601 | |
5602 | if (a <= AVAIL_NOT_AVAILABLE) | |
02774f2d | 5603 | doit = is_tm_callable (node->decl); |
5604 | else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl)) | |
4c0315d0 | 5605 | doit = true; |
5606 | else if (!d->is_irrevocable | |
5607 | && d->tm_callers_normal + d->tm_callers_clone > 0) | |
5608 | doit = true; | |
5609 | ||
5610 | if (doit) | |
5611 | ipa_tm_create_version (node); | |
5612 | } | |
5613 | ||
5614 | /* Redirect calls to the new clones, and insert irrevocable marks. */ | |
f1f41a6c | 5615 | for (i = 0; i < tm_callees.length (); ++i) |
4c0315d0 | 5616 | { |
f1f41a6c | 5617 | node = tm_callees[i]; |
02774f2d | 5618 | if (node->analyzed) |
4c0315d0 | 5619 | { |
3e426b86 | 5620 | d = get_cg_data (&node, true); |
4c0315d0 | 5621 | if (d->clone) |
5622 | ipa_tm_transform_clone (node); | |
5623 | } | |
5624 | } | |
7c455d87 | 5625 | FOR_EACH_DEFINED_FUNCTION (node) |
da751785 | 5626 | if (node->lowered |
415d1b9a | 5627 | && node->get_availability () >= AVAIL_INTERPOSABLE) |
4c0315d0 | 5628 | { |
3e426b86 | 5629 | d = get_cg_data (&node, true); |
4c0315d0 | 5630 | if (d->all_tm_regions) |
5631 | ipa_tm_transform_transaction (node); | |
5632 | } | |
5633 | ||
5634 | /* Free and clear all data structures. */ | |
f1f41a6c | 5635 | tm_callees.release (); |
5636 | irr_worklist.release (); | |
4c0315d0 | 5637 | bitmap_obstack_release (&tm_obstack); |
0cd02a19 | 5638 | free_original_copy_tables (); |
4c0315d0 | 5639 | |
7c455d87 | 5640 | FOR_EACH_FUNCTION (node) |
02774f2d | 5641 | node->aux = NULL; |
4c0315d0 | 5642 | |
382ecba7 | 5643 | cgraph_node::checking_verify_cgraph_nodes (); |
4c0315d0 | 5644 | |
5645 | return 0; | |
5646 | } | |
5647 | ||
7620bc82 | 5648 | namespace { |
5649 | ||
5650 | const pass_data pass_data_ipa_tm = | |
cbe8bda8 | 5651 | { |
5652 | SIMPLE_IPA_PASS, /* type */ | |
5653 | "tmipa", /* name */ | |
5654 | OPTGROUP_NONE, /* optinfo_flags */ | |
cbe8bda8 | 5655 | TV_TRANS_MEM, /* tv_id */ |
5656 | ( PROP_ssa | PROP_cfg ), /* properties_required */ | |
5657 | 0, /* properties_provided */ | |
5658 | 0, /* properties_destroyed */ | |
5659 | 0, /* todo_flags_start */ | |
5660 | 0, /* todo_flags_finish */ | |
4c0315d0 | 5661 | }; |
5662 | ||
7620bc82 | 5663 | class pass_ipa_tm : public simple_ipa_opt_pass |
cbe8bda8 | 5664 | { |
5665 | public: | |
9af5ce0c | 5666 | pass_ipa_tm (gcc::context *ctxt) |
5667 | : simple_ipa_opt_pass (pass_data_ipa_tm, ctxt) | |
cbe8bda8 | 5668 | {} |
5669 | ||
5670 | /* opt_pass methods: */ | |
31315c24 | 5671 | virtual bool gate (function *) { return flag_tm; } |
65b0537f | 5672 | virtual unsigned int execute (function *) { return ipa_tm_execute (); } |
cbe8bda8 | 5673 | |
5674 | }; // class pass_ipa_tm | |
5675 | ||
7620bc82 | 5676 | } // anon namespace |
5677 | ||
cbe8bda8 | 5678 | simple_ipa_opt_pass * |
5679 | make_pass_ipa_tm (gcc::context *ctxt) | |
5680 | { | |
5681 | return new pass_ipa_tm (ctxt); | |
5682 | } | |
5683 | ||
4c0315d0 | 5684 | #include "gt-trans-mem.h" |